Sync to upstream/release/573 (#903)

* Work toward enabling parallel type checking
* The interface to `LazyType` has changed:
* `LazyType` now takes a second callback that is passed the `LazyType&`
itself. This new callback is responsible for populating the field
`TypeId LazyType::unwrapped`. Multithreaded implementations should
acquire a lock in this callback.
* Modules now retain their `humanReadableNames`. This reduces the number
of cases where type checking has to call back to a `ModuleResolver`.
* https://github.com/Roblox/luau/pull/902
* Add timing info to the Luau REPL compilation output

We've also fixed some bugs and crashes in the new solver as we march
toward readiness.
* Thread ICEs (Internal Compiler Errors) back to the Frontend properly
* Refinements are no longer applied to lvalues
* More miscellaneous stability improvements

Lots of activity in the new JIT engine:

* Implement register spilling/restore for A64
* Correct Luau IR value restore location tracking
* Fixed use-after-free in x86 register allocator spill restore
* Use btz for bit tests
* Finish branch assembly support for A64
* Codesize and performance improvements for A64
* The bit32 library has been implemented for ARM (A64) and x64

---------

Co-authored-by: Arseny Kapoulkine <arseny.kapoulkine@gmail.com>
Co-authored-by: Vyacheslav Egorov <vegorov@roblox.com>
This commit is contained in:
Andy Friesen 2023-04-21 15:14:26 -07:00 committed by GitHub
parent 8ed808eb52
commit fe7621ee8c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
69 changed files with 2657 additions and 640 deletions

View File

@ -60,7 +60,6 @@ struct ConstraintGraphBuilder
// define the scope hierarchy.
std::vector<std::pair<Location, ScopePtr>> scopes;
ModuleName moduleName;
ModulePtr module;
NotNull<BuiltinTypes> builtinTypes;
const NotNull<TypeArena> arena;
@ -94,9 +93,8 @@ struct ConstraintGraphBuilder
ScopePtr globalScope;
DcrLogger* logger;
ConstraintGraphBuilder(const ModuleName& moduleName, ModulePtr module, TypeArena* arena, NotNull<ModuleResolver> moduleResolver,
NotNull<BuiltinTypes> builtinTypes, NotNull<InternalErrorReporter> ice, const ScopePtr& globalScope, DcrLogger* logger,
NotNull<DataFlowGraph> dfg);
ConstraintGraphBuilder(ModulePtr module, TypeArena* arena, NotNull<ModuleResolver> moduleResolver, NotNull<BuiltinTypes> builtinTypes,
NotNull<InternalErrorReporter> ice, const ScopePtr& globalScope, DcrLogger* logger, NotNull<DataFlowGraph> dfg);
/**
* Fabricates a new free type belonging to a given scope.

View File

@ -49,7 +49,7 @@ struct HashInstantiationSignature
struct ConstraintSolver
{
TypeArena* arena;
NotNull<TypeArena> arena;
NotNull<BuiltinTypes> builtinTypes;
InternalErrorReporter iceReporter;
NotNull<Normalizer> normalizer;

View File

@ -8,6 +8,8 @@
#include "Luau/Scope.h"
#include "Luau/TypeInfer.h"
#include "Luau/Variant.h"
#include <mutex>
#include <string>
#include <vector>
#include <optional>
@ -67,6 +69,7 @@ struct SourceNode
}
ModuleName name;
std::string humanReadableName;
std::unordered_set<ModuleName> requireSet;
std::vector<std::pair<ModuleName, Location>> requireLocations;
bool dirtySourceModule = true;
@ -114,7 +117,13 @@ struct FrontendModuleResolver : ModuleResolver
std::optional<ModuleInfo> resolveModuleInfo(const ModuleName& currentModuleName, const AstExpr& pathExpr) override;
std::string getHumanReadableModuleName(const ModuleName& moduleName) const override;
void setModule(const ModuleName& moduleName, ModulePtr module);
void clearModules();
private:
Frontend* frontend;
mutable std::mutex moduleMutex;
std::unordered_map<ModuleName, ModulePtr> modules;
};

View File

@ -28,7 +28,9 @@ class AstTypePack;
/// Root of the AST of a parsed source file
struct SourceModule
{
ModuleName name; // DataModel path if possible. Filename if not.
ModuleName name; // Module identifier or a filename
std::string humanReadableName;
SourceCode::Type type = SourceCode::None;
std::optional<std::string> environmentName;
bool cyclic = false;
@ -63,6 +65,9 @@ struct Module
{
~Module();
ModuleName name;
std::string humanReadableName;
TypeArena interfaceTypes;
TypeArena internalTypes;

View File

@ -10,6 +10,7 @@
#include "Luau/Unifiable.h"
#include "Luau/Variant.h"
#include <atomic>
#include <deque>
#include <map>
#include <memory>
@ -550,7 +551,50 @@ struct IntersectionType
// A type whose actual value is not computed until it is first needed.
// Per this sync: `thunk` is renamed `thunk_DEPRECATED`, and a second
// callback `unwrap` is added that receives the LazyType& itself and is
// responsible for populating the atomic `unwrapped` field. Multithreaded
// implementations are expected to acquire a lock inside `unwrap`.
struct LazyType
{
// NOTE(review): this looks like the pre-rename declaration of
// `thunk_DEPRECATED` (below) left over from the diff rendering — confirm.
std::function<TypeId()> thunk;
LazyType() = default;
LazyType(std::function<TypeId()> thunk_DEPRECATED, std::function<TypeId(LazyType&)> unwrap)
: thunk_DEPRECATED(thunk_DEPRECATED)
, unwrap(unwrap)
{
}
// std::atomic is sad and requires a manual copy
LazyType(const LazyType& rhs)
: thunk_DEPRECATED(rhs.thunk_DEPRECATED)
, unwrap(rhs.unwrap)
, unwrapped(rhs.unwrapped.load())
{
}
// The "move" constructor still copies `unwrapped`: std::atomic is neither
// copyable nor movable, so its current value is transferred via load().
LazyType(LazyType&& rhs) noexcept
: thunk_DEPRECATED(std::move(rhs.thunk_DEPRECATED))
, unwrap(std::move(rhs.unwrap))
, unwrapped(rhs.unwrapped.load())
{
}
LazyType& operator=(const LazyType& rhs)
{
thunk_DEPRECATED = rhs.thunk_DEPRECATED;
unwrap = rhs.unwrap;
unwrapped = rhs.unwrapped.load();
return *this;
}
LazyType& operator=(LazyType&& rhs) noexcept
{
thunk_DEPRECATED = std::move(rhs.thunk_DEPRECATED);
unwrap = std::move(rhs.unwrap);
unwrapped = rhs.unwrapped.load();
return *this;
}
// Legacy eager callback; superseded by `unwrap`.
std::function<TypeId()> thunk_DEPRECATED;
// Resolves this LazyType; must fill in `unwrapped` before returning
// (follow() throws an ICE if it does not).
std::function<TypeId(LazyType&)> unwrap;
// Cached result of `unwrap`; nullptr until the type has been forced.
// Atomic so concurrent readers can observe it safely.
std::atomic<TypeId> unwrapped = nullptr;
};
struct UnknownType
@ -798,7 +842,7 @@ struct TypeIterator
TypeIterator<T> operator++(int)
{
TypeIterator<T> copy = *this;
++copy;
++*this;
return copy;
}

View File

@ -12,6 +12,6 @@ namespace Luau
struct DcrLogger;
struct BuiltinTypes;
void check(NotNull<BuiltinTypes> builtinTypes, DcrLogger* logger, const SourceModule& sourceModule, Module* module);
void check(NotNull<BuiltinTypes> builtinTypes, NotNull<struct UnifierSharedState> sharedState, DcrLogger* logger, const SourceModule& sourceModule, Module* module);
} // namespace Luau

View File

@ -372,7 +372,6 @@ public:
ModuleResolver* resolver;
ModulePtr currentModule;
ModuleName currentModuleName;
std::function<void(const ModuleName&, const ScopePtr&)> prepareModuleScope;
NotNull<BuiltinTypes> builtinTypes;

View File

@ -9,6 +9,7 @@
#include "Luau/Type.h"
LUAU_FASTINT(LuauVisitRecursionLimit)
LUAU_FASTFLAG(LuauBoundLazyTypes)
namespace Luau
{
@ -291,9 +292,14 @@ struct GenericTypeVisitor
traverse(partTy);
}
}
else if (get<LazyType>(ty))
else if (auto ltv = get<LazyType>(ty))
{
// Visiting into LazyType may necessarily cause infinite expansion, so we don't do that on purpose.
if (FFlag::LuauBoundLazyTypes)
{
if (TypeId unwrapped = ltv->unwrapped)
traverse(unwrapped);
}
// Visiting into LazyType that hasn't been unwrapped may necessarily cause infinite expansion, so we don't do that on purpose.
// Asserting also makes no sense, because the type _will_ happen here, most likely as a property of some ClassType
// that doesn't need to be expanded.
}

View File

@ -606,7 +606,7 @@ static std::optional<WithPredicate<TypePackId>> magicFunctionRequire(
if (!checkRequirePath(typechecker, expr.args.data[0]))
return std::nullopt;
if (auto moduleInfo = typechecker.resolver->resolveModuleInfo(typechecker.currentModuleName, expr))
if (auto moduleInfo = typechecker.resolver->resolveModuleInfo(typechecker.currentModule->name, expr))
return WithPredicate<TypePackId>{arena.addTypePack({typechecker.checkRequire(scope, *moduleInfo, expr.location)})};
return std::nullopt;

View File

@ -325,7 +325,14 @@ void TypeCloner::operator()(const IntersectionType& t)
// Clones a LazyType. If the lazy type has already been unwrapped, clone the
// resolved type instead and record that clone as the mapping for this type
// id; otherwise keep it lazy with a shallow default clone.
void TypeCloner::operator()(const LazyType& t)
{
if (TypeId unwrapped = t.unwrapped.load())
{
// Map the original id directly to the clone of the *resolved* type so
// later lookups bypass the lazy wrapper entirely.
seenTypes[typeId] = clone(unwrapped, dest, cloneState);
}
else
{
defaultClone(t);
}
}
void TypeCloner::operator()(const UnknownType& t)

View File

@ -133,11 +133,10 @@ void forEachConstraint(const Checkpoint& start, const Checkpoint& end, const Con
} // namespace
ConstraintGraphBuilder::ConstraintGraphBuilder(const ModuleName& moduleName, ModulePtr module, TypeArena* arena,
NotNull<ModuleResolver> moduleResolver, NotNull<BuiltinTypes> builtinTypes, NotNull<InternalErrorReporter> ice, const ScopePtr& globalScope,
DcrLogger* logger, NotNull<DataFlowGraph> dfg)
: moduleName(moduleName)
, module(module)
ConstraintGraphBuilder::ConstraintGraphBuilder(ModulePtr module, TypeArena* arena, NotNull<ModuleResolver> moduleResolver,
NotNull<BuiltinTypes> builtinTypes, NotNull<InternalErrorReporter> ice, const ScopePtr& globalScope, DcrLogger* logger,
NotNull<DataFlowGraph> dfg)
: module(module)
, builtinTypes(builtinTypes)
, arena(arena)
, rootScope(nullptr)
@ -599,7 +598,7 @@ ControlFlow ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocal* l
{
AstExpr* require = *maybeRequire;
if (auto moduleInfo = moduleResolver->resolveModuleInfo(moduleName, *require))
if (auto moduleInfo = moduleResolver->resolveModuleInfo(module->name, *require))
{
const Name name{local->vars.data[i]->name.value};
@ -1043,7 +1042,7 @@ ControlFlow ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatDeclareC
Name className(declaredClass->name.value);
TypeId classTy = arena->addType(ClassType(className, {}, superTy, std::nullopt, {}, {}, moduleName));
TypeId classTy = arena->addType(ClassType(className, {}, superTy, std::nullopt, {}, {}, module->name));
ClassType* ctv = getMutable<ClassType>(classTy);
TypeId metaTy = arena->addType(TableType{TableState::Sealed, scope->level, scope.get()});
@ -2609,7 +2608,7 @@ Inference ConstraintGraphBuilder::flattenPack(const ScopePtr& scope, Location lo
void ConstraintGraphBuilder::reportError(Location location, TypeErrorData err)
{
errors.push_back(TypeError{location, moduleName, std::move(err)});
errors.push_back(TypeError{location, module->name, std::move(err)});
if (logger)
logger->captureGenerationError(errors.back());
@ -2617,7 +2616,7 @@ void ConstraintGraphBuilder::reportError(Location location, TypeErrorData err)
void ConstraintGraphBuilder::reportCodeTooComplex(Location location)
{
errors.push_back(TypeError{location, moduleName, CodeTooComplex{}});
errors.push_back(TypeError{location, module->name, CodeTooComplex{}});
if (logger)
logger->captureGenerationError(errors.back());

View File

@ -18,6 +18,7 @@
#include "Luau/VisitType.h"
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolver, false);
LUAU_FASTFLAG(LuauRequirePathTrueModuleName)
namespace Luau
{
@ -1989,7 +1990,7 @@ static TypePackId getErrorType(NotNull<BuiltinTypes> builtinTypes, TypePackId)
template <typename TID>
bool ConstraintSolver::tryUnify(NotNull<const Constraint> constraint, TID subTy, TID superTy)
{
Unifier u{normalizer, Mode::Strict, constraint->scope, Location{}, Covariant};
Unifier u{normalizer, Mode::Strict, constraint->scope, constraint->location, Covariant};
u.useScopes = true;
u.tryUnify(subTy, superTy);
@ -2257,11 +2258,9 @@ TypeId ConstraintSolver::resolveModule(const ModuleInfo& info, const Location& l
return errorRecoveryType();
}
std::string humanReadableName = moduleResolver->getHumanReadableModuleName(info.name);
for (const auto& [location, path] : requireCycles)
{
if (!path.empty() && path.front() == humanReadableName)
if (!path.empty() && path.front() == (FFlag::LuauRequirePathTrueModuleName ? info.name : moduleResolver->getHumanReadableModuleName(info.name)))
return builtinTypes->anyType;
}
@ -2269,14 +2268,14 @@ TypeId ConstraintSolver::resolveModule(const ModuleInfo& info, const Location& l
if (!module)
{
if (!moduleResolver->moduleExists(info.name) && !info.optional)
reportError(UnknownRequire{humanReadableName}, location);
reportError(UnknownRequire{moduleResolver->getHumanReadableModuleName(info.name)}, location);
return errorRecoveryType();
}
if (module->type != SourceCode::Type::Module)
{
reportError(IllegalRequire{humanReadableName, "Module is not a ModuleScript. It cannot be required."}, location);
reportError(IllegalRequire{module->humanReadableName, "Module is not a ModuleScript. It cannot be required."}, location);
return errorRecoveryType();
}
@ -2287,7 +2286,7 @@ TypeId ConstraintSolver::resolveModule(const ModuleInfo& info, const Location& l
std::optional<TypeId> moduleType = first(modulePack);
if (!moduleType)
{
reportError(IllegalRequire{humanReadableName, "Module does not return exactly 1 value. It cannot be required."}, location);
reportError(IllegalRequire{module->humanReadableName, "Module does not return exactly 1 value. It cannot be required."}, location);
return errorRecoveryType();
}

View File

@ -11,6 +11,7 @@
#include <type_traits>
LUAU_FASTFLAGVARIABLE(LuauTypeMismatchInvarianceInError, false)
LUAU_FASTFLAGVARIABLE(LuauRequirePathTrueModuleName, false)
static std::string wrongNumberOfArgsString(
size_t expectedCount, std::optional<size_t> maximumCount, size_t actualCount, const char* argPrefix = nullptr, bool isVariadic = false)
@ -349,6 +350,9 @@ struct ErrorConverter
else
s += " -> ";
if (FFlag::LuauRequirePathTrueModuleName && fileResolver != nullptr)
s += fileResolver->getHumanReadableModuleName(name);
else
s += name;
}

View File

@ -33,6 +33,7 @@ LUAU_FASTINTVARIABLE(LuauAutocompleteCheckTimeoutMs, 100)
LUAU_FASTFLAGVARIABLE(DebugLuauDeferredConstraintResolution, false)
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolverToJson, false)
LUAU_FASTFLAGVARIABLE(LuauOnDemandTypecheckers, false)
LUAU_FASTFLAG(LuauRequirePathTrueModuleName)
namespace Luau
{
@ -245,7 +246,7 @@ namespace
{
static ErrorVec accumulateErrors(
const std::unordered_map<ModuleName, SourceNode>& sourceNodes, const std::unordered_map<ModuleName, ModulePtr>& modules, const ModuleName& name)
const std::unordered_map<ModuleName, SourceNode>& sourceNodes, ModuleResolver& moduleResolver, const ModuleName& name)
{
std::unordered_set<ModuleName> seen;
std::vector<ModuleName> queue{name};
@ -271,11 +272,11 @@ static ErrorVec accumulateErrors(
// FIXME: If a module has a syntax error, we won't be able to re-report it here.
// The solution is probably to move errors from Module to SourceNode
auto it2 = modules.find(next);
if (it2 == modules.end())
auto modulePtr = moduleResolver.getModule(next);
if (!modulePtr)
continue;
Module& module = *it2->second;
Module& module = *modulePtr;
std::sort(module.errors.begin(), module.errors.end(), [](const TypeError& e1, const TypeError& e2) -> bool {
return e1.location.begin > e2.location.begin;
@ -345,9 +346,9 @@ std::vector<RequireCycle> getRequireCycles(
if (top == start)
{
for (const SourceNode* node : path)
cycle.push_back(resolver->getHumanReadableModuleName(node->name));
cycle.push_back(FFlag::LuauRequirePathTrueModuleName ? node->name : node->humanReadableName);
cycle.push_back(resolver->getHumanReadableModuleName(top->name));
cycle.push_back(FFlag::LuauRequirePathTrueModuleName ? top->name : top->humanReadableName);
break;
}
}
@ -415,11 +416,6 @@ Frontend::Frontend(FileResolver* fileResolver, ConfigResolver* configResolver, c
{
}
FrontendModuleResolver::FrontendModuleResolver(Frontend* frontend)
: frontend(frontend)
{
}
CheckResult Frontend::check(const ModuleName& name, std::optional<FrontendOptions> optionOverride)
{
LUAU_TIMETRACE_SCOPE("Frontend::check", "Frontend");
@ -428,31 +424,21 @@ CheckResult Frontend::check(const ModuleName& name, std::optional<FrontendOption
FrontendOptions frontendOptions = optionOverride.value_or(options);
CheckResult checkResult;
FrontendModuleResolver& resolver = frontendOptions.forAutocomplete ? moduleResolverForAutocomplete : moduleResolver;
auto it = sourceNodes.find(name);
if (it != sourceNodes.end() && !it->second.hasDirtyModule(frontendOptions.forAutocomplete))
{
// No recheck required.
if (frontendOptions.forAutocomplete)
{
auto it2 = moduleResolverForAutocomplete.modules.find(name);
if (it2 == moduleResolverForAutocomplete.modules.end() || it2->second == nullptr)
throw InternalCompilerError("Frontend::modules does not have data for " + name, name);
}
else
{
auto it2 = moduleResolver.modules.find(name);
if (it2 == moduleResolver.modules.end() || it2->second == nullptr)
throw InternalCompilerError("Frontend::modules does not have data for " + name, name);
}
ModulePtr module = resolver.getModule(name);
std::unordered_map<ModuleName, ModulePtr>& modules =
frontendOptions.forAutocomplete ? moduleResolverForAutocomplete.modules : moduleResolver.modules;
if (!module)
throw InternalCompilerError("Frontend::modules does not have data for " + name, name);
checkResult.errors = accumulateErrors(sourceNodes, modules, name);
checkResult.errors = accumulateErrors(sourceNodes, resolver, name);
// Get lint result only for top checked module
if (auto it = modules.find(name); it != modules.end())
checkResult.lintResult = it->second->lintResult;
checkResult.lintResult = module->lintResult;
return checkResult;
}
@ -556,7 +542,7 @@ CheckResult Frontend::check(const ModuleName& name, std::optional<FrontendOption
/*recordJsonLog*/ false, typeCheckLimits);
}
moduleResolverForAutocomplete.modules[moduleName] = moduleForAutocomplete;
resolver.setModule(moduleName, moduleForAutocomplete);
double duration = getTimestamp() - timestamp;
@ -664,16 +650,13 @@ CheckResult Frontend::check(const ModuleName& name, std::optional<FrontendOption
checkResult.errors.insert(checkResult.errors.end(), module->errors.begin(), module->errors.end());
moduleResolver.modules[moduleName] = std::move(module);
resolver.setModule(moduleName, std::move(module));
sourceNode.dirtyModule = false;
}
// Get lint result only for top checked module
std::unordered_map<ModuleName, ModulePtr>& modules =
frontendOptions.forAutocomplete ? moduleResolverForAutocomplete.modules : moduleResolver.modules;
if (auto it = modules.find(name); it != modules.end())
checkResult.lintResult = it->second->lintResult;
if (ModulePtr module = resolver.getModule(name))
checkResult.lintResult = module->lintResult;
return checkResult;
}
@ -817,7 +800,7 @@ bool Frontend::isDirty(const ModuleName& name, bool forAutocomplete) const
*/
void Frontend::markDirty(const ModuleName& name, std::vector<ModuleName>* markedDirty)
{
if (!moduleResolver.modules.count(name) && !moduleResolverForAutocomplete.modules.count(name))
if (!moduleResolver.getModule(name) && !moduleResolverForAutocomplete.getModule(name))
return;
std::unordered_map<ModuleName, std::vector<ModuleName>> reverseDeps;
@ -884,13 +867,15 @@ ModulePtr check(const SourceModule& sourceModule, const std::vector<RequireCycle
const ScopePtr& parentScope, FrontendOptions options, bool recordJsonLog)
{
ModulePtr result = std::make_shared<Module>();
result->name = sourceModule.name;
result->humanReadableName = sourceModule.humanReadableName;
result->reduction = std::make_unique<TypeReduction>(NotNull{&result->internalTypes}, builtinTypes, iceHandler);
std::unique_ptr<DcrLogger> logger;
if (recordJsonLog)
{
logger = std::make_unique<DcrLogger>();
std::optional<SourceCode> source = fileResolver->readSource(sourceModule.name);
std::optional<SourceCode> source = fileResolver->readSource(result->name);
if (source)
{
logger->captureSource(source->source);
@ -906,7 +891,6 @@ ModulePtr check(const SourceModule& sourceModule, const std::vector<RequireCycle
Normalizer normalizer{&result->internalTypes, builtinTypes, NotNull{&unifierState}};
ConstraintGraphBuilder cgb{
sourceModule.name,
result,
&result->internalTypes,
moduleResolver,
@ -920,8 +904,8 @@ ModulePtr check(const SourceModule& sourceModule, const std::vector<RequireCycle
cgb.visit(sourceModule.root);
result->errors = std::move(cgb.errors);
ConstraintSolver cs{NotNull{&normalizer}, NotNull(cgb.rootScope), borrowConstraints(cgb.constraints), sourceModule.name, moduleResolver,
requireCycles, logger.get()};
ConstraintSolver cs{
NotNull{&normalizer}, NotNull(cgb.rootScope), borrowConstraints(cgb.constraints), result->name, moduleResolver, requireCycles, logger.get()};
if (options.randomizeConstraintResolutionSeed)
cs.randomize(*options.randomizeConstraintResolutionSeed);
@ -936,7 +920,7 @@ ModulePtr check(const SourceModule& sourceModule, const std::vector<RequireCycle
result->clonePublicInterface(builtinTypes, *iceHandler);
Luau::check(builtinTypes, logger.get(), sourceModule, result.get());
Luau::check(builtinTypes, NotNull{&unifierState}, logger.get(), sourceModule, result.get());
// Ideally we freeze the arenas before the call into Luau::check, but TypeReduction
// needs to allocate new types while Luau::check is in progress, so here we are.
@ -1033,7 +1017,8 @@ std::pair<SourceNode*, SourceModule*> Frontend::getSourceNode(const ModuleName&
sourceModule = std::move(result);
sourceModule.environmentName = environmentName;
sourceNode.name = name;
sourceNode.name = sourceModule.name;
sourceNode.humanReadableName = sourceModule.humanReadableName;
sourceNode.requireSet.clear();
sourceNode.requireLocations.clear();
sourceNode.dirtySourceModule = false;
@ -1095,6 +1080,7 @@ SourceModule Frontend::parse(const ModuleName& name, std::string_view src, const
}
sourceModule.name = name;
sourceModule.humanReadableName = fileResolver->getHumanReadableModuleName(name);
if (parseOptions.captureComments)
{
@ -1105,6 +1091,12 @@ SourceModule Frontend::parse(const ModuleName& name, std::string_view src, const
return sourceModule;
}
// Binds this resolver to the Frontend it serves; the stored raw pointer is
// not owned (the Frontend outlives its resolvers).
FrontendModuleResolver::FrontendModuleResolver(Frontend* frontend)
: frontend(frontend)
{
}
std::optional<ModuleInfo> FrontendModuleResolver::resolveModuleInfo(const ModuleName& currentModuleName, const AstExpr& pathExpr)
{
// FIXME I think this can be pushed into the FileResolver.
@ -1129,6 +1121,8 @@ std::optional<ModuleInfo> FrontendModuleResolver::resolveModuleInfo(const Module
const ModulePtr FrontendModuleResolver::getModule(const ModuleName& moduleName) const
{
std::scoped_lock lock(moduleMutex);
auto it = modules.find(moduleName);
if (it != modules.end())
return it->second;
@ -1146,6 +1140,20 @@ std::string FrontendModuleResolver::getHumanReadableModuleName(const ModuleName&
return frontend->fileResolver->getHumanReadableModuleName(moduleName);
}
// Records (or replaces) the checked module for `moduleName`.
// `module` is taken by value and moved into the map, avoiding an extra
// shared_ptr refcount bump.
void FrontendModuleResolver::setModule(const ModuleName& moduleName, ModulePtr module)
{
// moduleMutex guards `modules`, which getModule() may read concurrently.
std::scoped_lock lock(moduleMutex);
modules[moduleName] = std::move(module);
}
// Drops every cached module (used by Frontend::clear()).
void FrontendModuleResolver::clearModules()
{
// Same lock as getModule()/setModule(): the map may be accessed from
// multiple threads.
std::scoped_lock lock(moduleMutex);
modules.clear();
}
ScopePtr Frontend::addEnvironment(const std::string& environmentName)
{
LUAU_ASSERT(environments.count(environmentName) == 0);
@ -1208,8 +1216,8 @@ void Frontend::clear()
{
sourceNodes.clear();
sourceModules.clear();
moduleResolver.modules.clear();
moduleResolverForAutocomplete.modules.clear();
moduleResolver.clearModules();
moduleResolverForAutocomplete.clearModules();
requireTrace.clear();
}

View File

@ -257,7 +257,7 @@ void Tarjan::visitChildren(TypeId ty, int index)
}
else if (const ClassType* ctv = get<ClassType>(ty); FFlag::LuauClassTypeVarsInSubstitution && ctv)
{
for (auto [name, prop] : ctv->props)
for (const auto& [name, prop] : ctv->props)
visitChild(prop.type);
if (ctv->parent)

View File

@ -833,10 +833,17 @@ struct TypeStringifier
}
// Stringifies a LazyType. If it has already been forced, print the resolved
// type; otherwise emit the "lazy?" placeholder and mark the result invalid
// (we deliberately do not force the type here — presumably to avoid
// expansion/recursion during stringification; see the LazyType visitor note).
void operator()(TypeId, const LazyType& ltv)
{
if (TypeId unwrapped = ltv.unwrapped.load())
{
stringify(unwrapped);
}
else
{
state.result.invalid = true;
state.emit("lazy?");
}
}
void operator()(TypeId, const UnknownType& ttv)
{

View File

@ -26,6 +26,7 @@ LUAU_FASTINTVARIABLE(LuauTableTypeMaximumStringifierLength, 0)
LUAU_FASTINT(LuauTypeInferRecursionLimit)
LUAU_FASTFLAG(LuauInstantiateInSubtyping)
LUAU_FASTFLAG(LuauNormalizeBlockedTypes)
LUAU_FASTFLAGVARIABLE(LuauBoundLazyTypes, false)
namespace Luau
{
@ -56,18 +57,51 @@ TypeId follow(TypeId t)
TypeId follow(TypeId t, std::function<TypeId(TypeId)> mapper)
{
auto advance = [&mapper](TypeId ty) -> std::optional<TypeId> {
if (FFlag::LuauBoundLazyTypes)
{
TypeId mapped = mapper(ty);
if (auto btv = get<Unifiable::Bound<TypeId>>(mapped))
return btv->boundTo;
if (auto ttv = get<TableType>(mapped))
return ttv->boundTo;
if (auto ltv = getMutable<LazyType>(mapped))
{
TypeId unwrapped = ltv->unwrapped.load();
if (unwrapped)
return unwrapped;
unwrapped = ltv->unwrap(*ltv);
if (!unwrapped)
throw InternalCompilerError("Lazy Type didn't fill in unwrapped type field");
if (get<LazyType>(unwrapped))
throw InternalCompilerError("Lazy Type cannot resolve to another Lazy Type");
return unwrapped;
}
return std::nullopt;
}
else
{
if (auto btv = get<Unifiable::Bound<TypeId>>(mapper(ty)))
return btv->boundTo;
else if (auto ttv = get<TableType>(mapper(ty)))
return ttv->boundTo;
else
return std::nullopt;
}
};
auto force = [&mapper](TypeId ty) {
if (auto ltv = get_if<LazyType>(&mapper(ty)->ty))
{
TypeId res = ltv->thunk();
TypeId res = ltv->thunk_DEPRECATED();
if (get<LazyType>(res))
throw InternalCompilerError("Lazy Type cannot resolve to another Lazy Type");
@ -75,6 +109,7 @@ TypeId follow(TypeId t, std::function<TypeId(TypeId)> mapper)
}
};
if (!FFlag::LuauBoundLazyTypes)
force(t);
TypeId cycleTester = t; // Null once we've determined that there is no cycle
@ -85,7 +120,9 @@ TypeId follow(TypeId t, std::function<TypeId(TypeId)> mapper)
while (true)
{
if (!FFlag::LuauBoundLazyTypes)
force(t);
auto a1 = advance(t);
if (a1)
t = *a1;

View File

@ -344,6 +344,9 @@ public:
}
// Converts a LazyType back to an AST type annotation. An already-unwrapped
// lazy type is rendered as its resolved type; an unforced one becomes the
// "<Lazy?>" placeholder reference.
AstType* operator()(const LazyType& ltv)
{
if (TypeId unwrapped = ltv.unwrapped.load())
return Luau::visit(*this, unwrapped->ty);
return allocator->alloc<AstTypeReference>(Location(), std::nullopt, AstName("<Lazy?>"), std::nullopt, Location());
}
AstType* operator()(const UnknownType& ttv)

View File

@ -88,21 +88,22 @@ struct TypeChecker2
{
NotNull<BuiltinTypes> builtinTypes;
DcrLogger* logger;
InternalErrorReporter ice; // FIXME accept a pointer from Frontend
NotNull<InternalErrorReporter> ice;
const SourceModule* sourceModule;
Module* module;
TypeArena testArena;
std::vector<NotNull<Scope>> stack;
UnifierSharedState sharedState{&ice};
Normalizer normalizer{&testArena, builtinTypes, NotNull{&sharedState}};
Normalizer normalizer;
TypeChecker2(NotNull<BuiltinTypes> builtinTypes, DcrLogger* logger, const SourceModule* sourceModule, Module* module)
TypeChecker2(NotNull<BuiltinTypes> builtinTypes, NotNull<UnifierSharedState> unifierState, DcrLogger* logger, const SourceModule* sourceModule, Module* module)
: builtinTypes(builtinTypes)
, logger(logger)
, ice(unifierState->iceHandler)
, sourceModule(sourceModule)
, module(module)
, normalizer{&testArena, builtinTypes, unifierState}
{
}
@ -996,7 +997,7 @@ struct TypeChecker2
}
if (!fst)
ice.ice("UnionType had no elements, so fst is nullopt?");
ice->ice("UnionType had no elements, so fst is nullopt?");
if (std::optional<TypeId> instantiatedFunctionType = instantiation.substitute(*fst))
{
@ -1018,7 +1019,7 @@ struct TypeChecker2
{
AstExprIndexName* indexExpr = call->func->as<AstExprIndexName>();
if (!indexExpr)
ice.ice("method call expression has no 'self'");
ice->ice("method call expression has no 'self'");
args.head.push_back(lookupType(indexExpr->expr));
argLocs.push_back(indexExpr->expr->location);
@ -1646,7 +1647,7 @@ struct TypeChecker2
else if (finite(pack) && size(pack) == 0)
return builtinTypes->nilType; // `(f())` where `f()` returns no values is coerced into `nil`
else
ice.ice("flattenPack got a weird pack!");
ice->ice("flattenPack got a weird pack!");
}
void visitGenerics(AstArray<AstGenericType> generics, AstArray<AstGenericTypePack> genericPacks)
@ -2012,7 +2013,7 @@ struct TypeChecker2
void reportError(TypeErrorData data, const Location& location)
{
module->errors.emplace_back(location, sourceModule->name, std::move(data));
module->errors.emplace_back(location, module->name, std::move(data));
if (logger)
logger->captureTypeCheckError(module->errors.back());
@ -2160,9 +2161,9 @@ struct TypeChecker2
}
};
void check(NotNull<BuiltinTypes> builtinTypes, DcrLogger* logger, const SourceModule& sourceModule, Module* module)
void check(NotNull<BuiltinTypes> builtinTypes, NotNull<UnifierSharedState> unifierState, DcrLogger* logger, const SourceModule& sourceModule, Module* module)
{
TypeChecker2 typeChecker{builtinTypes, logger, &sourceModule, module};
TypeChecker2 typeChecker{builtinTypes, unifierState, logger, &sourceModule, module};
typeChecker.reduceTypes();
typeChecker.visit(sourceModule.root);

View File

@ -33,7 +33,6 @@ LUAU_FASTINTVARIABLE(LuauCheckRecursionLimit, 300)
LUAU_FASTINTVARIABLE(LuauVisitRecursionLimit, 500)
LUAU_FASTFLAG(LuauKnowsTheDataModel3)
LUAU_FASTFLAGVARIABLE(DebugLuauFreezeDuringUnification, false)
LUAU_FASTFLAGVARIABLE(LuauReturnAnyInsteadOfICE, false) // Eventually removed as false.
LUAU_FASTFLAGVARIABLE(DebugLuauSharedSelf, false)
LUAU_FASTFLAG(LuauInstantiateInSubtyping)
LUAU_FASTFLAG(LuauNegatedClassTypes)
@ -42,6 +41,7 @@ LUAU_FASTFLAG(LuauUninhabitedSubAnything2)
LUAU_FASTFLAG(LuauOccursIsntAlwaysFailure)
LUAU_FASTFLAGVARIABLE(LuauTypecheckTypeguards, false)
LUAU_FASTFLAGVARIABLE(LuauTinyControlFlowAnalysis, false)
LUAU_FASTFLAG(LuauRequirePathTrueModuleName)
namespace Luau
{
@ -264,8 +264,11 @@ ModulePtr TypeChecker::checkWithoutRecursionCheck(const SourceModule& module, Mo
{
LUAU_TIMETRACE_SCOPE("TypeChecker::check", "TypeChecker");
LUAU_TIMETRACE_ARGUMENT("module", module.name.c_str());
LUAU_TIMETRACE_ARGUMENT("name", module.humanReadableName.c_str());
currentModule.reset(new Module);
currentModule->name = module.name;
currentModule->humanReadableName = module.humanReadableName;
currentModule->reduction = std::make_unique<TypeReduction>(NotNull{&currentModule->internalTypes}, builtinTypes, NotNull{iceHandler});
currentModule->type = module.type;
currentModule->allocator = module.allocator;
@ -290,10 +293,8 @@ ModulePtr TypeChecker::checkWithoutRecursionCheck(const SourceModule& module, Mo
currentModule->scopes.push_back(std::make_pair(module.root->location, moduleScope));
currentModule->mode = mode;
currentModuleName = module.name;
if (prepareModuleScope)
prepareModuleScope(module.name, currentModule->getModuleScope());
prepareModuleScope(currentModule->name, currentModule->getModuleScope());
try
{
@ -1179,7 +1180,7 @@ ControlFlow TypeChecker::check(const ScopePtr& scope, const AstStatLocal& local)
{
AstExpr* require = *maybeRequire;
if (auto moduleInfo = resolver->resolveModuleInfo(currentModuleName, *require))
if (auto moduleInfo = resolver->resolveModuleInfo(currentModule->name, *require))
{
const Name name{local.vars.data[i]->name.value};
@ -1728,7 +1729,7 @@ void TypeChecker::prototype(const ScopePtr& scope, const AstStatDeclareClass& de
Name className(declaredClass.name.value);
TypeId classTy = addType(ClassType(className, {}, superTy, std::nullopt, {}, {}, currentModuleName));
TypeId classTy = addType(ClassType(className, {}, superTy, std::nullopt, {}, {}, currentModule->name));
ClassType* ctv = getMutable<ClassType>(classTy);
TypeId metaTy = addType(TableType{TableState::Sealed, scope->level});
@ -2000,12 +2001,7 @@ WithPredicate<TypeId> TypeChecker::checkExpr(const ScopePtr& scope, const AstExp
else if (auto vtp = get<VariadicTypePack>(retPack))
return {vtp->ty, std::move(result.predicates)};
else if (get<GenericTypePack>(retPack))
{
if (FFlag::LuauReturnAnyInsteadOfICE)
return {anyType, std::move(result.predicates)};
else
ice("Unexpected abstract type pack!", expr.location);
}
else
ice("Unknown TypePack type!", expr.location);
}
@ -2336,7 +2332,7 @@ TypeId TypeChecker::checkExprTable(
TableState state = TableState::Unsealed;
TableType table = TableType{std::move(props), indexer, scope->level, state};
table.definitionModuleName = currentModuleName;
table.definitionModuleName = currentModule->name;
table.definitionLocation = expr.location;
return addType(table);
}
@ -3663,7 +3659,7 @@ std::pair<TypeId, ScopePtr> TypeChecker::checkFunctionSignature(const ScopePtr&
TypePackId argPack = addTypePack(TypePackVar(TypePack{argTypes, funScope->varargPack}));
FunctionDefinition defn;
defn.definitionModuleName = currentModuleName;
defn.definitionModuleName = currentModule->name;
defn.definitionLocation = expr.location;
defn.varargLocation = expr.vararg ? std::make_optional(expr.varargLocation) : std::nullopt;
defn.originalNameLocation = originalName.value_or(Location(expr.location.begin, 0));
@ -4606,11 +4602,9 @@ TypeId TypeChecker::checkRequire(const ScopePtr& scope, const ModuleInfo& module
}
// Types of requires that transitively refer to current module have to be replaced with 'any'
std::string humanReadableName = resolver->getHumanReadableModuleName(moduleInfo.name);
for (const auto& [location, path] : requireCycles)
{
if (!path.empty() && path.front() == humanReadableName)
if (!path.empty() && path.front() == (FFlag::LuauRequirePathTrueModuleName ? moduleInfo.name : resolver->getHumanReadableModuleName(moduleInfo.name)))
return anyType;
}
@ -4621,14 +4615,14 @@ TypeId TypeChecker::checkRequire(const ScopePtr& scope, const ModuleInfo& module
// either the file does not exist or there's a cycle. If there's a cycle
// we will already have reported the error.
if (!resolver->moduleExists(moduleInfo.name) && !moduleInfo.optional)
reportError(TypeError{location, UnknownRequire{humanReadableName}});
reportError(TypeError{location, UnknownRequire{resolver->getHumanReadableModuleName(moduleInfo.name)}});
return errorRecoveryType(scope);
}
if (module->type != SourceCode::Module)
{
reportError(location, IllegalRequire{humanReadableName, "Module is not a ModuleScript. It cannot be required."});
reportError(location, IllegalRequire{module->humanReadableName, "Module is not a ModuleScript. It cannot be required."});
return errorRecoveryType(scope);
}
@ -4640,7 +4634,7 @@ TypeId TypeChecker::checkRequire(const ScopePtr& scope, const ModuleInfo& module
std::optional<TypeId> moduleType = first(modulePack);
if (!moduleType)
{
reportError(location, IllegalRequire{humanReadableName, "Module does not return exactly 1 value. It cannot be required."});
reportError(location, IllegalRequire{module->humanReadableName, "Module does not return exactly 1 value. It cannot be required."});
return errorRecoveryType(scope);
}
@ -4855,7 +4849,7 @@ void TypeChecker::reportError(const TypeError& error)
if (currentModule->mode == Mode::NoCheck)
return;
currentModule->errors.push_back(error);
currentModule->errors.back().moduleName = currentModuleName;
currentModule->errors.back().moduleName = currentModule->name;
}
void TypeChecker::reportError(const Location& location, TypeErrorData errorData)
@ -5329,7 +5323,7 @@ TypeId TypeChecker::resolveTypeWorker(const ScopePtr& scope, const AstType& anno
tableIndexer = TableIndexer(resolveType(scope, *indexer->indexType), resolveType(scope, *indexer->resultType));
TableType ttv{props, tableIndexer, scope->level, TableState::Sealed};
ttv.definitionModuleName = currentModuleName;
ttv.definitionModuleName = currentModule->name;
ttv.definitionLocation = annotation.location;
return addType(std::move(ttv));
}
@ -5531,7 +5525,7 @@ TypeId TypeChecker::instantiateTypeFun(const ScopePtr& scope, const TypeFun& tf,
{
ttv->instantiatedTypeParams = typeParams;
ttv->instantiatedTypePackParams = typePackParams;
ttv->definitionModuleName = currentModuleName;
ttv->definitionModuleName = currentModule->name;
ttv->definitionLocation = location;
}

View File

@ -10,7 +10,7 @@
#include <deque>
LUAU_FASTINTVARIABLE(LuauTypeReductionCartesianProductLimit, 100'000)
LUAU_FASTINTVARIABLE(LuauTypeReductionRecursionLimit, 400)
LUAU_FASTINTVARIABLE(LuauTypeReductionRecursionLimit, 300)
LUAU_FASTFLAGVARIABLE(DebugLuauDontReduceTypes, false)
namespace Luau

View File

@ -8,6 +8,7 @@
#include "Luau/Compiler.h"
#include "Luau/BytecodeBuilder.h"
#include "Luau/Parser.h"
#include "Luau/TimeTrace.h"
#include "Coverage.h"
#include "FileUtils.h"
@ -997,15 +998,18 @@ int replMain(int argc, char** argv)
CompileStats stats = {};
int failed = 0;
double startTime = Luau::TimeTrace::getClock();
for (const std::string& path : files)
failed += !compileFile(path.c_str(), compileFormat, stats);
double duration = Luau::TimeTrace::getClock() - startTime;
if (compileFormat == CompileFormat::Null)
printf("Compiled %d KLOC into %d KB bytecode\n", int(stats.lines / 1000), int(stats.bytecode / 1024));
printf("Compiled %d KLOC into %d KB bytecode in %.2fs\n", int(stats.lines / 1000), int(stats.bytecode / 1024), duration);
else if (compileFormat == CompileFormat::CodegenNull)
printf("Compiled %d KLOC into %d KB bytecode => %d KB native code\n", int(stats.lines / 1000), int(stats.bytecode / 1024),
int(stats.codegen / 1024));
printf("Compiled %d KLOC into %d KB bytecode => %d KB native code (%.2fx) in %.2fs\n", int(stats.lines / 1000), int(stats.bytecode / 1024),
int(stats.codegen / 1024), stats.bytecode == 0 ? 0.0 : double(stats.codegen) / double(stats.bytecode), duration);
return failed ? 1 : 0;
}

View File

@ -37,7 +37,6 @@ public:
void movk(RegisterA64 dst, uint16_t src, int shift = 0);
// Arithmetics
// TODO: support various kinds of shifts
void add(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void add(RegisterA64 dst, RegisterA64 src1, uint16_t src2);
void sub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
@ -52,13 +51,11 @@ public:
void cset(RegisterA64 dst, ConditionA64 cond);
// Bitwise
// TODO: support shifts
// TODO: support bitfield ops
void and_(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void orr(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void eor(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void bic(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2);
void tst(RegisterA64 src1, RegisterA64 src2);
void and_(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void orr(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void eor(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void bic(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift = 0);
void tst(RegisterA64 src1, RegisterA64 src2, int shift = 0);
void mvn(RegisterA64 dst, RegisterA64 src);
// Bitwise with immediate
@ -76,6 +73,13 @@ public:
void clz(RegisterA64 dst, RegisterA64 src);
void rbit(RegisterA64 dst, RegisterA64 src);
// Shifts with immediates
// Note: immediate value must be in [0, 31] or [0, 63] range based on register type
void lsl(RegisterA64 dst, RegisterA64 src1, uint8_t src2);
void lsr(RegisterA64 dst, RegisterA64 src1, uint8_t src2);
void asr(RegisterA64 dst, RegisterA64 src1, uint8_t src2);
void ror(RegisterA64 dst, RegisterA64 src1, uint8_t src2);
// Load
// Note: paired loads are currently omitted for simplicity
void ldr(RegisterA64 dst, AddressA64 src);
@ -93,15 +97,19 @@ public:
void stp(RegisterA64 src1, RegisterA64 src2, AddressA64 dst);
// Control flow
// TODO: support tbz/tbnz; they have 15-bit offsets but they can be useful in constrained cases
void b(Label& label);
void b(ConditionA64 cond, Label& label);
void cbz(RegisterA64 src, Label& label);
void cbnz(RegisterA64 src, Label& label);
void bl(Label& label);
void br(RegisterA64 src);
void blr(RegisterA64 src);
void ret();
// Conditional control flow
void b(ConditionA64 cond, Label& label);
void cbz(RegisterA64 src, Label& label);
void cbnz(RegisterA64 src, Label& label);
void tbz(RegisterA64 src, uint8_t bit, Label& label);
void tbnz(RegisterA64 src, uint8_t bit, Label& label);
// Address of embedded data
void adr(RegisterA64 dst, const void* ptr, size_t size);
void adr(RegisterA64 dst, uint64_t value);
@ -111,7 +119,9 @@ public:
void adr(RegisterA64 dst, Label& label);
// Floating-point scalar moves
// Note: constant must be compatible with immediate floating point moves (see isFmovSupported)
void fmov(RegisterA64 dst, RegisterA64 src);
void fmov(RegisterA64 dst, double src);
// Floating-point scalar math
void fabs(RegisterA64 dst, RegisterA64 src);
@ -173,6 +183,12 @@ public:
// Maximum immediate argument to functions like add/sub/cmp
static constexpr size_t kMaxImmediate = (1 << 12) - 1;
// Check if immediate mode mask is supported for bitwise operations (and/or/xor)
static bool isMaskSupported(uint32_t mask);
// Check if fmov can be used to synthesize a constant
static bool isFmovSupported(double value);
private:
// Instruction archetypes
void place0(const char* name, uint32_t word);
@ -183,20 +199,38 @@ private:
void placeI12(const char* name, RegisterA64 dst, RegisterA64 src1, int src2, uint8_t op);
void placeI16(const char* name, RegisterA64 dst, int src, uint8_t op, int shift = 0);
void placeA(const char* name, RegisterA64 dst, AddressA64 src, uint8_t op, uint8_t size, int sizelog);
void placeB(const char* name, Label& label, uint8_t op);
void placeBC(const char* name, Label& label, uint8_t op, uint8_t cond);
void placeBCR(const char* name, Label& label, uint8_t op, RegisterA64 cond);
void placeBR(const char* name, RegisterA64 src, uint32_t op);
void placeBTR(const char* name, Label& label, uint8_t op, RegisterA64 cond, uint8_t bit);
void placeADR(const char* name, RegisterA64 src, uint8_t op);
void placeADR(const char* name, RegisterA64 src, uint8_t op, Label& label);
void placeP(const char* name, RegisterA64 dst1, RegisterA64 dst2, AddressA64 src, uint8_t op, uint8_t opc, int sizelog);
void placeCS(const char* name, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, ConditionA64 cond, uint8_t op, uint8_t opc, int invert = 0);
void placeFCMP(const char* name, RegisterA64 src1, RegisterA64 src2, uint8_t op, uint8_t opc);
void placeFMOV(const char* name, RegisterA64 dst, double src, uint32_t op);
void placeBM(const char* name, RegisterA64 dst, RegisterA64 src1, uint32_t src2, uint8_t op);
void placeBFM(const char* name, RegisterA64 dst, RegisterA64 src1, uint8_t src2, uint8_t op, int immr, int imms);
void place(uint32_t word);
void patchLabel(Label& label);
void patchImm19(uint32_t location, int value);
struct Patch
{
enum Kind
{
Imm26,
Imm19,
Imm14,
};
Kind kind : 2;
uint32_t label : 30;
uint32_t location;
};
void patchLabel(Label& label, Patch::Kind kind);
void patchOffset(uint32_t location, int value, Patch::Kind kind);
void commit();
LUAU_NOINLINE void extend();
@ -210,9 +244,10 @@ private:
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, RegisterA64 src1, int src2);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, RegisterA64 src);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, int src, int shift = 0);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, double src);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, AddressA64 src);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst1, RegisterA64 dst2, AddressA64 src);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 src, Label label);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 src, Label label, int imm = -1);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 src);
LUAU_NOINLINE void log(const char* opcode, Label label);
LUAU_NOINLINE void log(const char* opcode, RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, ConditionA64 cond);
@ -221,7 +256,7 @@ private:
LUAU_NOINLINE void log(AddressA64 addr);
uint32_t nextLabel = 1;
std::vector<Label> pendingLabels;
std::vector<Patch> pendingLabels;
std::vector<uint32_t> labelLocations;
bool finalized = false;

View File

@ -58,6 +58,8 @@ public:
void sar(OperandX64 lhs, OperandX64 rhs);
void shl(OperandX64 lhs, OperandX64 rhs);
void shr(OperandX64 lhs, OperandX64 rhs);
void rol(OperandX64 lhs, OperandX64 rhs);
void ror(OperandX64 lhs, OperandX64 rhs);
// Two operand mov instruction has additional specialized encodings
void mov(OperandX64 lhs, OperandX64 rhs);
@ -97,6 +99,9 @@ public:
void int3();
void bsr(RegisterX64 dst, OperandX64 src);
void bsf(RegisterX64 dst, OperandX64 src);
// Code alignment
void nop(uint32_t length = 1);
void align(uint32_t alignment, AlignmentDataX64 data = AlignmentDataX64::Nop);

View File

@ -16,9 +16,9 @@ enum class ConditionA64
// NE: integer (not equal), floating-point (not equal or unordered)
NotEqual,
// CS: integer (carry set), floating-point (greater than, equal or unordered)
// CS: integer (carry set), unsigned integer (greater than, equal), floating-point (greater than, equal or unordered)
CarrySet,
// CC: integer (carry clear), floating-point (less than)
// CC: integer (carry clear), unsigned integer (less than), floating-point (less than)
CarryClear,
// MI: integer (negative), floating-point (less than)

View File

@ -186,6 +186,19 @@ enum class IrCmd : uint8_t
// D: block (if false)
JUMP_EQ_INT,
// Jump if A < B
// A, B: int
// C: block (if true)
// D: block (if false)
JUMP_LT_INT,
// Jump if A >= B
// A, B: uint
// C: condition
// D: block (if true)
// E: block (if false)
JUMP_GE_UINT,
// Jump if pointers are equal
// A, B: pointer (*)
// C: block (if true)
@ -240,6 +253,15 @@ enum class IrCmd : uint8_t
// Convert integer into a double number
// A: int
INT_TO_NUM,
UINT_TO_NUM,
// Converts a double number to an integer. 'A' may be any representable integer in a double.
// A: double
NUM_TO_INT,
// Converts a double number to an unsigned integer. For out-of-range values of 'A', the result is arch-specific.
// A: double
NUM_TO_UINT,
// Adjust stack top (L->top) to point at 'B' TValues *after* the specified register
// This is used to return muliple values
@ -517,10 +539,38 @@ enum class IrCmd : uint8_t
FALLBACK_FORGPREP,
// Instruction that passes value through, it is produced by constant folding and users substitute it with the value
// When operand location is set, updates the tracked location of the value in memory
SUBSTITUTE,
// A: operand of any type
// B: Rn/Kn/none (location of operand in memory; optional)
// Performs bitwise and/xor/or on two unsigned integers
// A, B: uint
BITAND_UINT,
BITXOR_UINT,
BITOR_UINT,
// Performs bitwise not on an unsigned integer
// A: uint
BITNOT_UINT,
// Performs bitwise shift/rotate on an unsigned integer
// A: uint (source)
// B: int (shift amount)
BITLSHIFT_UINT,
BITRSHIFT_UINT,
BITARSHIFT_UINT,
BITLROTATE_UINT,
BITRROTATE_UINT,
// Returns the number of consecutive zero bits in A starting from the left-most (most significant) bit.
// A: uint
BITCOUNTLZ_UINT,
BITCOUNTRZ_UINT,
// Calls native libm function with 1 or 2 arguments
// A: builtin function ID
// B: double
// C: double (optional, 2nd argument)
INVOKE_LIBM,
};
enum class IrConstKind : uint8_t
@ -654,6 +704,7 @@ struct IrInst
A64::RegisterA64 regA64 = A64::noreg;
bool reusedReg = false;
bool spilled = false;
bool needsReload = false;
};
// When IrInst operands are used, current instruction index is often required to track lifetime
@ -696,8 +747,9 @@ struct IrFunction
std::vector<BytecodeMapping> bcMapping;
// For each instruction, an operand that can be used to recompute the calue
// For each instruction, an operand that can be used to recompute the value
std::vector<IrOp> valueRestoreOps;
uint32_t validRestoreOpBlockIdx = 0;
Proto* proto = nullptr;
@ -861,6 +913,12 @@ struct IrFunction
if (instIdx >= valueRestoreOps.size())
return {};
const IrBlock& block = blocks[validRestoreOpBlockIdx];
// Values can only reference restore operands in the current block
if (instIdx < block.start || instIdx > block.finish)
return {};
return valueRestoreOps[instIdx];
}

View File

@ -95,6 +95,8 @@ inline bool isBlockTerminator(IrCmd cmd)
case IrCmd::JUMP_IF_FALSY:
case IrCmd::JUMP_EQ_TAG:
case IrCmd::JUMP_EQ_INT:
case IrCmd::JUMP_LT_INT:
case IrCmd::JUMP_GE_UINT:
case IrCmd::JUMP_EQ_POINTER:
case IrCmd::JUMP_CMP_NUM:
case IrCmd::JUMP_CMP_ANY:
@ -149,8 +151,23 @@ inline bool hasResult(IrCmd cmd)
case IrCmd::TRY_NUM_TO_INDEX:
case IrCmd::TRY_CALL_FASTGETTM:
case IrCmd::INT_TO_NUM:
case IrCmd::UINT_TO_NUM:
case IrCmd::NUM_TO_INT:
case IrCmd::NUM_TO_UINT:
case IrCmd::SUBSTITUTE:
case IrCmd::INVOKE_FASTCALL:
case IrCmd::BITAND_UINT:
case IrCmd::BITXOR_UINT:
case IrCmd::BITOR_UINT:
case IrCmd::BITNOT_UINT:
case IrCmd::BITLSHIFT_UINT:
case IrCmd::BITRSHIFT_UINT:
case IrCmd::BITARSHIFT_UINT:
case IrCmd::BITLROTATE_UINT:
case IrCmd::BITRROTATE_UINT:
case IrCmd::BITCOUNTLZ_UINT:
case IrCmd::BITCOUNTRZ_UINT:
case IrCmd::INVOKE_LIBM:
return true;
default:
break;
@ -200,7 +217,7 @@ void replace(IrFunction& function, IrOp& original, IrOp replacement);
void replace(IrFunction& function, IrBlock& block, uint32_t instIdx, IrInst replacement);
// Replace instruction with a different value (using IrCmd::SUBSTITUTE)
void substitute(IrFunction& function, IrInst& inst, IrOp replacement, IrOp location = {});
void substitute(IrFunction& function, IrInst& inst, IrOp replacement);
// Replace instruction arguments that point to substitutions with target values
void applySubstitutions(IrFunction& function, IrOp& op);
@ -214,5 +231,7 @@ bool compare(double a, double b, IrCondition cond);
// But it can also be successful on conditional control-flow, replacing it with an unconditional IrCmd::JUMP
void foldConstants(IrBuilder& build, IrFunction& function, IrBlock& block, uint32_t instIdx);
uint32_t getNativeContextOffset(LuauBuiltinFunction bfid);
} // namespace CodeGen
} // namespace Luau

View File

@ -11,6 +11,7 @@ namespace CodeGen
struct IrBuilder;
void constPropInBlockChains(IrBuilder& build);
void createLinearBlocks(IrBuilder& build);
} // namespace CodeGen
} // namespace Luau

View File

@ -22,6 +22,27 @@ static_assert(sizeof(textForCondition) / sizeof(textForCondition[0]) == size_t(C
const unsigned kMaxAlign = 32;
// Computes the 8-bit AArch64 floating-point immediate (imm8) for 'value'.
// Returns the imm8 encoding in [0, 255] when the constant is representable,
// 256 for +0.0 (fmov can't encode zero, but movi can), and -1 otherwise.
static int getFmovImm(double value)
{
    uint64_t bits;
    static_assert(sizeof(bits) == sizeof(value), "expected double to be 64-bit");
    memcpy(&bits, &value, sizeof(value));

    // positive 0 is encodable via movi
    if (bits == 0)
        return 256;

    // early out: fmov can only encode doubles whose 48 least significant bits are zero
    if ((bits & ((1ull << 48) - 1)) != 0)
        return -1;

    // f64 expansion is abcdefgh => aBbbbbbb bbcdefgh 00000000 00000000 00000000 00000000 00000000 00000000
    int imm8 = (int(bits >> 56) & 0x80) | (int(bits >> 48) & 0x7f);
    int expanded = ((imm8 & 0x80) << 8) | ((imm8 & 0x40) ? 0b00111111'11000000 : 0b01000000'00000000) | (imm8 & 0x3f);

    // valid only if re-expanding imm8 reproduces the top 16 bits of the original
    return expanded == int(bits >> 48) ? imm8 : -1;
}
AssemblyBuilderA64::AssemblyBuilderA64(bool logText, unsigned int features)
: logText(logText)
, features(features)
@ -136,31 +157,31 @@ void AssemblyBuilderA64::cset(RegisterA64 dst, ConditionA64 cond)
placeCS("cset", dst, src, src, cond, 0b11010'10'0, 0b01, /* invert= */ 1);
}
void AssemblyBuilderA64::and_(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
void AssemblyBuilderA64::and_(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift)
{
placeSR3("and", dst, src1, src2, 0b00'01010);
placeSR3("and", dst, src1, src2, 0b00'01010, shift);
}
void AssemblyBuilderA64::orr(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
void AssemblyBuilderA64::orr(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift)
{
placeSR3("orr", dst, src1, src2, 0b01'01010);
placeSR3("orr", dst, src1, src2, 0b01'01010, shift);
}
void AssemblyBuilderA64::eor(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
void AssemblyBuilderA64::eor(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift)
{
placeSR3("eor", dst, src1, src2, 0b10'01010);
placeSR3("eor", dst, src1, src2, 0b10'01010, shift);
}
void AssemblyBuilderA64::bic(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2)
void AssemblyBuilderA64::bic(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, int shift)
{
placeSR3("bic", dst, src1, src2, 0b00'01010, /* shift= */ 0, /* N= */ 1);
placeSR3("bic", dst, src1, src2, 0b00'01010, shift, /* N= */ 1);
}
void AssemblyBuilderA64::tst(RegisterA64 src1, RegisterA64 src2)
void AssemblyBuilderA64::tst(RegisterA64 src1, RegisterA64 src2, int shift)
{
RegisterA64 dst = src1.kind == KindA64::x ? xzr : wzr;
placeSR3("tst", dst, src1, src2, 0b11'01010);
placeSR3("tst", dst, src1, src2, 0b11'01010, shift);
}
void AssemblyBuilderA64::mvn(RegisterA64 dst, RegisterA64 src)
@ -226,6 +247,39 @@ void AssemblyBuilderA64::rbit(RegisterA64 dst, RegisterA64 src)
placeR1("rbit", dst, src, 0b10'11010110'00000'0000'00);
}
// Logical shift left by an immediate: lowered through the bitfield-move archetype (placeBFM).
void AssemblyBuilderA64::lsl(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
{
    // operand width follows the destination register kind (x = 64-bit, otherwise 32-bit)
    int size = dst.kind == KindA64::x ? 64 : 32;
    LUAU_ASSERT(src2 < size); // shift amount must be in [0, size-1]
    placeBFM("lsl", dst, src1, src2, 0b10'100110, (-src2) & (size - 1), size - 1 - src2);
}
// Logical (unsigned) shift right by an immediate: lowered through the bitfield-move archetype (placeBFM).
void AssemblyBuilderA64::lsr(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
{
    // operand width follows the destination register kind (x = 64-bit, otherwise 32-bit)
    int size = dst.kind == KindA64::x ? 64 : 32;
    LUAU_ASSERT(src2 < size); // shift amount must be in [0, size-1]
    placeBFM("lsr", dst, src1, src2, 0b10'100110, src2, size - 1);
}
// Arithmetic (sign-propagating) shift right by an immediate: lowered through the bitfield-move archetype (placeBFM).
void AssemblyBuilderA64::asr(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
{
    // operand width follows the destination register kind (x = 64-bit, otherwise 32-bit)
    int size = dst.kind == KindA64::x ? 64 : 32;
    LUAU_ASSERT(src2 < size); // shift amount must be in [0, size-1]
    placeBFM("asr", dst, src1, src2, 0b00'100110, src2, size - 1);
}
// Rotate right by an immediate: lowered through the bitfield-move archetype (placeBFM).
void AssemblyBuilderA64::ror(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
{
    // operand width follows the destination register kind (x = 64-bit, otherwise 32-bit)
    int size = dst.kind == KindA64::x ? 64 : 32;
    LUAU_ASSERT(src2 < size); // rotate amount must be in [0, size-1]
    // note: this is encoding src1 via immr which is a hack but the bit layout matches and a special archetype feels excessive
    placeBFM("ror", dst, src1, src2, 0b00'100111, src1.index, src2);
}
void AssemblyBuilderA64::ldr(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w || dst.kind == KindA64::d || dst.kind == KindA64::q);
@ -233,16 +287,16 @@ void AssemblyBuilderA64::ldr(RegisterA64 dst, AddressA64 src)
switch (dst.kind)
{
case KindA64::w:
placeA("ldr", dst, src, 0b11100001, 0b10, 2);
placeA("ldr", dst, src, 0b11100001, 0b10, /* sizelog= */ 2);
break;
case KindA64::x:
placeA("ldr", dst, src, 0b11100001, 0b11, 3);
placeA("ldr", dst, src, 0b11100001, 0b11, /* sizelog= */ 3);
break;
case KindA64::d:
placeA("ldr", dst, src, 0b11110001, 0b11, 3);
placeA("ldr", dst, src, 0b11110001, 0b11, /* sizelog= */ 3);
break;
case KindA64::q:
placeA("ldr", dst, src, 0b11110011, 0b00, 4);
placeA("ldr", dst, src, 0b11110011, 0b00, /* sizelog= */ 4);
break;
case KindA64::none:
LUAU_ASSERT(!"Unexpected register kind");
@ -253,35 +307,35 @@ void AssemblyBuilderA64::ldrb(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::w);
placeA("ldrb", dst, src, 0b11100001, 0b00, 2);
placeA("ldrb", dst, src, 0b11100001, 0b00, /* sizelog= */ 0);
}
void AssemblyBuilderA64::ldrh(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::w);
placeA("ldrh", dst, src, 0b11100001, 0b01, 2);
placeA("ldrh", dst, src, 0b11100001, 0b01, /* sizelog= */ 1);
}
void AssemblyBuilderA64::ldrsb(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
placeA("ldrsb", dst, src, 0b11100010 | uint8_t(dst.kind == KindA64::w), 0b00, 0);
placeA("ldrsb", dst, src, 0b11100010 | uint8_t(dst.kind == KindA64::w), 0b00, /* sizelog= */ 0);
}
void AssemblyBuilderA64::ldrsh(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
placeA("ldrsh", dst, src, 0b11100010 | uint8_t(dst.kind == KindA64::w), 0b01, 1);
placeA("ldrsh", dst, src, 0b11100010 | uint8_t(dst.kind == KindA64::w), 0b01, /* sizelog= */ 1);
}
void AssemblyBuilderA64::ldrsw(RegisterA64 dst, AddressA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::x);
placeA("ldrsw", dst, src, 0b11100010, 0b10, 2);
placeA("ldrsw", dst, src, 0b11100010, 0b10, /* sizelog= */ 2);
}
void AssemblyBuilderA64::ldp(RegisterA64 dst1, RegisterA64 dst2, AddressA64 src)
@ -289,7 +343,7 @@ void AssemblyBuilderA64::ldp(RegisterA64 dst1, RegisterA64 dst2, AddressA64 src)
LUAU_ASSERT(dst1.kind == KindA64::x || dst1.kind == KindA64::w);
LUAU_ASSERT(dst1.kind == dst2.kind);
placeP("ldp", dst1, dst2, src, 0b101'0'010'1, uint8_t(dst1.kind == KindA64::x) << 1, dst1.kind == KindA64::x ? 3 : 2);
placeP("ldp", dst1, dst2, src, 0b101'0'010'1, uint8_t(dst1.kind == KindA64::x) << 1, /* sizelog= */ dst1.kind == KindA64::x ? 3 : 2);
}
void AssemblyBuilderA64::str(RegisterA64 src, AddressA64 dst)
@ -299,16 +353,16 @@ void AssemblyBuilderA64::str(RegisterA64 src, AddressA64 dst)
switch (src.kind)
{
case KindA64::w:
placeA("str", src, dst, 0b11100000, 0b10, 2);
placeA("str", src, dst, 0b11100000, 0b10, /* sizelog= */ 2);
break;
case KindA64::x:
placeA("str", src, dst, 0b11100000, 0b11, 3);
placeA("str", src, dst, 0b11100000, 0b11, /* sizelog= */ 3);
break;
case KindA64::d:
placeA("str", src, dst, 0b11110000, 0b11, 3);
placeA("str", src, dst, 0b11110000, 0b11, /* sizelog= */ 3);
break;
case KindA64::q:
placeA("str", src, dst, 0b11110010, 0b00, 4);
placeA("str", src, dst, 0b11110010, 0b00, /* sizelog= */ 4);
break;
case KindA64::none:
LUAU_ASSERT(!"Unexpected register kind");
@ -319,14 +373,14 @@ void AssemblyBuilderA64::strb(RegisterA64 src, AddressA64 dst)
{
LUAU_ASSERT(src.kind == KindA64::w);
placeA("strb", src, dst, 0b11100000, 0b00, 2);
placeA("strb", src, dst, 0b11100000, 0b00, /* sizelog= */ 0);
}
void AssemblyBuilderA64::strh(RegisterA64 src, AddressA64 dst)
{
LUAU_ASSERT(src.kind == KindA64::w);
placeA("strh", src, dst, 0b11100000, 0b01, 2);
placeA("strh", src, dst, 0b11100000, 0b01, /* sizelog= */ 1);
}
void AssemblyBuilderA64::stp(RegisterA64 src1, RegisterA64 src2, AddressA64 dst)
@ -334,28 +388,17 @@ void AssemblyBuilderA64::stp(RegisterA64 src1, RegisterA64 src2, AddressA64 dst)
LUAU_ASSERT(src1.kind == KindA64::x || src1.kind == KindA64::w);
LUAU_ASSERT(src1.kind == src2.kind);
placeP("stp", src1, src2, dst, 0b101'0'010'0, uint8_t(src1.kind == KindA64::x) << 1, src1.kind == KindA64::x ? 3 : 2);
placeP("stp", src1, src2, dst, 0b101'0'010'0, uint8_t(src1.kind == KindA64::x) << 1, /* sizelog= */ src1.kind == KindA64::x ? 3 : 2);
}
void AssemblyBuilderA64::b(Label& label)
{
// Note: we aren't using 'b' form since it has a 26-bit immediate which requires custom fixup logic
placeBC("b", label, 0b0101010'0, codeForCondition[int(ConditionA64::Always)]);
placeB("b", label, 0b0'00101);
}
void AssemblyBuilderA64::b(ConditionA64 cond, Label& label)
void AssemblyBuilderA64::bl(Label& label)
{
placeBC(textForCondition[int(cond)], label, 0b0101010'0, codeForCondition[int(cond)]);
}
void AssemblyBuilderA64::cbz(RegisterA64 src, Label& label)
{
placeBCR("cbz", label, 0b011010'0, src);
}
void AssemblyBuilderA64::cbnz(RegisterA64 src, Label& label)
{
placeBCR("cbnz", label, 0b011010'1, src);
placeB("bl", label, 0b1'00101);
}
void AssemblyBuilderA64::br(RegisterA64 src)
@ -373,6 +416,31 @@ void AssemblyBuilderA64::ret()
place0("ret", 0b1101011'0'0'10'11111'0000'0'0'11110'00000);
}
// Conditional branch: jumps to 'label' when 'cond' holds (19-bit offset form, see placeBC).
void AssemblyBuilderA64::b(ConditionA64 cond, Label& label)
{
    placeBC(textForCondition[int(cond)], label, 0b0101010'0, codeForCondition[int(cond)]);
}
// Compare-and-branch: jumps to 'label' when 'src' is zero.
void AssemblyBuilderA64::cbz(RegisterA64 src, Label& label)
{
    placeBCR("cbz", label, 0b011010'0, src);
}
// Compare-and-branch: jumps to 'label' when 'src' is non-zero.
void AssemblyBuilderA64::cbnz(RegisterA64 src, Label& label)
{
    placeBCR("cbnz", label, 0b011010'1, src);
}
// Test-bit-and-branch: jumps to 'label' when bit 'bit' of 'src' is clear (14-bit offset form, see placeBTR).
void AssemblyBuilderA64::tbz(RegisterA64 src, uint8_t bit, Label& label)
{
    placeBTR("tbz", label, 0b011011'0, src, bit);
}
// Test-bit-and-branch: jumps to 'label' when bit 'bit' of 'src' is set (14-bit offset form, see placeBTR).
void AssemblyBuilderA64::tbnz(RegisterA64 src, uint8_t bit, Label& label)
{
    placeBTR("tbnz", label, 0b011011'1, src, bit);
}
void AssemblyBuilderA64::adr(RegisterA64 dst, const void* ptr, size_t size)
{
size_t pos = allocateData(size, 4);
@ -381,7 +449,7 @@ void AssemblyBuilderA64::adr(RegisterA64 dst, const void* ptr, size_t size)
memcpy(&data[pos], ptr, size);
placeADR("adr", dst, 0b10000);
patchImm19(location, -int(location) - int((data.size() - pos) / 4));
patchOffset(location, -int(location) - int((data.size() - pos) / 4), Patch::Imm19);
}
void AssemblyBuilderA64::adr(RegisterA64 dst, uint64_t value)
@ -392,7 +460,7 @@ void AssemblyBuilderA64::adr(RegisterA64 dst, uint64_t value)
writeu64(&data[pos], value);
placeADR("adr", dst, 0b10000);
patchImm19(location, -int(location) - int((data.size() - pos) / 4));
patchOffset(location, -int(location) - int((data.size() - pos) / 4), Patch::Imm19);
}
void AssemblyBuilderA64::adr(RegisterA64 dst, double value)
@ -403,7 +471,7 @@ void AssemblyBuilderA64::adr(RegisterA64 dst, double value)
writef64(&data[pos], value);
placeADR("adr", dst, 0b10000);
patchImm19(location, -int(location) - int((data.size() - pos) / 4));
patchOffset(location, -int(location) - int((data.size() - pos) / 4), Patch::Imm19);
}
void AssemblyBuilderA64::adr(RegisterA64 dst, Label& label)
@ -418,6 +486,20 @@ void AssemblyBuilderA64::fmov(RegisterA64 dst, RegisterA64 src)
placeR1("fmov", dst, src, 0b000'11110'01'1'0000'00'10000);
}
// Loads a floating-point constant into a scalar register via an 8-bit immediate encoding.
// The constant must be encodable; callers should check isFmovSupported first.
void AssemblyBuilderA64::fmov(RegisterA64 dst, double src)
{
    LUAU_ASSERT(dst.kind == KindA64::d); // only 64-bit fp scalars are handled here

    int imm = getFmovImm(src);
    LUAU_ASSERT(imm >= 0 && imm <= 256); // getFmovImm returns 256 for +0.0, [0..255] otherwise

    // fmov can't encode 0, but movi can; movi is otherwise not useful for 64-bit fp immediates because it encodes repeating patterns
    if (imm == 256)
        placeFMOV("movi", dst, src, 0b001'0111100000'000'1110'01'00000);
    else
        placeFMOV("fmov", dst, src, 0b000'11110'01'1'00000000'100'00000 | (imm << 8));
}
void AssemblyBuilderA64::fabs(RegisterA64 dst, RegisterA64 src)
{
LUAU_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d);
@ -555,13 +637,14 @@ bool AssemblyBuilderA64::finalize()
code.resize(codePos - code.data());
// Resolve jump targets
for (Label fixup : pendingLabels)
for (Patch fixup : pendingLabels)
{
// If this assertion fires, a label was used in jmp without calling setLabel
LUAU_ASSERT(labelLocations[fixup.id - 1] != ~0u);
int value = int(labelLocations[fixup.id - 1]) - int(fixup.location);
uint32_t label = fixup.label;
LUAU_ASSERT(labelLocations[label - 1] != ~0u);
int value = int(labelLocations[label - 1]) - int(fixup.location);
patchImm19(fixup.location, value);
patchOffset(fixup.location, value, fixup.kind);
}
size_t dataSize = data.size() - dataPos;
@ -618,6 +701,20 @@ uint32_t AssemblyBuilderA64::getCodeSize() const
return uint32_t(codePos - code.data());
}
// Returns true when 'mask' is a single contiguous run of 1 bits (with at least
// one 0 and one 1 overall), i.e. it can be encoded as a logical-immediate
// operand for and/orr/eor (see placeBM).
bool AssemblyBuilderA64::isMaskSupported(uint32_t mask)
{
    int lz = countlz(mask);
    int rz = countrz(mask);
    return lz + rz > 0 && lz + rz < 32 && // must have at least one 0 and at least one 1
           (mask >> rz) == (1u << (32 - lz - rz)) - 1; // sequence of 1s must be contiguous
}
// Returns true when 'value' can be synthesized with a single immediate
// fmov/movi (see getFmovImm; -1 means not encodable).
bool AssemblyBuilderA64::isFmovSupported(double value)
{
    return getFmovImm(value) >= 0;
}
void AssemblyBuilderA64::place0(const char* name, uint32_t op)
{
if (logText)
@ -634,11 +731,12 @@ void AssemblyBuilderA64::placeSR3(const char* name, RegisterA64 dst, RegisterA64
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
LUAU_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind);
LUAU_ASSERT(shift >= 0 && shift < 64); // right shift requires changing some encoding bits
LUAU_ASSERT(shift >= -63 && shift <= 63);
uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
place(dst.index | (src1.index << 5) | (shift << 10) | (src2.index << 16) | (N << 21) | (op << 24) | sf);
place(dst.index | (src1.index << 5) | ((shift < 0 ? -shift : shift) << 10) | (src2.index << 16) | (N << 21) | (int(shift < 0) << 22) |
(op << 24) | sf);
commit();
}
@ -734,12 +832,23 @@ void AssemblyBuilderA64::placeA(const char* name, RegisterA64 dst, AddressA64 sr
commit();
}
// Archetype for unconditional branch-class instructions (b/bl) whose 26-bit
// label-relative offset is filled in later via patchLabel/patchOffset.
void AssemblyBuilderA64::placeB(const char* name, Label& label, uint8_t op)
{
    place(op << 26); // offset bits left zero; patched once the label location is known
    commit();
    patchLabel(label, Patch::Imm26);
    if (logText)
        log(name, label);
}
void AssemblyBuilderA64::placeBC(const char* name, Label& label, uint8_t op, uint8_t cond)
{
place(cond | (op << 24));
commit();
patchLabel(label);
patchLabel(label, Patch::Imm19);
if (logText)
log(name, label);
@ -754,7 +863,7 @@ void AssemblyBuilderA64::placeBCR(const char* name, Label& label, uint8_t op, Re
place(cond.index | (op << 24) | sf);
commit();
patchLabel(label);
patchLabel(label, Patch::Imm19);
if (logText)
log(name, cond, label);
@ -771,6 +880,20 @@ void AssemblyBuilderA64::placeBR(const char* name, RegisterA64 src, uint32_t op)
commit();
}
// Archetype for test-bit-and-branch instructions (tbz/tbnz) with a 14-bit
// label-relative offset patched in later. The bit number is split across the
// encoding: low 5 bits at bit 19, and the 6th bit at bit 31.
void AssemblyBuilderA64::placeBTR(const char* name, Label& label, uint8_t op, RegisterA64 cond, uint8_t bit)
{
    LUAU_ASSERT(cond.kind == KindA64::x || cond.kind == KindA64::w);
    LUAU_ASSERT(bit < (cond.kind == KindA64::x ? 64 : 32)); // bit index must fit the register width
    place(cond.index | ((bit & 0x1f) << 19) | (op << 24) | ((bit >> 5) << 31));
    commit();
    patchLabel(label, Patch::Imm14);
    if (logText)
        log(name, cond, label, bit);
}
void AssemblyBuilderA64::placeADR(const char* name, RegisterA64 dst, uint8_t op)
{
if (logText)
@ -789,7 +912,7 @@ void AssemblyBuilderA64::placeADR(const char* name, RegisterA64 dst, uint8_t op,
place(dst.index | (op << 24));
commit();
patchLabel(label);
patchLabel(label, Patch::Imm19);
if (logText)
log(name, dst, label);
@ -838,7 +961,37 @@ void AssemblyBuilderA64::placeFCMP(const char* name, RegisterA64 src1, RegisterA
commit();
}
void AssemblyBuilderA64::placeFMOV(const char* name, RegisterA64 dst, double src, uint32_t op)
{
if (logText)
log(name, dst, src);
place(dst.index | (op << 5));
commit();
}
void AssemblyBuilderA64::placeBM(const char* name, RegisterA64 dst, RegisterA64 src1, uint32_t src2, uint8_t op)
{
if (logText)
log(name, dst, src1, src2);
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
LUAU_ASSERT(dst.kind == src1.kind);
LUAU_ASSERT(isMaskSupported(src2));
uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
int lz = countlz(src2);
int rz = countrz(src2);
int imms = 31 - lz - rz; // count of 1s minus 1
int immr = (32 - rz) & 31; // right rotate amount
place(dst.index | (src1.index << 5) | (imms << 10) | (immr << 16) | (op << 23) | sf);
commit();
}
void AssemblyBuilderA64::placeBFM(const char* name, RegisterA64 dst, RegisterA64 src1, uint8_t src2, uint8_t op, int immr, int imms)
{
if (logText)
log(name, dst, src1, src2);
@ -847,17 +1000,9 @@ void AssemblyBuilderA64::placeBM(const char* name, RegisterA64 dst, RegisterA64
LUAU_ASSERT(dst.kind == src1.kind);
uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
uint32_t n = (dst.kind == KindA64::x) ? 1 << 22 : 0;
int lz = countlz(src2);
int rz = countrz(src2);
LUAU_ASSERT(lz + rz > 0 && lz + rz < 32); // must have at least one 0 and at least one 1
LUAU_ASSERT((src2 >> rz) == (1u << (32 - lz - rz)) - 1u); // sequence of 1s must be contiguous
int imms = 31 - lz - rz; // count of 1s minus 1
int immr = (32 - rz) & 31; // right rotate amount
place(dst.index | (src1.index << 5) | (imms << 10) | (immr << 16) | (op << 23) | sf);
place(dst.index | (src1.index << 5) | (imms << 10) | (immr << 16) | n | (op << 23) | sf);
commit();
}
@ -867,7 +1012,7 @@ void AssemblyBuilderA64::place(uint32_t word)
*codePos++ = word;
}
void AssemblyBuilderA64::patchLabel(Label& label)
void AssemblyBuilderA64::patchLabel(Label& label, Patch::Kind kind)
{
uint32_t location = getCodeSize() - 1;
@ -879,22 +1024,25 @@ void AssemblyBuilderA64::patchLabel(Label& label)
labelLocations.push_back(~0u);
}
pendingLabels.push_back({label.id, location});
pendingLabels.push_back({kind, label.id, location});
}
else
{
int value = int(label.location) - int(location);
patchImm19(location, value);
patchOffset(location, value, kind);
}
}
void AssemblyBuilderA64::patchImm19(uint32_t location, int value)
void AssemblyBuilderA64::patchOffset(uint32_t location, int value, Patch::Kind kind)
{
// imm19 encoding word offset, at bit offset 5
// note that 18 bits of word offsets = 20 bits of byte offsets = +-1MB
if (value > -(1 << 18) && value < (1 << 18))
code[location] |= (value & ((1 << 19) - 1)) << 5;
int offset = (kind == Patch::Imm26) ? 0 : 5;
int range = (kind == Patch::Imm19) ? (1 << 19) : (kind == Patch::Imm26) ? (1 << 26) : (1 << 14);
LUAU_ASSERT((code[location] & ((range - 1) << offset)) == 0);
if (value > -(range >> 1) && value < (range >> 1))
code[location] |= (value & (range - 1)) << offset;
else
overflowed = true;
}
@ -952,6 +1100,8 @@ void AssemblyBuilderA64::log(const char* opcode, RegisterA64 dst, RegisterA64 sr
log(src2);
if (shift > 0)
logAppend(" LSL #%d", shift);
else if (shift < 0)
logAppend(" LSR #%d", -shift);
text.append("\n");
}
@ -1009,11 +1159,22 @@ void AssemblyBuilderA64::log(const char* opcode, RegisterA64 dst, int src, int s
text.append("\n");
}
void AssemblyBuilderA64::log(const char* opcode, RegisterA64 src, Label label)
void AssemblyBuilderA64::log(const char* opcode, RegisterA64 dst, double src)
{
logAppend(" %-12s", opcode);
log(dst);
text.append(",");
logAppend("#%.17g", src);
text.append("\n");
}
void AssemblyBuilderA64::log(const char* opcode, RegisterA64 src, Label label, int imm)
{
logAppend(" %-12s", opcode);
log(src);
text.append(",");
if (imm >= 0)
logAppend("#%d,", imm);
logAppend(".L%d\n", label.id);
}

View File

@ -144,6 +144,16 @@ void AssemblyBuilderX64::shr(OperandX64 lhs, OperandX64 rhs)
placeShift("shr", lhs, rhs, 5);
}
void AssemblyBuilderX64::rol(OperandX64 lhs, OperandX64 rhs)
{
placeShift("rol", lhs, rhs, 0);
}
void AssemblyBuilderX64::ror(OperandX64 lhs, OperandX64 rhs)
{
placeShift("ror", lhs, rhs, 1);
}
void AssemblyBuilderX64::mov(OperandX64 lhs, OperandX64 rhs)
{
if (logText)
@ -461,6 +471,34 @@ void AssemblyBuilderX64::int3()
commit();
}
void AssemblyBuilderX64::bsr(RegisterX64 dst, OperandX64 src)
{
if (logText)
log("bsr", dst, src);
LUAU_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword);
placeRex(dst, src);
place(0x0f);
place(0xbd);
placeRegAndModRegMem(dst, src);
commit();
}
void AssemblyBuilderX64::bsf(RegisterX64 dst, OperandX64 src)
{
if (logText)
log("bsf", dst, src);
LUAU_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword);
placeRex(dst, src);
place(0x0f);
place(0xbc);
placeRegAndModRegMem(dst, src);
commit();
}
void AssemblyBuilderX64::nop(uint32_t length)
{
while (length != 0)

View File

@ -50,6 +50,7 @@
#endif
LUAU_FASTFLAGVARIABLE(DebugCodegenNoOpt, false)
LUAU_FASTFLAGVARIABLE(DebugCodegenOptSize, false)
namespace Luau
{
@ -154,6 +155,9 @@ static bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
toStringDetailed(ctx, block, blockIndex, /* includeUseInfo */ true);
}
// Values can only reference restore operands in the current block
function.validRestoreOpBlockIdx = blockIndex;
build.setLabel(block.label);
for (uint32_t index = block.start; index <= block.finish; index++)
@ -176,10 +180,6 @@ static bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
IrInst& inst = function.instructions[index];
// Substitutions might have meta information about operand restore location from memory
if (inst.cmd == IrCmd::SUBSTITUTE && inst.b.kind != IrOpKind::None)
function.recordRestoreOp(inst.a.index, inst.b);
// Skip pseudo instructions, but make sure they are not used at this stage
// This also prevents them from getting into text output when that's enabled
if (isPseudo(inst.cmd))
@ -213,6 +213,8 @@ static bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
}
}
lowering.finishBlock();
if (options.includeIr)
build.logAppend("#\n");
}
@ -292,6 +294,9 @@ static NativeProto* assembleFunction(AssemblyBuilder& build, NativeState& data,
if (!FFlag::DebugCodegenNoOpt)
{
constPropInBlockChains(ir);
if (!FFlag::DebugCodegenOptSize)
createLinearBlocks(ir);
}
if (!lowerIr(build, ir, data, helpers, proto, options))

View File

@ -254,16 +254,14 @@ Closure* callFallback(lua_State* L, StkId ra, StkId argtop, int nresults)
}
// Extracted as-is from lvmexecute.cpp with the exception of control flow (reentry) and removed interrupts
Closure* returnFallback(lua_State* L, StkId ra, int n)
Closure* returnFallback(lua_State* L, StkId ra, StkId valend)
{
// ci is our callinfo, cip is our parent
CallInfo* ci = L->ci;
CallInfo* cip = ci - 1;
StkId res = ci->func; // note: we assume CALL always puts func+args and expects results to start at func
StkId vali = ra;
StkId valend = (n == LUA_MULTRET) ? L->top : ra + n; // copy as much as possible for MULTRET calls, and only as much as needed otherwise
int nresults = ci->nresults;

View File

@ -18,7 +18,7 @@ Closure* callProlog(lua_State* L, TValue* ra, StkId argtop, int nresults);
void callEpilogC(lua_State* L, int nresults, int n);
Closure* callFallback(lua_State* L, StkId ra, StkId argtop, int nresults);
Closure* returnFallback(lua_State* L, StkId ra, int n);
Closure* returnFallback(lua_State* L, StkId ra, StkId valend);
} // namespace CodeGen
} // namespace Luau

View File

@ -18,90 +18,6 @@ namespace CodeGen
namespace X64
{
static void emitBuiltinMathSingleArgFunc(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int arg, int32_t offset)
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::xmmword, luauRegValue(arg));
callWrap.call(qword[rNativeContext + offset]);
build.vmovsd(luauRegValue(ra), xmm0);
}
void emitBuiltinMathExp(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
emitBuiltinMathSingleArgFunc(regs, build, ra, arg, offsetof(NativeContext, libm_exp));
}
void emitBuiltinMathFmod(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::xmmword, luauRegValue(arg));
callWrap.addArgument(SizeX64::xmmword, qword[args + offsetof(TValue, value)]);
callWrap.call(qword[rNativeContext + offsetof(NativeContext, libm_fmod)]);
build.vmovsd(luauRegValue(ra), xmm0);
}
void emitBuiltinMathAsin(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
emitBuiltinMathSingleArgFunc(regs, build, ra, arg, offsetof(NativeContext, libm_asin));
}
void emitBuiltinMathSin(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
emitBuiltinMathSingleArgFunc(regs, build, ra, arg, offsetof(NativeContext, libm_sin));
}
void emitBuiltinMathSinh(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
emitBuiltinMathSingleArgFunc(regs, build, ra, arg, offsetof(NativeContext, libm_sinh));
}
void emitBuiltinMathAcos(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
emitBuiltinMathSingleArgFunc(regs, build, ra, arg, offsetof(NativeContext, libm_acos));
}
void emitBuiltinMathCos(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
emitBuiltinMathSingleArgFunc(regs, build, ra, arg, offsetof(NativeContext, libm_cos));
}
void emitBuiltinMathCosh(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
emitBuiltinMathSingleArgFunc(regs, build, ra, arg, offsetof(NativeContext, libm_cosh));
}
void emitBuiltinMathAtan(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
emitBuiltinMathSingleArgFunc(regs, build, ra, arg, offsetof(NativeContext, libm_atan));
}
void emitBuiltinMathTan(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
emitBuiltinMathSingleArgFunc(regs, build, ra, arg, offsetof(NativeContext, libm_tan));
}
void emitBuiltinMathTanh(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
emitBuiltinMathSingleArgFunc(regs, build, ra, arg, offsetof(NativeContext, libm_tanh));
}
void emitBuiltinMathAtan2(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
IrCallWrapperX64 callWrap(regs, build);
callWrap.addArgument(SizeX64::xmmword, luauRegValue(arg));
callWrap.addArgument(SizeX64::xmmword, qword[args + offsetof(TValue, value)]);
callWrap.call(qword[rNativeContext + offsetof(NativeContext, libm_atan2)]);
build.vmovsd(luauRegValue(ra), xmm0);
}
void emitBuiltinMathLog10(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
emitBuiltinMathSingleArgFunc(regs, build, ra, arg, offsetof(NativeContext, libm_log10));
}
void emitBuiltinMathLog(IrRegAllocX64& regs, AssemblyBuilderX64& build, int nparams, int ra, int arg, OperandX64 args, int nresults)
{
regs.assertAllFree();
@ -220,45 +136,6 @@ void emitBuiltin(IrRegAllocX64& regs, AssemblyBuilderX64& build, int bfid, int r
switch (bfid)
{
case LBF_MATH_EXP:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinMathExp(regs, build, nparams, ra, arg, argsOp, nresults);
case LBF_MATH_FMOD:
LUAU_ASSERT(nparams == 2 && nresults == 1);
return emitBuiltinMathFmod(regs, build, nparams, ra, arg, argsOp, nresults);
case LBF_MATH_ASIN:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinMathAsin(regs, build, nparams, ra, arg, argsOp, nresults);
case LBF_MATH_SIN:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinMathSin(regs, build, nparams, ra, arg, argsOp, nresults);
case LBF_MATH_SINH:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinMathSinh(regs, build, nparams, ra, arg, argsOp, nresults);
case LBF_MATH_ACOS:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinMathAcos(regs, build, nparams, ra, arg, argsOp, nresults);
case LBF_MATH_COS:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinMathCos(regs, build, nparams, ra, arg, argsOp, nresults);
case LBF_MATH_COSH:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinMathCosh(regs, build, nparams, ra, arg, argsOp, nresults);
case LBF_MATH_ATAN:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinMathAtan(regs, build, nparams, ra, arg, argsOp, nresults);
case LBF_MATH_TAN:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinMathTan(regs, build, nparams, ra, arg, argsOp, nresults);
case LBF_MATH_TANH:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinMathTanh(regs, build, nparams, ra, arg, argsOp, nresults);
case LBF_MATH_ATAN2:
LUAU_ASSERT(nparams == 2 && nresults == 1);
return emitBuiltinMathAtan2(regs, build, nparams, ra, arg, argsOp, nresults);
case LBF_MATH_LOG10:
LUAU_ASSERT(nparams == 1 && nresults == 1);
return emitBuiltinMathLog10(regs, build, nparams, ra, arg, argsOp, nresults);
case LBF_MATH_LOG:
LUAU_ASSERT((nparams == 1 || nparams == 2) && nresults == 1);
return emitBuiltinMathLog(regs, build, nparams, ra, arg, argsOp, nresults);

View File

@ -40,7 +40,7 @@ constexpr RegisterA64 rBase = x24; // StkId base
// Native code is as stackless as the interpreter, so we can place some data on the stack once and have it accessible at any point
// See CodeGenA64.cpp for layout
constexpr unsigned kStashSlots = 8; // stashed non-volatile registers
constexpr unsigned kSpillSlots = 0; // slots for spilling temporary registers (unused)
constexpr unsigned kSpillSlots = 22; // slots for spilling temporary registers
constexpr unsigned kTempSlots = 2; // 16 bytes of temporary space, such luxury!
constexpr unsigned kStackSize = (kStashSlots + kSpillSlots + kTempSlots) * 8;

View File

@ -468,8 +468,7 @@ void IrBuilder::clone(const IrBlock& source, bool removeCurrentTerminator)
IrInst clone = function.instructions[index];
// Skip pseudo instructions to make clone more compact, but validate that they have no users
// But if substitution tracks a location, that tracking has to be preserved
if (isPseudo(clone.cmd) && !(clone.cmd == IrCmd::SUBSTITUTE && clone.b.kind != IrOpKind::None))
if (isPseudo(clone.cmd))
{
LUAU_ASSERT(clone.useCount == 0);
continue;

View File

@ -150,6 +150,10 @@ const char* getCmdName(IrCmd cmd)
return "JUMP_EQ_TAG";
case IrCmd::JUMP_EQ_INT:
return "JUMP_EQ_INT";
case IrCmd::JUMP_LT_INT:
return "JUMP_LT_INT";
case IrCmd::JUMP_GE_UINT:
return "JUMP_GE_UINT";
case IrCmd::JUMP_EQ_POINTER:
return "JUMP_EQ_POINTER";
case IrCmd::JUMP_CMP_NUM:
@ -170,6 +174,12 @@ const char* getCmdName(IrCmd cmd)
return "TRY_CALL_FASTGETTM";
case IrCmd::INT_TO_NUM:
return "INT_TO_NUM";
case IrCmd::UINT_TO_NUM:
return "UINT_TO_NUM";
case IrCmd::NUM_TO_INT:
return "NUM_TO_INT";
case IrCmd::NUM_TO_UINT:
return "NUM_TO_UINT";
case IrCmd::ADJUST_STACK_TO_REG:
return "ADJUST_STACK_TO_REG";
case IrCmd::ADJUST_STACK_TO_TOP:
@ -264,6 +274,30 @@ const char* getCmdName(IrCmd cmd)
return "FALLBACK_FORGPREP";
case IrCmd::SUBSTITUTE:
return "SUBSTITUTE";
case IrCmd::BITAND_UINT:
return "BITAND_UINT";
case IrCmd::BITXOR_UINT:
return "BITXOR_UINT";
case IrCmd::BITOR_UINT:
return "BITOR_UINT";
case IrCmd::BITNOT_UINT:
return "BITNOT_UINT";
case IrCmd::BITLSHIFT_UINT:
return "BITLSHIFT_UINT";
case IrCmd::BITRSHIFT_UINT:
return "BITRSHIFT_UINT";
case IrCmd::BITARSHIFT_UINT:
return "BITARSHIFT_UINT";
case IrCmd::BITLROTATE_UINT:
return "BITLROTATE_UINT";
case IrCmd::BITRROTATE_UINT:
return "BITRROTATE_UINT";
case IrCmd::BITCOUNTLZ_UINT:
return "BITCOUNTLZ_UINT";
case IrCmd::BITCOUNTRZ_UINT:
return "BITCOUNTRZ_UINT";
case IrCmd::INVOKE_LIBM:
return "INVOKE_LIBM";
}
LUAU_UNREACHABLE();

File diff suppressed because it is too large Load Diff

View File

@ -27,6 +27,7 @@ struct IrLoweringA64
IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers, NativeState& data, Proto* proto, IrFunction& function);
void lowerInst(IrInst& inst, uint32_t index, IrBlock& next);
void finishBlock();
bool hasError() const;
@ -34,13 +35,16 @@ struct IrLoweringA64
void jumpOrFallthrough(IrBlock& target, IrBlock& next);
// Operand data build helpers
// May emit data/address synthesis instructions
RegisterA64 tempDouble(IrOp op);
RegisterA64 tempInt(IrOp op);
RegisterA64 tempUint(IrOp op);
AddressA64 tempAddr(IrOp op, int offset);
// Operand data lookup helpers
RegisterA64 regOp(IrOp op) const;
// May emit restore instructions
RegisterA64 regOp(IrOp op);
// Operand data lookup helpers
IrConst constOp(IrOp op) const;
uint8_t tagOp(IrOp op) const;
bool boolOp(IrOp op) const;

View File

@ -28,10 +28,15 @@ IrLoweringX64::IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers,
, data(data)
, function(function)
, regs(build, function)
, valueTracker(function)
{
// In order to allocate registers during lowering, we need to know where instruction results are last used
updateLastUseLocations(function);
valueTracker.setRestoreCallack(&regs, [](void* context, IrInst& inst) {
((IrRegAllocX64*)context)->restore(inst, false);
});
build.align(kFunctionAlignment, X64::AlignmentDataX64::Ud2);
}
@ -58,6 +63,8 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
{
regs.currInstIdx = index;
valueTracker.beforeInstLowering(inst);
switch (inst.cmd)
{
case IrCmd::LOAD_TAG:
@ -206,7 +213,6 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
}
break;
case IrCmd::STORE_INT:
{
if (inst.b.kind == IrOpKind::Constant)
build.mov(luauRegValueInt(vmRegOp(inst.a)), intOp(inst.b));
else if (inst.b.kind == IrOpKind::Inst)
@ -214,14 +220,11 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
else
LUAU_ASSERT(!"Unsupported instruction form");
break;
}
case IrCmd::STORE_VECTOR:
{
storeDoubleAsFloat(luauRegValueVector(vmRegOp(inst.a), 0), inst.b);
storeDoubleAsFloat(luauRegValueVector(vmRegOp(inst.a), 1), inst.c);
storeDoubleAsFloat(luauRegValueVector(vmRegOp(inst.a), 2), inst.d);
break;
}
case IrCmd::STORE_TVALUE:
if (inst.a.kind == IrOpKind::VmReg)
build.vmovups(luauReg(vmRegOp(inst.a)), regOp(inst.b));
@ -236,7 +239,9 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
case IrCmd::ADD_INT:
inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});
if (inst.regX64 == regOp(inst.a) && intOp(inst.b) == 1)
if (inst.b.kind == IrOpKind::Inst)
build.lea(inst.regX64, addr[regOp(inst.a) + regOp(inst.b)]);
else if (inst.regX64 == regOp(inst.a) && intOp(inst.b) == 1)
build.inc(inst.regX64);
else if (inst.regX64 == regOp(inst.a))
build.add(inst.regX64, intOp(inst.b));
@ -525,6 +530,18 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
build.jcc(ConditionX64::Equal, labelOp(inst.c));
jumpOrFallthrough(blockOp(inst.d), next);
break;
case IrCmd::JUMP_LT_INT:
build.cmp(regOp(inst.a), intOp(inst.b));
build.jcc(ConditionX64::Less, labelOp(inst.c));
jumpOrFallthrough(blockOp(inst.d), next);
break;
case IrCmd::JUMP_GE_UINT:
build.cmp(regOp(inst.a), uintOp(inst.b));
build.jcc(ConditionX64::AboveEqual, labelOp(inst.c));
jumpOrFallthrough(blockOp(inst.d), next);
break;
case IrCmd::JUMP_EQ_POINTER:
build.cmp(regOp(inst.a), regOp(inst.b));
@ -626,6 +643,21 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
build.vcvtsi2sd(inst.regX64, inst.regX64, regOp(inst.a));
break;
case IrCmd::UINT_TO_NUM:
inst.regX64 = regs.allocReg(SizeX64::xmmword, index);
build.vcvtsi2sd(inst.regX64, inst.regX64, qwordReg(regOp(inst.a)));
break;
case IrCmd::NUM_TO_INT:
inst.regX64 = regs.allocReg(SizeX64::dword, index);
build.vcvttsd2si(inst.regX64, memRegDoubleOp(inst.a));
break;
case IrCmd::NUM_TO_UINT:
inst.regX64 = regs.allocReg(SizeX64::dword, index);
build.vcvttsd2si(qwordReg(inst.regX64), memRegDoubleOp(inst.a));
break;
case IrCmd::ADJUST_STACK_TO_REG:
{
ScopedRegX64 tmp{regs, SizeX64::qword};
@ -1106,6 +1138,174 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
emitFallback(regs, build, data, LOP_FORGPREP, uintOp(inst.a));
jumpOrFallthrough(blockOp(inst.c), next);
break;
case IrCmd::BITAND_UINT:
inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});
if (inst.regX64 != regOp(inst.a))
build.mov(inst.regX64, regOp(inst.a));
build.and_(inst.regX64, memRegUintOp(inst.b));
break;
case IrCmd::BITXOR_UINT:
inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});
if (inst.regX64 != regOp(inst.a))
build.mov(inst.regX64, regOp(inst.a));
build.xor_(inst.regX64, memRegUintOp(inst.b));
break;
case IrCmd::BITOR_UINT:
inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});
if (inst.regX64 != regOp(inst.a))
build.mov(inst.regX64, regOp(inst.a));
build.or_(inst.regX64, memRegUintOp(inst.b));
break;
case IrCmd::BITNOT_UINT:
inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});
if (inst.regX64 != regOp(inst.a))
build.mov(inst.regX64, regOp(inst.a));
build.not_(inst.regX64);
break;
case IrCmd::BITLSHIFT_UINT:
{
// Custom bit shift value can only be placed in cl
ScopedRegX64 shiftTmp{regs, regs.takeReg(ecx, kInvalidInstIdx)};
inst.regX64 = regs.allocReg(SizeX64::dword, index);
build.mov(shiftTmp.reg, memRegUintOp(inst.b));
if (inst.a.kind == IrOpKind::Constant)
build.mov(inst.regX64, uintOp(inst.a));
else if (inst.regX64 != regOp(inst.a))
build.mov(inst.regX64, regOp(inst.a));
build.shl(inst.regX64, byteReg(shiftTmp.reg));
break;
}
case IrCmd::BITRSHIFT_UINT:
{
// Custom bit shift value can only be placed in cl
ScopedRegX64 shiftTmp{regs, regs.takeReg(ecx, kInvalidInstIdx)};
inst.regX64 = regs.allocReg(SizeX64::dword, index);
build.mov(shiftTmp.reg, memRegUintOp(inst.b));
if (inst.a.kind == IrOpKind::Constant)
build.mov(inst.regX64, uintOp(inst.a));
else if (inst.regX64 != regOp(inst.a))
build.mov(inst.regX64, regOp(inst.a));
build.shr(inst.regX64, byteReg(shiftTmp.reg));
break;
}
case IrCmd::BITARSHIFT_UINT:
{
// Custom bit shift value can only be placed in cl
ScopedRegX64 shiftTmp{regs, regs.takeReg(ecx, kInvalidInstIdx)};
inst.regX64 = regs.allocReg(SizeX64::dword, index);
build.mov(shiftTmp.reg, memRegUintOp(inst.b));
if (inst.a.kind == IrOpKind::Constant)
build.mov(inst.regX64, uintOp(inst.a));
else if (inst.regX64 != regOp(inst.a))
build.mov(inst.regX64, regOp(inst.a));
build.sar(inst.regX64, byteReg(shiftTmp.reg));
break;
}
case IrCmd::BITLROTATE_UINT:
{
// Custom bit shift value can only be placed in cl
ScopedRegX64 shiftTmp{regs, regs.takeReg(ecx, kInvalidInstIdx)};
inst.regX64 = regs.allocReg(SizeX64::dword, index);
build.mov(shiftTmp.reg, memRegUintOp(inst.b));
if (inst.a.kind == IrOpKind::Constant)
build.mov(inst.regX64, uintOp(inst.a));
else if (inst.regX64 != regOp(inst.a))
build.mov(inst.regX64, regOp(inst.a));
build.rol(inst.regX64, byteReg(shiftTmp.reg));
break;
}
case IrCmd::BITRROTATE_UINT:
{
// Custom bit shift value can only be placed in cl
ScopedRegX64 shiftTmp{regs, regs.takeReg(ecx, kInvalidInstIdx)};
inst.regX64 = regs.allocReg(SizeX64::dword, index);
build.mov(shiftTmp.reg, memRegUintOp(inst.b));
if (inst.a.kind == IrOpKind::Constant)
build.mov(inst.regX64, uintOp(inst.a));
else if (inst.regX64 != regOp(inst.a))
build.mov(inst.regX64, regOp(inst.a));
build.ror(inst.regX64, byteReg(shiftTmp.reg));
break;
}
case IrCmd::BITCOUNTLZ_UINT:
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});
Label zero, exit;
build.test(regOp(inst.a), regOp(inst.a));
build.jcc(ConditionX64::Equal, zero);
build.bsr(inst.regX64, regOp(inst.a));
build.xor_(inst.regX64, 0x1f);
build.jmp(exit);
build.setLabel(zero);
build.mov(inst.regX64, 32);
build.setLabel(exit);
break;
}
case IrCmd::BITCOUNTRZ_UINT:
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::dword, index, {inst.a});
Label zero, exit;
build.test(regOp(inst.a), regOp(inst.a));
build.jcc(ConditionX64::Equal, zero);
build.bsf(inst.regX64, regOp(inst.a));
build.jmp(exit);
build.setLabel(zero);
build.mov(inst.regX64, 32);
build.setLabel(exit);
break;
}
case IrCmd::INVOKE_LIBM:
{
LuauBuiltinFunction bfid = LuauBuiltinFunction(uintOp(inst.a));
IrCallWrapperX64 callWrap(regs, build, index);
callWrap.addArgument(SizeX64::xmmword, memRegDoubleOp(inst.b), inst.b);
if (inst.c.kind != IrOpKind::None)
callWrap.addArgument(SizeX64::xmmword, memRegDoubleOp(inst.c), inst.c);
callWrap.call(qword[rNativeContext + getNativeContextOffset(bfid)]);
inst.regX64 = regs.takeReg(xmm0, index);
break;
}
// Pseudo instructions
case IrCmd::NOP:
@ -1114,9 +1314,16 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
break;
}
valueTracker.afterInstLowering(inst, index);
regs.freeLastUseRegs(inst, index);
}
void IrLoweringX64::finishBlock()
{
regs.assertNoSpills();
}
bool IrLoweringX64::hasError() const
{
// If register allocator had to use more stack slots than we have available, this function can't run natively
@ -1156,6 +1363,21 @@ OperandX64 IrLoweringX64::memRegDoubleOp(IrOp op)
return noreg;
}
OperandX64 IrLoweringX64::memRegUintOp(IrOp op)
{
switch (op.kind)
{
case IrOpKind::Inst:
return regOp(op);
case IrOpKind::Constant:
return OperandX64(uintOp(op));
default:
LUAU_ASSERT(!"Unsupported operand kind");
}
return noreg;
}
OperandX64 IrLoweringX64::memRegTagOp(IrOp op)
{
switch (op.kind)
@ -1177,7 +1399,7 @@ RegisterX64 IrLoweringX64::regOp(IrOp op)
{
IrInst& inst = function.instOp(op);
if (inst.spilled)
if (inst.spilled || inst.needsReload)
regs.restore(inst, false);
LUAU_ASSERT(inst.regX64 != noreg);

View File

@ -5,6 +5,8 @@
#include "Luau/IrData.h"
#include "Luau/IrRegAllocX64.h"
#include "IrValueLocationTracking.h"
#include <vector>
struct Proto;
@ -26,6 +28,7 @@ struct IrLoweringX64
IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers, NativeState& data, IrFunction& function);
void lowerInst(IrInst& inst, uint32_t index, IrBlock& next);
void finishBlock();
bool hasError() const;
@ -36,6 +39,7 @@ struct IrLoweringX64
// Operand data lookup helpers
OperandX64 memRegDoubleOp(IrOp op);
OperandX64 memRegUintOp(IrOp op);
OperandX64 memRegTagOp(IrOp op);
RegisterX64 regOp(IrOp op);
@ -56,6 +60,8 @@ struct IrLoweringX64
IrFunction& function;
IrRegAllocX64 regs;
IrValueLocationTracking valueTracker;
};
} // namespace X64

View File

@ -1,7 +1,12 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "IrRegAllocA64.h"
#include "Luau/AssemblyBuilderA64.h"
#include "BitUtils.h"
#include "EmitCommonA64.h"
#include <string.h>
namespace Luau
{
@ -10,6 +15,32 @@ namespace CodeGen
namespace A64
{
static int allocSpill(uint32_t& free, KindA64 kind)
{
LUAU_ASSERT(kStackSize <= 256); // to support larger stack frames, we need to ensure qN is allocated at 16b boundary to fit in ldr/str encoding
// qN registers use two consecutive slots
int slot = countrz(kind == KindA64::q ? free & (free >> 1) : free);
if (slot == 32)
return -1;
uint32_t mask = (kind == KindA64::q ? 3u : 1u) << slot;
LUAU_ASSERT((free & mask) == mask);
free &= ~mask;
return slot;
}
static void freeSpill(uint32_t& free, KindA64 kind, uint8_t slot)
{
// qN registers use two consecutive slots
uint32_t mask = (kind == KindA64::q ? 3u : 1u) << slot;
LUAU_ASSERT((free & mask) == 0);
free |= mask;
}
IrRegAllocA64::IrRegAllocA64(IrFunction& function, std::initializer_list<std::pair<RegisterA64, RegisterA64>> regs)
: function(function)
{
@ -25,9 +56,15 @@ IrRegAllocA64::IrRegAllocA64(IrFunction& function, std::initializer_list<std::pa
gpr.free = gpr.base;
simd.free = simd.base;
memset(gpr.defs, -1, sizeof(gpr.defs));
memset(simd.defs, -1, sizeof(simd.defs));
LUAU_ASSERT(kSpillSlots <= 32);
freeSpillSlots = (kSpillSlots == 32) ? ~0u : (1u << kSpillSlots) - 1;
}
RegisterA64 IrRegAllocA64::allocReg(KindA64 kind)
RegisterA64 IrRegAllocA64::allocReg(KindA64 kind, uint32_t index)
{
Set& set = getSet(kind);
@ -37,10 +74,11 @@ RegisterA64 IrRegAllocA64::allocReg(KindA64 kind)
return noreg;
}
int index = 31 - countlz(set.free);
set.free &= ~(1u << index);
int reg = 31 - countlz(set.free);
set.free &= ~(1u << reg);
set.defs[reg] = index;
return RegisterA64{kind, uint8_t(index)};
return RegisterA64{kind, uint8_t(reg)};
}
RegisterA64 IrRegAllocA64::allocTemp(KindA64 kind)
@ -53,12 +91,13 @@ RegisterA64 IrRegAllocA64::allocTemp(KindA64 kind)
return noreg;
}
int index = 31 - countlz(set.free);
int reg = 31 - countlz(set.free);
set.free &= ~(1u << index);
set.temp |= 1u << index;
set.free &= ~(1u << reg);
set.temp |= 1u << reg;
LUAU_ASSERT(set.defs[reg] == kInvalidInstIdx);
return RegisterA64{kind, uint8_t(index)};
return RegisterA64{kind, uint8_t(reg)};
}
RegisterA64 IrRegAllocA64::allocReuse(KindA64 kind, uint32_t index, std::initializer_list<IrOp> oprefs)
@ -70,16 +109,33 @@ RegisterA64 IrRegAllocA64::allocReuse(KindA64 kind, uint32_t index, std::initial
IrInst& source = function.instructions[op.index];
if (source.lastUse == index && !source.reusedReg)
if (source.lastUse == index && !source.reusedReg && !source.spilled && source.regA64 != noreg)
{
LUAU_ASSERT(source.regA64.kind == kind);
Set& set = getSet(kind);
LUAU_ASSERT(set.defs[source.regA64.index] == op.index);
set.defs[source.regA64.index] = index;
source.reusedReg = true;
return source.regA64;
}
}
return allocReg(kind);
return allocReg(kind, index);
}
RegisterA64 IrRegAllocA64::takeReg(RegisterA64 reg, uint32_t index)
{
Set& set = getSet(reg.kind);
LUAU_ASSERT(set.free & (1u << reg.index));
LUAU_ASSERT(set.defs[reg.index] == kInvalidInstIdx);
set.free &= ~(1u << reg.index);
set.defs[reg.index] = index;
return reg;
}
void IrRegAllocA64::freeReg(RegisterA64 reg)
@ -88,13 +144,18 @@ void IrRegAllocA64::freeReg(RegisterA64 reg)
LUAU_ASSERT((set.base & (1u << reg.index)) != 0);
LUAU_ASSERT((set.free & (1u << reg.index)) == 0);
LUAU_ASSERT((set.temp & (1u << reg.index)) == 0);
set.free |= 1u << reg.index;
set.defs[reg.index] = kInvalidInstIdx;
}
void IrRegAllocA64::freeLastUseReg(IrInst& target, uint32_t index)
{
if (target.lastUse == index && !target.reusedReg)
{
LUAU_ASSERT(!target.spilled);
// Register might have already been freed if it had multiple uses inside a single instruction
if (target.regA64 == noreg)
return;
@ -130,19 +191,145 @@ void IrRegAllocA64::freeTempRegs()
simd.temp = 0;
}
void IrRegAllocA64::assertAllFree() const
size_t IrRegAllocA64::spill(AssemblyBuilderA64& build, uint32_t index, std::initializer_list<RegisterA64> live)
{
LUAU_ASSERT(gpr.free == gpr.base);
LUAU_ASSERT(simd.free == simd.base);
static const KindA64 sets[] = {KindA64::x, KindA64::q};
size_t start = spills.size();
for (RegisterA64 reg : live)
{
Set& set = getSet(reg.kind);
// make sure registers that we expect to survive past spill barrier are not allocated
// TODO: we need to handle this condition somehow in the future; if this fails, this likely means the caller has an aliasing hazard
LUAU_ASSERT(set.free & (1u << reg.index));
}
for (KindA64 kind : sets)
{
Set& set = getSet(kind);
// early-out
if (set.free == set.base)
continue;
// free all temp registers
LUAU_ASSERT((set.free & set.temp) == 0);
set.free |= set.temp;
set.temp = 0;
// spill all allocated registers unless they aren't used anymore
uint32_t regs = set.base & ~set.free;
while (regs)
{
int reg = 31 - countlz(regs);
uint32_t inst = set.defs[reg];
LUAU_ASSERT(inst != kInvalidInstIdx);
IrInst& def = function.instructions[inst];
LUAU_ASSERT(def.regA64.index == reg);
LUAU_ASSERT(!def.spilled);
LUAU_ASSERT(!def.reusedReg);
if (def.lastUse == index)
{
// instead of spilling the register to never reload it, we assume the register is not needed anymore
def.regA64 = noreg;
}
else
{
int slot = allocSpill(freeSpillSlots, def.regA64.kind);
LUAU_ASSERT(slot >= 0); // TODO: remember the error and fail lowering
Spill s = {inst, def.regA64, uint8_t(slot)};
spills.push_back(s);
def.spilled = true;
def.regA64 = noreg;
}
regs &= ~(1u << reg);
set.free |= 1u << reg;
set.defs[reg] = kInvalidInstIdx;
}
LUAU_ASSERT(set.free == set.base);
}
if (start < spills.size())
{
// TODO: use stp for consecutive slots
for (size_t i = start; i < spills.size(); ++i)
build.str(spills[i].origin, mem(sp, sSpillArea.data + spills[i].slot * 8));
}
return start;
}
void IrRegAllocA64::assertAllFreeExcept(RegisterA64 reg) const
void IrRegAllocA64::restore(AssemblyBuilderA64& build, size_t start)
{
const Set& set = const_cast<IrRegAllocA64*>(this)->getSet(reg.kind);
const Set& other = &set == &gpr ? simd : gpr;
LUAU_ASSERT(start <= spills.size());
LUAU_ASSERT(set.free == (set.base & ~(1u << reg.index)));
LUAU_ASSERT(other.free == other.base);
if (start < spills.size())
{
// TODO: use ldp for consecutive slots
for (size_t i = start; i < spills.size(); ++i)
build.ldr(spills[i].origin, mem(sp, sSpillArea.data + spills[i].slot * 8));
for (size_t i = start; i < spills.size(); ++i)
{
Spill s = spills[i]; // copy in case takeReg reallocates spills
IrInst& def = function.instructions[s.inst];
LUAU_ASSERT(def.spilled);
LUAU_ASSERT(def.regA64 == noreg);
def.spilled = false;
def.regA64 = takeReg(s.origin, s.inst);
freeSpill(freeSpillSlots, s.origin.kind, s.slot);
}
spills.resize(start);
}
}
// Reloads a single spilled value back into a register.
// NOTE: allocReg may pick a different register than the one the value
// originally occupied; callers must not rely on the previous assignment.
void IrRegAllocA64::restoreReg(AssemblyBuilderA64& build, IrInst& inst)
{
uint32_t index = function.getInstIndex(inst);
LUAU_ASSERT(inst.spilled);
LUAU_ASSERT(inst.regA64 == noreg);
// Linear scan over the active spill records to find this instruction's slot
for (size_t i = 0; i < spills.size(); ++i)
{
if (spills[i].inst == index)
{
Spill s = spills[i]; // copy in case allocReg reallocates spills
RegisterA64 reg = allocReg(s.origin.kind, index);
// Reload the value from its 8-byte slot in the stack spill area
build.ldr(reg, mem(sp, sSpillArea.data + s.slot * 8));
inst.spilled = false;
inst.regA64 = reg;
// Return the spill slot to the free pool
freeSpill(freeSpillSlots, reg.kind, s.slot);
// Unordered removal: overwrite with the last record and pop
spills[i] = spills.back();
spills.pop_back();
return;
}
}
LUAU_ASSERT(!"Expected to find a spill record");
}
// Sanity check: asserts that every previously spilled value has been restored
// (i.e. no spill records remain outstanding).
void IrRegAllocA64::assertNoSpills() const
{
LUAU_ASSERT(spills.empty());
}
IrRegAllocA64::Set& IrRegAllocA64::getSet(KindA64 kind)

View File

@ -6,6 +6,7 @@
#include <initializer_list>
#include <utility>
#include <vector>
namespace Luau
{
@ -14,14 +15,18 @@ namespace CodeGen
namespace A64
{
class AssemblyBuilderA64;
struct IrRegAllocA64
{
IrRegAllocA64(IrFunction& function, std::initializer_list<std::pair<RegisterA64, RegisterA64>> regs);
RegisterA64 allocReg(KindA64 kind);
RegisterA64 allocReg(KindA64 kind, uint32_t index);
RegisterA64 allocTemp(KindA64 kind);
RegisterA64 allocReuse(KindA64 kind, uint32_t index, std::initializer_list<IrOp> oprefs);
RegisterA64 takeReg(RegisterA64 reg, uint32_t index);
void freeReg(RegisterA64 reg);
void freeLastUseReg(IrInst& target, uint32_t index);
@ -29,10 +34,16 @@ struct IrRegAllocA64
void freeTempRegs();
void assertAllFree() const;
void assertAllFreeExcept(RegisterA64 reg) const;
// Spills all live registers that outlive current instruction; all allocated registers are assumed to be undefined
size_t spill(AssemblyBuilderA64& build, uint32_t index, std::initializer_list<RegisterA64> live = {});
IrFunction& function;
// Restores registers starting from the offset returned by spill(); all spills will be restored to the original registers
void restore(AssemblyBuilderA64& build, size_t start);
// Restores register for a single instruction; may not assign the previously used register!
void restoreReg(AssemblyBuilderA64& build, IrInst& inst);
void assertNoSpills() const;
struct Set
{
@ -44,11 +55,28 @@ struct IrRegAllocA64
// which subset of initial set is allocated as temporary
uint32_t temp = 0;
// which instruction is defining which register (for spilling); only valid if not free and not temp
uint32_t defs[32];
};
Set gpr, simd;
struct Spill
{
uint32_t inst;
RegisterA64 origin;
uint8_t slot;
};
Set& getSet(KindA64 kind);
IrFunction& function;
Set gpr, simd;
std::vector<Spill> spills;
// which 8-byte slots are free
uint32_t freeSpillSlots = 0;
};
} // namespace A64

View File

@ -69,7 +69,7 @@ RegisterX64 IrRegAllocX64::allocRegOrReuse(SizeX64 size, uint32_t instIdx, std::
IrInst& source = function.instructions[op.index];
if (source.lastUse == instIdx && !source.reusedReg && !source.spilled)
if (source.lastUse == instIdx && !source.reusedReg && !source.spilled && !source.needsReload)
{
// Not comparing size directly because we only need matching register set
if ((size == SizeX64::xmmword) != (source.regX64.size == SizeX64::xmmword))
@ -141,6 +141,8 @@ void IrRegAllocX64::freeLastUseReg(IrInst& target, uint32_t instIdx)
{
if (isLastUseReg(target, instIdx))
{
LUAU_ASSERT(!target.spilled && !target.needsReload);
// Register might have already been freed if it had multiple uses inside a single instruction
if (target.regX64 == noreg)
return;
@ -208,14 +210,17 @@ void IrRegAllocX64::preserve(IrInst& inst)
}
spill.stackSlot = uint8_t(i);
inst.spilled = true;
}
else
{
inst.needsReload = true;
}
spills.push_back(spill);
freeReg(inst.regX64);
inst.regX64 = noreg;
inst.spilled = true;
}
void IrRegAllocX64::restore(IrInst& inst, bool intoOriginalLocation)
@ -224,13 +229,14 @@ void IrRegAllocX64::restore(IrInst& inst, bool intoOriginalLocation)
for (size_t i = 0; i < spills.size(); i++)
{
const IrSpillX64& spill = spills[i];
if (spill.instIdx == instIdx)
if (spills[i].instIdx == instIdx)
{
RegisterX64 reg = intoOriginalLocation ? takeReg(spill.originalLoc, instIdx) : allocReg(spill.originalLoc.size, instIdx);
RegisterX64 reg = intoOriginalLocation ? takeReg(spills[i].originalLoc, instIdx) : allocReg(spills[i].originalLoc.size, instIdx);
OperandX64 restoreLocation = noreg;
// Previous call might have relocated the spill vector, so this reference can't be taken earlier
const IrSpillX64& spill = spills[i];
if (spill.stackSlot != kNoStackSlot)
{
restoreLocation = addr[sSpillArea + spill.stackSlot * 8];
@ -255,6 +261,7 @@ void IrRegAllocX64::restore(IrInst& inst, bool intoOriginalLocation)
inst.regX64 = reg;
inst.spilled = false;
inst.needsReload = false;
spills[i] = spills.back();
spills.pop_back();
@ -317,29 +324,9 @@ unsigned IrRegAllocX64::findSpillStackSlot(IrValueKind valueKind)
IrOp IrRegAllocX64::getRestoreOp(const IrInst& inst) const
{
switch (inst.cmd)
{
case IrCmd::LOAD_TAG:
case IrCmd::LOAD_POINTER:
case IrCmd::LOAD_DOUBLE:
case IrCmd::LOAD_INT:
case IrCmd::LOAD_TVALUE:
{
IrOp location = inst.a;
// Might have an alternative location
if (IrOp alternative = function.findRestoreOp(inst); alternative.kind != IrOpKind::None)
location = alternative;
if (location.kind == IrOpKind::VmReg || location.kind == IrOpKind::VmConst)
if (IrOp location = function.findRestoreOp(inst); location.kind == IrOpKind::VmReg || location.kind == IrOpKind::VmConst)
return location;
break;
}
default:
break;
}
return IrOp();
}
@ -350,22 +337,26 @@ bool IrRegAllocX64::hasRestoreOp(const IrInst& inst) const
OperandX64 IrRegAllocX64::getRestoreAddress(const IrInst& inst, IrOp restoreOp)
{
switch (inst.cmd)
switch (getCmdValueKind(inst.cmd))
{
case IrCmd::LOAD_TAG:
case IrValueKind::Unknown:
case IrValueKind::None:
LUAU_ASSERT(!"Invalid operand restore value kind");
break;
case IrValueKind::Tag:
return restoreOp.kind == IrOpKind::VmReg ? luauRegTag(vmRegOp(restoreOp)) : luauConstantTag(vmConstOp(restoreOp));
case IrCmd::LOAD_POINTER:
case IrCmd::LOAD_DOUBLE:
return restoreOp.kind == IrOpKind::VmReg ? luauRegValue(vmRegOp(restoreOp)) : luauConstantValue(vmConstOp(restoreOp));
case IrCmd::LOAD_INT:
case IrValueKind::Int:
LUAU_ASSERT(restoreOp.kind == IrOpKind::VmReg);
return luauRegValueInt(vmRegOp(restoreOp));
case IrCmd::LOAD_TVALUE:
case IrValueKind::Pointer:
return restoreOp.kind == IrOpKind::VmReg ? luauRegValue(vmRegOp(restoreOp)) : luauConstantValue(vmConstOp(restoreOp));
case IrValueKind::Double:
return restoreOp.kind == IrOpKind::VmReg ? luauRegValue(vmRegOp(restoreOp)) : luauConstantValue(vmConstOp(restoreOp));
case IrValueKind::Tvalue:
return restoreOp.kind == IrOpKind::VmReg ? luauReg(vmRegOp(restoreOp)) : luauConstant(vmConstOp(restoreOp));
default:
break;
}
LUAU_ASSERT(!"Failed to find restore operand location");
return noreg;
}

View File

@ -9,6 +9,7 @@
// TODO: when nresults is less than our actual result count, we can skip computing/writing unused results
static const int kMinMaxUnrolledParams = 5;
static const int kBit32BinaryOpUnrolledParams = 5;
namespace Luau
{
@ -33,6 +34,25 @@ BuiltinImplResult translateBuiltinNumberToNumber(
return {BuiltinImplType::UsesFallback, 1};
}
// (number) -> number
// Lowers a one-argument math builtin by dispatching to its libm entry point
// via INVOKE_LIBM; the bfid constant selects the libm function at lowering time.
// Returns {None, -1} when the call shape cannot be translated; a non-number
// argument jumps to the interpreter fallback.
BuiltinImplResult translateBuiltinNumberToNumberLibm(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
build.loadAndCheckTag(build.vmReg(arg), LUA_TNUMBER, fallback);
IrOp va = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(arg));
IrOp res = build.inst(IrCmd::INVOKE_LIBM, build.constUint(bfid), va);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), res);
// When the result overwrites the argument register, its tag is already a number
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
// (number, number, ...) -> number
BuiltinImplResult translateBuiltin2NumberToNumber(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
@ -50,6 +70,28 @@ BuiltinImplResult translateBuiltin2NumberToNumber(
return {BuiltinImplType::UsesFallback, 1};
}
// (number, number) -> number
// Lowers a two-argument math builtin (e.g. fmod, atan2) by dispatching to its
// libm entry point via INVOKE_LIBM. Both arguments must be numbers; otherwise
// execution jumps to the interpreter fallback.
BuiltinImplResult translateBuiltin2NumberToNumberLibm(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
build.loadAndCheckTag(build.vmReg(arg), LUA_TNUMBER, fallback);
build.loadAndCheckTag(args, LUA_TNUMBER, fallback);
IrOp va = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(arg));
IrOp vb = build.inst(IrCmd::LOAD_DOUBLE, args);
IrOp res = build.inst(IrCmd::INVOKE_LIBM, build.constUint(bfid), va, vb);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), res);
// When the result overwrites the first argument register, the tag is already a number
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
// (number, ...) -> (number, number)
BuiltinImplResult translateBuiltinNumberTo2Number(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
@ -154,6 +196,15 @@ BuiltinImplResult translateBuiltinMathLog(
build.loadAndCheckTag(build.vmReg(arg), LUA_TNUMBER, fallback);
if (fcId == LBF_MATH_LOG10)
{
IrOp va = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(arg));
IrOp res = build.inst(IrCmd::INVOKE_LIBM, build.constUint(fcId), va);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), res);
}
else
build.inst(IrCmd::FASTCALL, build.constUint(fcId), build.vmReg(ra), build.vmReg(arg), args, build.constInt(fcParams), build.constInt(1));
if (ra != arg)
@ -315,6 +366,356 @@ BuiltinImplResult translateBuiltinTypeof(IrBuilder& build, int nparams, int ra,
return {BuiltinImplType::UsesFallback, 1};
}
// (number, number, ...) -> number|boolean
// Lowers bit32.band/bor/bxor/btest with 2..kBit32BinaryOpUnrolledParams
// arguments. btest shares the AND reduction with band; only the result
// handling differs (boolean store instead of a numeric one).
BuiltinImplResult translateBuiltinBit32BinaryOp(
    IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
    if (nparams < 2 || nparams > kBit32BinaryOpUnrolledParams || nresults > 1)
        return {BuiltinImplType::None, -1};

    // Every argument must be a number; otherwise bail out to the C fallback
    build.loadAndCheckTag(build.vmReg(arg), LUA_TNUMBER, fallback);
    build.loadAndCheckTag(args, LUA_TNUMBER, fallback);

    for (int i = 3; i <= nparams; ++i)
        build.loadAndCheckTag(build.vmReg(vmRegOp(args) + (i - 2)), LUA_TNUMBER, fallback);

    IrOp lhsNum = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(arg));
    IrOp rhsNum = build.inst(IrCmd::LOAD_DOUBLE, args);

    IrOp lhsBits = build.inst(IrCmd::NUM_TO_UINT, lhsNum);
    IrOp rhsBits = build.inst(IrCmd::NUM_TO_UINT, rhsNum);

    IrCmd cmd = IrCmd::NOP;
    switch (bfid)
    {
    case LBF_BIT32_BAND:
    case LBF_BIT32_BTEST:
        cmd = IrCmd::BITAND_UINT;
        break;
    case LBF_BIT32_BXOR:
        cmd = IrCmd::BITXOR_UINT;
        break;
    case LBF_BIT32_BOR:
        cmd = IrCmd::BITOR_UINT;
        break;
    default:
        break;
    }

    LUAU_ASSERT(cmd != IrCmd::NOP);

    IrOp acc = build.inst(cmd, lhsBits, rhsBits);

    // Fold any remaining unrolled arguments into the accumulator
    for (int i = 3; i <= nparams; ++i)
    {
        IrOp extraNum = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(vmRegOp(args) + (i - 2)));
        IrOp extraBits = build.inst(IrCmd::NUM_TO_UINT, extraNum);
        res = acc; // keep emission order identical: reduce left-to-right
        acc = build.inst(cmd, acc, extraBits);
    }

    if (bfid == LBF_BIT32_BTEST)
    {
        // btest returns a boolean: branch on the AND reduction being zero
        IrOp falsey = build.block(IrBlockKind::Internal);
        IrOp truthy = build.block(IrBlockKind::Internal);
        IrOp exit = build.block(IrBlockKind::Internal);
        build.inst(IrCmd::JUMP_EQ_INT, acc, build.constInt(0), falsey, truthy);

        build.beginBlock(falsey);
        build.inst(IrCmd::STORE_INT, build.vmReg(ra), build.constInt(0));
        build.inst(IrCmd::JUMP, exit);

        build.beginBlock(truthy);
        build.inst(IrCmd::STORE_INT, build.vmReg(ra), build.constInt(1));
        build.inst(IrCmd::JUMP, exit);

        build.beginBlock(exit);
        build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TBOOLEAN));
    }
    else
    {
        IrOp resultNum = build.inst(IrCmd::UINT_TO_NUM, acc);
        build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), resultNum);

        // Tag is already a number when the result overwrites the first argument
        if (ra != arg)
            build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
    }

    return {BuiltinImplType::UsesFallback, 1};
}
// (number) -> number
// Lowers bit32.bnot: convert the argument to uint32, complement every bit,
// and convert the result back to a double.
BuiltinImplResult translateBuiltinBit32Bnot(
    IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
    if (nparams < 1 || nresults > 1)
        return {BuiltinImplType::None, -1};

    build.loadAndCheckTag(build.vmReg(arg), LUA_TNUMBER, fallback);

    IrOp inputNum = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(arg));
    IrOp inputBits = build.inst(IrCmd::NUM_TO_UINT, inputNum);
    IrOp inverted = build.inst(IrCmd::BITNOT_UINT, inputBits);
    IrOp resultNum = build.inst(IrCmd::UINT_TO_NUM, inverted);
    build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), resultNum);

    // Tag is already a number when the result overwrites the argument register
    if (ra != arg)
        build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));

    return {BuiltinImplType::UsesFallback, 1};
}
// (number, number) -> number
// Lowers bit32.lshift/rshift/arshift. Shift amounts outside [0, 31] are sent
// to the interpreter fallback rather than being handled inline.
BuiltinImplResult translateBuiltinBit32Shift(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
IrOp block = build.block(IrBlockKind::Internal);
build.loadAndCheckTag(build.vmReg(arg), LUA_TNUMBER, fallback);
build.loadAndCheckTag(args, LUA_TNUMBER, fallback);
IrOp va = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(arg));
IrOp vb = build.inst(IrCmd::LOAD_DOUBLE, args);
IrOp vaui = build.inst(IrCmd::NUM_TO_UINT, va);
IrOp vbi = build.inst(IrCmd::NUM_TO_INT, vb);
// Unsigned comparison of the int shift amount also rejects negative values
// (they wrap to large unsigned numbers), so one check covers both bounds
build.inst(IrCmd::JUMP_GE_UINT, vbi, build.constUint(32), fallback, block);
build.beginBlock(block);
IrCmd cmd = IrCmd::NOP;
if (bfid == LBF_BIT32_LSHIFT)
cmd = IrCmd::BITLSHIFT_UINT;
else if (bfid == LBF_BIT32_RSHIFT)
cmd = IrCmd::BITRSHIFT_UINT;
else if (bfid == LBF_BIT32_ARSHIFT)
cmd = IrCmd::BITARSHIFT_UINT;
LUAU_ASSERT(cmd != IrCmd::NOP);
IrOp shift = build.inst(cmd, vaui, vbi);
IrOp value = build.inst(IrCmd::UINT_TO_NUM, shift);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
// Tag is already a number when the result overwrites the first argument
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
// (number, number) -> number
// Lowers bit32.lrotate/rrotate. Unlike the shift builtins, no range check is
// emitted for the rotation count here.
BuiltinImplResult translateBuiltinBit32Rotate(
    IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
    if (nparams < 2 || nresults > 1)
        return {BuiltinImplType::None, -1};

    build.loadAndCheckTag(build.vmReg(arg), LUA_TNUMBER, fallback);
    build.loadAndCheckTag(args, LUA_TNUMBER, fallback);

    IrOp valueNum = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(arg));
    IrOp countNum = build.inst(IrCmd::LOAD_DOUBLE, args);

    IrOp valueBits = build.inst(IrCmd::NUM_TO_UINT, valueNum);
    IrOp count = build.inst(IrCmd::NUM_TO_INT, countNum);

    IrCmd rotateCmd = (bfid == LBF_BIT32_LROTATE) ? IrCmd::BITLROTATE_UINT : IrCmd::BITRROTATE_UINT;
    IrOp rotated = build.inst(rotateCmd, valueBits, count);

    IrOp resultNum = build.inst(IrCmd::UINT_TO_NUM, rotated);
    build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), resultNum);

    // Tag is already a number when the result overwrites the first argument
    if (ra != arg)
        build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));

    return {BuiltinImplType::UsesFallback, 1};
}
// (number, number[, number]) -> number
// Lowers bit32.extract(n, f [, w]): the two-argument form extracts a single
// bit at position f; the three-argument form extracts a w-bit wide field
// starting at bit f. Invalid field/width combinations jump to the fallback.
BuiltinImplResult translateBuiltinBit32Extract(
    IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
    if (nparams < 2 || nresults > 1)
        return {BuiltinImplType::None, -1};

    build.loadAndCheckTag(build.vmReg(arg), LUA_TNUMBER, fallback);
    build.loadAndCheckTag(args, LUA_TNUMBER, fallback);

    IrOp va = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(arg));
    IrOp vb = build.inst(IrCmd::LOAD_DOUBLE, args);

    IrOp n = build.inst(IrCmd::NUM_TO_UINT, va);
    IrOp f = build.inst(IrCmd::NUM_TO_INT, vb);

    IrOp value;
    if (nparams == 2)
    {
        IrOp block = build.block(IrBlockKind::Internal);
        // Unsigned comparison rejects both f >= 32 and negative f in one check
        build.inst(IrCmd::JUMP_GE_UINT, f, build.constUint(32), fallback, block);
        build.beginBlock(block);

        // TODO: this can be optimized using a bit-select instruction (bt on x86)
        IrOp shift = build.inst(IrCmd::BITRSHIFT_UINT, n, f);
        value = build.inst(IrCmd::BITAND_UINT, shift, build.constUint(1));
    }
    else
    {
        // Use vmRegOp for consecutive argument registers for consistency with
        // translateBuiltinBit32BinaryOp; it also validates that args is a VmReg
        build.loadAndCheckTag(build.vmReg(vmRegOp(args) + 1), LUA_TNUMBER, fallback);
        IrOp vc = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(vmRegOp(args) + 1));
        IrOp w = build.inst(IrCmd::NUM_TO_INT, vc);

        // Validate 0 <= f, 1 <= w and f + w <= 32; otherwise fall back
        IrOp block1 = build.block(IrBlockKind::Internal);
        build.inst(IrCmd::JUMP_LT_INT, f, build.constInt(0), fallback, block1);
        build.beginBlock(block1);

        IrOp block2 = build.block(IrBlockKind::Internal);
        build.inst(IrCmd::JUMP_LT_INT, w, build.constInt(1), fallback, block2);
        build.beginBlock(block2);

        IrOp block3 = build.block(IrBlockKind::Internal);
        IrOp fw = build.inst(IrCmd::ADD_INT, f, w);
        build.inst(IrCmd::JUMP_LT_INT, fw, build.constInt(33), block3, fallback);
        build.beginBlock(block3);

        // m = mask of the w low bits: ~(0xfffffffe << (w - 1))
        IrOp shift = build.inst(IrCmd::BITLSHIFT_UINT, build.constUint(0xfffffffe), build.inst(IrCmd::SUB_INT, w, build.constInt(1)));
        IrOp m = build.inst(IrCmd::BITNOT_UINT, shift);

        IrOp nf = build.inst(IrCmd::BITRSHIFT_UINT, n, f);
        value = build.inst(IrCmd::BITAND_UINT, nf, m);
    }

    build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), build.inst(IrCmd::UINT_TO_NUM, value));

    // Tag is already a number when the result overwrites the first argument
    if (ra != arg)
        build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));

    return {BuiltinImplType::UsesFallback, 1};
}
// (number, constant) -> number
// Lowers bit32.extract when field/width are a compile-time constant
// (LBF_BIT32_EXTRACTK); 'args' holds a constant double set up by
// translateFastCallN from the function's constant table, so the mask and
// shift are computed at lowering time instead of being emitted as IR.
BuiltinImplResult translateBuiltinBit32ExtractK(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
build.loadAndCheckTag(build.vmReg(arg), LUA_TNUMBER, fallback);
IrOp va = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(arg));
IrOp n = build.inst(IrCmd::NUM_TO_UINT, va);
// The constant encodes the field position in the low 5 bits and width-1 above
double a2 = build.function.doubleOp(args);
int fw = int(a2);
int f = fw & 31;
int w1 = fw >> 5;
// m selects the (w1 + 1) low bits of the shifted value
uint32_t m = ~(0xfffffffeu << w1);
IrOp nf = build.inst(IrCmd::BITRSHIFT_UINT, n, build.constUint(f));
IrOp and_ = build.inst(IrCmd::BITAND_UINT, nf, build.constUint(m));
IrOp value = build.inst(IrCmd::UINT_TO_NUM, and_);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), value);
// Tag is already a number when the result overwrites the argument register
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
}
// (number) -> number
// Lowers bit32.countlz/countrz via the dedicated bit-count IR instructions.
BuiltinImplResult translateBuiltinBit32Countz(
    IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
    if (nparams < 1 || nresults > 1)
        return {BuiltinImplType::None, -1};

    build.loadAndCheckTag(build.vmReg(arg), LUA_TNUMBER, fallback);

    IrOp inputNum = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(arg));
    IrOp inputBits = build.inst(IrCmd::NUM_TO_UINT, inputNum);

    IrCmd countCmd = (bfid == LBF_BIT32_COUNTLZ) ? IrCmd::BITCOUNTLZ_UINT : IrCmd::BITCOUNTRZ_UINT;
    IrOp counted = build.inst(countCmd, inputBits);

    IrOp resultNum = build.inst(IrCmd::UINT_TO_NUM, counted);
    build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), resultNum);

    // Tag is already a number when the result overwrites the argument register
    if (ra != arg)
        build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));

    return {BuiltinImplType::UsesFallback, 1};
}
// (number, number, number[, number]) -> number
// Lowers bit32.replace(n, v, f [, w]): replaces the w-bit field of n starting
// at bit f with the low bits of v (single bit when w is omitted). Invalid
// field/width combinations jump to the fallback.
BuiltinImplResult translateBuiltinBit32Replace(
    IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
    if (nparams < 3 || nresults > 1)
        return {BuiltinImplType::None, -1};

    build.loadAndCheckTag(build.vmReg(arg), LUA_TNUMBER, fallback);
    build.loadAndCheckTag(args, LUA_TNUMBER, fallback);
    // Use vmRegOp for consecutive argument registers for consistency with
    // translateBuiltinBit32BinaryOp; it also validates that args is a VmReg
    build.loadAndCheckTag(build.vmReg(vmRegOp(args) + 1), LUA_TNUMBER, fallback);

    IrOp va = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(arg));
    IrOp vb = build.inst(IrCmd::LOAD_DOUBLE, args);
    IrOp vc = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(vmRegOp(args) + 1));

    IrOp n = build.inst(IrCmd::NUM_TO_UINT, va);
    IrOp v = build.inst(IrCmd::NUM_TO_UINT, vb);
    IrOp f = build.inst(IrCmd::NUM_TO_INT, vc);

    IrOp value;
    if (nparams == 3)
    {
        IrOp block = build.block(IrBlockKind::Internal);
        // Unsigned comparison rejects both f >= 32 and negative f in one check
        build.inst(IrCmd::JUMP_GE_UINT, f, build.constUint(32), fallback, block);
        build.beginBlock(block);

        // TODO: this can be optimized using a bit-select instruction (btr on x86)
        IrOp m = build.constUint(1);
        IrOp shift = build.inst(IrCmd::BITLSHIFT_UINT, m, f);
        IrOp not_ = build.inst(IrCmd::BITNOT_UINT, shift);
        IrOp lhs = build.inst(IrCmd::BITAND_UINT, n, not_);
        IrOp vm = build.inst(IrCmd::BITAND_UINT, v, m);
        IrOp rhs = build.inst(IrCmd::BITLSHIFT_UINT, vm, f);
        value = build.inst(IrCmd::BITOR_UINT, lhs, rhs);
    }
    else
    {
        build.loadAndCheckTag(build.vmReg(vmRegOp(args) + 2), LUA_TNUMBER, fallback);
        IrOp vd = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(vmRegOp(args) + 2));
        IrOp w = build.inst(IrCmd::NUM_TO_INT, vd);

        // Validate 0 <= f, 1 <= w and f + w <= 32; otherwise fall back
        IrOp block1 = build.block(IrBlockKind::Internal);
        build.inst(IrCmd::JUMP_LT_INT, f, build.constInt(0), fallback, block1);
        build.beginBlock(block1);

        IrOp block2 = build.block(IrBlockKind::Internal);
        build.inst(IrCmd::JUMP_LT_INT, w, build.constInt(1), fallback, block2);
        build.beginBlock(block2);

        IrOp block3 = build.block(IrBlockKind::Internal);
        IrOp fw = build.inst(IrCmd::ADD_INT, f, w);
        build.inst(IrCmd::JUMP_LT_INT, fw, build.constInt(33), block3, fallback);
        build.beginBlock(block3);

        // m = mask of the w low bits: ~(0xfffffffe << (w - 1))
        IrOp shift1 = build.inst(IrCmd::BITLSHIFT_UINT, build.constUint(0xfffffffe), build.inst(IrCmd::SUB_INT, w, build.constInt(1)));
        IrOp m = build.inst(IrCmd::BITNOT_UINT, shift1);

        // Clear the target field in n, then OR in the masked bits of v at f
        IrOp shift2 = build.inst(IrCmd::BITLSHIFT_UINT, m, f);
        IrOp not_ = build.inst(IrCmd::BITNOT_UINT, shift2);
        IrOp lhs = build.inst(IrCmd::BITAND_UINT, n, not_);
        IrOp vm = build.inst(IrCmd::BITAND_UINT, v, m);
        IrOp rhs = build.inst(IrCmd::BITLSHIFT_UINT, vm, f);
        value = build.inst(IrCmd::BITOR_UINT, lhs, rhs);
    }

    build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), build.inst(IrCmd::UINT_TO_NUM, value));

    // Tag is already a number when the result overwrites the first argument
    if (ra != arg)
        build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));

    return {BuiltinImplType::UsesFallback, 1};
}
BuiltinImplResult translateBuiltinVector(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
{
if (nparams < 3 || nresults > 1)
@ -381,15 +782,40 @@ BuiltinImplResult translateBuiltin(IrBuilder& build, int bfid, int ra, int arg,
case LBF_MATH_TAN:
case LBF_MATH_TANH:
case LBF_MATH_LOG10:
return translateBuiltinNumberToNumberLibm(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_SIGN:
return translateBuiltinNumberToNumber(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_FMOD:
case LBF_MATH_ATAN2:
return translateBuiltin2NumberToNumberLibm(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_LDEXP:
return translateBuiltin2NumberToNumber(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_FREXP:
case LBF_MATH_MODF:
return translateBuiltinNumberTo2Number(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_BAND:
case LBF_BIT32_BOR:
case LBF_BIT32_BXOR:
case LBF_BIT32_BTEST:
return translateBuiltinBit32BinaryOp(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_BNOT:
return translateBuiltinBit32Bnot(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_LSHIFT:
case LBF_BIT32_RSHIFT:
case LBF_BIT32_ARSHIFT:
return translateBuiltinBit32Shift(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_LROTATE:
case LBF_BIT32_RROTATE:
return translateBuiltinBit32Rotate(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_EXTRACT:
return translateBuiltinBit32Extract(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_EXTRACTK:
return translateBuiltinBit32ExtractK(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_COUNTLZ:
case LBF_BIT32_COUNTRZ:
return translateBuiltinBit32Countz(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_BIT32_REPLACE:
return translateBuiltinBit32Replace(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
case LBF_TYPE:
return translateBuiltinType(build, nparams, ra, arg, args, nresults, fallback);
case LBF_TYPEOF:

View File

@ -511,6 +511,12 @@ void translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool
build.inst(IrCmd::CHECK_SAFE_ENV, fallback);
if (bfid == LBF_BIT32_EXTRACTK)
{
TValue protok = build.function.proto->k[pc[1]];
args = build.constDouble(protok.value.n);
}
BuiltinImplResult br = translateBuiltin(build, LuauBuiltinFunction(bfid), ra, arg, args, nparams, nresults, fallback);
if (br.type == BuiltinImplType::UsesFallback)

View File

@ -3,6 +3,8 @@
#include "Luau/IrBuilder.h"
#include "NativeState.h"
#include "lua.h"
#include "lnumutils.h"
@ -69,6 +71,8 @@ IrValueKind getCmdValueKind(IrCmd cmd)
case IrCmd::JUMP_IF_FALSY:
case IrCmd::JUMP_EQ_TAG:
case IrCmd::JUMP_EQ_INT:
case IrCmd::JUMP_LT_INT:
case IrCmd::JUMP_GE_UINT:
case IrCmd::JUMP_EQ_POINTER:
case IrCmd::JUMP_CMP_NUM:
case IrCmd::JUMP_CMP_ANY:
@ -84,7 +88,11 @@ IrValueKind getCmdValueKind(IrCmd cmd)
case IrCmd::TRY_CALL_FASTGETTM:
return IrValueKind::Pointer;
case IrCmd::INT_TO_NUM:
case IrCmd::UINT_TO_NUM:
return IrValueKind::Double;
case IrCmd::NUM_TO_INT:
case IrCmd::NUM_TO_UINT:
return IrValueKind::Int;
case IrCmd::ADJUST_STACK_TO_REG:
case IrCmd::ADJUST_STACK_TO_TOP:
return IrValueKind::None;
@ -137,6 +145,20 @@ IrValueKind getCmdValueKind(IrCmd cmd)
return IrValueKind::None;
case IrCmd::SUBSTITUTE:
return IrValueKind::Unknown;
case IrCmd::BITAND_UINT:
case IrCmd::BITXOR_UINT:
case IrCmd::BITOR_UINT:
case IrCmd::BITNOT_UINT:
case IrCmd::BITLSHIFT_UINT:
case IrCmd::BITRSHIFT_UINT:
case IrCmd::BITARSHIFT_UINT:
case IrCmd::BITLROTATE_UINT:
case IrCmd::BITRROTATE_UINT:
case IrCmd::BITCOUNTLZ_UINT:
case IrCmd::BITCOUNTRZ_UINT:
return IrValueKind::Int;
case IrCmd::INVOKE_LIBM:
return IrValueKind::Double;
}
LUAU_UNREACHABLE();
@ -284,7 +306,7 @@ void replace(IrFunction& function, IrBlock& block, uint32_t instIdx, IrInst repl
block.useCount--;
}
void substitute(IrFunction& function, IrInst& inst, IrOp replacement, IrOp location)
void substitute(IrFunction& function, IrInst& inst, IrOp replacement)
{
LUAU_ASSERT(!isBlockTerminator(inst.cmd));
@ -298,7 +320,7 @@ void substitute(IrFunction& function, IrInst& inst, IrOp replacement, IrOp locat
removeUse(function, inst.f);
inst.a = replacement;
inst.b = location;
inst.b = {};
inst.c = {};
inst.d = {};
inst.e = {};
@ -499,6 +521,24 @@ void foldConstants(IrBuilder& build, IrFunction& function, IrBlock& block, uint3
replace(function, block, index, {IrCmd::JUMP, inst.d});
}
break;
case IrCmd::JUMP_LT_INT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
if (function.intOp(inst.a) < function.intOp(inst.b))
replace(function, block, index, {IrCmd::JUMP, inst.c});
else
replace(function, block, index, {IrCmd::JUMP, inst.d});
}
break;
case IrCmd::JUMP_GE_UINT:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
if (function.uintOp(inst.a) >= function.uintOp(inst.b))
replace(function, block, index, {IrCmd::JUMP, inst.c});
else
replace(function, block, index, {IrCmd::JUMP, inst.d});
}
break;
case IrCmd::JUMP_CMP_NUM:
if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Constant)
{
@ -547,5 +587,42 @@ void foldConstants(IrBuilder& build, IrFunction& function, IrBlock& block, uint3
}
}
// Maps a builtin function id to the offset of its libm entry point inside
// NativeContext; used when lowering INVOKE_LIBM to locate the function
// pointer to call. Asserts (and returns 0) for builtins without a libm entry.
uint32_t getNativeContextOffset(LuauBuiltinFunction bfid)
{
switch (bfid)
{
case LBF_MATH_ACOS:
return offsetof(NativeContext, libm_acos);
case LBF_MATH_ASIN:
return offsetof(NativeContext, libm_asin);
case LBF_MATH_ATAN2:
return offsetof(NativeContext, libm_atan2);
case LBF_MATH_ATAN:
return offsetof(NativeContext, libm_atan);
case LBF_MATH_COSH:
return offsetof(NativeContext, libm_cosh);
case LBF_MATH_COS:
return offsetof(NativeContext, libm_cos);
case LBF_MATH_EXP:
return offsetof(NativeContext, libm_exp);
case LBF_MATH_LOG10:
return offsetof(NativeContext, libm_log10);
case LBF_MATH_SINH:
return offsetof(NativeContext, libm_sinh);
case LBF_MATH_SIN:
return offsetof(NativeContext, libm_sin);
case LBF_MATH_TANH:
return offsetof(NativeContext, libm_tanh);
case LBF_MATH_TAN:
return offsetof(NativeContext, libm_tan);
case LBF_MATH_FMOD:
return offsetof(NativeContext, libm_fmod);
default:
LUAU_ASSERT(!"Unsupported bfid");
}
return 0;
}
} // namespace CodeGen
} // namespace Luau

View File

@ -0,0 +1,223 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "IrValueLocationTracking.h"
namespace Luau
{
namespace CodeGen
{
// Tracks which VM register (if any) currently holds the value produced by an
// IR instruction, so spilled values can be restored from memory instead of
// being recomputed. All slots start with no associated instruction.
IrValueLocationTracking::IrValueLocationTracking(IrFunction& function)
: function(function)
{
vmRegValue.fill(kInvalidInstIdx);
}
// Stores a context pointer and callback for the tracker to invoke on tracked
// instructions (NOTE(review): presumably fired when a restore location is
// recorded or invalidated — confirm against callers).
// The "Callack" spelling is part of the public interface and is kept as-is.
void IrValueLocationTracking::setRestoreCallack(void* context, void (*callback)(void* context, IrInst& inst))
{
restoreCallbackCtx = context;
restoreCallback = callback;
}
// Called before an instruction is lowered: invalidates the restore locations
// of any VM registers the instruction is about to overwrite, so stale
// register->instruction associations are not used for spill restores.
// Every instruction kind that can reference a VmReg must be listed explicitly;
// the default case asserts that no operand is a VmReg.
void IrValueLocationTracking::beforeInstLowering(IrInst& inst)
{
switch (inst.cmd)
{
case IrCmd::STORE_TAG:
case IrCmd::STORE_POINTER:
case IrCmd::STORE_DOUBLE:
case IrCmd::STORE_INT:
case IrCmd::STORE_VECTOR:
case IrCmd::STORE_TVALUE:
invalidateRestoreOp(inst.a);
break;
case IrCmd::ADJUST_STACK_TO_REG:
// Everything from the adjusted register upward may change
invalidateRestoreVmRegs(vmRegOp(inst.a), -1);
break;
case IrCmd::FASTCALL:
invalidateRestoreVmRegs(vmRegOp(inst.b), function.intOp(inst.f));
break;
case IrCmd::INVOKE_FASTCALL:
// Multiple return sequences (count == -1) are defined by ADJUST_STACK_TO_REG
if (int count = function.intOp(inst.f); count != -1)
invalidateRestoreVmRegs(vmRegOp(inst.b), count);
break;
case IrCmd::DO_ARITH:
case IrCmd::DO_LEN:
case IrCmd::GET_TABLE:
case IrCmd::GET_IMPORT:
invalidateRestoreOp(inst.a);
break;
case IrCmd::CONCAT:
invalidateRestoreVmRegs(vmRegOp(inst.a), function.uintOp(inst.b));
break;
case IrCmd::GET_UPVALUE:
invalidateRestoreOp(inst.a);
break;
case IrCmd::PREPARE_FORN:
invalidateRestoreOp(inst.a);
invalidateRestoreOp(inst.b);
invalidateRestoreOp(inst.c);
break;
case IrCmd::CALL:
// Even if result count is limited, all registers starting from function (ra) might be modified
invalidateRestoreVmRegs(vmRegOp(inst.a), -1);
break;
case IrCmd::FORGLOOP:
case IrCmd::FORGLOOP_FALLBACK:
// Even if result count is limited, all registers starting from iteration index (ra+2) might be modified
invalidateRestoreVmRegs(vmRegOp(inst.a) + 2, -1);
break;
case IrCmd::FALLBACK_GETGLOBAL:
case IrCmd::FALLBACK_GETTABLEKS:
invalidateRestoreOp(inst.b);
break;
case IrCmd::FALLBACK_NAMECALL:
invalidateRestoreVmRegs(vmRegOp(inst.b), 2);
break;
case IrCmd::FALLBACK_GETVARARGS:
invalidateRestoreVmRegs(vmRegOp(inst.b), function.intOp(inst.c));
break;
case IrCmd::FALLBACK_NEWCLOSURE:
case IrCmd::FALLBACK_DUPCLOSURE:
invalidateRestoreOp(inst.b);
break;
case IrCmd::FALLBACK_FORGPREP:
invalidateRestoreVmRegs(vmRegOp(inst.b), 3);
break;
// Make sure all VmReg referencing instructions are handled explicitly (only register reads here)
case IrCmd::LOAD_TAG:
case IrCmd::LOAD_POINTER:
case IrCmd::LOAD_DOUBLE:
case IrCmd::LOAD_INT:
case IrCmd::LOAD_TVALUE:
case IrCmd::JUMP_IF_TRUTHY:
case IrCmd::JUMP_IF_FALSY:
case IrCmd::JUMP_CMP_ANY:
case IrCmd::SET_TABLE:
case IrCmd::SET_UPVALUE:
case IrCmd::INTERRUPT:
case IrCmd::BARRIER_OBJ:
case IrCmd::BARRIER_TABLE_FORWARD:
case IrCmd::CLOSE_UPVALS:
case IrCmd::CAPTURE:
case IrCmd::SETLIST:
case IrCmd::RETURN:
case IrCmd::FORGPREP_XNEXT_FALLBACK:
case IrCmd::FALLBACK_SETGLOBAL:
case IrCmd::FALLBACK_SETTABLEKS:
case IrCmd::FALLBACK_PREPVARARGS:
case IrCmd::ADJUST_STACK_TO_TOP:
break;
// These instructions read VmReg only after optimizeMemoryOperandsX64
case IrCmd::CHECK_TAG:
case IrCmd::ADD_NUM:
case IrCmd::SUB_NUM:
case IrCmd::MUL_NUM:
case IrCmd::DIV_NUM:
case IrCmd::MOD_NUM:
case IrCmd::POW_NUM:
case IrCmd::MIN_NUM:
case IrCmd::MAX_NUM:
case IrCmd::JUMP_EQ_TAG:
case IrCmd::JUMP_CMP_NUM:
break;
default:
// All instructions which reference registers have to be handled explicitly
LUAU_ASSERT(inst.a.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.b.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.c.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.d.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.e.kind != IrOpKind::VmReg);
LUAU_ASSERT(inst.f.kind != IrOpKind::VmReg);
break;
}
}
// Called after 'inst' is lowered: records where the instruction's value (or the value it
// just stored) can later be reloaded from, if it gets spilled.
void IrValueLocationTracking::afterInstLowering(IrInst& inst, uint32_t instIdx)
{
    const IrCmd cmd = inst.cmd;

    // Loads publish their own result as restorable from the source location
    const bool isLoad = cmd == IrCmd::LOAD_TAG || cmd == IrCmd::LOAD_POINTER || cmd == IrCmd::LOAD_DOUBLE || cmd == IrCmd::LOAD_INT ||
                        cmd == IrCmd::LOAD_TVALUE;

    // Full-value stores make the stored operand restorable from the written location
    // (tag/vector stores are excluded: they don't write a complete reloadable value)
    const bool isStore = cmd == IrCmd::STORE_POINTER || cmd == IrCmd::STORE_DOUBLE || cmd == IrCmd::STORE_INT || cmd == IrCmd::STORE_TVALUE;

    if (isLoad)
    {
        recordRestoreOp(instIdx, inst.a);
    }
    else if (isStore)
    {
        // Only worthwhile if the stored value remains live past this instruction
        if (inst.b.kind == IrOpKind::Inst && function.instOp(inst.b).lastUse != instIdx)
            recordRestoreOp(inst.b.index, inst.a);
    }
}
// Remembers that the value produced by instruction 'instIdx' currently lives at 'location'
// (a VM register or VM constant) and can be reloaded from there if spilled.
void IrValueLocationTracking::recordRestoreOp(uint32_t instIdx, IrOp location)
{
    switch (location.kind)
    {
    case IrOpKind::VmReg:
    {
        int reg = vmRegOp(location);

        // Track the highest register we have ever recorded, to bound range invalidation
        maxReg = reg > maxReg ? reg : maxReg;

        // Record location in register memory only if register is not captured
        if (!function.cfg.captured.regs.test(reg))
            function.recordRestoreOp(instIdx, location);

        vmRegValue[reg] = instIdx;
        break;
    }
    case IrOpKind::VmConst:
        // Constants are immutable, so they are always a valid restore source
        function.recordRestoreOp(instIdx, location);
        break;
    default:
        break;
    }
}
// Forgets the restore location 'location' because the memory there is about to change.
// Ordering matters: if the instruction whose value lives there has been spilled, it must be
// reloaded *now*, before the location is overwritten.
// NOTE(review): restoreCallback is invoked without a null check — assumes setRestoreCallack
// was called before lowering begins; confirm against the lowering entry points.
void IrValueLocationTracking::invalidateRestoreOp(IrOp location)
{
    if (location.kind == IrOpKind::VmReg)
    {
        uint32_t& instIdx = vmRegValue[vmRegOp(location)];

        if (instIdx != kInvalidInstIdx)
        {
            IrInst& inst = function.instructions[instIdx];

            // If instruction value is spilled and memory location is about to be lost, it has to be restored immediately
            if (inst.needsReload)
                restoreCallback(restoreCallbackCtx, inst);

            // Instruction loses its memory storage location
            function.recordRestoreOp(instIdx, IrOp());

            // Register loses link with instruction
            instIdx = kInvalidInstIdx;
        }
    }
    else if (location.kind == IrOpKind::VmConst)
    {
        LUAU_ASSERT(!"VM constants are immutable");
    }
}
// Invalidates restore locations for 'count' VM registers starting at 'start';
// count == -1 means everything up to the end of the register file.
void IrValueLocationTracking::invalidateRestoreVmRegs(int start, int count)
{
    // NOTE(review): 'end' is used inclusively below, so a non-negative 'count' invalidates
    // registers [start, start+count] — one more than 'count' registers. That is conservative
    // (never incorrect, at most a redundant restore); confirm whether [start, start+count-1]
    // was intended.
    int end = count == -1 ? 255 : start + count;

    // Only registers up to the highest one ever recorded can hold stale restore info
    if (end > maxReg)
        end = maxReg;

    for (int reg = start; reg <= end; reg++)
        invalidateRestoreOp(IrOp{IrOpKind::VmReg, uint8_t(reg)});
}
} // namespace CodeGen
} // namespace Luau

View File

@ -0,0 +1,38 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/IrData.h"
#include <array>
namespace Luau
{
namespace CodeGen
{
// Tracks, during lowering, which VM register/constant each IR instruction's value can be
// restored from. Lowering notifies the tracker around each instruction; when a tracked
// memory location is about to be clobbered while the value living there is spilled, the
// restore callback fires so the value can be reloaded immediately.
struct IrValueLocationTracking
{
    IrValueLocationTracking(IrFunction& function);

    // Callback fired when a spilled instruction must be reloaded before its restore location is lost
    void setRestoreCallack(void* context, void (*callback)(void* context, IrInst& inst));

    void beforeInstLowering(IrInst& inst);
    void afterInstLowering(IrInst& inst, uint32_t instIdx);

    void recordRestoreOp(uint32_t instIdx, IrOp location);
    void invalidateRestoreOp(IrOp location);
    void invalidateRestoreVmRegs(int start, int count);

    IrFunction& function;

    // Per VM register: index of the instruction whose value currently lives there
    // (presumably initialized to kInvalidInstIdx by the constructor — confirm in the .cpp)
    std::array<uint32_t, 256> vmRegValue;

    // For range/full invalidations, we only want to visit a limited number of data that we have recorded
    int maxReg = 0;

    void* restoreCallbackCtx = nullptr;
    void (*restoreCallback)(void* context, IrInst& inst) = nullptr;
};
} // namespace CodeGen
} // namespace Luau

View File

@ -96,7 +96,7 @@ struct NativeContext
void (*callEpilogC)(lua_State* L, int nresults, int n) = nullptr;
Closure* (*callFallback)(lua_State* L, StkId ra, StkId argtop, int nresults) = nullptr;
Closure* (*returnFallback)(lua_State* L, StkId ra, int n) = nullptr;
Closure* (*returnFallback)(lua_State* L, StkId ra, StkId valend) = nullptr;
// Opcode fallbacks, implemented in C
FallbackFn fallback[LOP__COUNT] = {};

View File

@ -425,6 +425,34 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
}
break;
}
case IrCmd::JUMP_LT_INT:
{
std::optional<int> valueA = function.asIntOp(inst.a.kind == IrOpKind::Constant ? inst.a : state.tryGetValue(inst.a));
std::optional<int> valueB = function.asIntOp(inst.b.kind == IrOpKind::Constant ? inst.b : state.tryGetValue(inst.b));
if (valueA && valueB)
{
if (*valueA < *valueB)
replace(function, block, index, {IrCmd::JUMP, inst.c});
else
replace(function, block, index, {IrCmd::JUMP, inst.d});
}
break;
}
case IrCmd::JUMP_GE_UINT:
{
std::optional<unsigned> valueA = function.asUintOp(inst.a.kind == IrOpKind::Constant ? inst.a : state.tryGetValue(inst.a));
std::optional<unsigned> valueB = function.asUintOp(inst.b.kind == IrOpKind::Constant ? inst.b : state.tryGetValue(inst.b));
if (valueA && valueB)
{
if (*valueA >= *valueB)
replace(function, block, index, {IrCmd::JUMP, inst.c});
else
replace(function, block, index, {IrCmd::JUMP, inst.d});
}
break;
}
case IrCmd::JUMP_CMP_NUM:
{
std::optional<double> valueA = function.asDoubleOp(inst.a.kind == IrOpKind::Constant ? inst.a : state.tryGetValue(inst.a));
@ -543,6 +571,9 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
case IrCmd::TRY_NUM_TO_INDEX:
case IrCmd::TRY_CALL_FASTGETTM:
case IrCmd::INT_TO_NUM:
case IrCmd::UINT_TO_NUM:
case IrCmd::NUM_TO_INT:
case IrCmd::NUM_TO_UINT:
case IrCmd::CHECK_ARRAY_SIZE:
case IrCmd::CHECK_SLOT_MATCH:
case IrCmd::CHECK_NODE_NO_NEXT:
@ -558,6 +589,18 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
case IrCmd::ADJUST_STACK_TO_REG: // Changes stack top, but not the values
case IrCmd::ADJUST_STACK_TO_TOP: // Changes stack top, but not the values
case IrCmd::CHECK_FASTCALL_RES: // Changes stack top, but not the values
case IrCmd::BITAND_UINT:
case IrCmd::BITXOR_UINT:
case IrCmd::BITOR_UINT:
case IrCmd::BITNOT_UINT:
case IrCmd::BITLSHIFT_UINT:
case IrCmd::BITRSHIFT_UINT:
case IrCmd::BITARSHIFT_UINT:
case IrCmd::BITRROTATE_UINT:
case IrCmd::BITLROTATE_UINT:
case IrCmd::BITCOUNTLZ_UINT:
case IrCmd::BITCOUNTRZ_UINT:
case IrCmd::INVOKE_LIBM:
break;
case IrCmd::JUMP_CMP_ANY:
@ -808,7 +851,6 @@ void constPropInBlockChains(IrBuilder& build)
std::vector<uint8_t> visited(function.blocks.size(), false);
// First pass: go over existing blocks once and propagate constants
for (IrBlock& block : function.blocks)
{
if (block.kind == IrBlockKind::Fallback || block.kind == IrBlockKind::Dead)
@ -819,13 +861,18 @@ void constPropInBlockChains(IrBuilder& build)
constPropInBlockChain(build, visited, &block);
}
}
// Second pass: go through internal block chains and outline them into a single new block
void createLinearBlocks(IrBuilder& build)
{
// Go through internal block chains and outline them into a single new block.
// Outlining will be able to linearize the execution, even if there was a jump to a block with multiple users,
// new 'block' will only be reachable from a single one and all gathered information can be preserved.
std::fill(visited.begin(), visited.end(), false);
IrFunction& function = build.function;
// This next loop can create new 'linear' blocks, so index-based loop has to be used (and it intentionally won't reach those new blocks)
std::vector<uint8_t> visited(function.blocks.size(), false);
// This loop can create new 'linear' blocks, so index-based loop has to be used (and it intentionally won't reach those new blocks)
size_t originalBlockCount = function.blocks.size();
for (size_t i = 0; i < originalBlockCount; i++)
{

View File

@ -103,6 +103,7 @@ target_sources(Luau.CodeGen PRIVATE
CodeGen/src/IrTranslateBuiltins.cpp
CodeGen/src/IrTranslation.cpp
CodeGen/src/IrUtils.cpp
CodeGen/src/IrValueLocationTracking.cpp
CodeGen/src/NativeState.cpp
CodeGen/src/OptimizeConstProp.cpp
CodeGen/src/OptimizeFinalX64.cpp
@ -127,6 +128,7 @@ target_sources(Luau.CodeGen PRIVATE
CodeGen/src/IrRegAllocA64.h
CodeGen/src/IrTranslateBuiltins.h
CodeGen/src/IrTranslation.h
CodeGen/src/IrValueLocationTracking.h
CodeGen/src/NativeState.h
)
@ -362,6 +364,7 @@ if(TARGET Luau.UnitTest)
tests/Frontend.test.cpp
tests/IrBuilder.test.cpp
tests/IrCallWrapperX64.test.cpp
tests/IrRegAllocX64.test.cpp
tests/JsonEmitter.test.cpp
tests/Lexer.test.cpp
tests/Linter.test.cpp

View File

@ -84,8 +84,11 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Binary")
SINGLE_COMPARE(add(x0, x1, x2), 0x8B020020);
SINGLE_COMPARE(add(w0, w1, w2), 0x0B020020);
SINGLE_COMPARE(add(x0, x1, x2, 7), 0x8B021C20);
SINGLE_COMPARE(add(x0, x1, x2, -7), 0x8B421C20);
SINGLE_COMPARE(sub(x0, x1, x2), 0xCB020020);
SINGLE_COMPARE(and_(x0, x1, x2), 0x8A020020);
SINGLE_COMPARE(and_(x0, x1, x2, 7), 0x8A021C20);
SINGLE_COMPARE(and_(x0, x1, x2, -7), 0x8A421C20);
SINGLE_COMPARE(bic(x0, x1, x2), 0x8A220020);
SINGLE_COMPARE(orr(x0, x1, x2), 0xAA020020);
SINGLE_COMPARE(eor(x0, x1, x2), 0xCA020020);
@ -120,6 +123,16 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "BinaryImm")
SINGLE_COMPARE(and_(w0, w0, 6), 0x121F0400);
SINGLE_COMPARE(and_(w0, w0, 12), 0x121E0400);
SINGLE_COMPARE(and_(w0, w0, 2147483648), 0x12010000);
// shifts
SINGLE_COMPARE(lsl(w1, w2, 1), 0x531F7841);
SINGLE_COMPARE(lsl(x1, x2, 1), 0xD37FF841);
SINGLE_COMPARE(lsr(w1, w2, 1), 0x53017C41);
SINGLE_COMPARE(lsr(x1, x2, 1), 0xD341FC41);
SINGLE_COMPARE(asr(w1, w2, 1), 0x13017C41);
SINGLE_COMPARE(asr(x1, x2, 1), 0x9341FC41);
SINGLE_COMPARE(ror(w1, w2, 1), 0x13820441);
SINGLE_COMPARE(ror(x1, x2, 1), 0x93C20441);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Loads")
@ -141,6 +154,14 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Loads")
SINGLE_COMPARE(ldrsh(w0, x1), 0x79C00020);
SINGLE_COMPARE(ldrsw(x0, x1), 0xB9800020);
// load sizes x offset scaling
SINGLE_COMPARE(ldr(x0, mem(x1, 8)), 0xF9400420);
SINGLE_COMPARE(ldr(w0, mem(x1, 8)), 0xB9400820);
SINGLE_COMPARE(ldrb(w0, mem(x1, 8)), 0x39402020);
SINGLE_COMPARE(ldrh(w0, mem(x1, 8)), 0x79401020);
SINGLE_COMPARE(ldrsb(w0, mem(x1, 8)), 0x39C02020);
SINGLE_COMPARE(ldrsh(w0, mem(x1, 8)), 0x79C01020);
// paired loads
SINGLE_COMPARE(ldp(x0, x1, mem(x2, 8)), 0xA9408440);
SINGLE_COMPARE(ldp(w0, w1, mem(x2, -8)), 0x297F0440);
@ -160,6 +181,12 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Stores")
SINGLE_COMPARE(strb(w0, x1), 0x39000020);
SINGLE_COMPARE(strh(w0, x1), 0x79000020);
// store sizes x offset scaling
SINGLE_COMPARE(str(x0, mem(x1, 8)), 0xF9000420);
SINGLE_COMPARE(str(w0, mem(x1, 8)), 0xB9000820);
SINGLE_COMPARE(strb(w0, mem(x1, 8)), 0x39002020);
SINGLE_COMPARE(strh(w0, mem(x1, 8)), 0x79001020);
// paired stores
SINGLE_COMPARE(stp(x0, x1, mem(x2, 8)), 0xA9008440);
SINGLE_COMPARE(stp(w0, w1, mem(x2, -8)), 0x293F0440);
@ -241,10 +268,13 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "ControlFlow")
build.b(ConditionA64::Equal, skip);
build.cbz(x0, skip);
build.cbnz(x0, skip);
build.tbz(x0, 5, skip);
build.tbnz(x0, 5, skip);
build.setLabel(skip);
build.b(skip);
build.bl(skip);
},
{0x54000060, 0xB4000040, 0xB5000020, 0x5400000E}));
{0x540000A0, 0xB4000080, 0xB5000060, 0x36280040, 0x37280020, 0x14000000, 0x97ffffff}));
// Basic control flow
SINGLE_COMPARE(br(x0), 0xD61F0000);
@ -358,6 +388,12 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "FPLoadStore")
SINGLE_COMPARE(ldr(q0, x1), 0x3DC00020);
SINGLE_COMPARE(str(d0, x1), 0xFD000020);
SINGLE_COMPARE(str(q0, x1), 0x3D800020);
// load/store sizes x offset scaling
SINGLE_COMPARE(ldr(q0, mem(x1, 16)), 0x3DC00420);
SINGLE_COMPARE(ldr(d0, mem(x1, 16)), 0xFD400820);
SINGLE_COMPARE(str(q0, mem(x1, 16)), 0x3D800420);
SINGLE_COMPARE(str(d0, mem(x1, 16)), 0xFD000820);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "FPCompare")
@ -366,6 +402,16 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "FPCompare")
SINGLE_COMPARE(fcmpz(d1), 0x1E602028);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "FPImm")
{
    // Check exact encodings emitted for fmov with immediate operands
    SINGLE_COMPARE(fmov(d0, 0), 0x2F00E400);
    SINGLE_COMPARE(fmov(d0, 0.125), 0x1E681000);
    SINGLE_COMPARE(fmov(d0, -0.125), 0x1E781000);

    // Values that cannot be encoded as an fmov immediate are rejected up front
    CHECK(!AssemblyBuilderA64::isFmovSupported(-0.0));
    CHECK(!AssemblyBuilderA64::isFmovSupported(0.12389));
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "AddressOffsetSize")
{
SINGLE_COMPARE(ldr(w0, mem(x1, 16)), 0xB9401020);
@ -395,6 +441,7 @@ TEST_CASE("LogTest")
build.add(sp, sp, 4);
build.add(w0, w1, w2);
build.add(x0, x1, x2, 2);
build.add(x0, x1, x2, -2);
build.add(w7, w8, 5);
build.add(x7, x8, 5);
build.ldr(x7, x8);
@ -421,6 +468,9 @@ TEST_CASE("LogTest")
build.fcmp(d0, d1);
build.fcmpz(d0);
build.fmov(d0, 0.25);
build.tbz(x0, 5, l);
build.setLabel(l);
build.ret();
@ -430,6 +480,7 @@ TEST_CASE("LogTest")
add sp,sp,#4
add w0,w1,w2
add x0,x1,x2 LSL #2
add x0,x1,x2 LSR #2
add w7,w8,#5
add x7,x8,#5
ldr x7,[x8]
@ -449,6 +500,8 @@ TEST_CASE("LogTest")
cset x0,eq
fcmp d0,d1
fcmp d0,#0
fmov d0,#0.25
tbz x0,#5,.L1
.L1:
ret
)";

View File

@ -244,6 +244,10 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "FormsOfShift")
SINGLE_COMPARE(sal(eax, 4), 0xc1, 0xe0, 0x04);
SINGLE_COMPARE(sar(rax, 4), 0x48, 0xc1, 0xf8, 0x04);
SINGLE_COMPARE(sar(r11, 1), 0x49, 0xd1, 0xfb);
SINGLE_COMPARE(rol(eax, 1), 0xd1, 0xc0);
SINGLE_COMPARE(rol(eax, cl), 0xd3, 0xc0);
SINGLE_COMPARE(ror(eax, 1), 0xd1, 0xc8);
SINGLE_COMPARE(ror(eax, cl), 0xd3, 0xc8);
}
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "FormsOfLea")
@ -533,6 +537,8 @@ TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "AVXTernaryInstructionForms")
TEST_CASE_FIXTURE(AssemblyBuilderX64Fixture, "MiscInstructions")
{
SINGLE_COMPARE(int3(), 0xcc);
SINGLE_COMPARE(bsr(eax, edx), 0x0f, 0xbd, 0xc2);
SINGLE_COMPARE(bsf(eax, edx), 0x0f, 0xbc, 0xc2);
}
TEST_CASE("LogTest")

View File

@ -1643,6 +1643,11 @@ TEST_CASE("SafeEnv")
runConformance("safeenv.lua");
}
// Runs the native code generation conformance script (tests/conformance/native.lua)
TEST_CASE("Native")
{
    runConformance("native.lua");
}
TEST_CASE("HugeFunction")
{
std::string source;

View File

@ -11,6 +11,8 @@ ConstraintGraphBuilderFixture::ConstraintGraphBuilderFixture()
, mainModule(new Module)
, forceTheFlag{"DebugLuauDeferredConstraintResolution", true}
{
mainModule->name = "MainModule";
mainModule->humanReadableName = "MainModule";
mainModule->reduction = std::make_unique<TypeReduction>(NotNull{&mainModule->internalTypes}, builtinTypes, NotNull{&ice});
BlockedType::DEPRECATED_nextIndex = 0;
@ -21,8 +23,8 @@ void ConstraintGraphBuilderFixture::generateConstraints(const std::string& code)
{
AstStatBlock* root = parse(code);
dfg = std::make_unique<DataFlowGraph>(DataFlowGraphBuilder::build(root, NotNull{&ice}));
cgb = std::make_unique<ConstraintGraphBuilder>("MainModule", mainModule, &arena, NotNull(&moduleResolver), builtinTypes, NotNull(&ice),
frontend.globals.globalScope, &logger, NotNull{dfg.get()});
cgb = std::make_unique<ConstraintGraphBuilder>(
mainModule, &arena, NotNull(&moduleResolver), builtinTypes, NotNull(&ice), frontend.globals.globalScope, &logger, NotNull{dfg.get()});
cgb->visit(root);
rootScope = cgb->rootScope;
constraints = Luau::borrowConstraints(cgb->constraints);

View File

@ -152,7 +152,7 @@ TEST_CASE_FIXTURE(FrontendFixture, "automatically_check_dependent_scripts")
frontend.check("game/Gui/Modules/B");
ModulePtr bModule = frontend.moduleResolver.modules["game/Gui/Modules/B"];
ModulePtr bModule = frontend.moduleResolver.getModule("game/Gui/Modules/B");
REQUIRE(bModule != nullptr);
CHECK(bModule->errors.empty());
Luau::dumpErrors(bModule);
@ -240,13 +240,13 @@ TEST_CASE_FIXTURE(FrontendFixture, "nocheck_modules_are_typed")
CheckResult result = frontend.check("game/Gui/Modules/C");
LUAU_REQUIRE_NO_ERRORS(result);
ModulePtr aModule = frontend.moduleResolver.modules["game/Gui/Modules/A"];
ModulePtr aModule = frontend.moduleResolver.getModule("game/Gui/Modules/A");
REQUIRE(bool(aModule));
std::optional<TypeId> aExports = first(aModule->returnType);
REQUIRE(bool(aExports));
ModulePtr bModule = frontend.moduleResolver.modules["game/Gui/Modules/B"];
ModulePtr bModule = frontend.moduleResolver.getModule("game/Gui/Modules/B");
REQUIRE(bool(bModule));
std::optional<TypeId> bExports = first(bModule->returnType);
@ -297,7 +297,7 @@ TEST_CASE_FIXTURE(FrontendFixture, "nocheck_cycle_used_by_checked")
CheckResult result = frontend.check("game/Gui/Modules/C");
LUAU_REQUIRE_NO_ERRORS(result);
ModulePtr cModule = frontend.moduleResolver.modules["game/Gui/Modules/C"];
ModulePtr cModule = frontend.moduleResolver.getModule("game/Gui/Modules/C");
REQUIRE(bool(cModule));
std::optional<TypeId> cExports = first(cModule->returnType);
@ -486,7 +486,7 @@ TEST_CASE_FIXTURE(FrontendFixture, "dont_recheck_script_that_hasnt_been_marked_d
frontend.check("game/Gui/Modules/B");
ModulePtr bModule = frontend.moduleResolver.modules["game/Gui/Modules/B"];
ModulePtr bModule = frontend.moduleResolver.getModule("game/Gui/Modules/B");
CHECK(bModule->errors.empty());
Luau::dumpErrors(bModule);
}
@ -507,7 +507,7 @@ TEST_CASE_FIXTURE(FrontendFixture, "recheck_if_dependent_script_is_dirty")
frontend.check("game/Gui/Modules/B");
ModulePtr bModule = frontend.moduleResolver.modules["game/Gui/Modules/B"];
ModulePtr bModule = frontend.moduleResolver.getModule("game/Gui/Modules/B");
CHECK(bModule->errors.empty());
Luau::dumpErrors(bModule);

View File

@ -1326,6 +1326,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "SimplePathExtraction")
updateUseCounts(build.function);
constPropInBlockChains(build);
createLinearBlocks(build);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
bb_0:
@ -1401,6 +1402,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "NoPathExtractionForBlocksWithLiveOutValues"
updateUseCounts(build.function);
constPropInBlockChains(build);
createLinearBlocks(build);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
bb_0:
@ -1452,6 +1454,7 @@ TEST_CASE_FIXTURE(IrBuilderFixture, "InfiniteLoopInPathAnalysis")
updateUseCounts(build.function);
constPropInBlockChains(build);
createLinearBlocks(build);
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
bb_0:

View File

@ -0,0 +1,58 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/IrRegAllocX64.h"
#include "doctest.h"
using namespace Luau::CodeGen;
using namespace Luau::CodeGen::X64;
// Test fixture: an x64 assembly builder (Windows ABI, text logging on) paired with a
// register allocator over an initially empty IR function.
class IrRegAllocX64Fixture
{
public:
    IrRegAllocX64Fixture()
        : build(/* logText */ true, ABIX64::Windows)
        , regs(build, function)
    {
    }

    // Finalizes assembly and compares the logged instruction text against 'expected'
    void checkMatch(std::string expected)
    {
        build.finalize();

        CHECK("\n" + build.text == expected);
    }

    AssemblyBuilderX64 build;
    IrFunction function;
    IrRegAllocX64 regs;
};
TEST_SUITE_BEGIN("IrRegAllocX64");
// Regression test for spill restore: restoring a value whose register was taken by another
// instruction must not reuse the other value's spill slot (use-after-free in spill restore).
TEST_CASE_FIXTURE(IrRegAllocX64Fixture, "RelocateFix")
{
    IrInst irInst0{IrCmd::LOAD_DOUBLE};
    irInst0.lastUse = 2;
    function.instructions.push_back(irInst0);

    IrInst irInst1{IrCmd::LOAD_DOUBLE};
    irInst1.lastUse = 2;
    function.instructions.push_back(irInst1);

    // inst0 takes rax, is preserved (spilled), then inst1 takes rax as well
    function.instructions[0].regX64 = regs.takeReg(rax, 0);
    regs.preserve(function.instructions[0]);
    function.instructions[1].regX64 = regs.takeReg(rax, 1);

    // Restoring inst0 into rax forces inst1 to be spilled first
    regs.restore(function.instructions[0], true);

    LUAU_ASSERT(function.instructions[0].regX64 == rax);
    LUAU_ASSERT(function.instructions[1].spilled);

    // Two distinct spill slots must be used, and inst0 reloads from its own slot
    checkMatch(R"(
 vmovsd qword ptr [rsp+048h],rax
 vmovsd qword ptr [rsp+050h],rax
 vmovsd rax,qword ptr [rsp+048h]
)");
}
TEST_SUITE_END();

View File

@ -369,6 +369,10 @@ type B = A
auto mod = frontend.moduleResolver.getModule("Module/A");
auto it = mod->exportedTypeBindings.find("A");
REQUIRE(it != mod->exportedTypeBindings.end());
if (FFlag::DebugLuauDeferredConstraintResolution)
CHECK(toString(it->second.type) == "any");
else
CHECK(toString(it->second.type) == "*error-type*");
}

View File

@ -40,7 +40,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "dcr_require_basic")
CheckResult bResult = frontend.check("game/B");
LUAU_REQUIRE_NO_ERRORS(bResult);
ModulePtr b = frontend.moduleResolver.modules["game/B"];
ModulePtr b = frontend.moduleResolver.getModule("game/B");
REQUIRE(b != nullptr);
std::optional<TypeId> bType = requireType(b, "b");
REQUIRE(bType);
@ -72,7 +72,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "require")
dumpErrors(bResult);
LUAU_REQUIRE_NO_ERRORS(bResult);
ModulePtr b = frontend.moduleResolver.modules["game/B"];
ModulePtr b = frontend.moduleResolver.getModule("game/B");
REQUIRE(b != nullptr);
@ -102,7 +102,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "require_types")
CheckResult bResult = frontend.check("workspace/B");
LUAU_REQUIRE_NO_ERRORS(bResult);
ModulePtr b = frontend.moduleResolver.modules["workspace/B"];
ModulePtr b = frontend.moduleResolver.getModule("workspace/B");
REQUIRE(b != nullptr);
TypeId hType = requireType(b, "h");
@ -167,8 +167,8 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "require_module_that_does_not_export")
frontend.check("game/Workspace/A");
frontend.check("game/Workspace/B");
ModulePtr aModule = frontend.moduleResolver.modules["game/Workspace/A"];
ModulePtr bModule = frontend.moduleResolver.modules["game/Workspace/B"];
ModulePtr aModule = frontend.moduleResolver.getModule("game/Workspace/A");
ModulePtr bModule = frontend.moduleResolver.getModule("game/Workspace/B");
CHECK(aModule->errors.empty());
REQUIRE_EQ(1, bModule->errors.size());

View File

@ -260,10 +260,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "bail_early_if_unification_is_too_complicated
// FIXME: Move this test to another source file when removing FFlag::LuauLowerBoundsCalculation
TEST_CASE_FIXTURE(Fixture, "do_not_ice_when_trying_to_pick_first_of_generic_type_pack")
{
ScopedFastFlag sff[]{
{"LuauReturnAnyInsteadOfICE", true},
};
// In-place quantification causes these types to have the wrong types but only because of nasty interaction with prototyping.
// The type of f is initially () -> free1...
// Then the prototype iterator advances, and checks the function expression assigned to g, which has the type () -> free2...
@ -528,7 +524,7 @@ return wrapStrictTable(Constants, "Constants")
frontend.check("game/B");
ModulePtr m = frontend.moduleResolver.modules["game/B"];
ModulePtr m = frontend.moduleResolver.getModule("game/B");
REQUIRE(m);
std::optional<TypeId> result = first(m->returnType);
@ -570,7 +566,7 @@ return wrapStrictTable(Constants, "Constants")
frontend.check("game/B");
ModulePtr m = frontend.moduleResolver.modules["game/B"];
ModulePtr m = frontend.moduleResolver.getModule("game/B");
REQUIRE(m);
std::optional<TypeId> result = first(m->returnType);

View File

@ -268,10 +268,7 @@ TEST_CASE_FIXTURE(Fixture, "should_be_able_to_infer_this_without_stack_overflowi
)");
if (FFlag::DebugLuauDeferredConstraintResolution)
{
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK(get<NormalizationTooComplex>(result.errors[0]));
}
else
LUAU_REQUIRE_NO_ERRORS(result);
}
@ -1056,7 +1053,11 @@ TEST_CASE_FIXTURE(Fixture, "type_infer_recursion_limit_normalizer")
end
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
validateErrors(result.errors);
REQUIRE_MESSAGE(!result.errors.empty(), getErrors(result));
CHECK(1 == result.errors.size());
CHECK(Location{{3, 12}, {3, 46}} == result.errors[0].location);
CHECK_EQ("Internal error: Code is too complex to typecheck! Consider adding type annotations around this area", toString(result.errors[0]));
}

View File

@ -125,6 +125,25 @@ TEST_CASE_FIXTURE(Fixture, "iterating_over_nested_UnionTypes")
CHECK_EQ(result[1], builtinTypes->numberType);
}
// The union iterator flattens nested unions; this variant exercises the postfix ++ operator
TEST_CASE_FIXTURE(Fixture, "iterating_over_nested_UnionTypes_postfix_operator_plus_plus")
{
    // Inner union: number | string
    Type subunion{UnionType{}};
    UnionType* innerUtv = getMutable<UnionType>(&subunion);
    innerUtv->options = {builtinTypes->numberType, builtinTypes->stringType};

    // Outer union: any | (number | string)
    UnionType utv;
    utv.options = {builtinTypes->anyType, &subunion};

    std::vector<TypeId> result;
    for (auto it = begin(&utv); it != end(&utv); it++)
        result.push_back(*it);

    // Iteration visits the flattened options: any, number, string
    REQUIRE_EQ(result.size(), 3);
    CHECK_EQ(result[0], builtinTypes->anyType);
    CHECK_EQ(result[2], builtinTypes->stringType);
    CHECK_EQ(result[1], builtinTypes->numberType);
}
TEST_CASE_FIXTURE(Fixture, "iterator_detects_cyclic_UnionTypes_and_skips_over_them")
{
Type atv{UnionType{}};

View File

@ -0,0 +1,17 @@
-- This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
print("testing native code generation")

-- Exercises value spilling/restoring in the native codegen; only the first return value
-- (c) is compared, the rest exist to keep values live across the spill point.
assert((function(x, y)
    -- trigger a linear sequence
    local t1 = x + 2
    local t2 = x - 7

    local a = x * 10
    local b = a + 1

    a = y -- update 'a' version

    local t = {} -- call to allocate table forces a spill

    local c = x * 10

    return c, b, t, t1, t2
end)(5, 10) == 50)

return('OK')

58
tools/codegenstat.py Normal file
View File

@ -0,0 +1,58 @@
#!/usr/bin/python3
# This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
# Given the output of --compile=codegenverbose in stdin, this script outputs statistics about bytecode/IR
import sys
import re
from collections import defaultdict
# Per-opcode counters: bytecode ops, IR ops, assembly mnemonics, and assembly
# instructions attributed to the IR op that produced them.
count_bc = defaultdict(int)
count_ir = defaultdict(int)
count_asm = defaultdict(int)
count_irasm = defaultdict(int)

# GETTABLEKS R10 R1 K18 ['s']
# L1: DIV R14 R13 R3
re_bc = re.compile(r'^(?:L\d+: )?([A-Z_]+) ')

# # CHECK_SLOT_MATCH %178, K3, bb_fallback_37
# # %175 = LOAD_TAG R15
re_ir = re.compile(r'^# (?:%\d+ = )?([A-Z_]+) ')

# cmp w14,#5
re_asm = re.compile(r'^ ([a-z.]+) ')

# IR op most recently seen; subsequent assembly lines are attributed to it
current_ir = None

for line in sys.stdin.buffer.readlines():
    # decode defensively: codegen output can contain non-UTF8 bytes
    line = line.decode('utf-8', errors='ignore').rstrip()

    # order matters: asm lines are indented, IR lines start with '#', the rest is bytecode
    if m := re_asm.match(line):
        count_asm[m[1]] += 1
        if current_ir:
            count_irasm[current_ir] += 1
    elif m := re_ir.match(line):
        count_ir[m[1]] += 1
        current_ir = m[1]
    elif m := re_bc.match(line):
        count_bc[m[1]] += 1
def display(name, counts, limit=None, extra=None):
    """Print a histogram of `counts` (op -> count), most frequent first.

    At most `limit` rows are shown; the remainder is collapsed into an
    "Others" row. `extra`, if given, maps an op name to a trailing note.
    """
    ranked = sorted(counts.items(), key=lambda entry: entry[1], reverse=True)
    total = sum(count for _, count in ranked)

    shown = 0
    print(name)
    for position, (op, count) in enumerate(ranked):
        if position == limit:
            if shown < total:
                print(f' {"Others":25}: {total-shown} ({(total-shown)/total*100:.1f}%)')
            break
        print(f' {op:25}: {count} ({count/total*100:.1f}%){"; "+extra(op) if extra else ""}')
        shown += count
    print()
# Emit the reports; the IR->Assembly table also shows average asm instructions per IR op
display("Bytecode", count_bc, limit=20)
display("IR", count_ir, limit=20)
display("Assembly", count_asm, limit=10)
display("IR->Assembly", count_irasm, limit=30, extra=lambda op: f'{count_irasm[op] / count_ir[op]:.1f} insn/op')

View File

@ -138,8 +138,10 @@ TypeInfer.fuzz_free_table_type_change_during_index_check
TypeInfer.infer_assignment_value_types_mutable_lval
TypeInfer.no_stack_overflow_from_isoptional
TypeInfer.no_stack_overflow_from_isoptional2
TypeInfer.should_be_able_to_infer_this_without_stack_overflowing
TypeInfer.tc_after_error_recovery_no_replacement_name_in_error
TypeInfer.type_infer_recursion_limit_no_ice
TypeInfer.type_infer_recursion_limit_normalizer
TypeInferAnyError.for_in_loop_iterator_is_any2
TypeInferClasses.class_type_mismatch_with_name_conflict
TypeInferClasses.classes_without_overloaded_operators_cannot_be_added

View File

@ -54,4 +54,13 @@
</Expand>
</Type>
<Type Name="Luau::CodeGen::IrOp">
<DisplayString Condition="kind == IrOpKind::None">none</DisplayString>
<DisplayString Condition="kind == IrOpKind::VmReg &amp;&amp; (index >> 8) != 0">R{index&amp;0xff}-v{index >> 8}</DisplayString>
<DisplayString Condition="kind == IrOpKind::VmReg">R{index&amp;0xff}</DisplayString>
<DisplayString Condition="kind == IrOpKind::VmConst">K{index}</DisplayString>
<DisplayString Condition="kind == IrOpKind::VmUpvalue">UP{index}</DisplayString>
<DisplayString Condition="kind == IrOpKind::Inst">%{index}</DisplayString>
</Type>
</AutoVisualizer>