Merge branch 'upstream' into merge

Vyacheslav Egorov 2023-06-09 15:21:03 +03:00
commit a9becc9b70
42 changed files with 1493 additions and 283 deletions

View File

@@ -0,0 +1,134 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include <unordered_map>
#include <vector>
#include <type_traits>
#include <iterator>
namespace Luau
{
template<typename K, typename V>
struct InsertionOrderedMap
{
static_assert(std::is_trivially_copyable_v<K>, "key must be trivially copyable");
private:
using vec = std::vector<std::pair<K, V>>;
public:
using iterator = typename vec::iterator;
using const_iterator = typename vec::const_iterator;
void insert(K k, V v)
{
if (indices.count(k) != 0)
return;
pairs.push_back(std::make_pair(k, std::move(v)));
indices[k] = pairs.size() - 1;
}
void clear()
{
pairs.clear();
indices.clear();
}
size_t size() const
{
LUAU_ASSERT(pairs.size() == indices.size());
return pairs.size();
}
bool contains(const K& k) const
{
return indices.count(k) > 0;
}
const V* get(const K& k) const
{
auto it = indices.find(k);
if (it == indices.end())
return nullptr;
else
return &pairs.at(it->second).second;
}
V* get(const K& k)
{
auto it = indices.find(k);
if (it == indices.end())
return nullptr;
else
return &pairs.at(it->second).second;
}
const_iterator begin() const
{
return pairs.begin();
}
const_iterator end() const
{
return pairs.end();
}
iterator begin()
{
return pairs.begin();
}
iterator end()
{
return pairs.end();
}
const_iterator find(K k) const
{
auto indicesIt = indices.find(k);
if (indicesIt == indices.end())
return end();
else
return begin() + indicesIt->second;
}
iterator find(K k)
{
auto indicesIt = indices.find(k);
if (indicesIt == indices.end())
return end();
else
return begin() + indicesIt->second;
}
void erase(iterator it)
{
if (it == pairs.end())
return;
K k = it->first;
auto indexIt = indices.find(k);
if (indexIt == indices.end())
return;
size_t removed = indexIt->second;
indices.erase(indexIt);
pairs.erase(it);
for (auto& [_, index] : indices)
{
if (index > removed)
--index;
}
}
private:
vec pairs;
std::unordered_map<K, size_t> indices;
};
}
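
A quick standalone sketch of how the new container behaves, based directly on the header above (illustration only, not part of the commit): keys keep insertion order, duplicate inserts are ignored, and since there is no operator[], mutation goes through insert-then-get.

#include "Luau/InsertionOrderedMap.h"
#include <cassert>
#include <string>

int main()
{
    Luau::InsertionOrderedMap<int, std::string> map;
    map.insert(3, "three");
    map.insert(1, "one");
    map.insert(3, "ignored"); // duplicate key: the first value wins

    assert(map.size() == 2);
    assert(*map.get(3) == "three");
    assert(map.get(7) == nullptr);

    // Iteration follows insertion order (3 before 1), unlike std::unordered_map.
    assert(map.begin()->first == 3);

    // No operator[]: the pattern used elsewhere in this commit is insert-then-get,
    // which also works for updating a value in place.
    map.insert(7, {});
    map.get(7)->assign("seven");

    // erase() keeps the remaining pairs in order and re-numbers their indices.
    map.erase(map.find(3));
    assert(map.size() == 2 && map.begin()->first == 1);
}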

View File

@@ -64,6 +64,7 @@ public:
bool operator==(const TypeIds& there) const;
size_t getHash() const;
+bool isNever() const;
};
} // namespace Luau
@@ -269,12 +270,24 @@ struct NormalizedType
NormalizedType& operator=(NormalizedType&&) = default;
// IsType functions
+/// Returns true if the type is exactly a number. Behaves like Type::isNumber()
+bool isExactlyNumber() const;
-/// Returns true if the type is a subtype of function. This includes any and unknown.
-bool isFunction() const;
-/// Returns true if the type is a subtype of number. This includes any and unknown.
-bool isNumber() const;
+/// Returns true if the type is a subtype of string(it could be a singleton). Behaves like Type::isString()
+bool isSubtypeOfString() const;
+// Helpers that improve readability of the above (they just say if the component is present)
+bool hasTops() const;
+bool hasBooleans() const;
+bool hasClasses() const;
+bool hasErrors() const;
+bool hasNils() const;
+bool hasNumbers() const;
+bool hasStrings() const;
+bool hasThreads() const;
+bool hasTables() const;
+bool hasFunctions() const;
+bool hasTyvars() const;
};

View File

@@ -16,6 +16,7 @@
#include "Luau/TypeFamily.h"
#include "Luau/Simplify.h"
#include "Luau/VisitType.h"
+#include "Luau/InsertionOrderedMap.h"
#include <algorithm>
@@ -196,7 +197,7 @@ struct RefinementPartition
bool shouldAppendNilType = false;
};
-using RefinementContext = std::unordered_map<DefId, RefinementPartition>;
+using RefinementContext = InsertionOrderedMap<DefId, RefinementPartition>;
static void unionRefinements(NotNull<BuiltinTypes> builtinTypes, NotNull<TypeArena> arena, const RefinementContext& lhs, const RefinementContext& rhs,
RefinementContext& dest, std::vector<ConstraintV>* constraints)
@@ -229,8 +230,9 @@ static void unionRefinements(NotNull<BuiltinTypes> builtinTypes, NotNull<TypeAre
TypeId rightDiscriminantTy =
rhsIt->second.discriminantTypes.size() == 1 ? rhsIt->second.discriminantTypes[0] : intersect(rhsIt->second.discriminantTypes);
-dest[def].discriminantTypes.push_back(simplifyUnion(builtinTypes, arena, leftDiscriminantTy, rightDiscriminantTy).result);
-dest[def].shouldAppendNilType |= partition.shouldAppendNilType || rhsIt->second.shouldAppendNilType;
+dest.insert(def, {});
+dest.get(def)->discriminantTypes.push_back(simplifyUnion(builtinTypes, arena, leftDiscriminantTy, rightDiscriminantTy).result);
+dest.get(def)->shouldAppendNilType |= partition.shouldAppendNilType || rhsIt->second.shouldAppendNilType;
}
}
@@ -285,11 +287,12 @@ static void computeRefinement(NotNull<BuiltinTypes> builtinTypes, NotNull<TypeAr
}
RefinementContext uncommittedRefis;
-uncommittedRefis[proposition->breadcrumb->def].discriminantTypes.push_back(discriminantTy);
+uncommittedRefis.insert(proposition->breadcrumb->def, {});
+uncommittedRefis.get(proposition->breadcrumb->def)->discriminantTypes.push_back(discriminantTy);
// When the top-level expression is `t[x]`, we want to refine it into `nil`, not `never`.
if ((sense || !eq) && getMetadata<SubscriptMetadata>(proposition->breadcrumb))
-uncommittedRefis[proposition->breadcrumb->def].shouldAppendNilType = true;
+uncommittedRefis.get(proposition->breadcrumb->def)->shouldAppendNilType = true;
for (NullableBreadcrumbId current = proposition->breadcrumb; current && current->previous; current = current->previous)
{
@@ -302,17 +305,20 @@ static void computeRefinement(NotNull<BuiltinTypes> builtinTypes, NotNull<TypeAr
{
TableType::Props props{{field->prop, Property{discriminantTy}}};
discriminantTy = arena->addType(TableType{std::move(props), std::nullopt, TypeLevel{}, scope.get(), TableState::Sealed});
-uncommittedRefis[current->previous->def].discriminantTypes.push_back(discriminantTy);
+uncommittedRefis.insert(current->previous->def, {});
+uncommittedRefis.get(current->previous->def)->discriminantTypes.push_back(discriminantTy);
}
}
// And now it's time to commit it.
for (auto& [def, partition] : uncommittedRefis)
{
-for (TypeId discriminantTy : partition.discriminantTypes)
-(*refis)[def].discriminantTypes.push_back(discriminantTy);
-(*refis)[def].shouldAppendNilType |= partition.shouldAppendNilType;
+(*refis).insert(def, {});
+for (TypeId discriminantTy : partition.discriminantTypes)
+(*refis).get(def)->discriminantTypes.push_back(discriminantTy);
+(*refis).get(def)->shouldAppendNilType |= partition.shouldAppendNilType;
}
}
}

View File

@@ -785,7 +785,8 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Cons
const NormalizedType* normLeftTy = normalizer->normalize(leftType);
if (hasTypeInIntersection<FreeType>(leftType) && force)
asMutable(leftType)->ty.emplace<BoundType>(anyPresent ? builtinTypes->anyType : builtinTypes->numberType);
-if (normLeftTy && normLeftTy->isNumber())
+// We want to check if the left type has tops because `any` is a valid type for the lhs
+if (normLeftTy && (normLeftTy->isExactlyNumber() || get<AnyType>(normLeftTy->tops)))
{
unify(leftType, rightType, constraint->scope);
asMutable(resultType)->ty.emplace<BoundType>(anyPresent ? builtinTypes->anyType : leftType);
@@ -805,9 +806,11 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Cons
// For concatenation, if the LHS is a string, the RHS must be a string as
// well. The result will also be a string.
case AstExprBinary::Op::Concat:
+{
if (hasTypeInIntersection<FreeType>(leftType) && force)
asMutable(leftType)->ty.emplace<BoundType>(anyPresent ? builtinTypes->anyType : builtinTypes->stringType);
-if (isString(leftType))
+const NormalizedType* leftNormTy = normalizer->normalize(leftType);
+if (leftNormTy && leftNormTy->isSubtypeOfString())
{
unify(leftType, rightType, constraint->scope);
asMutable(resultType)->ty.emplace<BoundType>(anyPresent ? builtinTypes->anyType : leftType);
@@ -823,14 +826,33 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Cons
}
break;
+}
// Inexact comparisons require that the types be both numbers or both
// strings, and evaluate to a boolean.
case AstExprBinary::Op::CompareGe:
case AstExprBinary::Op::CompareGt:
case AstExprBinary::Op::CompareLe:
case AstExprBinary::Op::CompareLt:
-if ((isNumber(leftType) && isNumber(rightType)) || (isString(leftType) && isString(rightType)) || get<NeverType>(leftType) ||
-get<NeverType>(rightType))
+{
+const NormalizedType* lt = normalizer->normalize(leftType);
+const NormalizedType* rt = normalizer->normalize(rightType);
+// If the lhs is any, comparisons should be valid.
+if (lt && rt && (lt->isExactlyNumber() || get<AnyType>(lt->tops)) && rt->isExactlyNumber())
+{
+asMutable(resultType)->ty.emplace<BoundType>(builtinTypes->booleanType);
+unblock(resultType);
+return true;
+}
+if (lt && rt && (lt->isSubtypeOfString() || get<AnyType>(lt->tops)) && rt->isSubtypeOfString())
+{
+asMutable(resultType)->ty.emplace<BoundType>(builtinTypes->booleanType);
+unblock(resultType);
+return true;
+}
+if (get<NeverType>(leftType) || get<NeverType>(rightType))
{
asMutable(resultType)->ty.emplace<BoundType>(builtinTypes->booleanType);
unblock(resultType);
@@ -838,6 +860,8 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Cons
}
break;
+}
// == and ~= always evaluate to a boolean, and impose no other constraints
// on their parameters.
case AstExprBinary::Op::CompareEq:
@@ -1776,7 +1800,7 @@ struct FindRefineConstraintBlockers : TypeOnceVisitor
}
};
-}
+} // namespace
static bool isNegatedAny(TypeId ty)
{

View File

@@ -35,7 +35,6 @@ LUAU_FASTINTVARIABLE(LuauAutocompleteCheckTimeoutMs, 100)
LUAU_FASTFLAGVARIABLE(DebugLuauDeferredConstraintResolution, false)
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolverToJson, false)
LUAU_FASTFLAGVARIABLE(DebugLuauReadWriteProperties, false)
-LUAU_FASTFLAGVARIABLE(LuauTypeCheckerUseCorrectScope, false)
namespace Luau
{
@@ -1196,8 +1195,7 @@ ModulePtr Frontend::check(const SourceModule& sourceModule, Mode mode, std::vect
}
else
{
-TypeChecker typeChecker(FFlag::LuauTypeCheckerUseCorrectScope ? (forAutocomplete ? globalsForAutocomplete.globalScope : globals.globalScope)
-: globals.globalScope,
+TypeChecker typeChecker(forAutocomplete ? globalsForAutocomplete.globalScope : globals.globalScope,
forAutocomplete ? &moduleResolverForAutocomplete : &moduleResolver, builtinTypes, &iceHandler);
if (prepareModuleScope)

View File

@@ -108,6 +108,14 @@ size_t TypeIds::getHash() const
return hash;
}
bool TypeIds::isNever() const
{
return std::all_of(begin(), end(), [&](TypeId i) {
// If every TypeId in the set is never, then the set as a whole is also never.
return get<NeverType>(i) != nullptr;
});
}
bool TypeIds::operator==(const TypeIds& there) const
{
return hash == there.hash && types == there.types;
@@ -228,14 +236,72 @@ NormalizedType::NormalizedType(NotNull<BuiltinTypes> builtinTypes)
{
}
-bool NormalizedType::isFunction() const
-{
-return !get<NeverType>(tops) || !functions.parts.empty();
-}
-bool NormalizedType::isNumber() const
-{
-return !get<NeverType>(tops) || !get<NeverType>(numbers);
-}
+bool NormalizedType::isExactlyNumber() const
+{
+return hasNumbers() && !hasTops() && !hasBooleans() && !hasClasses() && !hasErrors() && !hasNils() && !hasStrings() && !hasThreads() &&
+!hasTables() && !hasFunctions() && !hasTyvars();
+}
+bool NormalizedType::isSubtypeOfString() const
+{
+return hasStrings() && !hasTops() && !hasBooleans() && !hasClasses() && !hasErrors() && !hasNils() && !hasNumbers() && !hasThreads() &&
+!hasTables() && !hasFunctions() && !hasTyvars();
+}
bool NormalizedType::hasTops() const
{
return !get<NeverType>(tops);
}
bool NormalizedType::hasBooleans() const
{
return !get<NeverType>(booleans);
}
bool NormalizedType::hasClasses() const
{
return !classes.isNever();
}
bool NormalizedType::hasErrors() const
{
return !get<NeverType>(errors);
}
bool NormalizedType::hasNils() const
{
return !get<NeverType>(nils);
}
bool NormalizedType::hasNumbers() const
{
return !get<NeverType>(numbers);
}
bool NormalizedType::hasStrings() const
{
return !strings.isNever();
}
bool NormalizedType::hasThreads() const
{
return !get<NeverType>(threads);
}
bool NormalizedType::hasTables() const
{
return !tables.isNever();
}
bool NormalizedType::hasFunctions() const
{
return !functions.isNever();
}
bool NormalizedType::hasTyvars() const
{
return !tyvars.empty();
}
static bool isShallowInhabited(const NormalizedType& norm)

View File

@@ -1067,9 +1067,7 @@ struct TypeChecker2
std::vector<Location> argLocs;
argLocs.reserve(call->args.size + 1);
-TypeId* maybeOriginalCallTy = module->astOriginalCallTypes.find(call);
-TypeId* maybeSelectedOverload = module->astOverloadResolvedTypes.find(call);
+auto maybeOriginalCallTy = module->astOriginalCallTypes.find(call);
if (!maybeOriginalCallTy)
return;
@@ -1093,8 +1091,19 @@
return;
}
}
-else if (get<FunctionType>(originalCallTy) || get<IntersectionType>(originalCallTy))
+else if (get<FunctionType>(originalCallTy))
{
+// ok.
+}
+else if (get<IntersectionType>(originalCallTy))
+{
+auto norm = normalizer.normalize(originalCallTy);
+if (!norm)
+return reportError(CodeTooComplex{}, call->location);
+// NormalizedType::hasFunctions returns true if its tops component is `unknown`, but for soundness we want the reverse.
+if (get<UnknownType>(norm->tops) || !norm->hasFunctions())
+return reportError(CannotCallNonFunction{originalCallTy}, call->func->location);
}
else if (auto utv = get<UnionType>(originalCallTy))
{
@@ -1164,7 +1173,7 @@
TypePackId expectedArgTypes = testArena.addTypePack(args);
-if (maybeSelectedOverload)
+if (auto maybeSelectedOverload = module->astOverloadResolvedTypes.find(call))
{
// This overload might not work still: the constraint solver will
// pass the type checker an instantiated function type that matches
@@ -1414,7 +1423,7 @@
{
// Nothing
}
-else if (!normalizedFnTy->isFunction())
+else if (!normalizedFnTy->hasFunctions())
{
ice->ice("Internal error: Lambda has non-function type " + toString(inferredFnTy), fn->location);
}
@@ -1793,12 +1802,14 @@
case AstExprBinary::Op::CompareGt:
case AstExprBinary::Op::CompareLe:
case AstExprBinary::Op::CompareLt:
-if (isNumber(leftType))
+{
+const NormalizedType* leftTyNorm = normalizer.normalize(leftType);
+if (leftTyNorm && leftTyNorm->isExactlyNumber())
{
reportErrors(tryUnify(scope, expr->right->location, rightType, builtinTypes->numberType));
return builtinTypes->numberType;
}
-else if (isString(leftType))
+else if (leftTyNorm && leftTyNorm->isSubtypeOfString())
{
reportErrors(tryUnify(scope, expr->right->location, rightType, builtinTypes->stringType));
return builtinTypes->stringType;
@@ -1810,6 +1821,8 @@
expr->location);
return builtinTypes->errorRecoveryType();
}
+}
case AstExprBinary::Op::And:
case AstExprBinary::Op::Or:
case AstExprBinary::Op::CompareEq:

View File

@@ -346,7 +346,6 @@ TypeFamilyReductionResult<TypeId> addFamilyFn(std::vector<TypeId> typeParams, st
TypeId rhsTy = log->follow(typeParams.at(1));
const NormalizedType* normLhsTy = normalizer->normalize(lhsTy);
const NormalizedType* normRhsTy = normalizer->normalize(rhsTy);
if (!normLhsTy || !normRhsTy)
{
return {std::nullopt, false, {}, {}};
@@ -355,7 +354,7 @@ TypeFamilyReductionResult<TypeId> addFamilyFn(std::vector<TypeId> typeParams, st
{
return {builtins->anyType, false, {}, {}};
}
-else if (normLhsTy->isNumber() && normRhsTy->isNumber())
+else if ((normLhsTy->hasNumbers() || normLhsTy->hasTops()) && (normRhsTy->hasNumbers() || normRhsTy->hasTops()))
{
return {builtins->numberType, false, {}, {}};
}

CLI/Compile.cpp Normal file
View File

@@ -0,0 +1,346 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "lua.h"
#include "lualib.h"
#include "Luau/CodeGen.h"
#include "Luau/Compiler.h"
#include "Luau/BytecodeBuilder.h"
#include "Luau/Parser.h"
#include "Luau/TimeTrace.h"
#include "FileUtils.h"
#include "Flags.h"
#include <memory>
#ifdef _WIN32
#include <io.h>
#include <fcntl.h>
#endif
LUAU_FASTFLAG(DebugLuauTimeTracing)
enum class CompileFormat
{
Text,
Binary,
Remarks,
Codegen, // Prints annotated native code including IR and assembly
CodegenAsm, // Prints annotated native code assembly
CodegenIr, // Prints annotated native code IR
CodegenVerbose, // Prints annotated native code including IR, assembly and outlined code
CodegenNull,
Null
};
struct GlobalOptions
{
int optimizationLevel = 1;
int debugLevel = 1;
} globalOptions;
static Luau::CompileOptions copts()
{
Luau::CompileOptions result = {};
result.optimizationLevel = globalOptions.optimizationLevel;
result.debugLevel = globalOptions.debugLevel;
return result;
}
static std::optional<CompileFormat> getCompileFormat(const char* name)
{
if (strcmp(name, "text") == 0)
return CompileFormat::Text;
else if (strcmp(name, "binary") == 0)
return CompileFormat::Binary;
else if (strcmp(name, "text") == 0)
return CompileFormat::Text;
else if (strcmp(name, "remarks") == 0)
return CompileFormat::Remarks;
else if (strcmp(name, "codegen") == 0)
return CompileFormat::Codegen;
else if (strcmp(name, "codegenasm") == 0)
return CompileFormat::CodegenAsm;
else if (strcmp(name, "codegenir") == 0)
return CompileFormat::CodegenIr;
else if (strcmp(name, "codegenverbose") == 0)
return CompileFormat::CodegenVerbose;
else if (strcmp(name, "codegennull") == 0)
return CompileFormat::CodegenNull;
else if (strcmp(name, "null") == 0)
return CompileFormat::Null;
else
return std::nullopt;
}
static void report(const char* name, const Luau::Location& location, const char* type, const char* message)
{
fprintf(stderr, "%s(%d,%d): %s: %s\n", name, location.begin.line + 1, location.begin.column + 1, type, message);
}
static void reportError(const char* name, const Luau::ParseError& error)
{
report(name, error.getLocation(), "SyntaxError", error.what());
}
static void reportError(const char* name, const Luau::CompileError& error)
{
report(name, error.getLocation(), "CompileError", error.what());
}
static std::string getCodegenAssembly(const char* name, const std::string& bytecode, Luau::CodeGen::AssemblyOptions options)
{
std::unique_ptr<lua_State, void (*)(lua_State*)> globalState(luaL_newstate(), lua_close);
lua_State* L = globalState.get();
if (luau_load(L, name, bytecode.data(), bytecode.size(), 0) == 0)
return Luau::CodeGen::getAssembly(L, -1, options);
fprintf(stderr, "Error loading bytecode %s\n", name);
return "";
}
static void annotateInstruction(void* context, std::string& text, int fid, int instpos)
{
Luau::BytecodeBuilder& bcb = *(Luau::BytecodeBuilder*)context;
bcb.annotateInstruction(text, fid, instpos);
}
struct CompileStats
{
size_t lines;
size_t bytecode;
size_t codegen;
double readTime;
double miscTime;
double parseTime;
double compileTime;
double codegenTime;
};
static double recordDeltaTime(double& timer)
{
double now = Luau::TimeTrace::getClock();
double delta = now - timer;
timer = now;
return delta;
}
static bool compileFile(const char* name, CompileFormat format, CompileStats& stats)
{
double currts = Luau::TimeTrace::getClock();
std::optional<std::string> source = readFile(name);
if (!source)
{
fprintf(stderr, "Error opening %s\n", name);
return false;
}
stats.readTime += recordDeltaTime(currts);
// NOTE: Normally, you should use Luau::compile or luau_compile (see lua_require as an example)
// This function is much more complicated because it supports many output human-readable formats through internal interfaces
try
{
Luau::BytecodeBuilder bcb;
Luau::CodeGen::AssemblyOptions options;
options.outputBinary = format == CompileFormat::CodegenNull;
if (!options.outputBinary)
{
options.includeAssembly = format != CompileFormat::CodegenIr;
options.includeIr = format != CompileFormat::CodegenAsm;
options.includeOutlinedCode = format == CompileFormat::CodegenVerbose;
}
options.annotator = annotateInstruction;
options.annotatorContext = &bcb;
if (format == CompileFormat::Text)
{
bcb.setDumpFlags(Luau::BytecodeBuilder::Dump_Code | Luau::BytecodeBuilder::Dump_Source | Luau::BytecodeBuilder::Dump_Locals |
Luau::BytecodeBuilder::Dump_Remarks);
bcb.setDumpSource(*source);
}
else if (format == CompileFormat::Remarks)
{
bcb.setDumpFlags(Luau::BytecodeBuilder::Dump_Source | Luau::BytecodeBuilder::Dump_Remarks);
bcb.setDumpSource(*source);
}
else if (format == CompileFormat::Codegen || format == CompileFormat::CodegenAsm || format == CompileFormat::CodegenIr ||
format == CompileFormat::CodegenVerbose)
{
bcb.setDumpFlags(Luau::BytecodeBuilder::Dump_Code | Luau::BytecodeBuilder::Dump_Source | Luau::BytecodeBuilder::Dump_Locals |
Luau::BytecodeBuilder::Dump_Remarks);
bcb.setDumpSource(*source);
}
stats.miscTime += recordDeltaTime(currts);
Luau::Allocator allocator;
Luau::AstNameTable names(allocator);
Luau::ParseResult result = Luau::Parser::parse(source->c_str(), source->size(), names, allocator);
if (!result.errors.empty())
throw Luau::ParseErrors(result.errors);
stats.lines += result.lines;
stats.parseTime += recordDeltaTime(currts);
Luau::compileOrThrow(bcb, result, names, copts());
stats.bytecode += bcb.getBytecode().size();
stats.compileTime += recordDeltaTime(currts);
switch (format)
{
case CompileFormat::Text:
printf("%s", bcb.dumpEverything().c_str());
break;
case CompileFormat::Remarks:
printf("%s", bcb.dumpSourceRemarks().c_str());
break;
case CompileFormat::Binary:
fwrite(bcb.getBytecode().data(), 1, bcb.getBytecode().size(), stdout);
break;
case CompileFormat::Codegen:
case CompileFormat::CodegenAsm:
case CompileFormat::CodegenIr:
case CompileFormat::CodegenVerbose:
printf("%s", getCodegenAssembly(name, bcb.getBytecode(), options).c_str());
break;
case CompileFormat::CodegenNull:
stats.codegen += getCodegenAssembly(name, bcb.getBytecode(), options).size();
stats.codegenTime += recordDeltaTime(currts);
break;
case CompileFormat::Null:
break;
}
return true;
}
catch (Luau::ParseErrors& e)
{
for (auto& error : e.getErrors())
reportError(name, error);
return false;
}
catch (Luau::CompileError& e)
{
reportError(name, e);
return false;
}
}
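
For contrast with the dump-oriented path above, the simple route that the NOTE mentions looks roughly like this. This is a hedged sketch: it assumes the public Luau::compile helper from "Luau/Compiler.h" and the usual luau_load/lua_pcall embedding calls, and it is not a drop-in replacement for compileFile.

// Minimal sketch, not part of this commit: compile a source string and run it.
#include "lua.h"
#include "lualib.h"
#include "Luau/Compiler.h"
#include <memory>
#include <string>
#include <cstdio>

static int runChunk(const std::string& source, const char* chunkname)
{
    std::unique_ptr<lua_State, void (*)(lua_State*)> globalState(luaL_newstate(), lua_close);
    lua_State* L = globalState.get();
    luaL_openlibs(L);

    Luau::CompileOptions opts = {};
    opts.optimizationLevel = 1;
    opts.debugLevel = 1;

    // Luau::compile does not throw; parse errors come back encoded in the bytecode
    // and surface as a load failure from luau_load.
    std::string bytecode = Luau::compile(source, opts);

    if (luau_load(L, chunkname, bytecode.data(), bytecode.size(), 0) != 0)
    {
        fprintf(stderr, "%s\n", lua_tostring(L, -1));
        return 1;
    }

    return lua_pcall(L, 0, 0, 0) == 0 ? 0 : 1;
}

int main()
{
    return runChunk("print('hello from Luau')", "=stdin");
}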
static void displayHelp(const char* argv0)
{
printf("Usage: %s [--mode] [options] [file list]\n", argv0);
printf("\n");
printf("Available modes:\n");
printf(" binary, text, remarks, codegen\n");
printf("\n");
printf("Available options:\n");
printf(" -h, --help: Display this usage message.\n");
printf(" -O<n>: compile with optimization level n (default 1, n should be between 0 and 2).\n");
printf(" -g<n>: compile with debug level n (default 1, n should be between 0 and 2).\n");
printf(" --timetrace: record compiler time tracing information into trace.json\n");
}
static int assertionHandler(const char* expr, const char* file, int line, const char* function)
{
printf("%s(%d): ASSERTION FAILED: %s\n", file, line, expr);
return 1;
}
int main(int argc, char** argv)
{
Luau::assertHandler() = assertionHandler;
setLuauFlagsDefault();
CompileFormat compileFormat = CompileFormat::Text;
for (int i = 1; i < argc; i++)
{
if (strcmp(argv[i], "-h") == 0 || strcmp(argv[i], "--help") == 0)
{
displayHelp(argv[0]);
return 0;
}
else if (strncmp(argv[i], "-O", 2) == 0)
{
int level = atoi(argv[i] + 2);
if (level < 0 || level > 2)
{
fprintf(stderr, "Error: Optimization level must be between 0 and 2 inclusive.\n");
return 1;
}
globalOptions.optimizationLevel = level;
}
else if (strncmp(argv[i], "-g", 2) == 0)
{
int level = atoi(argv[i] + 2);
if (level < 0 || level > 2)
{
fprintf(stderr, "Error: Debug level must be between 0 and 2 inclusive.\n");
return 1;
}
globalOptions.debugLevel = level;
}
else if (strcmp(argv[i], "--timetrace") == 0)
{
FFlag::DebugLuauTimeTracing.value = true;
}
else if (strncmp(argv[i], "--fflags=", 9) == 0)
{
setLuauFlags(argv[i] + 9);
}
else if (argv[i][0] == '-' && argv[i][1] == '-' && getCompileFormat(argv[i] + 2))
{
compileFormat = *getCompileFormat(argv[i] + 2);
}
else if (argv[i][0] == '-')
{
fprintf(stderr, "Error: Unrecognized option '%s'.\n\n", argv[i]);
displayHelp(argv[0]);
return 1;
}
}
#if !defined(LUAU_ENABLE_TIME_TRACE)
if (FFlag::DebugLuauTimeTracing)
{
fprintf(stderr, "To run with --timetrace, Luau has to be built with LUAU_ENABLE_TIME_TRACE enabled\n");
return 1;
}
#endif
const std::vector<std::string> files = getSourceFiles(argc, argv);
#ifdef _WIN32
if (compileFormat == CompileFormat::Binary)
_setmode(_fileno(stdout), _O_BINARY);
#endif
CompileStats stats = {};
int failed = 0;
for (const std::string& path : files)
failed += !compileFile(path.c_str(), compileFormat, stats);
if (compileFormat == CompileFormat::Null)
printf("Compiled %d KLOC into %d KB bytecode (read %.2fs, parse %.2fs, compile %.2fs)\n", int(stats.lines / 1000), int(stats.bytecode / 1024),
stats.readTime, stats.parseTime, stats.compileTime);
else if (compileFormat == CompileFormat::CodegenNull)
printf("Compiled %d KLOC into %d KB bytecode => %d KB native code (%.2fx) (read %.2fs, parse %.2fs, compile %.2fs, codegen %.2fs)\n",
int(stats.lines / 1000), int(stats.bytecode / 1024), int(stats.codegen / 1024),
stats.bytecode == 0 ? 0.0 : double(stats.codegen) / double(stats.bytecode), stats.readTime, stats.parseTime, stats.compileTime,
stats.codegenTime);
return failed ? 1 : 0;
}

View File

@@ -36,12 +36,14 @@ if(LUAU_BUILD_CLI)
add_executable(Luau.Analyze.CLI)
add_executable(Luau.Ast.CLI)
add_executable(Luau.Reduce.CLI)
+add_executable(Luau.Compile.CLI)
# This also adds target `name` on Linux/macOS and `name.exe` on Windows
set_target_properties(Luau.Repl.CLI PROPERTIES OUTPUT_NAME luau)
set_target_properties(Luau.Analyze.CLI PROPERTIES OUTPUT_NAME luau-analyze)
set_target_properties(Luau.Ast.CLI PROPERTIES OUTPUT_NAME luau-ast)
set_target_properties(Luau.Reduce.CLI PROPERTIES OUTPUT_NAME luau-reduce)
+set_target_properties(Luau.Compile.CLI PROPERTIES OUTPUT_NAME luau-compile)
endif()
if(LUAU_BUILD_TESTS)
@@ -186,6 +188,7 @@ if(LUAU_BUILD_CLI)
target_compile_options(Luau.Reduce.CLI PRIVATE ${LUAU_OPTIONS})
target_compile_options(Luau.Analyze.CLI PRIVATE ${LUAU_OPTIONS})
target_compile_options(Luau.Ast.CLI PRIVATE ${LUAU_OPTIONS})
+target_compile_options(Luau.Compile.CLI PRIVATE ${LUAU_OPTIONS})
target_include_directories(Luau.Repl.CLI PRIVATE extern extern/isocline/include)
@@ -206,6 +209,8 @@ if(LUAU_BUILD_CLI)
target_compile_features(Luau.Reduce.CLI PRIVATE cxx_std_17)
target_include_directories(Luau.Reduce.CLI PUBLIC Reduce/include)
target_link_libraries(Luau.Reduce.CLI PRIVATE Luau.Common Luau.Ast Luau.Analysis)
+target_link_libraries(Luau.Compile.CLI PRIVATE Luau.Compiler Luau.VM Luau.CodeGen)
endif()
if(LUAU_BUILD_TESTS)

View File

@@ -14,13 +14,10 @@ namespace A64
enum class AddressKindA64 : uint8_t
{
-imm, // reg + imm
reg, // reg + reg
+imm, // reg + imm
+pre, // reg + imm, reg += imm
+post, // reg, reg += imm
-// TODO:
-// reg + reg << shift
-// reg + sext(reg) << shift
-// reg + uext(reg) << shift
};
struct AddressA64
@@ -29,13 +26,14 @@ struct AddressA64
// For example, ldr x0, [reg+imm] is limited to 8 KB offsets assuming imm is divisible by 8, but loading into w0 reduces the range to 4 KB
static constexpr size_t kMaxOffset = 1023;
-constexpr AddressA64(RegisterA64 base, int off = 0)
-: kind(AddressKindA64::imm)
+constexpr AddressA64(RegisterA64 base, int off = 0, AddressKindA64 kind = AddressKindA64::imm)
+: kind(kind)
, base(base)
, offset(xzr)
, data(off)
{
LUAU_ASSERT(base.kind == KindA64::x || base == sp);
+LUAU_ASSERT(kind != AddressKindA64::reg);
}
constexpr AddressA64(RegisterA64 base, RegisterA64 offset)
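
As a side note, the new pre/post kinds follow standard ARM64 pre- and post-indexed addressing: pre-index updates the base register and then accesses memory, post-index accesses memory and then updates the base. A plain-pointer illustration (not Luau code):

#include <cassert>

int main()
{
    int memory[4] = {10, 20, 30, 40};

    int* base = memory;
    int preLoaded = *(base += 1); // ldr w0, [x1, #4]!  -> base moves first, then load
    assert(preLoaded == 20 && base == memory + 1);

    base = memory;
    int postLoaded = *base;       // ldr w0, [x1], #4   -> load first, then base moves
    base += 1;
    assert(postLoaded == 10 && base == memory + 1);
}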

View File

@@ -1,6 +1,8 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
+#include "Luau/Common.h"
#include <bitset>
#include <utility>
#include <vector>
@@ -37,6 +39,16 @@ struct RegisterSet
void requireVariadicSequence(RegisterSet& sourceRs, const RegisterSet& defRs, uint8_t varargStart);
struct BlockOrdering
{
uint32_t depth = 0;
uint32_t preOrder = ~0u;
uint32_t postOrder = ~0u;
bool visited = false;
};
struct CfgInfo
{
std::vector<uint32_t> predecessors;
@@ -45,6 +57,15 @@ struct CfgInfo
std::vector<uint32_t> successors;
std::vector<uint32_t> successorsOffsets;
// Immediate dominators (unique parent in the dominator tree)
std::vector<uint32_t> idoms;
// Children in the dominator tree
std::vector<uint32_t> domChildren;
std::vector<uint32_t> domChildrenOffsets;
std::vector<BlockOrdering> domOrdering;
// VM registers that are live when the block is entered
// Additionally, an active variadic sequence can exist at the entry of the block
std::vector<RegisterSet> in;
@@ -64,6 +85,18 @@ struct CfgInfo
RegisterSet captured;
};
// A quick refresher on dominance and dominator trees:
// * If A is a dominator of B (A dom B), you can never execute B without executing A first
// * A is a strict dominator of B (A sdom B) when A dom B and A != B
// * Immediate dominator node N (idom N) is a unique node T so that T sdom N,
// but T does not strictly dominate any other node that dominates N.
// * Dominance frontier is a set of nodes where dominance of a node X ends.
// In practice this is where values established by node X might no longer hold because of join edges from other nodes coming in.
// This is also where PHI instructions in SSA are placed.
void computeCfgImmediateDominators(IrFunction& function);
void computeCfgDominanceTreeChildren(IrFunction& function);
// Function used to update all CFG data
void computeCfgInfo(IrFunction& function);
struct BlockIteratorWrapper
@@ -90,10 +123,17 @@ struct BlockIteratorWrapper
{
return itEnd;
}
uint32_t operator[](size_t pos) const
{
LUAU_ASSERT(pos < size_t(itEnd - itBegin));
return itBegin[pos];
}
};
BlockIteratorWrapper predecessors(const CfgInfo& cfg, uint32_t blockIdx);
BlockIteratorWrapper successors(const CfgInfo& cfg, uint32_t blockIdx);
BlockIteratorWrapper domChildren(const CfgInfo& cfg, uint32_t blockIdx);
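
To make the dominance refresher above concrete, here is a standalone sketch (not Luau code) of the classic "iterate intersect over post-order" immediate-dominator computation on a small diamond-shaped CFG. Whether computeCfgImmediateDominators uses exactly this scheme is not visible in this diff, so treat the code purely as an illustration of what the idoms array records.

#include <cassert>
#include <cstdint>
#include <vector>

int main()
{
    // Diamond with a tail: 0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3, 3 -> 4. Block 0 is the entry.
    std::vector<std::vector<uint32_t>> preds = {{}, {0}, {0}, {1, 2}, {3}};

    // Blocks are already numbered in reverse post-order here, so a block's
    // post-order number is simply (n - 1 - index).
    uint32_t n = uint32_t(preds.size());
    auto postOrder = [&](uint32_t block) {
        return n - 1 - block;
    };

    std::vector<uint32_t> idom(n, ~0u);
    idom[0] = 0;

    // Walk both candidates up the (partial) dominator tree until they meet.
    auto intersect = [&](uint32_t a, uint32_t b) {
        while (a != b)
        {
            while (postOrder(a) < postOrder(b))
                a = idom[a];
            while (postOrder(b) < postOrder(a))
                b = idom[b];
        }
        return a;
    };

    for (bool changed = true; changed;)
    {
        changed = false;
        for (uint32_t block = 1; block < n; block++)
        {
            uint32_t newIdom = ~0u;
            for (uint32_t pred : preds[block])
                if (idom[pred] != ~0u)
                    newIdom = (newIdom == ~0u) ? pred : intersect(newIdom, pred);
            if (newIdom != idom[block])
            {
                idom[block] = newIdom;
                changed = true;
            }
        }
    }

    // Neither branch of the diamond dominates the join block; only the entry does.
    assert(idom[3] == 0);
    assert(idom[4] == 3);
}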
} // namespace CodeGen
} // namespace Luau

View File

@@ -823,6 +823,7 @@ struct IrFunction
uint32_t validRestoreOpBlockIdx = 0;
Proto* proto = nullptr;
+bool variadic = false;
CfgInfo cfg;

View File

@@ -876,6 +876,9 @@ void AssemblyBuilderA64::placeA(const char* name, RegisterA64 dst, AddressA64 sr
switch (src.kind)
{
+case AddressKindA64::reg:
+place(dst.index | (src.base.index << 5) | (0b011'0'10 << 10) | (src.offset.index << 16) | (1 << 21) | (opsize << 22));
+break;
case AddressKindA64::imm:
if (unsigned(src.data >> sizelog) < 1024 && (src.data & ((1 << sizelog) - 1)) == 0)
place(dst.index | (src.base.index << 5) | ((src.data >> sizelog) << 10) | (opsize << 22) | (1 << 24));
@@ -884,8 +887,13 @@
else
LUAU_ASSERT(!"Unable to encode large immediate offset");
break;
-case AddressKindA64::reg:
-place(dst.index | (src.base.index << 5) | (0b011'0'10 << 10) | (src.offset.index << 16) | (1 << 21) | (opsize << 22));
+case AddressKindA64::pre:
+LUAU_ASSERT(src.data >= -256 && src.data <= 255);
+place(dst.index | (src.base.index << 5) | (0b11 << 10) | ((src.data & ((1 << 9) - 1)) << 12) | (opsize << 22));
+break;
+case AddressKindA64::post:
+LUAU_ASSERT(src.data >= -256 && src.data <= 255);
+place(dst.index | (src.base.index << 5) | (0b01 << 10) | ((src.data & ((1 << 9) - 1)) << 12) | (opsize << 22));
break;
}
@@ -1312,23 +1320,37 @@ void AssemblyBuilderA64::log(RegisterA64 reg)
void AssemblyBuilderA64::log(AddressA64 addr)
{
-text.append("[");
switch (addr.kind)
{
-case AddressKindA64::imm:
-log(addr.base);
-if (addr.data != 0)
-logAppend(",#%d", addr.data);
-break;
case AddressKindA64::reg:
+text.append("[");
log(addr.base);
text.append(",");
log(addr.offset);
-if (addr.data != 0)
-logAppend(" LSL #%d", addr.data);
+text.append("]");
break;
+case AddressKindA64::imm:
+text.append("[");
+log(addr.base);
+if (addr.data != 0)
+logAppend(",#%d", addr.data);
+text.append("]");
+break;
+case AddressKindA64::pre:
+text.append("[");
+log(addr.base);
+if (addr.data != 0)
+logAppend(",#%d", addr.data);
+text.append("]!");
+break;
+case AddressKindA64::post:
+text.append("[");
+log(addr.base);
+text.append("]!");
+if (addr.data != 0)
+logAppend(",#%d", addr.data);
break;
}
-text.append("]");
}
} // namespace A64

View File

@@ -1415,7 +1415,7 @@ void AssemblyBuilderX64::commit()
{
LUAU_ASSERT(codePos <= codeEnd);
-if (unsigned(codeEnd - codePos) < kMaxInstructionLength)
+if (codeEnd - codePos < kMaxInstructionLength)
extend();
}

View File

@@ -56,10 +56,8 @@ static void makePagesExecutable(uint8_t* mem, size_t size)
static void flushInstructionCache(uint8_t* mem, size_t size)
{
-#if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP | WINAPI_PARTITION_SYSTEM)
if (FlushInstructionCache(GetCurrentProcess(), mem, size) == 0)
LUAU_ASSERT(!"Failed to flush instruction cache");
-#endif
}
#else
static uint8_t* allocatePages(size_t size)

View File

@@ -268,7 +268,7 @@ static bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
[[maybe_unused]] static bool lowerIr(
A64::AssemblyBuilderA64& build, IrBuilder& ir, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
{
-A64::IrLoweringA64 lowering(build, helpers, data, proto, ir.function);
+A64::IrLoweringA64 lowering(build, helpers, data, ir.function);
return lowerImpl(build, lowering, ir.function, proto->bytecodeid, options);
}

View File

@@ -117,6 +117,81 @@ static void emitReentry(AssemblyBuilderA64& build, ModuleHelpers& helpers)
build.br(x4);
}
void emitReturn(AssemblyBuilderA64& build, ModuleHelpers& helpers)
{
// x1 = res
// w2 = number of written values
// x0 = ci
build.ldr(x0, mem(rState, offsetof(lua_State, ci)));
// w3 = ci->nresults
build.ldr(w3, mem(x0, offsetof(CallInfo, nresults)));
Label skipResultCopy;
// Fill the rest of the expected results (nresults - written) with 'nil'
build.cmp(w2, w3);
build.b(ConditionA64::GreaterEqual, skipResultCopy);
// TODO: cmp above could compute this and flags using subs
build.sub(w2, w3, w2); // counter = nresults - written
build.mov(w4, LUA_TNIL);
Label repeatNilLoop = build.setLabel();
build.str(w4, mem(x1, offsetof(TValue, tt)));
build.add(x1, x1, sizeof(TValue));
build.sub(w2, w2, 1);
build.cbnz(w2, repeatNilLoop);
build.setLabel(skipResultCopy);
// x2 = cip = ci - 1
build.sub(x2, x0, sizeof(CallInfo));
// res = cip->top when nresults >= 0
Label skipFixedRetTop;
build.tbnz(w3, 31, skipFixedRetTop);
build.ldr(x1, mem(x2, offsetof(CallInfo, top))); // res = cip->top
build.setLabel(skipFixedRetTop);
// Update VM state (ci, base, top)
build.str(x2, mem(rState, offsetof(lua_State, ci))); // L->ci = cip
build.ldr(rBase, mem(x2, offsetof(CallInfo, base))); // sync base = L->base while we have a chance
build.str(rBase, mem(rState, offsetof(lua_State, base))); // L->base = cip->base
build.str(x1, mem(rState, offsetof(lua_State, top))); // L->top = res
// Unlikely, but this might be the last return from VM
build.ldr(w4, mem(x0, offsetof(CallInfo, flags)));
build.tbnz(w4, countrz(LUA_CALLINFO_RETURN), helpers.exitNoContinueVm);
// Continue in interpreter if function has no native data
build.ldr(w4, mem(x2, offsetof(CallInfo, flags)));
build.tbz(w4, countrz(LUA_CALLINFO_NATIVE), helpers.exitContinueVm);
// Need to update state of the current function before we jump away
build.ldr(rClosure, mem(x2, offsetof(CallInfo, func)));
build.ldr(rClosure, mem(rClosure, offsetof(TValue, value.gc)));
build.ldr(x1, mem(rClosure, offsetof(Closure, l.p))); // cl->l.p aka proto
LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code
// Get instruction index from instruction pointer
// To get instruction index from instruction pointer, we need to divide byte offset by 4
// But we will actually need to scale instruction index by 4 back to byte offset later so it cancels out
build.ldr(x2, mem(x2, offsetof(CallInfo, savedpc))); // cip->savedpc
build.sub(x2, x2, rCode);
// Get new instruction location and jump to it
LUAU_ASSERT(offsetof(Proto, exectarget) == offsetof(Proto, execdata) + 8);
build.ldp(x3, x4, mem(x1, offsetof(Proto, execdata)));
build.ldr(w2, mem(x3, x2));
build.add(x4, x4, x2);
build.br(x4);
}
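
The "cancels out" remark above (byte offset = instruction index * 4, and execdata entries are also 4 bytes wide) can be checked in plain C++. The names below are stand-ins for illustration, not the real VM structures:

#include <cassert>
#include <cstddef>
#include <cstdint>

int main()
{
    // Mock 4-byte instruction stream and per-instruction native entry offsets
    uint32_t code[8] = {};
    uint32_t execdata[8] = {0, 16, 32, 48, 64, 80, 96, 112};

    const uint32_t* savedpc = &code[3]; // interpreter pc saved mid-function

    // Byte offset between the pointers is instructionIndex * 4 ...
    ptrdiff_t byteOffset = reinterpret_cast<const char*>(savedpc) - reinterpret_cast<const char*>(code);

    // ... and execdata entries are also 4 bytes each, so the scaling cancels out:
    uint32_t viaIndex = execdata[byteOffset / 4];
    uint32_t viaByteOffset = *reinterpret_cast<const uint32_t*>(reinterpret_cast<const char*>(execdata) + byteOffset);
    assert(viaIndex == viaByteOffset);
}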
static EntryLocations buildEntryFunction(AssemblyBuilderA64& build, UnwindBuilder& unwind)
{
EntryLocations locations;
@@ -230,6 +305,11 @@ void assembleHelpers(AssemblyBuilderA64& build, ModuleHelpers& helpers)
build.logAppend("; interrupt\n");
helpers.interrupt = build.setLabel();
emitInterrupt(build);
if (build.logText)
build.logAppend("; return\n");
helpers.return_ = build.setLabel();
emitReturn(build, helpers);
}
} // namespace A64

View File

@@ -17,8 +17,6 @@
#include <string.h>
-LUAU_FASTFLAG(LuauUniformTopHandling)
// All external function calls that can cause stack realloc or Lua calls have to be wrapped in VM_PROTECT
// This makes sure that we save the pc (in case the Lua call needs to generate a backtrace) before the call,
// and restores the stack pointer after in case stack gets reallocated
@@ -306,44 +304,6 @@ Closure* callFallback(lua_State* L, StkId ra, StkId argtop, int nresults)
}
}
// Extracted as-is from lvmexecute.cpp with the exception of control flow (reentry) and removed interrupts
Closure* returnFallback(lua_State* L, StkId ra, StkId valend)
{
// ci is our callinfo, cip is our parent
CallInfo* ci = L->ci;
CallInfo* cip = ci - 1;
StkId res = ci->func; // note: we assume CALL always puts func+args and expects results to start at func
StkId vali = ra;
int nresults = ci->nresults;
// copy return values into parent stack (but only up to nresults!), fill the rest with nil
// note: in MULTRET context nresults starts as -1 so i != 0 condition never activates intentionally
int i;
for (i = nresults; i != 0 && vali < valend; i--)
setobj2s(L, res++, vali++);
while (i-- > 0)
setnilvalue(res++);
// pop the stack frame
L->ci = cip;
L->base = cip->base;
L->top = (nresults == LUA_MULTRET) ? res : cip->top;
// we're done!
if (LUAU_UNLIKELY(ci->flags & LUA_CALLINFO_RETURN))
{
if (!FFlag::LuauUniformTopHandling)
L->top = res;
return NULL;
}
// keep executing new function
LUAU_ASSERT(isLua(cip));
return clvalue(cip->func);
}
const Instruction* executeGETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);

View File

@@ -18,7 +18,6 @@ Closure* callProlog(lua_State* L, TValue* ra, StkId argtop, int nresults);
void callEpilogC(lua_State* L, int nresults, int n);
Closure* callFallback(lua_State* L, StkId ra, StkId argtop, int nresults);
-Closure* returnFallback(lua_State* L, StkId ra, StkId valend);
const Instruction* executeGETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeSETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);

View File

@@ -189,6 +189,11 @@ void assembleHelpers(X64::AssemblyBuilderX64& build, ModuleHelpers& helpers)
build.logAppend("; continueCallInVm\n");
helpers.continueCallInVm = build.setLabel();
emitContinueCallInVm(build);
if (build.logText)
build.logAppend("; return\n");
helpers.return_ = build.setLabel();
emitReturn(build, helpers);
}
} // namespace X64

View File

@@ -24,6 +24,7 @@ struct ModuleHelpers
// A64/X64
Label exitContinueVm;
Label exitNoContinueVm;
+Label return_;
// X64
Label continueCallInVm;

View File

@@ -352,6 +352,89 @@ void emitContinueCallInVm(AssemblyBuilderX64& build)
emitExit(build, /* continueInVm */ true);
}
void emitReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers)
{
// input: ci in r8, res in rdi, number of written values in ecx
RegisterX64 ci = r8;
RegisterX64 res = rdi;
RegisterX64 written = ecx;
RegisterX64 cip = r9;
RegisterX64 nresults = esi;
build.lea(cip, addr[ci - sizeof(CallInfo)]);
// nresults = ci->nresults
build.mov(nresults, dword[ci + offsetof(CallInfo, nresults)]);
Label skipResultCopy;
// Fill the rest of the expected results (nresults - written) with 'nil'
RegisterX64 counter = written;
build.sub(counter, nresults); // counter = -(nresults - written)
build.jcc(ConditionX64::GreaterEqual, skipResultCopy);
Label repeatNilLoop = build.setLabel();
build.mov(dword[res + offsetof(TValue, tt)], LUA_TNIL);
build.add(res, sizeof(TValue));
build.inc(counter);
build.jcc(ConditionX64::NotZero, repeatNilLoop);
build.setLabel(skipResultCopy);
build.mov(qword[rState + offsetof(lua_State, ci)], cip); // L->ci = cip
build.mov(rBase, qword[cip + offsetof(CallInfo, base)]); // sync base = L->base while we have a chance
build.mov(qword[rState + offsetof(lua_State, base)], rBase); // L->base = cip->base
Label skipFixedRetTop;
build.test(nresults, nresults); // test here will set SF=1 for a negative number and it always sets OF to 0
build.jcc(ConditionX64::Less, skipFixedRetTop); // jl jumps if SF != OF
build.mov(res, qword[cip + offsetof(CallInfo, top)]); // res = cip->top
build.setLabel(skipFixedRetTop);
build.mov(qword[rState + offsetof(lua_State, top)], res); // L->top = res
// Unlikely, but this might be the last return from VM
build.test(byte[ci + offsetof(CallInfo, flags)], LUA_CALLINFO_RETURN);
build.jcc(ConditionX64::NotZero, helpers.exitNoContinueVm);
// Returning back to the previous function is a bit tricky
// Registers alive: r9 (cip)
RegisterX64 proto = rcx;
RegisterX64 execdata = rbx;
// Change closure
build.mov(rax, qword[cip + offsetof(CallInfo, func)]);
build.mov(rax, qword[rax + offsetof(TValue, value.gc)]);
build.mov(sClosure, rax);
build.mov(proto, qword[rax + offsetof(Closure, l.p)]);
build.mov(execdata, qword[proto + offsetof(Proto, execdata)]);
build.test(byte[cip + offsetof(CallInfo, flags)], LUA_CALLINFO_NATIVE);
build.jcc(ConditionX64::Zero, helpers.exitContinueVm); // Continue in interpreter if function has no native data
// Change constants
build.mov(rConstants, qword[proto + offsetof(Proto, k)]);
// Change code
build.mov(rdx, qword[proto + offsetof(Proto, code)]);
build.mov(sCode, rdx);
build.mov(rax, qword[cip + offsetof(CallInfo, savedpc)]);
// To get instruction index from instruction pointer, we need to divide byte offset by 4
// But we will actually need to scale instruction index by 4 back to byte offset later so it cancels out
build.sub(rax, rdx);
// Get new instruction location and jump to it
build.mov(edx, dword[execdata + rax]);
build.add(rdx, qword[proto + offsetof(Proto, exectarget)]);
build.jmp(rdx);
}
} // namespace X64
} // namespace CodeGen
} // namespace Luau

View File

@@ -207,6 +207,8 @@ void emitFallback(IrRegAllocX64& regs, AssemblyBuilderX64& build, int offset, in
void emitContinueCallInVm(AssemblyBuilderX64& build);
+void emitReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers);
} // namespace X64
} // namespace CodeGen
} // namespace Luau

View File

@ -166,68 +166,33 @@ void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int
void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int actualResults) void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int actualResults)
{ {
RegisterX64 ci = r8; RegisterX64 ci = r8;
RegisterX64 cip = r9;
RegisterX64 res = rdi; RegisterX64 res = rdi;
RegisterX64 nresults = esi; RegisterX64 written = ecx;
build.mov(ci, qword[rState + offsetof(lua_State, ci)]); build.mov(ci, qword[rState + offsetof(lua_State, ci)]);
build.lea(cip, addr[ci - sizeof(CallInfo)]);
// res = ci->func; note: we assume CALL always puts func+args and expects results to start at func
build.mov(res, qword[ci + offsetof(CallInfo, func)]); build.mov(res, qword[ci + offsetof(CallInfo, func)]);
// nresults = ci->nresults
build.mov(nresults, dword[ci + offsetof(CallInfo, nresults)]);
{
Label skipResultCopy;
RegisterX64 counter = ecx;
if (actualResults == 0) if (actualResults == 0)
{ {
// Our instruction doesn't have any results, so just fill results expected in parent with 'nil' build.xor_(written, written);
build.test(nresults, nresults); // test here will set SF=1 for a negative number, ZF=1 for zero and OF=0 build.jmp(helpers.return_);
build.jcc(ConditionX64::LessEqual, skipResultCopy); // jle jumps if SF != OF or ZF == 1
build.mov(counter, nresults);
Label repeatNilLoop = build.setLabel();
build.mov(dword[res + offsetof(TValue, tt)], LUA_TNIL);
build.add(res, sizeof(TValue));
build.dec(counter);
build.jcc(ConditionX64::NotZero, repeatNilLoop);
} }
else if (actualResults == 1) else if (actualResults >= 1 && actualResults <= 3)
{ {
// Try setting our 1 result for (int r = 0; r < actualResults; ++r)
build.test(nresults, nresults); {
build.jcc(ConditionX64::Zero, skipResultCopy); build.vmovups(xmm0, luauReg(ra + r));
build.vmovups(xmmword[res + r * sizeof(TValue)], xmm0);
build.lea(counter, addr[nresults - 1]); }
build.add(res, actualResults * sizeof(TValue));
build.vmovups(xmm0, luauReg(ra)); build.mov(written, actualResults);
build.vmovups(xmmword[res], xmm0); build.jmp(helpers.return_);
build.add(res, sizeof(TValue));
// Fill the rest of the expected results with 'nil'
build.test(counter, counter); // test here will set SF=1 for a negative number, ZF=1 for zero and OF=0
build.jcc(ConditionX64::LessEqual, skipResultCopy); // jle jumps if SF != OF or ZF == 1
Label repeatNilLoop = build.setLabel();
build.mov(dword[res + offsetof(TValue, tt)], LUA_TNIL);
build.add(res, sizeof(TValue));
build.dec(counter);
build.jcc(ConditionX64::NotZero, repeatNilLoop);
} }
else else
{ {
RegisterX64 vali = rax; RegisterX64 vali = rax;
RegisterX64 valend = rdx; RegisterX64 valend = rdx;
// Copy return values into parent stack (but only up to nresults!)
build.test(nresults, nresults);
build.jcc(ConditionX64::Zero, skipResultCopy);
// vali = ra // vali = ra
build.lea(vali, luauRegAddress(ra)); build.lea(vali, luauRegAddress(ra));
@ -237,89 +202,25 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, i
else else
build.lea(valend, luauRegAddress(ra + actualResults)); // valend = ra + actualResults build.lea(valend, luauRegAddress(ra + actualResults)); // valend = ra + actualResults
build.mov(counter, nresults); build.xor_(written, written);
Label repeatValueLoop, exitValueLoop; Label repeatValueLoop, exitValueLoop;
build.setLabel(repeatValueLoop);
build.cmp(vali, valend); build.cmp(vali, valend);
build.jcc(ConditionX64::NotBelow, exitValueLoop); build.jcc(ConditionX64::NotBelow, exitValueLoop);
build.setLabel(repeatValueLoop);
build.vmovups(xmm0, xmmword[vali]); build.vmovups(xmm0, xmmword[vali]);
build.vmovups(xmmword[res], xmm0); build.vmovups(xmmword[res], xmm0);
build.add(vali, sizeof(TValue)); build.add(vali, sizeof(TValue));
build.add(res, sizeof(TValue)); build.add(res, sizeof(TValue));
build.dec(counter); build.inc(written);
build.jcc(ConditionX64::NotZero, repeatValueLoop); build.cmp(vali, valend);
build.jcc(ConditionX64::Below, repeatValueLoop);
build.setLabel(exitValueLoop); build.setLabel(exitValueLoop);
build.jmp(helpers.return_);
// Fill the rest of the expected results with 'nil'
build.test(counter, counter); // test here will set SF=1 for a negative number, ZF=1 for zero and OF=0
build.jcc(ConditionX64::LessEqual, skipResultCopy); // jle jumps if SF != OF or ZF == 1
Label repeatNilLoop = build.setLabel();
build.mov(dword[res + offsetof(TValue, tt)], LUA_TNIL);
build.add(res, sizeof(TValue));
build.dec(counter);
build.jcc(ConditionX64::NotZero, repeatNilLoop);
} }
build.setLabel(skipResultCopy);
}
build.mov(qword[rState + offsetof(lua_State, ci)], cip); // L->ci = cip
build.mov(rBase, qword[cip + offsetof(CallInfo, base)]); // sync base = L->base while we have a chance
build.mov(qword[rState + offsetof(lua_State, base)], rBase); // L->base = cip->base
// Start with result for LUA_MULTRET/exit value
build.mov(qword[rState + offsetof(lua_State, top)], res); // L->top = res
// Unlikely, but this might be the last return from VM
build.test(byte[ci + offsetof(CallInfo, flags)], LUA_CALLINFO_RETURN);
build.jcc(ConditionX64::NotZero, helpers.exitNoContinueVm);
Label skipFixedRetTop;
build.test(nresults, nresults); // test here will set SF=1 for a negative number and it always sets OF to 0
build.jcc(ConditionX64::Less, skipFixedRetTop); // jl jumps if SF != OF
build.mov(rax, qword[cip + offsetof(CallInfo, top)]);
build.mov(qword[rState + offsetof(lua_State, top)], rax); // L->top = cip->top
build.setLabel(skipFixedRetTop);
// Returning back to the previous function is a bit tricky
// Registers alive: r9 (cip)
RegisterX64 proto = rcx;
RegisterX64 execdata = rbx;
// Change closure
build.mov(rax, qword[cip + offsetof(CallInfo, func)]);
build.mov(rax, qword[rax + offsetof(TValue, value.gc)]);
build.mov(sClosure, rax);
build.mov(proto, qword[rax + offsetof(Closure, l.p)]);
build.mov(execdata, qword[proto + offsetof(Proto, execdata)]);
build.test(byte[cip + offsetof(CallInfo, flags)], LUA_CALLINFO_NATIVE);
build.jcc(ConditionX64::Zero, helpers.exitContinueVm); // Continue in interpreter if function has no native data
// Change constants
build.mov(rConstants, qword[proto + offsetof(Proto, k)]);
// Change code
build.mov(rdx, qword[proto + offsetof(Proto, code)]);
build.mov(sCode, rdx);
build.mov(rax, qword[cip + offsetof(CallInfo, savedpc)]);
// To get instruction index from instruction pointer, we need to divide byte offset by 4
// But we will actually need to scale instruction index by 4 back to byte offset later so it cancels out
build.sub(rax, rdx);
// Get new instruction location and jump to it
build.mov(edx, dword[execdata + rax]);
build.add(rdx, qword[proto + offsetof(Proto, exectarget)]);
build.jmp(rdx);
}
void emitInstSetList(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int rb, int count, uint32_t index)

View File

@ -661,9 +661,212 @@ static void computeCfgBlockEdges(IrFunction& function)
}
}
// Assign tree depth and pre- and post- DFS visit order of the tree/graph nodes
// Optionally, collect required node order into a vector
template<auto childIt>
void computeBlockOrdering(
IrFunction& function, std::vector<BlockOrdering>& ordering, std::vector<uint32_t>* preOrder, std::vector<uint32_t>* postOrder)
{
CfgInfo& info = function.cfg;
LUAU_ASSERT(info.idoms.size() == function.blocks.size());
ordering.clear();
ordering.resize(function.blocks.size());
// Get depth-first post-order using manual stack instead of recursion
struct StackItem
{
uint32_t blockIdx;
uint32_t itPos;
};
std::vector<StackItem> stack;
if (preOrder)
preOrder->reserve(function.blocks.size());
if (postOrder)
postOrder->reserve(function.blocks.size());
uint32_t nextPreOrder = 0;
uint32_t nextPostOrder = 0;
stack.push_back({0, 0});
ordering[0].visited = true;
ordering[0].preOrder = nextPreOrder++;
while (!stack.empty())
{
StackItem& item = stack.back();
BlockIteratorWrapper children = childIt(info, item.blockIdx);
if (item.itPos < children.size())
{
uint32_t childIdx = children[item.itPos++];
BlockOrdering& childOrdering = ordering[childIdx];
if (!childOrdering.visited)
{
childOrdering.visited = true;
childOrdering.depth = uint32_t(stack.size());
childOrdering.preOrder = nextPreOrder++;
if (preOrder)
preOrder->push_back(item.blockIdx);
stack.push_back({childIdx, 0});
}
}
else
{
ordering[item.blockIdx].postOrder = nextPostOrder++;
if (postOrder)
postOrder->push_back(item.blockIdx);
stack.pop_back();
}
}
}
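// A minimal standalone sketch of the same traversal idea (illustrative names and
// a plain adjacency-list graph are assumed here, not the IR data structures):
// an explicit stack replaces recursion, a node gets its pre-order number when it
// is first discovered and its post-order number once all of its children have
// been processed.
#include <cstdint>
#include <utility>
#include <vector>
inline void dfsOrders(const std::vector<std::vector<uint32_t>>& succ, std::vector<uint32_t>& pre, std::vector<uint32_t>& post)
{
    if (succ.empty())
        return;
    pre.assign(succ.size(), ~0u);
    post.assign(succ.size(), ~0u);
    uint32_t nextPre = 0, nextPost = 0;
    std::vector<std::pair<uint32_t, uint32_t>> stack = {{0u, 0u}}; // {node, next child position}
    pre[0] = nextPre++;
    while (!stack.empty())
    {
        auto& [node, pos] = stack.back();
        if (pos < succ[node].size())
        {
            uint32_t child = succ[node][pos++];
            if (pre[child] == ~0u) // first visit
            {
                pre[child] = nextPre++;
                stack.push_back({child, 0u});
            }
        }
        else
        {
            post[node] = nextPost++; // all children processed
            stack.pop_back();
        }
    }
}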
// Dominance tree construction based on 'A Simple, Fast Dominance Algorithm' [Keith D. Cooper, et al]
// This solution has quadratic complexity in the worst case.
// It is possible to switch to SEMI-NCA algorithm (also quadratic) mentioned in 'Linear-Time Algorithms for Dominators and Related Problems' [Loukas
// Georgiadis]
// Find block that is common between blocks 'a' and 'b' on the path towards the entry
static uint32_t findCommonDominator(const std::vector<uint32_t>& idoms, const std::vector<BlockOrdering>& data, uint32_t a, uint32_t b)
{
while (a != b)
{
while (data[a].postOrder < data[b].postOrder)
{
a = idoms[a];
LUAU_ASSERT(a != ~0u);
}
while (data[b].postOrder < data[a].postOrder)
{
b = idoms[b];
LUAU_ASSERT(b != ~0u);
}
}
return a;
}
void computeCfgImmediateDominators(IrFunction& function)
{
CfgInfo& info = function.cfg;
// Clear existing data
info.idoms.clear();
info.idoms.resize(function.blocks.size(), ~0u);
std::vector<BlockOrdering> ordering;
std::vector<uint32_t> blocksInPostOrder;
computeBlockOrdering<successors>(function, ordering, /* preOrder */ nullptr, &blocksInPostOrder);
// Entry node is temporarily marked to be an idom of itself to make algorithm work
info.idoms[0] = 0;
// Iteratively compute immediate dominators
bool updated = true;
while (updated)
{
updated = false;
// Go over blocks in reverse post-order of CFG
// '- 2' skips the root node which is last in post-order traversal
for (int i = int(blocksInPostOrder.size() - 2); i >= 0; i--)
{
uint32_t blockIdx = blocksInPostOrder[i];
uint32_t newIdom = ~0u;
for (uint32_t predIdx : predecessors(info, blockIdx))
{
if (uint32_t predIdom = info.idoms[predIdx]; predIdom != ~0u)
{
if (newIdom == ~0u)
newIdom = predIdx;
else
newIdom = findCommonDominator(info.idoms, ordering, newIdom, predIdx);
}
}
if (newIdom != info.idoms[blockIdx])
{
info.idoms[blockIdx] = newIdom;
// Run until a fixed point is reached
updated = true;
}
}
}
// Entry node doesn't have an immediate dominator
info.idoms[0] = ~0u;
}
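// Brute-force cross-check of what the fixed point above converges to. This is a
// sketch under assumptions (entry block at index 0, plain adjacency lists), not
// the production path: d dominates b iff b becomes unreachable from the entry
// once d is removed from the graph, and idom(b) is the strict dominator of b
// closest to b.
#include <cstdint>
#include <vector>
inline bool reachableAvoiding(const std::vector<std::vector<uint32_t>>& succ, uint32_t target, uint32_t banned)
{
    if (succ.empty() || banned == 0)
        return false; // removing the entry disconnects every other block
    std::vector<bool> seen(succ.size());
    std::vector<uint32_t> work = {0};
    seen[0] = true;
    while (!work.empty())
    {
        uint32_t b = work.back();
        work.pop_back();
        if (b == target)
            return true;
        for (uint32_t s : succ[b])
        {
            if (s != banned && !seen[s])
            {
                seen[s] = true;
                work.push_back(s);
            }
        }
    }
    return false;
}
inline bool dominates(const std::vector<std::vector<uint32_t>>& succ, uint32_t d, uint32_t b)
{
    return d == b || !reachableAvoiding(succ, b, d);
}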
void computeCfgDominanceTreeChildren(IrFunction& function)
{
CfgInfo& info = function.cfg;
// Clear existing data
info.domChildren.clear();
info.domChildrenOffsets.clear();
info.domChildrenOffsets.resize(function.blocks.size());
// First we need to know children count of each node in the dominance tree
// We use the offsets array to hold this data; counts will be readjusted to offsets later
for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
{
uint32_t domParent = info.idoms[blockIdx];
if (domParent != ~0u)
info.domChildrenOffsets[domParent]++;
}
// Convert counts to offsets using prefix sum
uint32_t total = 0;
for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
{
uint32_t& offset = info.domChildrenOffsets[blockIdx];
uint32_t count = offset;
offset = total;
total += count;
}
info.domChildren.resize(total);
for (size_t blockIdx = 0; blockIdx < function.blocks.size(); blockIdx++)
{
// We use a trick here, where we use the starting offset of the dominance children list as the position where to write next child
// The values will be adjusted back in a separate loop later
uint32_t domParent = info.idoms[blockIdx];
if (domParent != ~0u)
info.domChildren[info.domChildrenOffsets[domParent]++] = uint32_t(blockIdx);
}
// Offsets into the dominance children list were used as iterators in the previous loop
// That process basically moved the values in the array 1 step towards the start
// Here we move them one step towards the end and restore 0 for first offset
for (int blockIdx = int(function.blocks.size() - 1); blockIdx > 0; blockIdx--)
info.domChildrenOffsets[blockIdx] = info.domChildrenOffsets[blockIdx - 1];
info.domChildrenOffsets[0] = 0;
computeBlockOrdering<domChildren>(function, info.domOrdering, /* preOrder */ nullptr, /* postOrder */ nullptr);
}
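// Standalone sketch of the counting + prefix-sum bucketing used above, assuming
// only a parent array as input (~0u meaning "no parent"); names are illustrative.
// The result is a CSR-style layout: the children of node i occupy
// children[offsets[i]] up to (but not including) the next node's offset.
#include <cstdint>
#include <vector>
inline void bucketChildren(const std::vector<uint32_t>& parent, std::vector<uint32_t>& children, std::vector<uint32_t>& offsets)
{
    size_t n = parent.size();
    if (n == 0)
        return;
    // 1. count children per parent
    offsets.assign(n, 0);
    for (uint32_t p : parent)
        if (p != ~0u)
            offsets[p]++;
    // 2. exclusive prefix sum turns counts into starting offsets
    uint32_t total = 0;
    for (size_t i = 0; i < n; i++)
    {
        uint32_t count = offsets[i];
        offsets[i] = total;
        total += count;
    }
    // 3. write every child at its parent's running offset (this advances each offset by the child count)
    children.assign(total, 0);
    for (uint32_t i = 0; i < uint32_t(n); i++)
        if (parent[i] != ~0u)
            children[offsets[parent[i]]++] = i;
    // 4. shift offsets back one slot so offsets[i] again points at the first child of i
    for (size_t i = n; i-- > 1;)
        offsets[i] = offsets[i - 1];
    offsets[0] = 0;
}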
void computeCfgInfo(IrFunction& function)
{
computeCfgBlockEdges(function);
computeCfgImmediateDominators(function);
computeCfgDominanceTreeChildren(function);
computeCfgLiveInOutRegSets(function);
}
@ -687,5 +890,15 @@ BlockIteratorWrapper successors(const CfgInfo& cfg, uint32_t blockIdx)
return BlockIteratorWrapper{cfg.successors.data() + start, cfg.successors.data() + end};
}
BlockIteratorWrapper domChildren(const CfgInfo& cfg, uint32_t blockIdx)
{
LUAU_ASSERT(blockIdx < cfg.domChildrenOffsets.size());
uint32_t start = cfg.domChildrenOffsets[blockIdx];
uint32_t end = blockIdx + 1 < cfg.domChildrenOffsets.size() ? cfg.domChildrenOffsets[blockIdx + 1] : uint32_t(cfg.domChildren.size());
return BlockIteratorWrapper{cfg.domChildren.data() + start, cfg.domChildren.data() + end};
}
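// Consuming the layout is then a matter of slicing, as in domChildren above. A
// hypothetical depth-first walk over the dominator tree (a sketch; the array
// names and shapes are assumed) looks like this:
#include <cstdint>
#include <vector>
inline void visitDomTree(const std::vector<uint32_t>& children, const std::vector<uint32_t>& offsets, uint32_t root)
{
    std::vector<uint32_t> work = {root};
    while (!work.empty())
    {
        uint32_t b = work.back();
        work.pop_back();
        // ... process block b here ...
        uint32_t start = offsets[b];
        uint32_t end = b + 1 < offsets.size() ? offsets[b + 1] : uint32_t(children.size());
        for (uint32_t i = start; i < end; i++)
            work.push_back(children[i]);
    }
}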
} // namespace CodeGen
} // namespace Luau

View File

@ -25,6 +25,7 @@ IrBuilder::IrBuilder()
void IrBuilder::buildFunctionIr(Proto* proto)
{
function.proto = proto;
function.variadic = proto->is_vararg != 0;
// Rebuild original control flow blocks
rebuildBytecodeBasicBlocks(proto);

View File

@ -185,11 +185,10 @@ static bool emitBuiltin(
}
}
IrLoweringA64::IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers, NativeState& data, Proto* proto, IrFunction& function)
IrLoweringA64::IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers, NativeState& data, IrFunction& function)
: build(build)
, helpers(helpers)
, data(data)
, proto(proto)
, function(function)
, regs(function, {{x0, x15}, {x16, x17}, {q0, q7}, {q16, q31}})
, valueTracker(function)
@ -1343,19 +1342,71 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
break;
case IrCmd::RETURN:
regs.spill(build, index);
if (function.variadic)
{
build.ldr(x1, mem(rState, offsetof(lua_State, ci)));
build.ldr(x1, mem(x1, offsetof(CallInfo, func)));
}
else if (intOp(inst.b) != 1)
build.sub(x1, rBase, sizeof(TValue)); // invariant: ci->func + 1 == ci->base for non-variadic frames
if (intOp(inst.b) == 0)
{
build.mov(w2, 0);
build.b(helpers.return_);
}
else if (intOp(inst.b) == 1 && !function.variadic)
{
// fast path: minimizes x1 adjustments
// note that we skipped x1 computation for this specific case above
build.ldr(q0, mem(rBase, vmRegOp(inst.a) * sizeof(TValue)));
build.str(q0, mem(rBase, -int(sizeof(TValue))));
build.mov(x1, rBase);
build.mov(w2, 1);
build.b(helpers.return_);
}
else if (intOp(inst.b) >= 1 && intOp(inst.b) <= 3)
{
for (int r = 0; r < intOp(inst.b); ++r)
{
build.ldr(q0, mem(rBase, (vmRegOp(inst.a) + r) * sizeof(TValue)));
build.str(q0, mem(x1, sizeof(TValue), AddressKindA64::post));
}
build.mov(w2, intOp(inst.b));
build.b(helpers.return_);
}
else
{
build.mov(w2, 0);
// vali = ra
build.add(x3, rBase, uint16_t(vmRegOp(inst.a) * sizeof(TValue)));
// valend = (n == LUA_MULTRET) ? L->top : ra + n
if (intOp(inst.b) == LUA_MULTRET)
build.ldr(x2, mem(rState, offsetof(lua_State, top)));
build.ldr(x4, mem(rState, offsetof(lua_State, top)));
else
build.add(x2, rBase, uint16_t((vmRegOp(inst.a) + intOp(inst.b)) * sizeof(TValue)));
build.add(x4, rBase, uint16_t((vmRegOp(inst.a) + intOp(inst.b)) * sizeof(TValue)));
// returnFallback(L, ra, valend)
build.mov(x0, rState);
build.add(x1, rBase, uint16_t(vmRegOp(inst.a) * sizeof(TValue)));
build.ldr(x3, mem(rNativeContext, offsetof(NativeContext, returnFallback)));
build.blr(x3);
// reentry with x0=closure (NULL will trigger exit)
Label repeatValueLoop, exitValueLoop;
build.b(helpers.reentry);
if (intOp(inst.b) == LUA_MULTRET)
{
build.cmp(x3, x4);
build.b(ConditionA64::CarrySet, exitValueLoop); // CarrySet == UnsignedGreaterEqual
}
build.setLabel(repeatValueLoop);
build.ldr(q0, mem(x3, sizeof(TValue), AddressKindA64::post));
build.str(q0, mem(x1, sizeof(TValue), AddressKindA64::post));
build.add(w2, w2, 1);
build.cmp(x3, x4);
build.b(ConditionA64::CarryClear, repeatValueLoop); // CarryClear == UnsignedLess
build.setLabel(exitValueLoop);
build.b(helpers.return_);
}
break;
case IrCmd::FORGLOOP:
// register layout: ra + 1 = table, ra + 2 = internal index, ra + 3 .. ra + aux = iteration variables

View File

@ -9,8 +9,6 @@
#include <vector>
struct Proto;
namespace Luau
{
namespace CodeGen
@ -25,7 +23,7 @@ namespace A64
struct IrLoweringA64
{
IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers, NativeState& data, Proto* proto, IrFunction& function);
IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers, NativeState& data, IrFunction& function);
void lowerInst(IrInst& inst, uint32_t index, IrBlock& next);
void finishBlock();
@ -58,7 +56,6 @@ struct IrLoweringA64
AssemblyBuilderA64& build;
ModuleHelpers& helpers;
NativeState& data;
Proto* proto = nullptr; // Temporarily required to provide 'Instruction* pc' to old emitInst* methods
IrFunction& function;

View File

@ -90,7 +90,6 @@ void initFunctions(NativeState& data)
data.context.callEpilogC = callEpilogC;
data.context.callFallback = callFallback;
data.context.returnFallback = returnFallback;
data.context.executeGETGLOBAL = executeGETGLOBAL;
data.context.executeSETGLOBAL = executeSETGLOBAL;

View File

@ -86,7 +86,6 @@ struct NativeContext
void (*callEpilogC)(lua_State* L, int nresults, int n) = nullptr;
Closure* (*callFallback)(lua_State* L, StkId ra, StkId argtop, int nresults) = nullptr;
Closure* (*returnFallback)(lua_State* L, StkId ra, StkId valend) = nullptr;
// Opcode fallbacks, implemented in C
const Instruction* (*executeGETGLOBAL)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;

View File

@ -44,6 +44,10 @@ ANALYZE_CLI_SOURCES=CLI/FileUtils.cpp CLI/Flags.cpp CLI/Analyze.cpp
ANALYZE_CLI_OBJECTS=$(ANALYZE_CLI_SOURCES:%=$(BUILD)/%.o)
ANALYZE_CLI_TARGET=$(BUILD)/luau-analyze
COMPILE_CLI_SOURCES=CLI/FileUtils.cpp CLI/Flags.cpp CLI/Compile.cpp
COMPILE_CLI_OBJECTS=$(COMPILE_CLI_SOURCES:%=$(BUILD)/%.o)
COMPILE_CLI_TARGET=$(BUILD)/luau-compile
FUZZ_SOURCES=$(wildcard fuzz/*.cpp) fuzz/luau.pb.cpp
FUZZ_OBJECTS=$(FUZZ_SOURCES:%=$(BUILD)/%.o)
@ -55,8 +59,8 @@ ifneq ($(opt),)
TESTS_ARGS+=-O$(opt)
endif
OBJECTS=$(AST_OBJECTS) $(COMPILER_OBJECTS) $(ANALYSIS_OBJECTS) $(CODEGEN_OBJECTS) $(VM_OBJECTS) $(ISOCLINE_OBJECTS) $(TESTS_OBJECTS) $(REPL_CLI_OBJECTS) $(ANALYZE_CLI_OBJECTS) $(FUZZ_OBJECTS)
OBJECTS=$(AST_OBJECTS) $(COMPILER_OBJECTS) $(ANALYSIS_OBJECTS) $(CODEGEN_OBJECTS) $(VM_OBJECTS) $(ISOCLINE_OBJECTS) $(TESTS_OBJECTS) $(REPL_CLI_OBJECTS) $(ANALYZE_CLI_OBJECTS) $(COMPILE_CLI_OBJECTS) $(FUZZ_OBJECTS)
EXECUTABLE_ALIASES = luau luau-analyze luau-tests
EXECUTABLE_ALIASES = luau luau-analyze luau-compile luau-tests
# common flags
CXXFLAGS=-g -Wall
@ -132,6 +136,7 @@ $(ISOCLINE_OBJECTS): CXXFLAGS+=-Wno-unused-function -Iextern/isocline/include
$(TESTS_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IAnalysis/include -ICodeGen/include -IVM/include -ICLI -Iextern -DDOCTEST_CONFIG_DOUBLE_STRINGIFY
$(REPL_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IVM/include -ICodeGen/include -Iextern -Iextern/isocline/include
$(ANALYZE_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -IAnalysis/include -Iextern
$(COMPILE_CLI_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IVM/include -ICodeGen/include
$(FUZZ_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IAnalysis/include -IVM/include -ICodeGen/include
$(TESTS_TARGET): LDFLAGS+=-lpthread
@ -189,6 +194,9 @@ luau: $(REPL_CLI_TARGET)
luau-analyze: $(ANALYZE_CLI_TARGET)
ln -fs $^ $@
luau-compile: $(COMPILE_CLI_TARGET)
ln -fs $^ $@
luau-tests: $(TESTS_TARGET)
ln -fs $^ $@
@ -196,8 +204,9 @@ luau-tests: $(TESTS_TARGET)
$(TESTS_TARGET): $(TESTS_OBJECTS) $(ANALYSIS_TARGET) $(COMPILER_TARGET) $(AST_TARGET) $(CODEGEN_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET)
$(REPL_CLI_TARGET): $(REPL_CLI_OBJECTS) $(COMPILER_TARGET) $(AST_TARGET) $(CODEGEN_TARGET) $(VM_TARGET) $(ISOCLINE_TARGET)
$(ANALYZE_CLI_TARGET): $(ANALYZE_CLI_OBJECTS) $(ANALYSIS_TARGET) $(AST_TARGET)
$(COMPILE_CLI_TARGET): $(COMPILE_CLI_OBJECTS) $(COMPILER_TARGET) $(AST_TARGET) $(CODEGEN_TARGET) $(VM_TARGET)
$(TESTS_TARGET) $(REPL_CLI_TARGET) $(ANALYZE_CLI_TARGET):
$(TESTS_TARGET) $(REPL_CLI_TARGET) $(ANALYZE_CLI_TARGET) $(COMPILE_CLI_TARGET):
$(CXX) $^ $(LDFLAGS) -o $@
# executable targets for fuzzing

View File

@ -141,7 +141,6 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/BuiltinDefinitions.h
Analysis/include/Luau/Clone.h
Analysis/include/Luau/Config.h
Analysis/include/Luau/Refinement.h
Analysis/include/Luau/Constraint.h
Analysis/include/Luau/ConstraintGraphBuilder.h
Analysis/include/Luau/ConstraintSolver.h
@ -153,6 +152,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/Error.h
Analysis/include/Luau/FileResolver.h
Analysis/include/Luau/Frontend.h
Analysis/include/Luau/InsertionOrderedMap.h
Analysis/include/Luau/Instantiation.h
Analysis/include/Luau/IostreamHelpers.h
Analysis/include/Luau/JsonEmitter.h
@ -165,6 +165,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/Predicate.h
Analysis/include/Luau/Quantify.h
Analysis/include/Luau/RecursionCounter.h
Analysis/include/Luau/Refinement.h
Analysis/include/Luau/RequireTracer.h
Analysis/include/Luau/Scope.h
Analysis/include/Luau/Simplify.h
@ -175,6 +176,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/ToString.h
Analysis/include/Luau/Transpiler.h
Analysis/include/Luau/TxnLog.h
Analysis/include/Luau/Type.h
Analysis/include/Luau/TypeArena.h
Analysis/include/Luau/TypeAttach.h
Analysis/include/Luau/TypeChecker2.h
@ -183,7 +185,6 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/TypeInfer.h
Analysis/include/Luau/TypePack.h
Analysis/include/Luau/TypeUtils.h
Analysis/include/Luau/Type.h
Analysis/include/Luau/Unifiable.h
Analysis/include/Luau/Unifier.h
Analysis/include/Luau/UnifierSharedState.h
@ -198,7 +199,6 @@ target_sources(Luau.Analysis PRIVATE
Analysis/src/BuiltinDefinitions.cpp
Analysis/src/Clone.cpp
Analysis/src/Config.cpp
Analysis/src/Refinement.cpp
Analysis/src/Constraint.cpp
Analysis/src/ConstraintGraphBuilder.cpp
Analysis/src/ConstraintSolver.cpp
@ -216,6 +216,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/src/Module.cpp
Analysis/src/Normalize.cpp
Analysis/src/Quantify.cpp
Analysis/src/Refinement.cpp
Analysis/src/RequireTracer.cpp
Analysis/src/Scope.cpp
Analysis/src/Simplify.cpp
@ -226,6 +227,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/src/ToString.cpp
Analysis/src/Transpiler.cpp
Analysis/src/TxnLog.cpp
Analysis/src/Type.cpp
Analysis/src/TypeArena.cpp
Analysis/src/TypeAttach.cpp
Analysis/src/TypeChecker2.cpp
@ -234,7 +236,6 @@ target_sources(Luau.Analysis PRIVATE
Analysis/src/TypeInfer.cpp
Analysis/src/TypePack.cpp
Analysis/src/TypeUtils.cpp
Analysis/src/Type.cpp
Analysis/src/Unifiable.cpp
Analysis/src/Unifier.cpp
)
@ -326,6 +327,7 @@ if(TARGET Luau.Analyze.CLI)
endif()
if(TARGET Luau.Ast.CLI)
# Luau.Ast.CLI Sources
target_sources(Luau.Ast.CLI PRIVATE
CLI/Ast.cpp
CLI/FileUtils.h
@ -415,6 +417,7 @@ if(TARGET Luau.UnitTest)
tests/TypeVar.test.cpp
tests/Variant.test.cpp
tests/VisitType.test.cpp
tests/InsertionOrderedMap.test.cpp
tests/main.cpp)
endif()
@ -449,9 +452,20 @@ if(TARGET Luau.Web)
endif()
if(TARGET Luau.Reduce.CLI)
# Luau.Reduce.CLI Sources
target_sources(Luau.Reduce.CLI PRIVATE
CLI/Reduce.cpp
CLI/FileUtils.cpp
CLI/FileUtils.h
)
endif()
if(TARGET Luau.Compile.CLI)
# Luau.Compile.CLI Sources
target_sources(Luau.Compile.CLI PRIVATE
CLI/FileUtils.h
CLI/FileUtils.cpp
CLI/Flags.h
CLI/Flags.cpp
CLI/Compile.cpp)
endif()

View File

@ -17,8 +17,6 @@
#include <string.h>
LUAU_FASTFLAGVARIABLE(LuauUniformTopHandling, false)
/*
** {======================================================
** Error-recovery functions
@ -252,7 +250,7 @@ void luaD_call(lua_State* L, StkId func, int nresults)
L->isactive = false;
}
if (FFlag::LuauUniformTopHandling && nresults != LUA_MULTRET)
if (nresults != LUA_MULTRET)
L->top = restorestack(L, old_func) + nresults;
L->nCcalls--;

View File

@ -16,7 +16,6 @@
#include <string.h>
LUAU_FASTFLAG(LuauUniformTopHandling)
LUAU_FASTFLAG(LuauGetImportDirect)
// Disable c99-designator to avoid the warning in CGOTO dispatch table
@ -1043,8 +1042,6 @@ reentry:
// we're done!
if (LUAU_UNLIKELY(ci->flags & LUA_CALLINFO_RETURN))
{
if (!FFlag::LuauUniformTopHandling)
L->top = res;
goto exit;
}

View File

@ -460,6 +460,25 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Undefined")
SINGLE_COMPARE(udf(), 0x00000000);
}
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "PrePostIndexing")
{
SINGLE_COMPARE(ldr(x0, mem(x1, 1)), 0xF8401020);
SINGLE_COMPARE(ldr(x0, mem(x1, 1, AddressKindA64::pre)), 0xF8401C20);
SINGLE_COMPARE(ldr(x0, mem(x1, 1, AddressKindA64::post)), 0xF8401420);
SINGLE_COMPARE(ldr(q0, mem(x1, 1)), 0x3CC01020);
SINGLE_COMPARE(ldr(q0, mem(x1, 1, AddressKindA64::pre)), 0x3CC01C20);
SINGLE_COMPARE(ldr(q0, mem(x1, 1, AddressKindA64::post)), 0x3CC01420);
SINGLE_COMPARE(str(x0, mem(x1, 1)), 0xF8001020);
SINGLE_COMPARE(str(x0, mem(x1, 1, AddressKindA64::pre)), 0xF8001C20);
SINGLE_COMPARE(str(x0, mem(x1, 1, AddressKindA64::post)), 0xF8001420);
SINGLE_COMPARE(str(q0, mem(x1, 1)), 0x3C801020);
SINGLE_COMPARE(str(q0, mem(x1, 1, AddressKindA64::pre)), 0x3C801C20);
SINGLE_COMPARE(str(q0, mem(x1, 1, AddressKindA64::post)), 0x3C801420);
}
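// Rough model of the three addressing modes exercised above (an illustrative
// sketch; plain pointers stand in for the base register and its write-back):
//   ldr x0,[x1,#1]    offset: loads from x1 + 1, x1 is left unchanged
//   ldr x0,[x1,#1]!   pre-index: x1 is advanced by 1 first, then the load uses the new x1
//   ldr x0,[x1]!,#1   post-index (as logged below): the load uses the old x1, then x1 is advanced by 1
#include <cstdint>
#include <cstring>
inline uint64_t loadOffset(const uint8_t* p, int imm)
{
    uint64_t v;
    std::memcpy(&v, p + imm, sizeof(v)); // no write-back to the base
    return v;
}
inline uint64_t loadPreIndex(const uint8_t*& p, int imm)
{
    p += imm; // write-back happens before the access
    uint64_t v;
    std::memcpy(&v, p, sizeof(v));
    return v;
}
inline uint64_t loadPostIndex(const uint8_t*& p, int imm)
{
    uint64_t v;
    std::memcpy(&v, p, sizeof(v)); // access uses the original base
    p += imm; // write-back happens after the access
    return v;
}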
TEST_CASE("LogTest") TEST_CASE("LogTest")
{ {
AssemblyBuilderA64 build(/* logText= */ true); AssemblyBuilderA64 build(/* logText= */ true);
@ -501,6 +520,10 @@ TEST_CASE("LogTest")
build.ubfx(x1, x2, 37, 5);
build.ldr(x0, mem(x1, 1));
build.ldr(x0, mem(x1, 1, AddressKindA64::pre));
build.ldr(x0, mem(x1, 1, AddressKindA64::post));
build.setLabel(l);
build.ret();
@ -534,6 +557,9 @@ TEST_CASE("LogTest")
tbz x0,#5,.L1
fcvt s1,d2
ubfx x1,x2,#3705
ldr x0,[x1,#1]
ldr x0,[x1,#1]!
ldr x0,[x1]!,#1
.L1:
ret
)";

View File

@ -3471,8 +3471,6 @@ local a: T@1
TEST_CASE_FIXTURE(ACFixture, "frontend_use_correct_global_scope")
{
ScopedFastFlag sff("LuauTypeCheckerUseCorrectScope", true);
loadDefinition(R"(
declare class Instance
Name: string

View File

@ -0,0 +1,140 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/InsertionOrderedMap.h"
#include <memory>
#include "doctest.h"
using namespace Luau;
struct MapFixture
{
std::vector<std::unique_ptr<int>> ptrs;
int* makePtr()
{
ptrs.push_back(std::make_unique<int>(int{}));
return ptrs.back().get();
}
};
TEST_SUITE_BEGIN("InsertionOrderedMap");
TEST_CASE_FIXTURE(MapFixture, "map_insertion")
{
InsertionOrderedMap<int*, int> map;
int* a = makePtr();
int* b = makePtr();
map.insert(a, 1);
map.insert(b, 2);
}
TEST_CASE_FIXTURE(MapFixture, "map_lookup")
{
InsertionOrderedMap<int*, int> map;
int* a = makePtr();
map.insert(a, 1);
int* r = map.get(a);
REQUIRE(r != nullptr);
CHECK(*r == 1);
r = map.get(makePtr());
CHECK(r == nullptr);
}
TEST_CASE_FIXTURE(MapFixture, "insert_does_not_update")
{
InsertionOrderedMap<int*, int> map;
int* k = makePtr();
map.insert(k, 1);
map.insert(k, 2);
int* v = map.get(k);
REQUIRE(v != nullptr);
CHECK(*v == 1);
}
TEST_CASE_FIXTURE(MapFixture, "insertion_order_is_iteration_order")
{
// This one is a little hard to prove, in that if the ordering guarantees
// fail this test isn't guaranteed to fail, but it is strictly better than
// nothing.
InsertionOrderedMap<int*, int> map;
int* a = makePtr();
int* b = makePtr();
int* c = makePtr();
map.insert(a, 1);
map.insert(b, 1);
map.insert(c, 1);
auto it = map.begin();
REQUIRE(it != map.end());
CHECK(it->first == a);
CHECK(it->second == 1);
++it;
REQUIRE(it != map.end());
CHECK(it->first == b);
CHECK(it->second == 1);
++it;
REQUIRE(it != map.end());
CHECK(it->first == c);
CHECK(it->second == 1);
++it;
CHECK(it == map.end());
}
TEST_CASE_FIXTURE(MapFixture, "destructuring_iterator_compiles")
{
// This test's only purpose is to successfully compile.
InsertionOrderedMap<int*, int> map;
for (auto [k, v] : map)
{
// Checks here solely to silence unused variable warnings.
CHECK(k);
CHECK(v > 0);
}
}
TEST_CASE_FIXTURE(MapFixture, "map_erasure")
{
InsertionOrderedMap<int*, int> map;
int* a = makePtr();
int* b = makePtr();
map.insert(a, 1);
map.insert(b, 2);
map.erase(map.find(a));
CHECK(map.size() == 1);
CHECK(!map.contains(a));
CHECK(map.get(a) == nullptr);
int* v = map.get(b);
REQUIRE(v);
}
TEST_CASE_FIXTURE(MapFixture, "map_clear")
{
InsertionOrderedMap<int*, int> map;
int* a = makePtr();
map.insert(a, 1);
map.clear();
CHECK(map.size() == 0);
CHECK(!map.contains(a));
CHECK(map.get(a) == nullptr);
}
TEST_SUITE_END();

View File

@ -74,6 +74,35 @@ public:
CHECK(target.f == inst.f);
}
void defineCfgTree(const std::vector<std::vector<uint32_t>>& successorSets)
{
for (const std::vector<uint32_t>& successorSet : successorSets)
{
build.beginBlock(build.block(IrBlockKind::Internal));
build.function.cfg.successorsOffsets.push_back(uint32_t(build.function.cfg.successors.size()));
build.function.cfg.successors.insert(build.function.cfg.successors.end(), successorSet.begin(), successorSet.end());
}
// Brute-force the predecessor list
for (int i = 0; i < int(build.function.blocks.size()); i++)
{
build.function.cfg.predecessorsOffsets.push_back(uint32_t(build.function.cfg.predecessors.size()));
for (int k = 0; k < int(build.function.blocks.size()); k++)
{
for (uint32_t succIdx : successors(build.function.cfg, k))
{
if (succIdx == i)
build.function.cfg.predecessors.push_back(k);
}
}
}
computeCfgImmediateDominators(build.function);
computeCfgDominanceTreeChildren(build.function);
}
IrBuilder build;
// Luau.VM headers are not accessible
@ -2164,6 +2193,30 @@ bb_0:
)"); )");
} }
// 'A Simple, Fast Dominance Algorithm' [Keith D. Cooper, et al]. Figure 2.
TEST_CASE_FIXTURE(IrBuilderFixture, "DominanceVerification1")
{
defineCfgTree({{1, 2}, {3}, {4}, {4}, {3}});
CHECK(build.function.cfg.idoms == std::vector<uint32_t>{{~0u, 0, 0, 0, 0}});
}
// 'A Linear Time Algorithm for Placing Phi-Nodes' [Vugranam C.Sreedhar]. Figure 1.
TEST_CASE_FIXTURE(IrBuilderFixture, "DominanceVerification2")
{
defineCfgTree({{1, 16}, {2, 3, 4}, {4, 7}, {9}, {5}, {6}, {2, 8}, {8}, {7, 15}, {10, 11}, {12}, {12}, {13}, {3, 14, 15}, {12}, {16}, {}});
CHECK(build.function.cfg.idoms == std::vector<uint32_t>{~0u, 0, 1, 1, 1, 4, 5, 1, 1, 3, 9, 9, 9, 12, 13, 1, 0});
}
// 'A Linear Time Algorithm for Placing Phi-Nodes' [Vugranam C.Sreedhar]. Figure 4.
TEST_CASE_FIXTURE(IrBuilderFixture, "DominanceVerification3")
{
defineCfgTree({{1, 2}, {3}, {3, 4}, {5}, {5, 6}, {7}, {7}, {}});
CHECK(build.function.cfg.idoms == std::vector<uint32_t>{~0u, 0, 0, 0, 2, 0, 4, 0});
}
TEST_SUITE_END();
TEST_SUITE_BEGIN("ValueNumbering");

View File

@ -791,14 +791,21 @@ TEST_CASE_FIXTURE(NormalizeFixture, "normalize_blocked_types")
CHECK_EQ(normalizer.typeFromNormal(*norm), &blocked);
}
TEST_CASE_FIXTURE(NormalizeFixture, "normalize_pending_expansion_types")
TEST_CASE_FIXTURE(NormalizeFixture, "normalize_is_exactly_number")
{
AstName name;
Type pending{PendingExpansionType{std::nullopt, name, {}, {}}};
const NormalizedType* norm = normalizer.normalize(&pending);
CHECK_EQ(normalizer.typeFromNormal(*norm), &pending);
const NormalizedType* number = normalizer.normalize(builtinTypes->numberType);
// 1. all types for which Types::number says true, NormalizedType::isExactlyNumber should say true as well
CHECK(Luau::isNumber(builtinTypes->numberType) == number->isExactlyNumber());
// 2. isExactlyNumber should handle cases like `number & number`
TypeId intersection = arena.addType(IntersectionType{{builtinTypes->numberType, builtinTypes->numberType}});
const NormalizedType* normIntersection = normalizer.normalize(intersection);
CHECK(normIntersection->isExactlyNumber());
// 3. isExactlyNumber should reject things that are definitely not precisely numbers `number | any`
TypeId yoonion = arena.addType(UnionType{{builtinTypes->anyType, builtinTypes->numberType}});
const NormalizedType* unionIntersection = normalizer.normalize(yoonion);
CHECK(!unionIntersection->isExactlyNumber());
}
TEST_SUITE_END();

View File

@ -14,7 +14,7 @@
using namespace Luau;
LUAU_FASTFLAG(LuauInstantiateInSubtyping);
LUAU_FASTFLAG(LuauInstantiateInSubtyping)
TEST_SUITE_BEGIN("TypeInferFunctions");
@ -2073,4 +2073,20 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "param_1_and_2_both_takes_the_same_generic_bu
CHECK_EQ(toString(result.errors[1]), "Type 'number' could not be converted into 'boolean'");
}
TEST_CASE_FIXTURE(Fixture, "attempt_to_call_an_intersection_of_tables")
{
CheckResult result = check(R"(
local function f(t: { x: number } & { y: string })
t()
end
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::DebugLuauDeferredConstraintResolution)
CHECK_EQ(toString(result.errors[0]), "Cannot call non-function {| x: number |} & {| y: string |}");
else
CHECK_EQ(toString(result.errors[0]), "Cannot call non-function {| x: number |}");
}
TEST_SUITE_END();

View File

@ -88,7 +88,6 @@ TableTests.oop_polymorphic
TableTests.quantify_even_that_table_was_never_exported_at_all
TableTests.quantify_metatables_of_metatables_of_table
TableTests.reasonable_error_when_adding_a_nonexistent_property_to_an_array_like_table
TableTests.result_is_bool_for_equality_operators_if_lhs_is_any
TableTests.right_table_missing_key2
TableTests.shared_selfs
TableTests.shared_selfs_from_free_param
@ -167,7 +166,6 @@ TypeInferOperators.CallOrOfFunctions
TypeInferOperators.cli_38355_recursive_union
TypeInferOperators.compound_assign_mismatch_metatable
TypeInferOperators.disallow_string_and_types_without_metatables_from_arithmetic_binary_ops
TypeInferOperators.luau-polyfill.String.slice
TypeInferOperators.operator_eq_completely_incompatible
TypeInferOperators.typecheck_overloaded_multiply_that_is_an_intersection
TypeInferOperators.typecheck_overloaded_multiply_that_is_an_intersection_on_rhs