Merge branch 'upstream' into merge

Arseny Kapoulkine 2022-09-23 11:34:11 -07:00
commit 7bea908f0d
46 changed files with 3938 additions and 725 deletions


@ -90,18 +90,49 @@ struct TypeAliasExpansionConstraint
TypeId target;
};
using ConstraintPtr = std::unique_ptr<struct Constraint>;
struct FunctionCallConstraint
{
std::vector<NotNull<const Constraint>> innerConstraints;
std::vector<NotNull<const struct Constraint>> innerConstraints;
TypeId fn;
TypePackId result;
class AstExprCall* astFragment;
};
using ConstraintV = Variant<SubtypeConstraint, PackSubtypeConstraint, GeneralizationConstraint, InstantiationConstraint, UnaryConstraint,
BinaryConstraint, IterableConstraint, NameConstraint, TypeAliasExpansionConstraint, FunctionCallConstraint>;
// result ~ prim ExpectedType SomeSingletonType MultitonType
//
// If ExpectedType is potentially a singleton (an actual singleton or a union
// that contains a singleton), then result ~ SomeSingletonType
//
// else result ~ MultitonType
struct PrimitiveTypeConstraint
{
TypeId resultType;
TypeId expectedType;
TypeId singletonType;
TypeId multitonType;
};
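For intuition, the resolution rule this constraint encodes can be sketched as one function (a sketch mirroring the solver change later in this diff; follow and maybeSingleton are the helpers used there):

// Sketch: once ExpectedType is no longer blocked, bind the result to either
// the singleton type or the widened "multiton" type.
TypeId resolvePrimitiveConstraint(const PrimitiveTypeConstraint& c)
{
    TypeId expected = follow(c.expectedType);
    return maybeSingleton(expected) ? c.singletonType : c.multitonType;
}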
// result ~ hasProp type "prop_name"
//
// If the subject is a table, bind the result to the named prop. If the table
// lacks the prop but has a string indexer, bind the result to the indexer's
// result type. If the subject is a union, bind the result to the union of its
// constituents' properties.
//
// It would be nice to get rid of this constraint and someday replace it with
//
// T <: {p: X}
//
// where {} describes an inexact shape type.
struct HasPropConstraint
{
TypeId resultType;
TypeId subjectType;
std::string prop;
};
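As a concrete illustration (hypothetical Luau snippet in the comments), a string-keyed item in a table literal checked against an expected type produces one of these constraints:

// Checking the table literal below against its annotation conceptually emits
//   HasPropConstraint{valueResultType, expectedTableType, "x"}
// so the value "a" can be inferred as the singleton type "a" rather than string:
//
//   local t: {x: "a" | "b"} = {x = "a"}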
using ConstraintV =
Variant<SubtypeConstraint, PackSubtypeConstraint, GeneralizationConstraint, InstantiationConstraint, UnaryConstraint, BinaryConstraint,
IterableConstraint, NameConstraint, TypeAliasExpansionConstraint, FunctionCallConstraint, PrimitiveTypeConstraint, HasPropConstraint>;
struct Constraint
{
@ -117,6 +148,8 @@ struct Constraint
std::vector<NotNull<Constraint>> dependencies;
};
using ConstraintPtr = std::unique_ptr<Constraint>;
inline Constraint& asMutable(const Constraint& c)
{
return const_cast<Constraint&>(c);


@ -125,23 +125,25 @@ struct ConstraintGraphBuilder
void visit(const ScopePtr& scope, AstStatDeclareClass* declareClass);
void visit(const ScopePtr& scope, AstStatDeclareFunction* declareFunction);
TypePackId checkPack(const ScopePtr& scope, AstArray<AstExpr*> exprs);
TypePackId checkPack(const ScopePtr& scope, AstExpr* expr);
TypePackId checkPack(const ScopePtr& scope, AstArray<AstExpr*> exprs, const std::vector<TypeId>& expectedTypes = {});
TypePackId checkPack(const ScopePtr& scope, AstExpr* expr, const std::vector<TypeId>& expectedTypes = {});
/**
* Checks an expression that is expected to evaluate to one type.
* @param scope the scope the expression is contained within.
* @param expr the expression to check.
* @param expectedType the type of the expression that is expected from its
* surrounding context. Used to implement bidirectional type checking.
* @return the type of the expression.
*/
TypeId check(const ScopePtr& scope, AstExpr* expr);
TypeId check(const ScopePtr& scope, AstExpr* expr, std::optional<TypeId> expectedType = {});
TypeId checkExprTable(const ScopePtr& scope, AstExprTable* expr);
TypeId check(const ScopePtr& scope, AstExprTable* expr, std::optional<TypeId> expectedType);
TypeId check(const ScopePtr& scope, AstExprIndexName* indexName);
TypeId check(const ScopePtr& scope, AstExprIndexExpr* indexExpr);
TypeId check(const ScopePtr& scope, AstExprUnary* unary);
TypeId check(const ScopePtr& scope, AstExprBinary* binary);
TypeId check(const ScopePtr& scope, AstExprIfElse* ifElse);
TypeId check(const ScopePtr& scope, AstExprIfElse* ifElse, std::optional<TypeId> expectedType);
TypeId check(const ScopePtr& scope, AstExprTypeAssertion* typeAssert);
struct FunctionSignature


@ -100,6 +100,8 @@ struct ConstraintSolver
bool tryDispatch(const NameConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const TypeAliasExpansionConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const FunctionCallConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const PrimitiveTypeConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const HasPropConstraint& c, NotNull<const Constraint> constraint);
// for a, ... in some_table do
bool tryDispatchIterableTable(TypeId iteratorTy, const IterableConstraint& c, NotNull<const Constraint> constraint, bool force);
@ -116,6 +118,16 @@ struct ConstraintSolver
bool block(TypeId target, NotNull<const Constraint> constraint);
bool block(TypePackId target, NotNull<const Constraint> constraint);
// Traverse the type. If any blocked or pending typevars are found, block
// the constraint on them.
//
// Returns false if a type blocks the constraint.
//
// FIXME: This use of a boolean for the return result is an appalling
// interface.
bool recursiveBlock(TypeId target, NotNull<const Constraint> constraint);
bool recursiveBlock(TypePackId target, NotNull<const Constraint> constraint);
void unblock(NotNull<const Constraint> progressed);
void unblock(TypeId progressed);
void unblock(TypePackId progressed);


@ -17,8 +17,8 @@ struct SingletonTypes;
using ModulePtr = std::shared_ptr<Module>;
bool isSubtype(TypeId subTy, TypeId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice);
bool isSubtype(TypePackId subTy, TypePackId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice);
bool isSubtype(TypeId subTy, TypeId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice, bool anyIsTop = true);
bool isSubtype(TypePackId subTy, TypePackId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice, bool anyIsTop = true);
std::pair<TypeId, bool> normalize(
TypeId ty, NotNull<Scope> scope, TypeArena& arena, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice);


@ -6,6 +6,7 @@
#include "Luau/Location.h"
#include <string>
#include <vector>
namespace Luau
{


@ -36,6 +36,20 @@ static std::optional<AstExpr*> matchRequire(const AstExprCall& call)
return call.args.data[0];
}
static bool matchSetmetatable(const AstExprCall& call)
{
const char* smt = "setmetatable";
if (call.args.size != 2)
return false;
const AstExprGlobal* funcAsGlobal = call.func->as<AstExprGlobal>();
if (!funcAsGlobal || funcAsGlobal->name != smt)
return false;
return true;
}
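A quick illustration of what this matcher accepts and rejects (hypothetical examples; the check requires a bare global named setmetatable and exactly two arguments):

// setmetatable(t, mt)    -- matches: callee is AstExprGlobal "setmetatable", 2 args
// setmetatable(t)        -- rejected: wrong argument count
// local sm = setmetatable
// sm(t, mt)              -- rejected: callee is a local, not a global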
ConstraintGraphBuilder::ConstraintGraphBuilder(const ModuleName& moduleName, ModulePtr module, TypeArena* arena,
NotNull<ModuleResolver> moduleResolver, NotNull<SingletonTypes> singletonTypes, NotNull<InternalErrorReporter> ice, const ScopePtr& globalScope, DcrLogger* logger)
: moduleName(moduleName)
@ -214,15 +228,16 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocal* local)
for (AstLocal* local : local->vars)
{
TypeId ty = freshType(scope);
TypeId ty = nullptr;
Location location = local->location;
if (local->annotation)
{
location = local->annotation->location;
TypeId annotation = resolveType(scope, local->annotation, /* topLevel */ true);
addConstraint(scope, location, SubtypeConstraint{ty, annotation});
ty = resolveType(scope, local->annotation, /* topLevel */ true);
}
else
ty = freshType(scope);
varTypes.push_back(ty);
scope->bindings[local] = Binding{ty, location};
@ -231,6 +246,8 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocal* local)
for (size_t i = 0; i < local->values.size; ++i)
{
AstExpr* value = local->values.data[i];
const bool hasAnnotation = i < local->vars.size && nullptr != local->vars.data[i]->annotation;
if (value->is<AstExprConstantNil>())
{
// HACK: we leave nil-initialized things floating under the assumption that they will later be populated.
@ -239,7 +256,11 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocal* local)
}
else if (i == local->values.size - 1)
{
TypePackId exprPack = checkPack(scope, value);
std::vector<TypeId> expectedTypes;
if (hasAnnotation)
expectedTypes.insert(begin(expectedTypes), begin(varTypes) + i, end(varTypes));
TypePackId exprPack = checkPack(scope, value, expectedTypes);
if (i < local->vars.size)
{
@ -250,7 +271,11 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocal* local)
}
else
{
TypeId exprType = check(scope, value);
std::optional<TypeId> expectedType;
if (hasAnnotation)
expectedType = varTypes.at(i);
TypeId exprType = check(scope, value, expectedType);
if (i < varTypes.size())
addConstraint(scope, local->location, SubtypeConstraint{varTypes[i], exprType});
}
@ -458,7 +483,15 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatFunction* funct
void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatReturn* ret)
{
TypePackId exprTypes = checkPack(scope, ret->list);
// At this point, the only way scope->returnType should have anything
// interesting in it is if the function has an explicit return annotation.
// If this is the case, then we can expect that the return expression
// conforms to that.
std::vector<TypeId> expectedTypes;
for (TypeId ty : scope->returnType)
expectedTypes.push_back(ty);
TypePackId exprTypes = checkPack(scope, ret->list, expectedTypes);
addConstraint(scope, ret->location, PackSubtypeConstraint{exprTypes, scope->returnType});
}
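One consequence worth spelling out (hypothetical Luau snippet): because return expressions are now checked against scope->returnType, an explicit return annotation flows into the returned literals.

// local function f(): "ok" | "err"
//     return "ok"  -- checked with expected type "ok" | "err",
//                  -- so "ok" can stay a singleton instead of widening to string
// end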
@ -695,7 +728,7 @@ void ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatDeclareFunction
scope->bindings[global->name] = Binding{fnType, global->location};
}
TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstArray<AstExpr*> exprs)
TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstArray<AstExpr*> exprs, const std::vector<TypeId>& expectedTypes)
{
std::vector<TypeId> head;
std::optional<TypePackId> tail;
@ -704,9 +737,17 @@ TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstArray<Ast
{
AstExpr* expr = exprs.data[i];
if (i < exprs.size - 1)
{
std::optional<TypeId> expectedType;
if (i < expectedTypes.size())
expectedType = expectedTypes[i];
head.push_back(check(scope, expr, expectedType));
}
else
tail = checkPack(scope, expr);
{
std::vector<TypeId> expectedTailTypes;
if (i < expectedTypes.size())
expectedTailTypes.assign(begin(expectedTypes) + i, end(expectedTypes));
tail = checkPack(scope, expr, expectedTailTypes);
}
}
if (head.empty() && tail)
@ -715,7 +756,7 @@ TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstArray<Ast
return arena->addTypePack(TypePack{std::move(head), tail});
}
TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExpr* expr)
TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExpr* expr, const std::vector<TypeId>& expectedTypes)
{
RecursionCounter counter{&recursionCount};
@ -730,7 +771,6 @@ TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExpr* exp
if (AstExprCall* call = expr->as<AstExprCall>())
{
TypeId fnType = check(scope, call->func);
const size_t constraintIndex = scope->constraints.size();
const size_t scopeIndex = scopes.size();
@ -743,49 +783,63 @@ TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExpr* exp
// TODO self
const size_t constraintEndIndex = scope->constraints.size();
const size_t scopeEndIndex = scopes.size();
astOriginalCallTypes[call->func] = fnType;
TypeId instantiatedType = arena->addType(BlockedTypeVar{});
TypePackId rets = arena->addTypePack(BlockedTypePack{});
FunctionTypeVar ftv(arena->addTypePack(TypePack{args, {}}), rets);
TypeId inferredFnType = arena->addType(ftv);
scope->unqueuedConstraints.push_back(
std::make_unique<Constraint>(NotNull{scope.get()}, call->func->location, InstantiationConstraint{instantiatedType, fnType}));
NotNull<const Constraint> ic(scope->unqueuedConstraints.back().get());
scope->unqueuedConstraints.push_back(
std::make_unique<Constraint>(NotNull{scope.get()}, call->func->location, SubtypeConstraint{inferredFnType, instantiatedType}));
NotNull<Constraint> sc(scope->unqueuedConstraints.back().get());
// We force constraints produced by checking function arguments to wait
// until after we have resolved the constraint on the function itself.
// This ensures, for instance, that we start inferring the contents of
// lambdas under the assumption that their arguments and return types
// will be compatible with the enclosing function call.
for (size_t ci = constraintIndex; ci < constraintEndIndex; ++ci)
scope->constraints[ci]->dependencies.push_back(sc);
for (size_t si = scopeIndex; si < scopeEndIndex; ++si)
if (matchSetmetatable(*call))
{
for (auto& c : scopes[si].second->constraints)
{
c->dependencies.push_back(sc);
}
LUAU_ASSERT(args.size() == 2);
TypeId target = args[0];
TypeId mt = args[1];
MetatableTypeVar mtv{target, mt};
TypeId resultTy = arena->addType(mtv);
result = arena->addTypePack({resultTy});
}
else
{
const size_t constraintEndIndex = scope->constraints.size();
const size_t scopeEndIndex = scopes.size();
addConstraint(scope, call->func->location,
FunctionCallConstraint{
{ic, sc},
fnType,
rets,
call,
});
astOriginalCallTypes[call->func] = fnType;
result = rets;
TypeId instantiatedType = arena->addType(BlockedTypeVar{});
// TODO: How do expectedTypes play into this? Do they?
TypePackId rets = arena->addTypePack(BlockedTypePack{});
FunctionTypeVar ftv(arena->addTypePack(TypePack{args, {}}), rets);
TypeId inferredFnType = arena->addType(ftv);
scope->unqueuedConstraints.push_back(
std::make_unique<Constraint>(NotNull{scope.get()}, call->func->location, InstantiationConstraint{instantiatedType, fnType}));
NotNull<const Constraint> ic(scope->unqueuedConstraints.back().get());
scope->unqueuedConstraints.push_back(
std::make_unique<Constraint>(NotNull{scope.get()}, call->func->location, SubtypeConstraint{inferredFnType, instantiatedType}));
NotNull<Constraint> sc(scope->unqueuedConstraints.back().get());
// We force constraints produced by checking function arguments to wait
// until after we have resolved the constraint on the function itself.
// This ensures, for instance, that we start inferring the contents of
// lambdas under the assumption that their arguments and return types
// will be compatible with the enclosing function call.
for (size_t ci = constraintIndex; ci < constraintEndIndex; ++ci)
scope->constraints[ci]->dependencies.push_back(sc);
for (size_t si = scopeIndex; si < scopeEndIndex; ++si)
{
for (auto& c : scopes[si].second->constraints)
{
c->dependencies.push_back(sc);
}
}
addConstraint(scope, call->func->location,
FunctionCallConstraint{
{ic, sc},
fnType,
rets,
call,
});
result = rets;
}
}
else if (AstExprVarargs* varargs = expr->as<AstExprVarargs>())
{
@ -796,7 +850,10 @@ TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExpr* exp
}
else
{
TypeId t = check(scope, expr);
std::optional<TypeId> expectedType;
if (!expectedTypes.empty())
expectedType = expectedTypes[0];
TypeId t = check(scope, expr, expectedType);
result = arena->addTypePack({t});
}
@ -805,7 +862,7 @@ TypePackId ConstraintGraphBuilder::checkPack(const ScopePtr& scope, AstExpr* exp
return result;
}
TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExpr* expr)
TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExpr* expr, std::optional<TypeId> expectedType)
{
RecursionCounter counter{&recursionCount};
@ -819,12 +876,47 @@ TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExpr* expr)
if (auto group = expr->as<AstExprGroup>())
result = check(scope, group->expr);
else if (expr->is<AstExprConstantString>())
result = singletonTypes->stringType;
else if (auto stringExpr = expr->as<AstExprConstantString>())
{
if (expectedType)
{
const TypeId expectedTy = follow(*expectedType);
if (get<BlockedTypeVar>(expectedTy) || get<PendingExpansionTypeVar>(expectedTy))
{
result = arena->addType(BlockedTypeVar{});
TypeId singletonType = arena->addType(SingletonTypeVar(StringSingleton{std::string(stringExpr->value.data, stringExpr->value.size)}));
addConstraint(scope, expr->location, PrimitiveTypeConstraint{result, expectedTy, singletonType, singletonTypes->stringType});
}
else if (maybeSingleton(expectedTy))
result = arena->addType(SingletonTypeVar{StringSingleton{std::string{stringExpr->value.data, stringExpr->value.size}}});
else
result = singletonTypes->stringType;
}
else
result = singletonTypes->stringType;
}
else if (expr->is<AstExprConstantNumber>())
result = singletonTypes->numberType;
else if (expr->is<AstExprConstantBool>())
result = singletonTypes->booleanType;
else if (auto boolExpr = expr->as<AstExprConstantBool>())
{
if (expectedType)
{
const TypeId expectedTy = follow(*expectedType);
const TypeId singletonType = boolExpr->value ? singletonTypes->trueType : singletonTypes->falseType;
if (get<BlockedTypeVar>(expectedTy) || get<PendingExpansionTypeVar>(expectedTy))
{
result = arena->addType(BlockedTypeVar{});
addConstraint(scope, expr->location, PrimitiveTypeConstraint{result, expectedTy, singletonType, singletonTypes->booleanType});
}
else if (maybeSingleton(expectedTy))
result = singletonType;
else
result = singletonTypes->booleanType;
}
else
result = singletonTypes->booleanType;
}
else if (expr->is<AstExprConstantNil>())
result = singletonTypes->nilType;
else if (auto a = expr->as<AstExprLocal>())
@ -864,13 +956,13 @@ TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExpr* expr)
else if (auto indexExpr = expr->as<AstExprIndexExpr>())
result = check(scope, indexExpr);
else if (auto table = expr->as<AstExprTable>())
result = checkExprTable(scope, table);
result = check(scope, table, expectedType);
else if (auto unary = expr->as<AstExprUnary>())
result = check(scope, unary);
else if (auto binary = expr->as<AstExprBinary>())
result = check(scope, binary);
else if (auto ifElse = expr->as<AstExprIfElse>())
result = check(scope, ifElse);
result = check(scope, ifElse, expectedType);
else if (auto typeAssert = expr->as<AstExprTypeAssertion>())
result = check(scope, typeAssert);
else if (auto err = expr->as<AstExprError>())
@ -924,20 +1016,9 @@ TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprUnary* unary)
{
TypeId operandType = check(scope, unary->expr);
switch (unary->op)
{
case AstExprUnary::Minus:
{
TypeId resultType = arena->addType(BlockedTypeVar{});
addConstraint(scope, unary->location, UnaryConstraint{AstExprUnary::Minus, operandType, resultType});
return resultType;
}
default:
LUAU_ASSERT(0);
}
LUAU_UNREACHABLE();
return singletonTypes->errorRecoveryType();
TypeId resultType = arena->addType(BlockedTypeVar{});
addConstraint(scope, unary->location, UnaryConstraint{unary->op, operandType, resultType});
return resultType;
}
TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprBinary* binary)
@ -946,22 +1027,34 @@ TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprBinary* binar
TypeId rightType = check(scope, binary->right);
switch (binary->op)
{
case AstExprBinary::And:
case AstExprBinary::Or:
{
addConstraint(scope, binary->location, SubtypeConstraint{leftType, rightType});
return leftType;
}
case AstExprBinary::Add:
case AstExprBinary::Sub:
case AstExprBinary::Mul:
case AstExprBinary::Div:
case AstExprBinary::Mod:
case AstExprBinary::Pow:
case AstExprBinary::CompareNe:
case AstExprBinary::CompareEq:
case AstExprBinary::CompareLt:
case AstExprBinary::CompareLe:
case AstExprBinary::CompareGt:
case AstExprBinary::CompareGe:
{
TypeId resultType = arena->addType(BlockedTypeVar{});
addConstraint(scope, binary->location, BinaryConstraint{AstExprBinary::Add, leftType, rightType, resultType});
addConstraint(scope, binary->location, BinaryConstraint{binary->op, leftType, rightType, resultType});
return resultType;
}
case AstExprBinary::Sub:
case AstExprBinary::Concat:
{
TypeId resultType = arena->addType(BlockedTypeVar{});
addConstraint(scope, binary->location, BinaryConstraint{AstExprBinary::Sub, leftType, rightType, resultType});
return resultType;
addConstraint(scope, binary->left->location, SubtypeConstraint{leftType, singletonTypes->stringType});
addConstraint(scope, binary->right->location, SubtypeConstraint{rightType, singletonTypes->stringType});
return singletonTypes->stringType;
}
default:
LUAU_ASSERT(0);
@ -971,16 +1064,16 @@ TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprBinary* binar
return nullptr;
}
TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprIfElse* ifElse)
TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprIfElse* ifElse, std::optional<TypeId> expectedType)
{
check(scope, ifElse->condition);
TypeId thenType = check(scope, ifElse->trueExpr);
TypeId elseType = check(scope, ifElse->falseExpr);
TypeId thenType = check(scope, ifElse->trueExpr, expectedType);
TypeId elseType = check(scope, ifElse->falseExpr, expectedType);
if (ifElse->hasElse)
{
TypeId resultType = arena->addType(BlockedTypeVar{});
TypeId resultType = expectedType ? *expectedType : freshType(scope);
addConstraint(scope, ifElse->trueExpr->location, SubtypeConstraint{thenType, resultType});
addConstraint(scope, ifElse->falseExpr->location, SubtypeConstraint{elseType, resultType});
return resultType;
@ -995,7 +1088,7 @@ TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprTypeAssertion
return resolveType(scope, typeAssert->annotation);
}
TypeId ConstraintGraphBuilder::checkExprTable(const ScopePtr& scope, AstExprTable* expr)
TypeId ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprTable* expr, std::optional<TypeId> expectedType)
{
TypeId ty = arena->addType(TableTypeVar{});
TableTypeVar* ttv = getMutable<TableTypeVar>(ty);
@ -1015,7 +1108,18 @@ TypeId ConstraintGraphBuilder::checkExprTable(const ScopePtr& scope, AstExprTabl
for (const AstExprTable::Item& item : expr->items)
{
TypeId itemTy = check(scope, item.value);
std::optional<TypeId> expectedValueType;
if (item.key && expectedType)
{
if (auto stringKey = item.key->as<AstExprConstantString>())
{
expectedValueType = arena->addType(BlockedTypeVar{});
addConstraint(scope, item.value->location, HasPropConstraint{*expectedValueType, *expectedType, stringKey->value.data});
}
}
TypeId itemTy = check(scope, item.value, expectedValueType);
if (get<ErrorTypeVar>(follow(itemTy)))
return ty;
@ -1130,7 +1234,12 @@ ConstraintGraphBuilder::FunctionSignature ConstraintGraphBuilder::checkFunctionS
if (fn->returnAnnotation)
{
TypePackId annotatedRetType = resolveTypePack(signatureScope, *fn->returnAnnotation);
addConstraint(signatureScope, getLocation(*fn->returnAnnotation), PackSubtypeConstraint{returnType, annotatedRetType});
// We bind the annotated type directly here so that, when we need to
// generate constraints for return types, we have a guarantee that we
// know the annotated return type already, if one was provided.
LUAU_ASSERT(get<FreeTypePack>(returnType));
asMutable(returnType)->ty.emplace<BoundTypePack>(annotatedRetType);
}
std::vector<TypeId> argTypes;


@ -396,8 +396,12 @@ bool ConstraintSolver::tryDispatch(NotNull<const Constraint> constraint, bool fo
success = tryDispatch(*taec, constraint);
else if (auto fcc = get<FunctionCallConstraint>(*constraint))
success = tryDispatch(*fcc, constraint);
else if (auto fcc = get<PrimitiveTypeConstraint>(*constraint))
success = tryDispatch(*fcc, constraint);
else if (auto hpc = get<HasPropConstraint>(*constraint))
success = tryDispatch(*hpc, constraint);
else
LUAU_ASSERT(0);
LUAU_ASSERT(false);
if (success)
{
@ -409,6 +413,11 @@ bool ConstraintSolver::tryDispatch(NotNull<const Constraint> constraint, bool fo
bool ConstraintSolver::tryDispatch(const SubtypeConstraint& c, NotNull<const Constraint> constraint, bool force)
{
if (!recursiveBlock(c.subType, constraint))
return false;
if (!recursiveBlock(c.superType, constraint))
return false;
if (isBlocked(c.subType))
return block(c.subType, constraint);
else if (isBlocked(c.superType))
@ -421,6 +430,9 @@ bool ConstraintSolver::tryDispatch(const SubtypeConstraint& c, NotNull<const Con
bool ConstraintSolver::tryDispatch(const PackSubtypeConstraint& c, NotNull<const Constraint> constraint, bool force)
{
if (!recursiveBlock(c.subPack, constraint) || !recursiveBlock(c.superPack, constraint))
return false;
if (isBlocked(c.subPack))
return block(c.subPack, constraint);
else if (isBlocked(c.superPack))
@ -480,13 +492,30 @@ bool ConstraintSolver::tryDispatch(const UnaryConstraint& c, NotNull<const Const
LUAU_ASSERT(get<BlockedTypeVar>(c.resultType));
if (isNumber(operandType) || get<AnyTypeVar>(operandType) || get<ErrorTypeVar>(operandType))
switch (c.op)
{
asMutable(c.resultType)->ty.emplace<BoundTypeVar>(c.operandType);
return true;
case AstExprUnary::Not:
{
asMutable(c.resultType)->ty.emplace<BoundTypeVar>(singletonTypes->booleanType);
return true;
}
case AstExprUnary::Len:
{
asMutable(c.resultType)->ty.emplace<BoundTypeVar>(singletonTypes->numberType);
return true;
}
case AstExprUnary::Minus:
{
if (isNumber(operandType) || get<AnyTypeVar>(operandType) || get<ErrorTypeVar>(operandType))
{
asMutable(c.resultType)->ty.emplace<BoundTypeVar>(c.operandType);
return true;
}
break;
}
}
LUAU_ASSERT(0); // TODO metatable handling
LUAU_ASSERT(false); // TODO metatable handling
return false;
}
@ -906,6 +935,91 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
return true;
}
bool ConstraintSolver::tryDispatch(const PrimitiveTypeConstraint& c, NotNull<const Constraint> constraint)
{
TypeId expectedType = follow(c.expectedType);
if (isBlocked(expectedType) || get<PendingExpansionTypeVar>(expectedType))
return block(expectedType, constraint);
TypeId bindTo = maybeSingleton(expectedType) ? c.singletonType : c.multitonType;
asMutable(c.resultType)->ty.emplace<BoundTypeVar>(bindTo);
return true;
}
bool ConstraintSolver::tryDispatch(const HasPropConstraint& c, NotNull<const Constraint> constraint)
{
TypeId subjectType = follow(c.subjectType);
if (isBlocked(subjectType) || get<PendingExpansionTypeVar>(subjectType))
return block(subjectType, constraint);
TypeId resultType = nullptr;
auto collectParts = [&](auto&& unionOrIntersection) -> std::pair<bool, std::vector<TypeId>> {
bool blocked = false;
std::vector<TypeId> parts;
for (TypeId expectedPart : unionOrIntersection)
{
expectedPart = follow(expectedPart);
if (isBlocked(expectedPart) || get<PendingExpansionTypeVar>(expectedPart))
{
blocked = true;
block(expectedPart, constraint);
}
else if (const TableTypeVar* ttv = get<TableTypeVar>(follow(expectedPart)))
{
if (auto prop = ttv->props.find(c.prop); prop != ttv->props.end())
parts.push_back(prop->second.type);
else if (ttv->indexer && maybeString(ttv->indexer->indexType))
parts.push_back(ttv->indexer->indexResultType);
}
}
return {blocked, parts};
};
if (auto ttv = get<TableTypeVar>(subjectType))
{
if (auto prop = ttv->props.find(c.prop); prop != ttv->props.end())
resultType = prop->second.type;
else if (ttv->indexer && maybeString(ttv->indexer->indexType))
resultType = ttv->indexer->indexResultType;
}
else if (auto utv = get<UnionTypeVar>(subjectType))
{
auto [blocked, parts] = collectParts(utv);
if (blocked)
return false;
else if (parts.size() == 1)
resultType = parts[0];
else if (parts.size() > 1)
resultType = arena->addType(UnionTypeVar{std::move(parts)});
else
LUAU_ASSERT(false); // parts.size() == 0
}
else if (auto itv = get<IntersectionTypeVar>(subjectType))
{
auto [blocked, parts] = collectParts(itv);
if (blocked)
return false;
else if (parts.size() == 1)
resultType = parts[0];
else if (parts.size() > 1)
resultType = arena->addType(IntersectionTypeVar{std::move(parts)});
else
LUAU_ASSERT(false); // parts.size() == 0
}
if (resultType)
asMutable(c.resultType)->ty.emplace<BoundTypeVar>(resultType);
return true;
}
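A small worked example of the union case above (types written in Luau syntax inside the comments):

// With subjectType = {x: number} | {x: string}, dispatching
//   HasPropConstraint{result, subjectType, "x"}
// collects number and string from the constituents and binds result to
// number | string; if only one part survives, it is bound directly.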
bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const IterableConstraint& c, NotNull<const Constraint> constraint, bool force)
{
auto block_ = [&](auto&& t) {
@ -914,7 +1028,7 @@ bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const Iterabl
// TODO: I believe it is the case that, if we are asked to force
// this constraint, then we can do nothing but fail. I'd like to
// find a code sample that gets here.
LUAU_ASSERT(0);
LUAU_ASSERT(false);
}
else
block(t, constraint);
@ -979,7 +1093,7 @@ bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const Iterabl
if (get<FreeTypeVar>(metaTy))
return block_(metaTy);
LUAU_ASSERT(0);
LUAU_ASSERT(false);
}
else
errorify(c.variables);
@ -996,7 +1110,7 @@ bool ConstraintSolver::tryDispatchIterableFunction(
if (get<FreeTypeVar>(firstIndexTy))
{
if (force)
LUAU_ASSERT(0);
LUAU_ASSERT(false);
else
block(firstIndexTy, constraint);
return false;
@ -1061,6 +1175,48 @@ bool ConstraintSolver::block(TypePackId target, NotNull<const Constraint> constr
return false;
}
struct Blocker : TypeVarOnceVisitor
{
NotNull<ConstraintSolver> solver;
NotNull<const Constraint> constraint;
bool blocked = false;
explicit Blocker(NotNull<ConstraintSolver> solver, NotNull<const Constraint> constraint)
: solver(solver)
, constraint(constraint)
{
}
bool visit(TypeId ty, const BlockedTypeVar&)
{
blocked = true;
solver->block(ty, constraint);
return false;
}
bool visit(TypeId ty, const PendingExpansionTypeVar&)
{
blocked = true;
solver->block(ty, constraint);
return false;
}
};
bool ConstraintSolver::recursiveBlock(TypeId target, NotNull<const Constraint> constraint)
{
Blocker blocker{NotNull{this}, constraint};
blocker.traverse(target);
return !blocker.blocked;
}
bool ConstraintSolver::recursiveBlock(TypePackId pack, NotNull<const Constraint> constraint)
{
Blocker blocker{NotNull{this}, constraint};
blocker.traverse(pack);
return !blocker.blocked;
}
void ConstraintSolver::unblock_(BlockedConstraintId progressed)
{
auto it = blocked.find(progressed);


@ -5,6 +5,7 @@
#include <algorithm>
#include "Luau/Clone.h"
#include "Luau/Common.h"
#include "Luau/Unifier.h"
#include "Luau/VisitTypeVar.h"
@ -13,8 +14,8 @@ LUAU_FASTFLAGVARIABLE(DebugLuauCopyBeforeNormalizing, false)
// This could theoretically be 2000 on amd64, but x86 requires this.
LUAU_FASTINTVARIABLE(LuauNormalizeIterationLimit, 1200);
LUAU_FASTFLAGVARIABLE(LuauNormalizeCombineTableFix, false);
LUAU_FASTFLAGVARIABLE(LuauFixNormalizationOfCyclicUnions, false);
LUAU_FASTFLAG(LuauUnknownAndNeverType)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
namespace Luau
{
@ -54,24 +55,24 @@ struct Replacer
} // anonymous namespace
bool isSubtype(TypeId subTy, TypeId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice)
bool isSubtype(TypeId subTy, TypeId superTy, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice, bool anyIsTop)
{
UnifierSharedState sharedState{&ice};
TypeArena arena;
Unifier u{&arena, singletonTypes, Mode::Strict, scope, Location{}, Covariant, sharedState};
u.anyIsTop = true;
u.anyIsTop = anyIsTop;
u.tryUnify(subTy, superTy);
const bool ok = u.errors.empty() && u.log.empty();
return ok;
}
bool isSubtype(TypePackId subPack, TypePackId superPack, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice)
bool isSubtype(TypePackId subPack, TypePackId superPack, NotNull<Scope> scope, NotNull<SingletonTypes> singletonTypes, InternalErrorReporter& ice, bool anyIsTop)
{
UnifierSharedState sharedState{&ice};
TypeArena arena;
Unifier u{&arena, singletonTypes, Mode::Strict, scope, Location{}, Covariant, sharedState};
u.anyIsTop = true;
u.anyIsTop = anyIsTop;
u.tryUnify(subPack, superPack);
const bool ok = u.errors.empty() && u.log.empty();
@ -319,18 +320,11 @@ struct Normalize final : TypeVarVisitor
UnionTypeVar* utv = &const_cast<UnionTypeVar&>(utvRef);
// TODO: Clip tempOptions and optionsRef when clipping FFlag::LuauFixNormalizationOfCyclicUnions
std::vector<TypeId> tempOptions;
if (!FFlag::LuauFixNormalizationOfCyclicUnions)
tempOptions = std::move(utv->options);
std::vector<TypeId>& optionsRef = FFlag::LuauFixNormalizationOfCyclicUnions ? utv->options : tempOptions;
// We might transmute, so it's not safe to rely on the builtin traversal logic of visitTypeVar
for (TypeId option : optionsRef)
for (TypeId option : utv->options)
traverse(option);
std::vector<TypeId> newOptions = normalizeUnion(optionsRef);
std::vector<TypeId> newOptions = normalizeUnion(utv->options);
const bool normal = areNormal(newOptions, seen, ice);
@ -355,106 +349,54 @@ struct Normalize final : TypeVarVisitor
IntersectionTypeVar* itv = &const_cast<IntersectionTypeVar&>(itvRef);
if (FFlag::LuauFixNormalizationOfCyclicUnions)
std::vector<TypeId> oldParts = itv->parts;
IntersectionTypeVar newIntersection;
for (TypeId part : oldParts)
traverse(part);
std::vector<TypeId> tables;
for (TypeId part : oldParts)
{
std::vector<TypeId> oldParts = itv->parts;
IntersectionTypeVar newIntersection;
for (TypeId part : oldParts)
traverse(part);
std::vector<TypeId> tables;
for (TypeId part : oldParts)
part = follow(part);
if (get<TableTypeVar>(part))
tables.push_back(part);
else
{
part = follow(part);
if (get<TableTypeVar>(part))
tables.push_back(part);
else
{
Replacer replacer{&arena, nullptr, nullptr}; // FIXME this is super super WEIRD
combineIntoIntersection(replacer, &newIntersection, part);
}
}
// Don't allocate a new table if there's just one in the intersection.
if (tables.size() == 1)
newIntersection.parts.push_back(tables[0]);
else if (!tables.empty())
{
const TableTypeVar* first = get<TableTypeVar>(tables[0]);
LUAU_ASSERT(first);
TypeId newTable = arena.addType(TableTypeVar{first->state, first->level});
TableTypeVar* ttv = getMutable<TableTypeVar>(newTable);
for (TypeId part : tables)
{
// Intuition: If combineIntoTable() needs to clone a table, any references to 'part' are cyclic and need
// to be rewritten to point at 'newTable' in the clone.
Replacer replacer{&arena, part, newTable};
combineIntoTable(replacer, ttv, part);
}
newIntersection.parts.push_back(newTable);
}
itv->parts = std::move(newIntersection.parts);
asMutable(ty)->normal = areNormal(itv->parts, seen, ice);
if (itv->parts.size() == 1)
{
TypeId part = itv->parts[0];
*asMutable(ty) = BoundTypeVar{part};
Replacer replacer{&arena, nullptr, nullptr}; // FIXME this is super super WEIRD
combineIntoIntersection(replacer, &newIntersection, part);
}
}
else
// Don't allocate a new table if there's just one in the intersection.
if (tables.size() == 1)
newIntersection.parts.push_back(tables[0]);
else if (!tables.empty())
{
std::vector<TypeId> oldParts = std::move(itv->parts);
const TableTypeVar* first = get<TableTypeVar>(tables[0]);
LUAU_ASSERT(first);
for (TypeId part : oldParts)
traverse(part);
std::vector<TypeId> tables;
for (TypeId part : oldParts)
TypeId newTable = arena.addType(TableTypeVar{first->state, first->level});
TableTypeVar* ttv = getMutable<TableTypeVar>(newTable);
for (TypeId part : tables)
{
part = follow(part);
if (get<TableTypeVar>(part))
tables.push_back(part);
else
{
Replacer replacer{&arena, nullptr, nullptr}; // FIXME this is super super WEIRD
combineIntoIntersection(replacer, itv, part);
}
// Intuition: If combineIntoTable() needs to clone a table, any references to 'part' are cyclic and need
// to be rewritten to point at 'newTable' in the clone.
Replacer replacer{&arena, part, newTable};
combineIntoTable(replacer, ttv, part);
}
// Don't allocate a new table if there's just one in the intersection.
if (tables.size() == 1)
itv->parts.push_back(tables[0]);
else if (!tables.empty())
{
const TableTypeVar* first = get<TableTypeVar>(tables[0]);
LUAU_ASSERT(first);
newIntersection.parts.push_back(newTable);
}
TypeId newTable = arena.addType(TableTypeVar{first->state, first->level});
TableTypeVar* ttv = getMutable<TableTypeVar>(newTable);
for (TypeId part : tables)
{
// Intuition: If combineIntoTable() needs to clone a table, any references to 'part' are cyclic and need
// to be rewritten to point at 'newTable' in the clone.
Replacer replacer{&arena, part, newTable};
combineIntoTable(replacer, ttv, part);
}
itv->parts = std::move(newIntersection.parts);
itv->parts.push_back(newTable);
}
asMutable(ty)->normal = areNormal(itv->parts, seen, ice);
asMutable(ty)->normal = areNormal(itv->parts, seen, ice);
if (itv->parts.size() == 1)
{
TypeId part = itv->parts[0];
*asMutable(ty) = BoundTypeVar{part};
}
if (itv->parts.size() == 1)
{
TypeId part = itv->parts[0];
*asMutable(ty) = BoundTypeVar{part};
}
return false;
@ -629,21 +571,18 @@ struct Normalize final : TypeVarVisitor
table->props.insert({propName, prop});
}
if (FFlag::LuauFixNormalizationOfCyclicUnions)
if (tyTable->indexer)
{
if (tyTable->indexer)
if (table->indexer)
{
if (table->indexer)
{
table->indexer->indexType = combine(replacer, replacer.smartClone(tyTable->indexer->indexType), table->indexer->indexType);
table->indexer->indexResultType =
combine(replacer, replacer.smartClone(tyTable->indexer->indexResultType), table->indexer->indexResultType);
}
else
{
table->indexer =
TableIndexer{replacer.smartClone(tyTable->indexer->indexType), replacer.smartClone(tyTable->indexer->indexResultType)};
}
table->indexer->indexType = combine(replacer, replacer.smartClone(tyTable->indexer->indexType), table->indexer->indexType);
table->indexer->indexResultType =
combine(replacer, replacer.smartClone(tyTable->indexer->indexResultType), table->indexer->indexResultType);
}
else
{
table->indexer =
TableIndexer{replacer.smartClone(tyTable->indexer->indexType), replacer.smartClone(tyTable->indexer->indexResultType)};
}
}


@ -1512,6 +1512,15 @@ std::string toString(const Constraint& constraint, ToStringOptions& opts)
{
return "call " + tos(c.fn, opts) + " with { result = " + tos(c.result, opts) + " }";
}
else if constexpr (std::is_same_v<T, PrimitiveTypeConstraint>)
{
return tos(c.resultType, opts) + " ~ prim " + tos(c.expectedType, opts) + ", " + tos(c.singletonType, opts) + ", " +
tos(c.multitonType, opts);
}
else if constexpr (std::is_same_v<T, HasPropConstraint>)
{
return tos(c.resultType, opts) + " ~ hasProp " + tos(c.subjectType, opts) + ", \"" + c.prop + "\"";
}
else
static_assert(always_false_v<T>, "Non-exhaustive constraint switch");
};


@ -307,10 +307,9 @@ struct TypeChecker2
if (var->annotation)
{
TypeId varType = lookupAnnotation(var->annotation);
if (!isSubtype(*it, varType, stack.back(), singletonTypes, ice))
{
reportError(TypeMismatch{varType, *it}, value->location);
}
ErrorVec errors = tryUnify(stack.back(), value->location, *it, varType);
if (!errors.empty())
reportErrors(std::move(errors));
}
++it;
@ -325,7 +324,7 @@ struct TypeChecker2
if (var->annotation)
{
TypeId varType = lookupAnnotation(var->annotation);
if (!isSubtype(varType, valueType, stack.back(), singletonTypes, ice))
if (!isSubtype(varType, valueType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
{
reportError(TypeMismatch{varType, valueType}, value->location);
}
@ -540,7 +539,7 @@ struct TypeChecker2
visit(rhs);
TypeId rhsType = lookupType(rhs);
if (!isSubtype(rhsType, lhsType, stack.back(), singletonTypes, ice))
if (!isSubtype(rhsType, lhsType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
{
reportError(TypeMismatch{lhsType, rhsType}, rhs->location);
}
@ -691,7 +690,7 @@ struct TypeChecker2
TypeId actualType = lookupType(number);
TypeId numberType = singletonTypes->numberType;
if (!isSubtype(numberType, actualType, stack.back(), singletonTypes, ice))
if (!isSubtype(numberType, actualType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
{
reportError(TypeMismatch{actualType, numberType}, number->location);
}
@ -702,7 +701,7 @@ struct TypeChecker2
TypeId actualType = lookupType(string);
TypeId stringType = singletonTypes->stringType;
if (!isSubtype(stringType, actualType, stack.back(), singletonTypes, ice))
if (!isSubtype(stringType, actualType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
{
reportError(TypeMismatch{actualType, stringType}, string->location);
}
@ -762,7 +761,7 @@ struct TypeChecker2
FunctionTypeVar ftv{argsTp, expectedRetType};
TypeId expectedType = arena.addType(ftv);
if (!isSubtype(expectedType, instantiatedFunctionType, stack.back(), singletonTypes, ice))
if (!isSubtype(instantiatedFunctionType, expectedType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
{
CloneState cloneState;
expectedType = clone(expectedType, module->internalTypes, cloneState);
@ -781,7 +780,7 @@ struct TypeChecker2
getIndexTypeFromType(module->getModuleScope(), leftType, indexName->index.value, indexName->location, /* addErrors */ true);
if (ty)
{
if (!isSubtype(resultType, *ty, stack.back(), singletonTypes, ice))
if (!isSubtype(resultType, *ty, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
{
reportError(TypeMismatch{resultType, *ty}, indexName->location);
}
@ -814,7 +813,7 @@ struct TypeChecker2
TypeId inferredArgTy = *argIt;
TypeId annotatedArgTy = lookupAnnotation(arg->annotation);
if (!isSubtype(annotatedArgTy, inferredArgTy, stack.back(), singletonTypes, ice))
if (!isSubtype(annotatedArgTy, inferredArgTy, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
{
reportError(TypeMismatch{annotatedArgTy, inferredArgTy}, arg->location);
}
@ -859,10 +858,10 @@ struct TypeChecker2
TypeId computedType = lookupType(expr->expr);
// Note: As an optimization, we try 'number <: number | string' first, as that is the more likely case.
if (isSubtype(annotationType, computedType, stack.back(), singletonTypes, ice))
if (isSubtype(annotationType, computedType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
return;
if (isSubtype(computedType, annotationType, stack.back(), singletonTypes, ice))
if (isSubtype(computedType, annotationType, stack.back(), singletonTypes, ice, /* anyIsTop */ false))
return;
reportError(TypesAreUnrelated{computedType, annotationType}, expr->location);


@ -5,7 +5,6 @@
#include <functional>
#include <utility>
#include <vector>
#include <type_traits>
#include <stdint.h>
@ -35,30 +34,125 @@ public:
class iterator;
DenseHashTable(const Key& empty_key, size_t buckets = 0)
: count(0)
: data(nullptr)
, capacity(0)
, count(0)
, empty_key(empty_key)
{
// validate that equality operator is at least somewhat functional
LUAU_ASSERT(eq(empty_key, empty_key));
// buckets has to be power-of-two or zero
LUAU_ASSERT((buckets & (buckets - 1)) == 0);
// don't move this to initializer list! this works around an MSVC codegen issue on AMD CPUs:
// https://developercommunity.visualstudio.com/t/stdvector-constructor-from-size-t-is-25-times-slow/1546547
if (buckets)
resize_data<Item>(buckets);
{
data = static_cast<Item*>(::operator new(sizeof(Item) * buckets));
capacity = buckets;
ItemInterface::fill(data, buckets, empty_key);
}
}
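The power-of-two assertion above is what later lets insert_unsafe and find select buckets with a mask rather than a modulo; a standalone sketch of both tricks (not the library's API):

#include <cstddef>
#include <cstdint>

// A value is a power of two (or zero) iff clearing its lowest set bit
// leaves nothing: (n & (n - 1)) == 0.
inline bool isPowerOfTwoOrZero(size_t n)
{
    return (n & (n - 1)) == 0;
}

// With a power-of-two capacity, masking the hash picks a bucket without
// an integer division, and probe offsets wrap naturally.
inline size_t bucketFor(uint64_t hash, size_t capacity)
{
    return static_cast<size_t>(hash) & (capacity - 1);
}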
~DenseHashTable()
{
if (data)
destroy();
}
DenseHashTable(const DenseHashTable& other)
: data(nullptr)
, capacity(0)
, count(other.count)
, empty_key(other.empty_key)
{
if (other.capacity)
{
data = static_cast<Item*>(::operator new(sizeof(Item) * other.capacity));
for (size_t i = 0; i < other.capacity; ++i)
{
new (&data[i]) Item(other.data[i]);
capacity = i + 1; // if Item copy throws, capacity will note the number of initialized objects for destroy() to clean up
}
}
}
DenseHashTable(DenseHashTable&& other)
: data(other.data)
, capacity(other.capacity)
, count(other.count)
, empty_key(other.empty_key)
{
other.data = nullptr;
other.capacity = 0;
other.count = 0;
}
DenseHashTable& operator=(DenseHashTable&& other)
{
if (this != &other)
{
if (data)
destroy();
data = other.data;
capacity = other.capacity;
count = other.count;
empty_key = other.empty_key;
other.data = nullptr;
other.capacity = 0;
other.count = 0;
}
return *this;
}
DenseHashTable& operator=(const DenseHashTable& other)
{
if (this != &other)
{
DenseHashTable copy(other);
*this = std::move(copy);
}
return *this;
}
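The copy assignment above uses the copy-and-move idiom: the copy (which may throw) is fully built before *this is modified, giving the strong exception guarantee. A self-contained sketch of the same pattern:

#include <cstddef>
#include <utility>

// Minimal owning buffer using the copy-and-move assignment idiom.
struct Buffer
{
    int* data = nullptr;
    size_t size = 0;

    Buffer() = default;
    explicit Buffer(size_t n) : data(n ? new int[n]() : nullptr), size(n) {}
    Buffer(const Buffer& other) : Buffer(other.size)
    {
        for (size_t i = 0; i < size; ++i)
            data[i] = other.data[i];
    }
    Buffer(Buffer&& other) noexcept : data(other.data), size(other.size)
    {
        other.data = nullptr;
        other.size = 0;
    }
    Buffer& operator=(Buffer&& other) noexcept
    {
        std::swap(data, other.data);
        std::swap(size, other.size);
        return *this;
    }
    Buffer& operator=(const Buffer& other)
    {
        if (this != &other)
        {
            Buffer copy(other); // may throw; *this is untouched if it does
            *this = std::move(copy);
        }
        return *this;
    }
    ~Buffer() { delete[] data; }
};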
void clear()
{
data.clear();
if (count == 0)
return;
if (capacity > 32)
{
destroy();
}
else
{
ItemInterface::destroy(data, capacity);
ItemInterface::fill(data, capacity, empty_key);
}
count = 0;
}
void destroy()
{
ItemInterface::destroy(data, capacity);
::operator delete(data);
data = nullptr;
capacity = 0;
}
Item* insert_unsafe(const Key& key)
{
// It is invalid to insert empty_key into the table since it acts as an "entry does not exist" marker
LUAU_ASSERT(!eq(key, empty_key));
size_t hashmod = data.size() - 1;
size_t hashmod = capacity - 1;
size_t bucket = hasher(key) & hashmod;
for (size_t probe = 0; probe <= hashmod; ++probe)
@ -90,12 +184,12 @@ public:
const Item* find(const Key& key) const
{
if (data.empty())
if (count == 0)
return 0;
if (eq(key, empty_key))
return 0;
size_t hashmod = data.size() - 1;
size_t hashmod = capacity - 1;
size_t bucket = hasher(key) & hashmod;
for (size_t probe = 0; probe <= hashmod; ++probe)
@ -121,18 +215,11 @@ public:
void rehash()
{
size_t newsize = data.empty() ? 16 : data.size() * 2;
if (data.empty() && data.capacity() >= newsize)
{
LUAU_ASSERT(count == 0);
resize_data<Item>(newsize);
return;
}
size_t newsize = capacity == 0 ? 16 : capacity * 2;
DenseHashTable newtable(empty_key, newsize);
for (size_t i = 0; i < data.size(); ++i)
for (size_t i = 0; i < capacity; ++i)
{
const Key& key = ItemInterface::getKey(data[i]);
@ -144,12 +231,14 @@ public:
}
LUAU_ASSERT(count == newtable.count);
data.swap(newtable.data);
std::swap(data, newtable.data);
std::swap(capacity, newtable.capacity);
}
void rehash_if_full()
{
if (count >= data.size() * 3 / 4)
if (count >= capacity * 3 / 4)
{
rehash();
}
@ -159,7 +248,7 @@ public:
{
size_t start = 0;
while (start < data.size() && eq(ItemInterface::getKey(data[start]), empty_key))
while (start < capacity && eq(ItemInterface::getKey(data[start]), empty_key))
start++;
return const_iterator(this, start);
@ -167,14 +256,14 @@ public:
const_iterator end() const
{
return const_iterator(this, data.size());
return const_iterator(this, capacity);
}
iterator begin()
{
size_t start = 0;
while (start < data.size() && eq(ItemInterface::getKey(data[start]), empty_key))
while (start < capacity && eq(ItemInterface::getKey(data[start]), empty_key))
start++;
return iterator(this, start);
@ -182,7 +271,7 @@ public:
iterator end()
{
return iterator(this, data.size());
return iterator(this, capacity);
}
size_t size() const
@ -227,7 +316,7 @@ public:
const_iterator& operator++()
{
size_t size = set->data.size();
size_t size = set->capacity;
do
{
@ -286,7 +375,7 @@ public:
iterator& operator++()
{
size_t size = set->data.size();
size_t size = set->capacity;
do
{
@ -309,23 +398,8 @@ public:
};
private:
template<typename T>
void resize_data(size_t count, typename std::enable_if_t<std::is_copy_assignable_v<T>>* dummy = nullptr)
{
data.resize(count, ItemInterface::create(empty_key));
}
template<typename T>
void resize_data(size_t count, typename std::enable_if_t<!std::is_copy_assignable_v<T>>* dummy = nullptr)
{
size_t size = data.size();
data.resize(count);
for (size_t i = size; i < count; i++)
data[i].first = empty_key;
}
std::vector<Item> data;
Item* data;
size_t capacity;
size_t count;
Key empty_key;
Hash hasher;
@ -345,9 +419,16 @@ struct ItemInterfaceSet
item = key;
}
static Key create(const Key& key)
static void fill(Key* data, size_t count, const Key& key)
{
return key;
for (size_t i = 0; i < count; ++i)
new (&data[i]) Key(key);
}
static void destroy(Key* data, size_t count)
{
for (size_t i = 0; i < count; ++i)
data[i].~Key();
}
};
@ -364,9 +445,22 @@ struct ItemInterfaceMap
item.first = key;
}
static std::pair<Key, Value> create(const Key& key)
static void fill(std::pair<Key, Value>* data, size_t count, const Key& key)
{
return std::pair<Key, Value>(key, Value());
for (size_t i = 0; i < count; ++i)
{
new (&data[i].first) Key(key);
new (&data[i].second) Value();
}
}
static void destroy(std::pair<Key, Value>* data, size_t count)
{
for (size_t i = 0; i < count; ++i)
{
data[i].first.~Key();
data[i].second.~Value();
}
}
};
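fill and destroy in both interfaces manage object lifetimes by hand over raw storage obtained from ::operator new; a minimal sketch of that placement-new/explicit-destructor pairing:

#include <new>

struct Item
{
    int value;
    explicit Item(int v) : value(v) {}
};

void lifetimeSketch()
{
    void* raw = ::operator new(sizeof(Item)); // raw storage, no object yet
    Item* it = new (raw) Item(42);            // placement new, as in fill()
    it->~Item();                              // explicit destructor, as in destroy()
    ::operator delete(raw);
}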


@ -6,6 +6,8 @@
#include "Luau/DenseHash.h"
#include "Luau/Common.h"
#include <vector>
namespace Luau
{


@ -20,7 +20,6 @@ LUAU_DYNAMIC_FASTFLAGVARIABLE(LuaReportParseWrongNamedType, false)
bool lua_telemetry_parsed_named_non_function_type = false;
LUAU_FASTFLAGVARIABLE(LuauErrorDoubleHexPrefix, false)
LUAU_FASTFLAGVARIABLE(LuauLintParseIntegerIssues, false)
LUAU_DYNAMIC_FASTFLAGVARIABLE(LuaReportParseIntegerIssues, false)
LUAU_FASTFLAGVARIABLE(LuauInterpolatedStringBaseSupport, false)
@ -2070,95 +2069,8 @@ AstExpr* Parser::parseAssertionExpr()
return expr;
}
static const char* parseInteger_DEPRECATED(double& result, const char* data, int base)
{
LUAU_ASSERT(!FFlag::LuauLintParseIntegerIssues);
char* end = nullptr;
unsigned long long value = strtoull(data, &end, base);
if (value == ULLONG_MAX && errno == ERANGE)
{
// 'errno' might have been set before we called 'strtoull', but we don't want the overhead of resetting a TLS variable on each call
// so we only reset it when we get a result that might be an out-of-range error and parse again to make sure
errno = 0;
value = strtoull(data, &end, base);
if (errno == ERANGE)
{
if (DFFlag::LuaReportParseIntegerIssues)
{
if (base == 2)
lua_telemetry_parsed_out_of_range_bin_integer = true;
else
lua_telemetry_parsed_out_of_range_hex_integer = true;
}
}
}
result = double(value);
return *end == 0 ? nullptr : "Malformed number";
}
static const char* parseNumber_DEPRECATED2(double& result, const char* data)
{
LUAU_ASSERT(!FFlag::LuauLintParseIntegerIssues);
// binary literal
if (data[0] == '0' && (data[1] == 'b' || data[1] == 'B') && data[2])
return parseInteger_DEPRECATED(result, data + 2, 2);
// hexadecimal literal
if (data[0] == '0' && (data[1] == 'x' || data[1] == 'X') && data[2])
{
if (DFFlag::LuaReportParseIntegerIssues && data[2] == '0' && (data[3] == 'x' || data[3] == 'X'))
lua_telemetry_parsed_double_prefix_hex_integer = true;
return parseInteger_DEPRECATED(result, data + 2, 16);
}
char* end = nullptr;
double value = strtod(data, &end);
result = value;
return *end == 0 ? nullptr : "Malformed number";
}
static bool parseNumber_DEPRECATED(double& result, const char* data)
{
LUAU_ASSERT(!FFlag::LuauLintParseIntegerIssues);
// binary literal
if (data[0] == '0' && (data[1] == 'b' || data[1] == 'B') && data[2])
{
char* end = nullptr;
unsigned long long value = strtoull(data + 2, &end, 2);
result = double(value);
return *end == 0;
}
// hexadecimal literal
else if (data[0] == '0' && (data[1] == 'x' || data[1] == 'X') && data[2])
{
char* end = nullptr;
unsigned long long value = strtoull(data + 2, &end, 16);
result = double(value);
return *end == 0;
}
else
{
char* end = nullptr;
double value = strtod(data, &end);
result = value;
return *end == 0;
}
}
static ConstantNumberParseResult parseInteger(double& result, const char* data, int base)
{
LUAU_ASSERT(FFlag::LuauLintParseIntegerIssues);
LUAU_ASSERT(base == 2 || base == 16);
char* end = nullptr;
@ -2195,8 +2107,6 @@ static ConstantNumberParseResult parseInteger(double& result, const char* data,
static ConstantNumberParseResult parseDouble(double& result, const char* data)
{
LUAU_ASSERT(FFlag::LuauLintParseIntegerIssues);
// binary literal
if (data[0] == '0' && (data[1] == 'b' || data[1] == 'B') && data[2])
return parseInteger(result, data + 2, 2);
@ -2771,49 +2681,14 @@ AstExpr* Parser::parseNumber()
scratchData.erase(std::remove(scratchData.begin(), scratchData.end(), '_'), scratchData.end());
}
if (FFlag::LuauLintParseIntegerIssues)
{
double value = 0;
ConstantNumberParseResult result = parseDouble(value, scratchData.c_str());
nextLexeme();
double value = 0;
ConstantNumberParseResult result = parseDouble(value, scratchData.c_str());
nextLexeme();
if (result == ConstantNumberParseResult::Malformed)
return reportExprError(start, {}, "Malformed number");
if (result == ConstantNumberParseResult::Malformed)
return reportExprError(start, {}, "Malformed number");
return allocator.alloc<AstExprConstantNumber>(start, value, result);
}
else if (DFFlag::LuaReportParseIntegerIssues)
{
double value = 0;
if (const char* error = parseNumber_DEPRECATED2(value, scratchData.c_str()))
{
nextLexeme();
return reportExprError(start, {}, "%s", error);
}
else
{
nextLexeme();
return allocator.alloc<AstExprConstantNumber>(start, value);
}
}
else
{
double value = 0;
if (parseNumber_DEPRECATED(value, scratchData.c_str()))
{
nextLexeme();
return allocator.alloc<AstExprConstantNumber>(start, value);
}
else
{
nextLexeme();
return reportExprError(start, {}, "Malformed number");
}
}
return allocator.alloc<AstExprConstantNumber>(start, value, result);
}
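For reference, a sketch of how the consolidated path classifies a few literals (hypothetical inputs; dispatch follows parseDouble/parseInteger earlier in this diff):

// "0b1010"  -> parseInteger(result, data + 2, /* base */ 2)  -> 10.0
// "0xff"    -> parseInteger(result, data + 2, /* base */ 16) -> 255.0
// "1.5e2"   -> strtod fallback                               -> 150.0
// anything with trailing junk yields ConstantNumberParseResult::Malformed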
AstLocal* Parser::pushLocal(const Binding& binding)


@ -70,6 +70,7 @@ target_link_libraries(Luau.Analysis PUBLIC Luau.Ast)
target_compile_features(Luau.CodeGen PRIVATE cxx_std_17)
target_include_directories(Luau.CodeGen PUBLIC CodeGen/include)
target_link_libraries(Luau.CodeGen PRIVATE Luau.VM Luau.VM.Internals) # Code generation needs VM internals
target_link_libraries(Luau.CodeGen PUBLIC Luau.Common)
target_compile_features(Luau.VM PRIVATE cxx_std_11)


@ -27,13 +27,13 @@ public:
private:
static const unsigned kRawDataLimit = 128;
char rawData[kRawDataLimit];
char* pos = rawData;
uint8_t rawData[kRawDataLimit];
uint8_t* pos = rawData;
uint32_t stackOffset = 0;
// We will remember the FDE location to write some of the fields like entry length, function start and size later
char* fdeEntryStart = nullptr;
uint8_t* fdeEntryStart = nullptr;
};
} // namespace CodeGen


@ -1,6 +1,8 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/AssemblyBuilderX64.h"
#include "ByteUtils.h"
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
@ -46,44 +48,6 @@ const unsigned AVX_F2 = 0b11;
const unsigned kMaxAlign = 16;
// Utility functions to correctly write data on big endian machines
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#include <endian.h>
static void writeu32(uint8_t* target, uint32_t value)
{
value = htole32(value);
memcpy(target, &value, sizeof(value));
}
static void writeu64(uint8_t* target, uint64_t value)
{
value = htole64(value);
memcpy(target, &value, sizeof(value));
}
static void writef32(uint8_t* target, float value)
{
static_assert(sizeof(float) == sizeof(uint32_t), "type size must match to reinterpret data");
uint32_t data;
memcpy(&data, &value, sizeof(value));
writeu32(target, data);
}
static void writef64(uint8_t* target, double value)
{
static_assert(sizeof(double) == sizeof(uint64_t), "type size must match to reinterpret data");
uint64_t data;
memcpy(&data, &value, sizeof(value));
writeu64(target, data);
}
#else
#define writeu32(target, value) memcpy(target, &value, sizeof(value))
#define writeu64(target, value) memcpy(target, &value, sizeof(value))
#define writef32(target, value) memcpy(target, &value, sizeof(value))
#define writef64(target, value) memcpy(target, &value, sizeof(value))
#endif
AssemblyBuilderX64::AssemblyBuilderX64(bool logText)
: logText(logText)
{
@ -1014,16 +978,14 @@ void AssemblyBuilderX64::placeImm32(int32_t imm)
{
uint8_t* pos = codePos;
LUAU_ASSERT(pos + sizeof(imm) < codeEnd);
writeu32(pos, imm);
codePos = pos + sizeof(imm);
codePos = writeu32(pos, imm);
}
void AssemblyBuilderX64::placeImm64(int64_t imm)
{
uint8_t* pos = codePos;
LUAU_ASSERT(pos + sizeof(imm) < codeEnd);
writeu64(pos, imm);
codePos = pos + sizeof(imm);
codePos = writeu64(pos, imm);
}
void AssemblyBuilderX64::placeLabel(Label& label)

CodeGen/src/ByteUtils.h (new file, 78 lines)

@ -0,0 +1,78 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
#include <endian.h>
#endif
#include <string.h>
inline uint8_t* writeu8(uint8_t* target, uint8_t value)
{
*target = value;
return target + sizeof(value);
}
inline uint8_t* writeu32(uint8_t* target, uint32_t value)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
value = htole32(value);
#endif
memcpy(target, &value, sizeof(value));
return target + sizeof(value);
}
inline uint8_t* writeu64(uint8_t* target, uint64_t value)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
value = htole64(value);
#endif
memcpy(target, &value, sizeof(value));
return target + sizeof(value);
}
inline uint8_t* writeuleb128(uint8_t* target, uint64_t value)
{
do
{
uint8_t byte = value & 0x7f;
value >>= 7;
if (value)
byte |= 0x80;
*target++ = byte;
} while (value);
return target;
}
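A worked example of the ULEB128 loop above (the classic DWARF example value; bytes computed by hand):

// 624485 = 0x98765; low 7 bits first, continuation bit 0x80 on all but the last:
//   uint8_t buf[10];
//   uint8_t* end = writeuleb128(buf, 624485);
//   // buf holds { 0xE5, 0x8E, 0x26 } and end - buf == 3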
inline uint8_t* writef32(uint8_t* target, float value)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
static_assert(sizeof(float) == sizeof(uint32_t), "type size must match to reinterpret data");
uint32_t data;
memcpy(&data, &value, sizeof(value));
writeu32(target, data);
#else
memcpy(target, &value, sizeof(value));
#endif
return target + sizeof(value);
}
inline uint8_t* writef64(uint8_t* target, double value)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
static_assert(sizeof(double) == sizeof(uint64_t), "type size must match to reinterpret data");
uint64_t data;
memcpy(&data, &value, sizeof(value));
writeu64(target, data);
#else
memcpy(target, &value, sizeof(value));
#endif
return target + sizeof(value);
}
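Note: each helper returns the cursor advanced past what it wrote, which lets serialization code thread a single pointer through consecutive writes. A hypothetical usage sketch (buffer and values invented for illustration):
uint8_t buffer[32];
uint8_t* pos = buffer;
pos = writeu8(pos, 0x42);        // 1 byte
pos = writeu32(pos, 0xdeadbeef); // 4 bytes, little-endian on any host
pos = writeuleb128(pos, 300);    // 2 bytes: 0xac 0x02
pos = writef64(pos, 3.14);       // 8 bytes
size_t written = size_t(pos - buffer); // 15 bytes total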

CodeGen/src/Fallbacks.cpp Normal file

File diff suppressed because it is too large

CodeGen/src/Fallbacks.h Normal file

@ -0,0 +1,93 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This file was generated by 'tools/lvmexecute_split.py' script, do not modify it by hand
#pragma once
#include <stdint.h>
struct lua_State;
struct Closure;
typedef uint32_t Instruction;
typedef struct lua_TValue TValue;
typedef TValue* StkId;
const Instruction* execute_LOP_NOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_LOADNIL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_LOADB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_LOADN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_LOADK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_MOVE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETGLOBAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SETGLOBAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETUPVAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SETUPVAL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_CLOSEUPVALS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETIMPORT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETTABLEKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SETTABLEKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SETTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETTABLEN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SETTABLEN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_NEWCLOSURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_NAMECALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_CALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_RETURN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIF(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFEQ(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOTEQ(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOTLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFLT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPIFNOTLT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_ADD(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SUB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_MUL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DIV(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_MOD(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_POW(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_ADDK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SUBK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_MULK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DIVK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_MODK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_POWK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_AND(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_OR(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_ANDK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_ORK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_CONCAT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_NOT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_MINUS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_LENGTH(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_NEWTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DUPTABLE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_SETLIST(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FORNPREP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FORNLOOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FORGPREP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FORGLOOP(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FORGPREP_INEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_FORGLOOP_INEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FORGPREP_NEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_FORGLOOP_NEXT(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_GETVARARGS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DUPCLOSURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_PREPVARARGS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPBACK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_LOADKX(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPX(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_COVERAGE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_CAPTURE(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_JUMPIFEQK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_DEP_JUMPIFNOTEQK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL1(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL2(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_FASTCALL2K(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_BREAK(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKNIL(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKB(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKN(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);
const Instruction* execute_LOP_JUMPXEQKS(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k);

CodeGen/src/FallbacksProlog.h Normal file

@ -0,0 +1,56 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "lvm.h"
#include "lbuiltins.h"
#include "lbytecode.h"
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lgc.h"
#include "lmem.h"
#include "lnumutils.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include <string.h>
// All external function calls that can cause stack realloc or Lua calls have to be wrapped in VM_PROTECT
// This makes sure that we save the pc (in case the Lua call needs to generate a backtrace) before the call,
// and restores the stack pointer afterwards, in case the stack gets reallocated
// Should only be used on the slow paths.
#define VM_PROTECT(x) \
{ \
L->ci->savedpc = pc; \
{ \
x; \
}; \
base = L->base; \
}
// Some external functions can cause an error, but never reallocate the stack; for these, VM_PROTECT_PC() is
// a cheaper version of VM_PROTECT that can be called before the external call.
#define VM_PROTECT_PC() L->ci->savedpc = pc
#define VM_REG(i) (LUAU_ASSERT(unsigned(i) < unsigned(L->top - base)), &base[i])
#define VM_KV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->l.p->sizek)), &k[i])
#define VM_UV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->nupvalues)), &cl->l.uprefs[i])
#define VM_PATCH_C(pc, slot) *const_cast<Instruction*>(pc) = ((uint8_t(slot) << 24) | (0x00ffffffu & *(pc)))
#define VM_PATCH_E(pc, slot) *const_cast<Instruction*>(pc) = ((uint32_t(slot) << 8) | (0x000000ffu & *(pc)))
#define VM_INTERRUPT() \
{ \
void (*interrupt)(lua_State*, int) = L->global->cb.interrupt; \
if (LUAU_UNLIKELY(!!interrupt)) \
{ /* the interrupt hook is called right before we advance pc */ \
VM_PROTECT(L->ci->savedpc++; interrupt(L, -1)); \
if (L->status != 0) \
{ \
L->ci->savedpc--; \
return NULL; \
} \
} \
}
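Note: to make the VM_PROTECT contract concrete, a slow-path handler wraps exactly the call that may run Lua code or reallocate the stack, so that pc is saved before the call and base is reloaded after it. A sketch of such a handler body, with L, pc, base, cl and k in scope (the opcode choice and register usage are invented for illustration):
Instruction insn = *pc++;
StkId ra = VM_REG(LUAU_INSN_A(insn));
StkId rb = VM_REG(LUAU_INSN_B(insn));
StkId rc = VM_REG(LUAU_INSN_C(insn));
// luaV_gettable may invoke an __index metamethod, which can run Lua code and
// reallocate the stack; the macro saves pc first and refreshes base afterwards
VM_PROTECT(luaV_gettable(L, rb, rc, ra));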

CodeGen/src/UnwindBuilderDwarf2.cpp

@ -1,6 +1,8 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/UnwindBuilderDwarf2.h"
#include "ByteUtils.h"
#include <string.h>
// General information about Dwarf2 format can be found at:
@ -11,40 +13,6 @@
// https://refspecs.linuxbase.org/elf/x86_64-abi-0.99.pdf [System V Application Binary Interface (AMD64 Architecture Processor Supplement)]
// Interaction between Dwarf2 and System V ABI can be found in sections '3.6.2 DWARF Register Number Mapping' and '4.2.4 EH_FRAME sections'
static char* writeu8(char* target, uint8_t value)
{
memcpy(target, &value, sizeof(value));
return target + sizeof(value);
}
static char* writeu32(char* target, uint32_t value)
{
memcpy(target, &value, sizeof(value));
return target + sizeof(value);
}
static char* writeu64(char* target, uint64_t value)
{
memcpy(target, &value, sizeof(value));
return target + sizeof(value);
}
static char* writeuleb128(char* target, uint64_t value)
{
do
{
char byte = value & 0x7f;
value >>= 7;
if (value)
byte |= 0x80;
*target++ = byte;
} while (value);
return target;
}
// Call frame instruction opcodes
#define DW_CFA_advance_loc 0x40
#define DW_CFA_offset 0x80
@ -104,7 +72,7 @@ const int kFdeInitialLocationOffset = 8;
const int kFdeAddressRangeOffset = 16;
// Define canonical frame address expression as [reg + offset]
static char* defineCfaExpression(char* pos, int dwReg, uint32_t stackOffset)
static uint8_t* defineCfaExpression(uint8_t* pos, int dwReg, uint32_t stackOffset)
{
pos = writeu8(pos, DW_CFA_def_cfa);
pos = writeuleb128(pos, dwReg);
@ -113,14 +81,14 @@ static char* defineCfaExpression(char* pos, int dwReg, uint32_t stackOffset)
}
// Update offset value in canonical frame address expression
static char* defineCfaExpressionOffset(char* pos, uint32_t stackOffset)
static uint8_t* defineCfaExpressionOffset(uint8_t* pos, uint32_t stackOffset)
{
pos = writeu8(pos, DW_CFA_def_cfa_offset);
pos = writeuleb128(pos, stackOffset);
return pos;
}
static char* defineSavedRegisterLocation(char* pos, int dwReg, uint32_t stackOffset)
static uint8_t* defineSavedRegisterLocation(uint8_t* pos, int dwReg, uint32_t stackOffset)
{
LUAU_ASSERT(stackOffset % kDataAlignFactor == 0 && "stack offsets have to be measured in kDataAlignFactor units");
@ -138,14 +106,14 @@ static char* defineSavedRegisterLocation(char* pos, int dwReg, uint32_t stackOff
return pos;
}
static char* advanceLocation(char* pos, uint8_t offset)
static uint8_t* advanceLocation(uint8_t* pos, uint8_t offset)
{
pos = writeu8(pos, DW_CFA_advance_loc1);
pos = writeu8(pos, offset);
return pos;
}
static char* alignPosition(char* start, char* pos)
static uint8_t* alignPosition(uint8_t* start, uint8_t* pos)
{
size_t size = pos - start;
size_t pad = ((size + kDwarfAlign - 1) & ~(kDwarfAlign - 1)) - size;
@ -163,7 +131,7 @@ namespace CodeGen
void UnwindBuilderDwarf2::start()
{
char* cieLength = pos;
uint8_t* cieLength = pos;
pos = writeu32(pos, 0); // Length (to be filled later)
pos = writeu32(pos, 0); // CIE id. 0 -- .eh_frame
@ -245,8 +213,8 @@ void UnwindBuilderDwarf2::finalize(char* target, void* funcAddress, size_t funcS
memcpy(target, rawData, getSize());
unsigned fdeEntryStartPos = unsigned(fdeEntryStart - rawData);
writeu64(target + fdeEntryStartPos + kFdeInitialLocationOffset, uintptr_t(funcAddress));
writeu64(target + fdeEntryStartPos + kFdeAddressRangeOffset, funcSize);
writeu64((uint8_t*)target + fdeEntryStartPos + kFdeInitialLocationOffset, uintptr_t(funcAddress));
writeu64((uint8_t*)target + fdeEntryStartPos + kFdeAddressRangeOffset, funcSize);
}
} // namespace CodeGen

Compiler/src/BuiltinFolding.cpp

@ -3,6 +3,7 @@
#include "BuiltinFolding.h"
#include <vector>
#include <math.h>
namespace Luau

Makefile

@ -105,7 +105,7 @@ endif
$(AST_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include
$(COMPILER_OBJECTS): CXXFLAGS+=-std=c++17 -ICompiler/include -ICommon/include -IAst/include
$(ANALYSIS_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -IAnalysis/include
$(CODEGEN_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -ICodeGen/include
$(CODEGEN_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -ICodeGen/include -IVM/include -IVM/src # Code generation needs VM internals
$(VM_OBJECTS): CXXFLAGS+=-std=c++11 -ICommon/include -IVM/include
$(ISOCLINE_OBJECTS): CXXFLAGS+=-Wno-unused-function -Iextern/isocline/include
$(TESTS_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/include -IAnalysis/include -ICodeGen/include -IVM/include -ICLI -Iextern -DDOCTEST_CONFIG_DOUBLE_STRINGIFY

Sources.cmake

@ -69,8 +69,13 @@ target_sources(Luau.CodeGen PRIVATE
CodeGen/src/AssemblyBuilderX64.cpp
CodeGen/src/CodeAllocator.cpp
CodeGen/src/CodeBlockUnwind.cpp
CodeGen/src/Fallbacks.cpp
CodeGen/src/UnwindBuilderDwarf2.cpp
CodeGen/src/UnwindBuilderWin.cpp
CodeGen/src/ByteUtils.h
CodeGen/src/Fallbacks.h
CodeGen/src/FallbacksProlog.h
)
# Luau.Analysis Sources

VM/include/lua.h

@ -304,6 +304,7 @@ LUA_API size_t lua_totalbytes(lua_State* L, int category);
LUA_API l_noret lua_error(lua_State* L);
LUA_API int lua_next(lua_State* L, int idx);
LUA_API int lua_rawiter(lua_State* L, int idx, int iter);
LUA_API void lua_concat(lua_State* L, int n);

VM/include/luaconf.h

@ -109,6 +109,11 @@
#define LUA_MAXCAPTURES 32
#endif
// enables callbacks to redirect code execution from Luau VM to a custom implementation
#ifndef LUA_CUSTOM_EXECUTION
#define LUA_CUSTOM_EXECUTION 0
#endif
// }==================================================================
/*

VM/src/lapi.cpp

@ -51,6 +51,12 @@ const char* luau_ident = "$Luau: Copyright (C) 2019-2022 Roblox Corporation $\n"
L->top++; \
}
#define api_update_top(L, p) \
{ \
api_check(L, p >= L->base && p < L->ci->top); \
L->top = p; \
}
#define updateatom(L, ts) \
{ \
if (ts->atom == ATOM_UNDEF) \
@ -851,7 +857,7 @@ void lua_rawsetfield(lua_State* L, int idx, const char* k)
StkId t = index2addr(L, idx);
api_check(L, ttistable(t));
if (hvalue(t)->readonly)
luaG_runerror(L, "Attempt to modify a readonly table");
luaG_readonlyerror(L);
setobj2t(L, luaH_setstr(L, hvalue(t), luaS_new(L, k)), L->top - 1);
luaC_barriert(L, hvalue(t), L->top - 1);
L->top--;
@ -1204,6 +1210,52 @@ int lua_next(lua_State* L, int idx)
return more;
}
int lua_rawiter(lua_State* L, int idx, int iter)
{
luaC_threadbarrier(L);
StkId t = index2addr(L, idx);
api_check(L, ttistable(t));
api_check(L, iter >= 0);
Table* h = hvalue(t);
int sizearray = h->sizearray;
// first we advance iter through the array portion
for (; unsigned(iter) < unsigned(sizearray); ++iter)
{
TValue* e = &h->array[iter];
if (!ttisnil(e))
{
StkId top = L->top;
setnvalue(top + 0, double(iter + 1));
setobj2s(L, top + 1, e);
api_update_top(L, top + 2);
return iter + 1;
}
}
int sizenode = 1 << h->lsizenode;
// then we advance iter through the hash portion
for (; unsigned(iter - sizearray) < unsigned(sizenode); ++iter)
{
LuaNode* n = &h->node[iter - sizearray];
if (!ttisnil(gval(n)))
{
StkId top = L->top;
getnodekey(L, top + 0, n);
setobj2s(L, top + 1, gval(n));
api_update_top(L, top + 2);
return iter + 1;
}
}
// traversal finished
return -1;
}
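Note: the return value doubles as the resume point, so callers drive the traversal by feeding each result back in until -1 comes back. A minimal sketch, assuming the table sits at absolute stack index 1 (the index must be absolute because each successful step pushes a key and a value); the ApiIter conformance test added later in this diff exercises the same pattern:
double sum = 0;
for (int index = 0; (index = lua_rawiter(L, 1, index)) >= 0;)
{
    sum += lua_tonumber(L, -2); // key
    sum += lua_tonumber(L, -1); // value
    lua_pop(L, 2);              // unlike lua_next, pop both key and value
}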
void lua_concat(lua_State* L, int n)
{
api_checknelems(L, n);
@ -1382,7 +1434,7 @@ void lua_cleartable(lua_State* L, int idx)
api_check(L, ttistable(t));
Table* tt = hvalue(t);
if (tt->readonly)
luaG_runerror(L, "Attempt to modify a readonly table");
luaG_readonlyerror(L);
luaH_clear(tt);
}

VM/src/ldebug.cpp

@ -340,6 +340,11 @@ void luaG_breakpoint(lua_State* L, Proto* p, int line, bool enable)
p->code[i] |= op;
LUAU_ASSERT(LUAU_INSN_OP(p->code[i]) == op);
#if LUA_CUSTOM_EXECUTION
if (L->global->ecb.setbreakpoint)
L->global->ecb.setbreakpoint(L, p, i);
#endif
// note: this is important!
// we only patch the *first* instruction in each proto that's attributed to a given line
// this can be changed, but it requires making patching a bit more nuanced so that we don't patch AUX words

VM/src/lfunc.cpp

@ -31,6 +31,11 @@ Proto* luaF_newproto(lua_State* L)
f->source = NULL;
f->debugname = NULL;
f->debuginsn = NULL;
#if LUA_CUSTOM_EXECUTION
f->execdata = NULL;
#endif
return f;
}
@ -149,6 +154,15 @@ void luaF_freeproto(lua_State* L, Proto* f, lua_Page* page)
luaM_freearray(L, f->upvalues, f->sizeupvalues, TString*, f->memcat);
if (f->debuginsn)
luaM_freearray(L, f->debuginsn, f->sizecode, uint8_t, f->memcat);
#if LUA_CUSTOM_EXECUTION
if (f->execdata)
{
LUAU_ASSERT(L->global->ecb.destroy);
L->global->ecb.destroy(L, f);
}
#endif
luaM_freegco(L, f, sizeof(Proto), f->memcat, page);
}

VM/src/lobject.h

@ -281,6 +281,10 @@ typedef struct Proto
TString* debugname;
uint8_t* debuginsn; // a copy of code[] array with just opcodes
#if LUA_CUSTOM_EXECUTION
void* execdata;
#endif
GCObject* gclist;

VM/src/lstate.cpp

@ -212,6 +212,11 @@ lua_State* lua_newstate(lua_Alloc f, void* ud)
g->memcatbytes[0] = sizeof(LG);
g->cb = lua_Callbacks();
#if LUA_CUSTOM_EXECUTION
g->ecb = lua_ExecutionCallbacks();
#endif
g->gcstats = GCStats();
#ifdef LUAI_GCMETRICS

VM/src/lstate.h

@ -146,6 +146,19 @@ struct GCMetrics
};
#endif
#if LUA_CUSTOM_EXECUTION
// Callbacks that can be used to redirect code execution from the Luau VM to a custom implementation (AoT/JIT/sandboxing/...)
typedef struct lua_ExecutionCallbacks
{
void* context;
void (*destroy)(lua_State* L, Proto* proto); // called when function is destroyed
int (*enter)(lua_State* L, Proto* proto); // called when function is about to start/resume (when execdata is present), return 0 to exit VM
void (*setbreakpoint)(lua_State* L, Proto* proto, int line); // called when a breakpoint is set in a function
} lua_ExecutionCallbacks;
#endif
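Note: together with LUA_CUSTOM_EXECUTION in luaconf.h, the intended wiring is that an embedder fills g->ecb once, the VM hands off any function whose proto carries execdata, and enter's return value picks between resuming the bytecode interpreter (1) and leaving luau_execute (0). A speculative registration sketch; the callback bodies are placeholders, and since ecb lives in VM-internal state this would sit next to the VM rather than behind the public API:
static void onDestroy(lua_State* L, Proto* proto)
{
    // free whatever the custom implementation stashed in proto->execdata
}
static int onEnter(lua_State* L, Proto* proto)
{
    // run custom code; return 1 to fall back to the bytecode VM, 0 to exit
    return 1;
}
static void onSetBreakpoint(lua_State* L, Proto* proto, int line)
{
    // patch or invalidate the custom code for this proto
}
static void installExecutionCallbacks(lua_State* L)
{
    lua_ExecutionCallbacks* ecb = &L->global->ecb;
    ecb->context = NULL;
    ecb->destroy = onDestroy;
    ecb->enter = onEnter;
    ecb->setbreakpoint = onSetBreakpoint;
}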
/*
** `global state', shared by all threads of this state
*/
@ -202,6 +215,10 @@ typedef struct global_State
lua_Callbacks cb;
#if LUA_CUSTOM_EXECUTION
lua_ExecutionCallbacks ecb;
#endif
GCStats gcstats;
#ifdef LUAI_GCMETRICS

VM/src/lvm.h

@ -24,6 +24,9 @@ LUAI_FUNC void luaV_gettable(lua_State* L, const TValue* t, TValue* key, StkId v
LUAI_FUNC void luaV_settable(lua_State* L, const TValue* t, TValue* key, StkId val);
LUAI_FUNC void luaV_concat(lua_State* L, int total, int last);
LUAI_FUNC void luaV_getimport(lua_State* L, Table* env, TValue* k, uint32_t id, bool propagatenil);
LUAI_FUNC void luaV_prepareFORN(lua_State* L, StkId plimit, StkId pstep, StkId pinit);
LUAI_FUNC void luaV_callTM(lua_State* L, int nparams, int res);
LUAI_FUNC void luaV_tryfuncTM(lua_State* L, StkId func);
LUAI_FUNC void luau_execute(lua_State* L);
LUAI_FUNC int luau_precall(lua_State* L, struct lua_TValue* func, int nresults);

VM/src/lvmexecute.cpp

@ -131,84 +131,6 @@
goto dispatchContinue
#endif
LUAU_NOINLINE static void luau_prepareFORN(lua_State* L, StkId plimit, StkId pstep, StkId pinit)
{
if (!ttisnumber(pinit) && !luaV_tonumber(pinit, pinit))
luaG_forerror(L, pinit, "initial value");
if (!ttisnumber(plimit) && !luaV_tonumber(plimit, plimit))
luaG_forerror(L, plimit, "limit");
if (!ttisnumber(pstep) && !luaV_tonumber(pstep, pstep))
luaG_forerror(L, pstep, "step");
}
// calls a C function f with no yielding support; optionally saves one resulting value to the res register
// the function and arguments have to already be pushed to L->top
LUAU_NOINLINE static void luau_callTM(lua_State* L, int nparams, int res)
{
++L->nCcalls;
if (L->nCcalls >= LUAI_MAXCCALLS)
luaD_checkCstack(L);
luaD_checkstack(L, LUA_MINSTACK);
StkId top = L->top;
StkId fun = top - nparams - 1;
CallInfo* ci = incr_ci(L);
ci->func = fun;
ci->base = fun + 1;
ci->top = top + LUA_MINSTACK;
ci->savedpc = NULL;
ci->flags = 0;
ci->nresults = (res >= 0);
LUAU_ASSERT(ci->top <= L->stack_last);
LUAU_ASSERT(ttisfunction(ci->func));
LUAU_ASSERT(clvalue(ci->func)->isC);
L->base = fun + 1;
LUAU_ASSERT(L->top == L->base + nparams);
lua_CFunction func = clvalue(fun)->c.f;
int n = func(L);
LUAU_ASSERT(n >= 0); // yields should have been blocked by nCcalls
// ci is our callinfo, cip is our parent
// note that we read L->ci again since it may have been reallocated by the call
CallInfo* cip = L->ci - 1;
// copy return value into parent stack
if (res >= 0)
{
if (n > 0)
{
setobj2s(L, &cip->base[res], L->top - n);
}
else
{
setnilvalue(&cip->base[res]);
}
}
L->ci = cip;
L->base = cip->base;
L->top = cip->top;
--L->nCcalls;
}
LUAU_NOINLINE static void luau_tryfuncTM(lua_State* L, StkId func)
{
const TValue* tm = luaT_gettmbyobj(L, func, TM_CALL);
if (!ttisfunction(tm))
luaG_typeerror(L, func, "call");
for (StkId p = L->top; p > func; p--) // open space for metamethod
setobjs2s(L, p, p - 1);
L->top++; // stack space pre-allocated by the caller
setobj2s(L, func, tm); // tag method is the new function to be called
}
LUAU_NOINLINE void luau_callhook(lua_State* L, lua_Hook hook, void* userdata)
{
ptrdiff_t base = savestack(L, L->base);
@ -284,6 +206,20 @@ static void luau_execute(lua_State* L)
LUAU_ASSERT(L->isactive);
LUAU_ASSERT(!isblack(obj2gco(L))); // we don't use luaC_threadbarrier because active threads never turn black
#if LUA_CUSTOM_EXECUTION
Proto* p = clvalue(L->ci->func)->l.p;
if (p->execdata)
{
if (L->global->ecb.enter(L, p) == 0)
return;
}
reentry:
#endif
LUAU_ASSERT(isLua(L->ci));
pc = L->ci->savedpc;
cl = clvalue(L->ci->func);
base = L->base;
@ -564,7 +500,7 @@ static void luau_execute(lua_State* L)
L->top = top + 3;
L->cachedslot = LUAU_INSN_C(insn);
VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn)));
VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, L->cachedslot);
VM_NEXT();
@ -601,7 +537,7 @@ static void luau_execute(lua_State* L)
L->top = top + 3;
L->cachedslot = LUAU_INSN_C(insn);
VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn)));
VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, L->cachedslot);
VM_NEXT();
@ -680,7 +616,7 @@ static void luau_execute(lua_State* L)
L->top = top + 4;
L->cachedslot = LUAU_INSN_C(insn);
VM_PROTECT(luau_callTM(L, 3, -1));
VM_PROTECT(luaV_callTM(L, 3, -1));
// save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
VM_PATCH_C(pc - 2, L->cachedslot);
VM_NEXT();
@ -973,7 +909,7 @@ static void luau_execute(lua_State* L)
// slow-path: not a function call
if (LUAU_UNLIKELY(!ttisfunction(ra)))
{
VM_PROTECT(luau_tryfuncTM(L, ra));
VM_PROTECT(luaV_tryfuncTM(L, ra));
argtop++; // __call adds an extra self
}
@ -1009,6 +945,18 @@ static void luau_execute(lua_State* L)
setnilvalue(argi++); // complete missing arguments
L->top = p->is_vararg ? argi : ci->top;
#if LUA_CUSTOM_EXECUTION
if (p->execdata)
{
LUAU_ASSERT(L->global->ecb.enter);
if (L->global->ecb.enter(L, p) == 1)
goto reentry;
else
goto exit;
}
#endif
// reentry
pc = p->code;
cl = ccl;
@ -1092,11 +1040,26 @@ static void luau_execute(lua_State* L)
LUAU_ASSERT(isLua(L->ci));
Closure* nextcl = clvalue(cip->func);
Proto* nextproto = nextcl->l.p;
#if LUA_CUSTOM_EXECUTION
if (nextproto->execdata)
{
LUAU_ASSERT(L->global->ecb.enter);
if (L->global->ecb.enter(L, nextproto) == 1)
goto reentry;
else
goto exit;
}
#endif
// reentry
pc = cip->savedpc;
cl = clvalue(cip->func);
cl = nextcl;
base = L->base;
k = cl->l.p->k;
k = nextproto->k;
VM_NEXT();
}
@ -1212,7 +1175,7 @@ static void luau_execute(lua_State* L)
int res = int(top - base);
L->top = top + 3;
VM_PROTECT(luau_callTM(L, 2, res));
VM_PROTECT(luaV_callTM(L, 2, res));
pc += !l_isfalse(&base[res]) ? LUAU_INSN_D(insn) : 1;
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
VM_NEXT();
@ -1324,7 +1287,7 @@ static void luau_execute(lua_State* L)
int res = int(top - base);
L->top = top + 3;
VM_PROTECT(luau_callTM(L, 2, res));
VM_PROTECT(luaV_callTM(L, 2, res));
pc += l_isfalse(&base[res]) ? LUAU_INSN_D(insn) : 1;
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
VM_NEXT();
@ -1519,7 +1482,7 @@ static void luau_execute(lua_State* L)
setobj2s(L, top + 2, rc);
L->top = top + 3;
VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn)));
VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
VM_NEXT();
}
else
@ -1565,7 +1528,7 @@ static void luau_execute(lua_State* L)
setobj2s(L, top + 2, rc);
L->top = top + 3;
VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn)));
VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
VM_NEXT();
}
else
@ -1626,7 +1589,7 @@ static void luau_execute(lua_State* L)
setobj2s(L, top + 2, rc);
L->top = top + 3;
VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn)));
VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
VM_NEXT();
}
else
@ -1687,7 +1650,7 @@ static void luau_execute(lua_State* L)
setobj2s(L, top + 2, rc);
L->top = top + 3;
VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn)));
VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
VM_NEXT();
}
else
@ -1819,7 +1782,7 @@ static void luau_execute(lua_State* L)
setobj2s(L, top + 2, kv);
L->top = top + 3;
VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn)));
VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
VM_NEXT();
}
else
@ -1865,7 +1828,7 @@ static void luau_execute(lua_State* L)
setobj2s(L, top + 2, kv);
L->top = top + 3;
VM_PROTECT(luau_callTM(L, 2, LUAU_INSN_A(insn)));
VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
VM_NEXT();
}
else
@ -2029,7 +1992,7 @@ static void luau_execute(lua_State* L)
setobj2s(L, top + 1, rb);
L->top = top + 2;
VM_PROTECT(luau_callTM(L, 1, LUAU_INSN_A(insn)));
VM_PROTECT(luaV_callTM(L, 1, LUAU_INSN_A(insn)));
VM_NEXT();
}
else
@ -2145,7 +2108,7 @@ static void luau_execute(lua_State* L)
// Note: this doesn't reallocate stack so we don't need to recompute ra/base
VM_PROTECT_PC();
luau_prepareFORN(L, ra + 0, ra + 1, ra + 2);
luaV_prepareFORN(L, ra + 0, ra + 1, ra + 2);
}
double limit = nvalue(ra + 0);
@ -2861,7 +2824,7 @@ int luau_precall(lua_State* L, StkId func, int nresults)
{
if (!ttisfunction(func))
{
luau_tryfuncTM(L, func);
luaV_tryfuncTM(L, func);
// L->top is incremented by tryfuncTM
}

VM/src/lvmutils.cpp

@ -508,3 +508,81 @@ void luaV_dolen(lua_State* L, StkId ra, const TValue* rb)
if (!ttisnumber(res))
luaG_runerror(L, "'__len' must return a number"); // note, we can't access rb since stack may have been reallocated
}
LUAU_NOINLINE void luaV_prepareFORN(lua_State* L, StkId plimit, StkId pstep, StkId pinit)
{
if (!ttisnumber(pinit) && !luaV_tonumber(pinit, pinit))
luaG_forerror(L, pinit, "initial value");
if (!ttisnumber(plimit) && !luaV_tonumber(plimit, plimit))
luaG_forerror(L, plimit, "limit");
if (!ttisnumber(pstep) && !luaV_tonumber(pstep, pstep))
luaG_forerror(L, pstep, "step");
}
// calls a C function f with no yielding support; optionally saves one resulting value to the res register
// the function and arguments have to already be pushed to L->top
LUAU_NOINLINE void luaV_callTM(lua_State* L, int nparams, int res)
{
++L->nCcalls;
if (L->nCcalls >= LUAI_MAXCCALLS)
luaD_checkCstack(L);
luaD_checkstack(L, LUA_MINSTACK);
StkId top = L->top;
StkId fun = top - nparams - 1;
CallInfo* ci = incr_ci(L);
ci->func = fun;
ci->base = fun + 1;
ci->top = top + LUA_MINSTACK;
ci->savedpc = NULL;
ci->flags = 0;
ci->nresults = (res >= 0);
LUAU_ASSERT(ci->top <= L->stack_last);
LUAU_ASSERT(ttisfunction(ci->func));
LUAU_ASSERT(clvalue(ci->func)->isC);
L->base = fun + 1;
LUAU_ASSERT(L->top == L->base + nparams);
lua_CFunction func = clvalue(fun)->c.f;
int n = func(L);
LUAU_ASSERT(n >= 0); // yields should have been blocked by nCcalls
// ci is our callinfo, cip is our parent
// note that we read L->ci again since it may have been reallocated by the call
CallInfo* cip = L->ci - 1;
// copy return value into parent stack
if (res >= 0)
{
if (n > 0)
{
setobj2s(L, &cip->base[res], L->top - n);
}
else
{
setnilvalue(&cip->base[res]);
}
}
L->ci = cip;
L->base = cip->base;
L->top = cip->top;
--L->nCcalls;
}
LUAU_NOINLINE void luaV_tryfuncTM(lua_State* L, StkId func)
{
const TValue* tm = luaT_gettmbyobj(L, func, TM_CALL);
if (!ttisfunction(tm))
luaG_typeerror(L, func, "call");
for (StkId p = L->top; p > func; p--) // open space for metamethod
setobjs2s(L, p, p - 1);
L->top++; // stack space pre-allocated by the caller
setobj2s(L, func, tm); // tag method is the new function to be called
}
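Note: luaV_tryfuncTM is what makes a non-function value callable through its __call metamethod; it opens a stack slot, shifts the arguments up, and substitutes the metamethod as the callee. The observable behavior through the public API, as a hedged sketch (callMeta is invented for illustration):
static int callMeta(lua_State* L)
{
    // receives the original table as its first argument
    lua_pushstring(L, "called through __call");
    return 1;
}
// with a lua_State* L in scope:
lua_newtable(L);                             // the object to make callable
lua_newtable(L);                             // its metatable
lua_pushcfunction(L, callMeta, "callMeta");
lua_setfield(L, -2, "__call");
lua_setmetatable(L, -2);
lua_call(L, 0, 1);                           // ends up in callMeta via luaV_tryfuncTM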

tests/Compiler.test.cpp

@ -62,6 +62,29 @@ LOADN R0 5
LOADK R1 K0
RETURN R0 2
)");
CHECK_EQ("\n" + bcb.dumpEverything(), R"(
Function 0 (??):
LOADN R0 5
LOADK R1 K0
RETURN R0 2
)");
}
TEST_CASE("CompileError")
{
std::string source = "local " + rep("a,", 300) + "a = ...";
// fails to parse
std::string bc1 = Luau::compile(source + " !#*$!#$^&!*#&$^*");
// parses, but fails to compile (too many locals)
std::string bc2 = Luau::compile(source);
// 0 acts as a special marker for error bytecode
CHECK_EQ(bc1[0], 0);
CHECK_EQ(bc2[0], 0);
}
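Note: because a leading 0 byte marks error bytecode, an embedder can detect a failed compile before touching the VM; luau_load reports the same error if the blob is loaded anyway, so this is only an optional fast path. A hypothetical sketch inside a loader function (assuming the bytes after the marker carry the compiler's error message):
std::string bytecode = Luau::compile(source);
if (!bytecode.empty() && bytecode[0] == 0)
{
    fprintf(stderr, "compile error: %s\n", bytecode.c_str() + 1);
    return;
}
luau_load(L, chunkname, bytecode.data(), bytecode.size(), 0);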
TEST_CASE("LocalsDirectReference")
@ -1230,6 +1253,27 @@ RETURN R0 0
)");
}
TEST_CASE("UnaryBasic")
{
CHECK_EQ("\n" + compileFunction0("local a = ... return not a"), R"(
GETVARARGS R0 1
NOT R1 R0
RETURN R1 1
)");
CHECK_EQ("\n" + compileFunction0("local a = ... return -a"), R"(
GETVARARGS R0 1
MINUS R1 R0
RETURN R1 1
)");
CHECK_EQ("\n" + compileFunction0("local a = ... return #a"), R"(
GETVARARGS R0 1
LENGTH R1 R0
RETURN R1 1
)");
}
TEST_CASE("InterpStringWithNoExpressions")
{
ScopedFastFlag sff{"LuauInterpolatedStringBaseSupport", true};
@ -4975,6 +5019,27 @@ MOVE R1 R0
CALL R1 0 1
RETURN R1 1
)");
// we can't inline any functions in modules with getfenv/setfenv
CHECK_EQ("\n" + compileFunction(R"(
local function foo()
return 42
end
local x = foo()
getfenv()
return x
)",
1, 2),
R"(
DUPCLOSURE R0 K0
MOVE R1 R0
CALL R1 0 1
GETIMPORT R2 2
CALL R2 0 0
RETURN R1 1
)");
}
TEST_CASE("InlineNestedLoops")
@ -6101,6 +6166,7 @@ return
bit32.extract(-1, 31),
bit32.replace(100, 1, 0),
math.log(100, 10),
typeof(nil),
(type("fin"))
)",
0, 2),
@ -6156,7 +6222,8 @@ LOADN R47 1
LOADN R48 101
LOADN R49 2
LOADK R50 K3
RETURN R0 51
LOADK R51 K4
RETURN R0 52
)");
}

tests/Conformance.test.cpp

@ -786,6 +786,44 @@ TEST_CASE("ApiTables")
lua_pop(L, 1);
}
TEST_CASE("ApiIter")
{
StateRef globalState(luaL_newstate(), lua_close);
lua_State* L = globalState.get();
lua_newtable(L);
lua_pushnumber(L, 123.0);
lua_setfield(L, -2, "key");
lua_pushnumber(L, 456.0);
lua_rawsetfield(L, -2, "key2");
lua_pushstring(L, "test");
lua_rawseti(L, -2, 1);
// Lua-compatible iteration interface: lua_next
double sum1 = 0;
lua_pushnil(L);
while (lua_next(L, -2))
{
sum1 += lua_tonumber(L, -2); // key
sum1 += lua_tonumber(L, -1); // value
lua_pop(L, 1); // pop value, key is used by lua_next
}
CHECK(sum1 == 580);
// Luau iteration interface: lua_rawiter (faster and preferable to lua_next)
double sum2 = 0;
for (int index = 0; index = lua_rawiter(L, -1, index), index >= 0; )
{
sum2 += lua_tonumber(L, -2); // key
sum2 += lua_tonumber(L, -1); // value
lua_pop(L, 2); // pop both key and value
}
CHECK(sum2 == 580);
// pop table
lua_pop(L, 1);
}
TEST_CASE("ApiCalls")
{
StateRef globalState = runConformance("apicalls.lua");

tests/Linter.test.cpp

@ -1705,8 +1705,6 @@ TEST_CASE_FIXTURE(Fixture, "TestStringInterpolation")
TEST_CASE_FIXTURE(Fixture, "IntegerParsing")
{
ScopedFastFlag luauLintParseIntegerIssues{"LuauLintParseIntegerIssues", true};
LintResult result = lint(R"(
local _ = 0b10000000000000000000000000000000000000000000000000000000000000000
local _ = 0x10000000000000000
@ -1720,7 +1718,6 @@ local _ = 0x10000000000000000
// TODO: remove with FFlagLuauErrorDoubleHexPrefix
TEST_CASE_FIXTURE(Fixture, "IntegerParsingDoublePrefix")
{
ScopedFastFlag luauLintParseIntegerIssues{"LuauLintParseIntegerIssues", true};
ScopedFastFlag luauErrorDoubleHexPrefix{"LuauErrorDoubleHexPrefix", false}; // Lint will be available until we start rejecting code
LintResult result = lint(R"(


@ -2,6 +2,7 @@
#include "Fixture.h"
#include "Luau/Common.h"
#include "doctest.h"
#include "Luau/Normalize.h"
@ -747,7 +748,6 @@ TEST_CASE_FIXTURE(Fixture, "cyclic_union")
{
ScopedFastFlag sff[] = {
{"LuauLowerBoundsCalculation", true},
{"LuauFixNormalizationOfCyclicUnions", true},
};
CheckResult result = check(R"(
@ -765,7 +765,6 @@ TEST_CASE_FIXTURE(Fixture, "cyclic_intersection")
{
ScopedFastFlag sff[] = {
{"LuauLowerBoundsCalculation", true},
{"LuauFixNormalizationOfCyclicUnions", true},
};
CheckResult result = check(R"(
@ -784,7 +783,6 @@ TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_indexers")
{
ScopedFastFlag sff[] = {
{"LuauLowerBoundsCalculation", true},
{"LuauFixNormalizationOfCyclicUnions", true},
};
CheckResult result = check(R"(

tests/Parser.test.cpp

@ -682,7 +682,6 @@ TEST_CASE_FIXTURE(Fixture, "parse_numbers_binary")
TEST_CASE_FIXTURE(Fixture, "parse_numbers_error")
{
ScopedFastFlag luauLintParseIntegerIssues{"LuauLintParseIntegerIssues", true};
ScopedFastFlag luauErrorDoubleHexPrefix{"LuauErrorDoubleHexPrefix", true};
CHECK_EQ(getParseError("return 0b123"), "Malformed number");
@ -695,7 +694,6 @@ TEST_CASE_FIXTURE(Fixture, "parse_numbers_error")
TEST_CASE_FIXTURE(Fixture, "parse_numbers_error_soft")
{
ScopedFastFlag luauLintParseIntegerIssues{"LuauLintParseIntegerIssues", true};
ScopedFastFlag luauErrorDoubleHexPrefix{"LuauErrorDoubleHexPrefix", false};
CHECK_EQ(getParseError("return 0x0x0x0x0x0x0x0"), "Malformed number");

tests/TypeInfer.aliases.test.cpp

@ -198,8 +198,12 @@ TEST_CASE_FIXTURE(Fixture, "generic_aliases")
LUAU_REQUIRE_ERROR_COUNT(1, result);
const char* expectedError = "Type '{ v: string }' could not be converted into 'T<number>'\n"
"caused by:\n"
" Property 'v' is not compatible. Type 'string' could not be converted into 'number'";
CHECK(result.errors[0].location == Location{{4, 31}, {4, 44}});
CHECK(toString(result.errors[0]) == "Type '{ v: string }' could not be converted into 'T<number>'");
CHECK(toString(result.errors[0]) == expectedError);
}
TEST_CASE_FIXTURE(Fixture, "dependent_generic_aliases")
@ -215,8 +219,14 @@ TEST_CASE_FIXTURE(Fixture, "dependent_generic_aliases")
LUAU_REQUIRE_ERROR_COUNT(1, result);
const char* expectedError = "Type '{ t: { v: string } }' could not be converted into 'U<number>'\n"
"caused by:\n"
" Property 't' is not compatible. Type '{ v: string }' could not be converted into 'T<number>'\n"
"caused by:\n"
" Property 'v' is not compatible. Type 'string' could not be converted into 'number'";
CHECK(result.errors[0].location == Location{{4, 31}, {4, 52}});
CHECK(toString(result.errors[0]) == "Type '{ t: { v: string } }' could not be converted into 'U<number>'");
CHECK(toString(result.errors[0]) == expectedError);
}
TEST_CASE_FIXTURE(Fixture, "mutually_recursive_generic_aliases")

tests/ProvisionalTests.test.cpp

@ -558,11 +558,10 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "greedy_inference_with_shared_self_triggers_f
CHECK_EQ("Not all codepaths in this function return 'self, a...'.", toString(result.errors[0]));
}
TEST_CASE_FIXTURE(Fixture, "dcr_cant_partially_dispatch_a_constraint")
TEST_CASE_FIXTURE(Fixture, "dcr_can_partially_dispatch_a_constraint")
{
ScopedFastFlag sff[] = {
{"DebugLuauDeferredConstraintResolution", true},
{"LuauSpecialTypesAsterisked", true},
};
CheckResult result = check(R"(
@ -577,7 +576,6 @@ TEST_CASE_FIXTURE(Fixture, "dcr_cant_partially_dispatch_a_constraint")
LUAU_REQUIRE_NO_ERRORS(result);
// We should be able to resolve this to number, but we're not there yet.
// Solving this requires recognizing that we can partially solve the
// following constraint:
//
@ -586,7 +584,7 @@ TEST_CASE_FIXTURE(Fixture, "dcr_cant_partially_dispatch_a_constraint")
// The correct thing for us to do is to consider the constraint dispatched,
// but we need to also record a new constraint number <: *blocked* to finish
// the job later.
CHECK("<a>(a, *error-type*) -> ()" == toString(requireType("prime_iter")));
CHECK("<a>(a, number) -> ()" == toString(requireType("prime_iter")));
}
TEST_CASE_FIXTURE(Fixture, "free_options_cannot_be_unified_together")


@ -314,6 +314,31 @@ caused by:
toString(result.errors[0]));
}
TEST_CASE_FIXTURE(Fixture, "parametric_tagged_union_alias")
{
ScopedFastFlag sff[] = {
{"DebugLuauDeferredConstraintResolution", true},
};
CheckResult result = check(R"(
type Ok<T> = {success: true, result: T}
type Err<T> = {success: false, error: T}
type Result<O, E> = Ok<O> | Err<E>
local a : Result<string, number> = {success = false, result = "hotdogs"}
local b : Result<string, number> = {success = true, result = "hotdogs"}
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
const std::string expectedError = "Type '{ result: string, success: false }' could not be converted into 'Err<number> | Ok<string>'\n"
"caused by:\n"
" None of the union options are compatible. For example: Table type '{ result: string, success: false }'"
" not compatible with type 'Err<number>' because the former is missing field 'error'";
CHECK(toString(result.errors[0]) == expectedError);
}
TEST_CASE_FIXTURE(Fixture, "if_then_else_expression_singleton_options")
{
CheckResult result = check(R"(


@ -1082,7 +1082,7 @@ TEST_CASE_FIXTURE(Fixture, "do_not_bind_a_free_table_to_a_union_containing_that_
)");
}
TEST_CASE_FIXTURE(Fixture, "types stored in astResolvedTypes")
TEST_CASE_FIXTURE(Fixture, "types_stored_in_astResolvedTypes")
{
CheckResult result = check(R"(
type alias = typeof("hello")
@ -1122,4 +1122,41 @@ TEST_CASE_FIXTURE(Fixture, "bidirectional_checking_of_higher_order_function")
CHECK(location.end.line == 4);
}
TEST_CASE_FIXTURE(Fixture, "dcr_can_partially_dispatch_a_constraint")
{
ScopedFastFlag sff[] = {
{"DebugLuauDeferredConstraintResolution", true},
};
CheckResult result = check(R"(
local function hasDivisors(value: number)
end
function prime_iter(state, index)
hasDivisors(index)
index += 1
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
// Solving this requires recognizing that we can't dispatch a constraint
// like this without doing further work:
//
// (*blocked*) -> () <: (number) -> (b...)
//
// We solve this by searching both types for BlockedTypeVars and block the
// constraint on any we find. It also gets the job done, but I'm worried
// about the efficiency of doing so many deep type traversals and it may
// make us more prone to getting stuck on constraint cycles.
//
// If this doesn't pan out, a possible solution is to go further down the
// path of supporting partial constraint dispatch. The way it would work is
// that we'd dispatch the above constraint by binding b... to (), but we
// would append a new constraint number <: *blocked* to the constraint set
// to be solved later. This should be faster and theoretically less prone
// to cyclic constraint dependencies.
CHECK("<a>(a, number) -> ()" == toString(requireType("prime_iter")));
}
TEST_SUITE_END();

tools/faillist.txt

@ -10,7 +10,6 @@ AnnotationTests.luau_ice_triggers_an_ice_handler
AnnotationTests.luau_print_is_magic_if_the_flag_is_set
AnnotationTests.occurs_check_on_cyclic_intersection_typevar
AnnotationTests.occurs_check_on_cyclic_union_typevar
AnnotationTests.too_many_type_params
AnnotationTests.two_type_params
AnnotationTests.use_type_required_from_another_file
AstQuery.last_argument_function_call_type
@ -28,7 +27,6 @@ AutocompleteTest.autocomplete_interpolated_string
AutocompleteTest.autocomplete_on_string_singletons
AutocompleteTest.autocomplete_oop_implicit_self
AutocompleteTest.autocomplete_repeat_middle_keyword
AutocompleteTest.autocomplete_string_singleton_equality
AutocompleteTest.autocomplete_string_singleton_escape
AutocompleteTest.autocomplete_string_singletons
AutocompleteTest.autocomplete_while_middle_keywords
@ -85,7 +83,6 @@ AutocompleteTest.type_correct_expected_argument_type_pack_suggestion
AutocompleteTest.type_correct_expected_argument_type_suggestion
AutocompleteTest.type_correct_expected_argument_type_suggestion_optional
AutocompleteTest.type_correct_expected_argument_type_suggestion_self
AutocompleteTest.type_correct_expected_return_type_pack_suggestion
AutocompleteTest.type_correct_expected_return_type_suggestion
AutocompleteTest.type_correct_full_type_suggestion
AutocompleteTest.type_correct_function_no_parenthesis
@ -113,7 +110,6 @@ BuiltinTests.dont_add_definitions_to_persistent_types
BuiltinTests.find_capture_types
BuiltinTests.find_capture_types2
BuiltinTests.find_capture_types3
BuiltinTests.getfenv
BuiltinTests.global_singleton_types_are_sealed
BuiltinTests.gmatch_capture_types
BuiltinTests.gmatch_capture_types2
@ -130,13 +126,9 @@ BuiltinTests.next_iterator_should_infer_types_and_type_check
BuiltinTests.os_time_takes_optional_date_table
BuiltinTests.pairs_iterator_should_infer_types_and_type_check
BuiltinTests.see_thru_select
BuiltinTests.see_thru_select_count
BuiltinTests.select_on_variadic
BuiltinTests.select_slightly_out_of_range
BuiltinTests.select_way_out_of_range
BuiltinTests.select_with_decimal_argument_is_rounded_down
BuiltinTests.select_with_variadic_typepack_tail
BuiltinTests.select_with_variadic_typepack_tail_and_string_head
BuiltinTests.set_metatable_needs_arguments
BuiltinTests.setmetatable_should_not_mutate_persisted_types
BuiltinTests.sort
@ -147,20 +139,16 @@ BuiltinTests.string_format_arg_types_inference
BuiltinTests.string_format_as_method
BuiltinTests.string_format_correctly_ordered_types
BuiltinTests.string_format_report_all_type_errors_at_correct_positions
BuiltinTests.string_format_tostring_specifier
BuiltinTests.string_format_tostring_specifier_type_constraint
BuiltinTests.string_format_use_correct_argument
BuiltinTests.string_format_use_correct_argument2
BuiltinTests.string_format_use_correct_argument3
BuiltinTests.string_lib_self_noself
BuiltinTests.table_concat_returns_string
BuiltinTests.table_dot_remove_optionally_returns_generic
BuiltinTests.table_freeze_is_generic
BuiltinTests.table_insert_correctly_infers_type_of_array_2_args_overload
BuiltinTests.table_insert_correctly_infers_type_of_array_3_args_overload
BuiltinTests.table_pack
BuiltinTests.table_pack_reduce
BuiltinTests.table_pack_variadic
BuiltinTests.tonumber_returns_optional_number_type
BuiltinTests.tonumber_returns_optional_number_type2
DefinitionTests.class_definition_overload_metamethods
@ -168,7 +156,6 @@ DefinitionTests.declaring_generic_functions
DefinitionTests.definition_file_classes
FrontendTest.ast_node_at_position
FrontendTest.automatically_check_dependent_scripts
FrontendTest.dont_reparse_clean_file_when_linting
FrontendTest.environments
FrontendTest.imported_table_modification_2
FrontendTest.it_should_be_safe_to_stringify_errors_when_full_type_graph_is_discarded
@ -187,11 +174,8 @@ GenericsTests.check_mutual_generic_functions
GenericsTests.correctly_instantiate_polymorphic_member_functions
GenericsTests.do_not_always_instantiate_generic_intersection_types
GenericsTests.do_not_infer_generic_functions
GenericsTests.dont_unify_bound_types
GenericsTests.duplicate_generic_type_packs
GenericsTests.duplicate_generic_types
GenericsTests.error_detailed_function_mismatch_generic_pack
GenericsTests.error_detailed_function_mismatch_generic_types
GenericsTests.factories_of_generics
GenericsTests.generic_argument_count_too_few
GenericsTests.generic_argument_count_too_many
@ -205,30 +189,22 @@ GenericsTests.generic_type_pack_unification2
GenericsTests.generic_type_pack_unification3
GenericsTests.infer_generic_function_function_argument
GenericsTests.infer_generic_function_function_argument_overloaded
GenericsTests.infer_generic_lib_function_function_argument
GenericsTests.infer_generic_methods
GenericsTests.inferred_local_vars_can_be_polytypes
GenericsTests.instantiate_cyclic_generic_function
GenericsTests.instantiate_generic_function_in_assignments
GenericsTests.instantiate_generic_function_in_assignments2
GenericsTests.instantiated_function_argument_names
GenericsTests.instantiation_sharing_types
GenericsTests.local_vars_can_be_instantiated_polytypes
GenericsTests.mutable_state_polymorphism
GenericsTests.no_stack_overflow_from_quantifying
GenericsTests.properties_can_be_instantiated_polytypes
GenericsTests.rank_N_types_via_typeof
GenericsTests.reject_clashing_generic_and_pack_names
GenericsTests.self_recursive_instantiated_param
IntersectionTypes.argument_is_intersection
IntersectionTypes.error_detailed_intersection_all
IntersectionTypes.error_detailed_intersection_part
IntersectionTypes.fx_intersection_as_argument
IntersectionTypes.fx_union_as_argument_fails
IntersectionTypes.index_on_an_intersection_type_with_mixed_types
IntersectionTypes.index_on_an_intersection_type_with_property_guaranteed_to_exist
IntersectionTypes.index_on_an_intersection_type_works_at_arbitrary_depth
IntersectionTypes.no_stack_overflow_from_flattenintersection
IntersectionTypes.overload_is_not_a_function
IntersectionTypes.select_correct_union_fn
IntersectionTypes.should_still_pick_an_overload_whose_arguments_are_unions
IntersectionTypes.table_intersection_write_sealed
@ -236,17 +212,13 @@ IntersectionTypes.table_intersection_write_sealed_indirect
IntersectionTypes.table_write_sealed_indirect
isSubtype.intersection_of_tables
isSubtype.table_with_table_prop
Linter.TableOperations
ModuleTests.clone_self_property
ModuleTests.deepClone_cyclic_table
ModuleTests.do_not_clone_reexports
NonstrictModeTests.delay_function_does_not_require_its_argument_to_return_anything
NonstrictModeTests.for_in_iterator_variables_are_any
NonstrictModeTests.function_parameters_are_any
NonstrictModeTests.inconsistent_module_return_types_are_ok
NonstrictModeTests.inconsistent_return_types_are_ok
NonstrictModeTests.infer_nullary_function
NonstrictModeTests.infer_the_maximum_number_of_values_the_function_could_return
NonstrictModeTests.inline_table_props_are_also_any
NonstrictModeTests.local_tables_are_not_any
NonstrictModeTests.locals_are_any_by_default
@ -294,20 +266,16 @@ ProvisionalTests.greedy_inference_with_shared_self_triggers_function_with_no_ret
ProvisionalTests.invariant_table_properties_means_instantiating_tables_in_call_is_unsound
ProvisionalTests.it_should_be_agnostic_of_actual_size
ProvisionalTests.lower_bounds_calculation_is_too_permissive_with_overloaded_higher_order_functions
ProvisionalTests.lvalue_equals_another_lvalue_with_no_overlap
ProvisionalTests.normalization_fails_on_certain_kinds_of_cyclic_tables
ProvisionalTests.operator_eq_completely_incompatible
ProvisionalTests.pcall_returns_at_least_two_value_but_function_returns_nothing
ProvisionalTests.setmetatable_constrains_free_type_into_free_table
ProvisionalTests.typeguard_inference_incomplete
ProvisionalTests.weirditer_should_not_loop_forever
ProvisionalTests.while_body_are_also_refined
RefinementTest.and_constraint
RefinementTest.and_or_peephole_refinement
RefinementTest.apply_refinements_on_astexprindexexpr_whose_subscript_expr_is_constant_string
RefinementTest.assert_a_to_be_truthy_then_assert_a_to_be_number
RefinementTest.assert_non_binary_expressions_actually_resolve_constraints
RefinementTest.assign_table_with_refined_property_with_a_similar_type_is_illegal
RefinementTest.call_a_more_specific_function_using_typeguard
RefinementTest.correctly_lookup_a_shadowed_local_that_which_was_previously_refined
RefinementTest.correctly_lookup_property_whose_base_was_previously_refined
@ -319,24 +287,19 @@ RefinementTest.discriminate_tag
RefinementTest.either_number_or_string
RefinementTest.eliminate_subclasses_of_instance
RefinementTest.falsiness_of_TruthyPredicate_narrows_into_nil
RefinementTest.free_type_is_equal_to_an_lvalue
RefinementTest.impossible_type_narrow_is_not_an_error
RefinementTest.index_on_a_refined_property
RefinementTest.invert_is_truthy_constraint
RefinementTest.invert_is_truthy_constraint_ifelse_expression
RefinementTest.is_truthy_constraint
RefinementTest.is_truthy_constraint_ifelse_expression
RefinementTest.lvalue_is_equal_to_a_term
RefinementTest.lvalue_is_equal_to_another_lvalue
RefinementTest.lvalue_is_not_nil
RefinementTest.merge_should_be_fully_agnostic_of_hashmap_ordering
RefinementTest.narrow_boolean_to_true_or_false
RefinementTest.narrow_property_of_a_bounded_variable
RefinementTest.narrow_this_large_union
RefinementTest.nonoptional_type_can_narrow_to_nil_if_sense_is_true
RefinementTest.not_a_and_not_b
RefinementTest.not_a_and_not_b2
RefinementTest.not_a_or_not_b
RefinementTest.not_a_or_not_b2
RefinementTest.not_and_constraint
RefinementTest.not_t_or_some_prop_of_t
RefinementTest.or_predicate_with_truthy_predicates
@ -344,7 +307,6 @@ RefinementTest.parenthesized_expressions_are_followed_through
RefinementTest.refine_a_property_not_to_be_nil_through_an_intersection_table
RefinementTest.refine_the_correct_types_opposite_of_when_a_is_not_number_or_string
RefinementTest.refine_unknowns
RefinementTest.string_not_equal_to_string_or_nil
RefinementTest.term_is_equal_to_an_lvalue
RefinementTest.truthy_constraint_on_properties
RefinementTest.type_assertion_expr_carry_its_constraints
@ -363,11 +325,9 @@ RefinementTest.typeguard_narrows_for_functions
RefinementTest.typeguard_narrows_for_table
RefinementTest.typeguard_not_to_be_string
RefinementTest.typeguard_only_look_up_types_from_global_scope
RefinementTest.unknown_lvalue_is_not_synonymous_with_other_on_not_equal
RefinementTest.what_nonsensical_condition
RefinementTest.x_as_any_if_x_is_instance_elseif_x_is_table
RefinementTest.x_is_not_instance_or_else_not_part
RuntimeLimits.typescript_port_of_Result_type
TableTests.a_free_shape_can_turn_into_a_scalar_if_it_is_compatible
TableTests.a_free_shape_cannot_turn_into_a_scalar_if_it_is_not_compatible
TableTests.access_index_metamethod_that_returns_variadic
@ -383,7 +343,6 @@ TableTests.casting_sealed_tables_with_props_into_table_with_indexer
TableTests.casting_tables_with_props_into_table_with_indexer3
TableTests.casting_tables_with_props_into_table_with_indexer4
TableTests.checked_prop_too_early
TableTests.confusing_indexing
TableTests.defining_a_method_for_a_builtin_sealed_table_must_fail
TableTests.defining_a_method_for_a_local_sealed_table_must_fail
TableTests.defining_a_self_method_for_a_builtin_sealed_table_must_fail
@ -393,11 +352,7 @@ TableTests.dont_hang_when_trying_to_look_up_in_cyclic_metatable_index
TableTests.dont_leak_free_table_props
TableTests.dont_quantify_table_that_belongs_to_outer_scope
TableTests.dont_suggest_exact_match_keys
TableTests.error_detailed_indexer_key
TableTests.error_detailed_indexer_value
TableTests.error_detailed_metatable_prop
TableTests.error_detailed_prop
TableTests.error_detailed_prop_nested
TableTests.expected_indexer_from_table_union
TableTests.expected_indexer_value_type_extra
TableTests.expected_indexer_value_type_extra_2
@ -422,11 +377,7 @@ TableTests.infer_indexer_from_value_property_in_literal
TableTests.inferred_return_type_of_free_table
TableTests.inferring_crazy_table_should_also_be_quick
TableTests.instantiate_table_cloning_3
TableTests.instantiate_tables_at_scope_level
TableTests.leaking_bad_metatable_errors
TableTests.length_operator_intersection
TableTests.length_operator_non_table_union
TableTests.length_operator_union
TableTests.length_operator_union_errors
TableTests.less_exponential_blowup_please
TableTests.meta_add
@ -444,16 +395,15 @@ TableTests.open_table_unification_2
TableTests.pass_a_union_of_tables_to_a_function_that_requires_a_table
TableTests.pass_a_union_of_tables_to_a_function_that_requires_a_table_2
TableTests.pass_incompatible_union_to_a_generic_table_without_crashing
TableTests.passing_compatible_unions_to_a_generic_table_without_crashing
TableTests.persistent_sealed_table_is_immutable
TableTests.prop_access_on_key_whose_types_mismatches
TableTests.property_lookup_through_tabletypevar_metatable
TableTests.quantify_even_that_table_was_never_exported_at_all
TableTests.quantify_metatables_of_metatables_of_table
TableTests.quantifying_a_bound_var_works
TableTests.reasonable_error_when_adding_a_nonexistent_property_to_an_array_like_table
TableTests.result_is_always_any_if_lhs_is_any
TableTests.result_is_bool_for_equality_operators_if_lhs_is_any
TableTests.right_table_missing_key
TableTests.right_table_missing_key2
TableTests.scalar_is_a_subtype_of_a_compatible_polymorphic_shape_type
TableTests.scalar_is_not_a_subtype_of_a_compatible_polymorphic_shape_type
@ -463,14 +413,11 @@ TableTests.shared_selfs_through_metatables
TableTests.table_indexing_error_location
TableTests.table_insert_should_cope_with_optional_properties_in_nonstrict
TableTests.table_insert_should_cope_with_optional_properties_in_strict
TableTests.table_length
TableTests.table_param_row_polymorphism_2
TableTests.table_param_row_polymorphism_3
TableTests.table_simple_call
TableTests.table_subtyping_with_extra_props_dont_report_multiple_errors
TableTests.table_subtyping_with_missing_props_dont_report_multiple_errors
TableTests.table_subtyping_with_missing_props_dont_report_multiple_errors2
TableTests.table_unifies_into_map
TableTests.tables_get_names_from_their_locals
TableTests.tc_member_function
TableTests.tc_member_function_2
@ -480,10 +427,8 @@ TableTests.unification_of_unions_in_a_self_referential_type
TableTests.unifying_tables_shouldnt_uaf2
TableTests.used_colon_instead_of_dot
TableTests.used_dot_instead_of_colon
TableTests.width_subtyping
ToDot.bound_table
ToDot.function
ToDot.metatable
ToDot.table
ToString.exhaustive_toString_of_cyclic_table
ToString.function_type_with_argument_names_generic
@ -500,7 +445,6 @@ TryUnifyTests.members_of_failed_typepack_unification_are_unified_with_errorType
TryUnifyTests.result_of_failed_typepack_unification_is_constrained
TryUnifyTests.typepack_unification_should_trim_free_tails
TryUnifyTests.variadics_should_use_reversed_properly
TypeAliases.cli_38393_recursive_intersection_oom
TypeAliases.forward_declared_alias_is_not_clobbered_by_prior_unification_with_any
TypeAliases.generic_param_remap
TypeAliases.mismatched_generic_pack_type_param
@ -517,29 +461,19 @@ TypeAliases.type_alias_local_mutation
TypeAliases.type_alias_local_rename
TypeAliases.type_alias_of_an_imported_recursive_generic_type
TypeInfer.checking_should_not_ice
TypeInfer.cyclic_follow
TypeInfer.do_not_bind_a_free_table_to_a_union_containing_that_table
TypeInfer.dont_report_type_errors_within_an_AstStatError
TypeInfer.globals
TypeInfer.globals2
TypeInfer.infer_assignment_value_types_mutable_lval
TypeInfer.no_stack_overflow_from_isoptional
TypeInfer.tc_after_error_recovery_no_replacement_name_in_error
TypeInfer.tc_if_else_expressions1
TypeInfer.tc_if_else_expressions2
TypeInfer.tc_if_else_expressions_expected_type_1
TypeInfer.tc_if_else_expressions_expected_type_2
TypeInfer.tc_if_else_expressions_expected_type_3
TypeInfer.tc_if_else_expressions_type_union
TypeInfer.tc_interpolated_string_basic
TypeInfer.tc_interpolated_string_constant_type
TypeInfer.tc_interpolated_string_with_invalid_expression
TypeInfer.type_infer_recursion_limit_no_ice
TypeInferAnyError.assign_prop_to_table_by_calling_any_yields_any
TypeInferAnyError.can_get_length_of_any
TypeInferAnyError.for_in_loop_iterator_is_any2
TypeInferAnyError.length_of_error_type_does_not_produce_an_error
TypeInferAnyError.replace_every_free_type_when_unifying_a_complex_function_with_any
TypeInferAnyError.union_of_types_regression_test
TypeInferClasses.call_base_method
TypeInferClasses.call_instance_method
@ -549,39 +483,23 @@ TypeInferClasses.class_type_mismatch_with_name_conflict
TypeInferClasses.classes_can_have_overloaded_operators
TypeInferClasses.classes_without_overloaded_operators_cannot_be_added
TypeInferClasses.detailed_class_unification_error
TypeInferClasses.function_arguments_are_covariant
TypeInferClasses.higher_order_function_return_type_is_not_contravariant
TypeInferClasses.higher_order_function_return_values_are_covariant
TypeInferClasses.higher_order_function_arguments_are_contravariant
TypeInferClasses.optional_class_field_access_error
TypeInferClasses.table_class_unification_reports_sane_errors_for_missing_properties
TypeInferClasses.table_indexers_are_invariant
TypeInferClasses.table_properties_are_invariant
TypeInferClasses.warn_when_prop_almost_matches
TypeInferClasses.we_can_report_when_someone_is_trying_to_use_a_table_rather_than_a_class
TypeInferFunctions.another_recursive_local_function
TypeInferFunctions.call_o_with_another_argument_after_foo_was_quantified
TypeInferFunctions.calling_function_with_anytypepack_doesnt_leak_free_types
TypeInferFunctions.calling_function_with_incorrect_argument_type_yields_errors_spanning_argument
TypeInferFunctions.complicated_return_types_require_an_explicit_annotation
TypeInferFunctions.dont_give_other_overloads_message_if_only_one_argument_matching_overload_exists
TypeInferFunctions.dont_infer_parameter_types_for_functions_from_their_call_site
TypeInferFunctions.duplicate_functions_with_different_signatures_not_allowed_in_nonstrict
TypeInferFunctions.error_detailed_function_mismatch_arg
TypeInferFunctions.error_detailed_function_mismatch_arg_count
TypeInferFunctions.error_detailed_function_mismatch_ret
TypeInferFunctions.error_detailed_function_mismatch_ret_count
TypeInferFunctions.error_detailed_function_mismatch_ret_mult
TypeInferFunctions.free_is_not_bound_to_unknown
TypeInferFunctions.func_expr_doesnt_leak_free
TypeInferFunctions.function_cast_error_uses_correct_language
TypeInferFunctions.function_decl_non_self_sealed_overwrite
TypeInferFunctions.function_decl_non_self_sealed_overwrite_2
TypeInferFunctions.function_decl_non_self_unsealed_overwrite
TypeInferFunctions.function_does_not_return_enough_values
TypeInferFunctions.function_statement_sealed_table_assignment_through_indexer
TypeInferFunctions.higher_order_function_2
TypeInferFunctions.higher_order_function_4
TypeInferFunctions.ignored_return_values
TypeInferFunctions.improved_function_arg_mismatch_error_nonstrict
TypeInferFunctions.improved_function_arg_mismatch_errors
TypeInferFunctions.inconsistent_higher_order_function
@ -589,15 +507,11 @@ TypeInferFunctions.inconsistent_return_types
TypeInferFunctions.infer_anonymous_function_arguments
TypeInferFunctions.infer_return_type_from_selected_overload
TypeInferFunctions.infer_that_function_does_not_return_a_table
TypeInferFunctions.it_is_ok_not_to_supply_enough_retvals
TypeInferFunctions.list_all_overloads_if_no_overload_takes_given_argument_count
TypeInferFunctions.list_only_alternative_overloads_that_match_argument_count
TypeInferFunctions.no_lossy_function_type
TypeInferFunctions.occurs_check_failure_in_function_return_type
TypeInferFunctions.quantify_constrained_types
TypeInferFunctions.record_matching_overload
TypeInferFunctions.recursive_function
TypeInferFunctions.recursive_local_function
TypeInferFunctions.report_exiting_without_return_nonstrict
TypeInferFunctions.report_exiting_without_return_strict
TypeInferFunctions.return_type_by_overload
@ -608,11 +522,7 @@ TypeInferFunctions.too_few_arguments_variadic_generic2
TypeInferFunctions.too_many_arguments
TypeInferFunctions.too_many_return_values
TypeInferFunctions.vararg_function_is_quantified
TypeInferLoops.for_in_loop_error_on_factory_not_returning_the_right_amount_of_values
TypeInferLoops.for_in_loop_with_custom_iterator
TypeInferLoops.for_in_loop_with_next
TypeInferLoops.for_in_with_just_one_iterator_is_ok
TypeInferLoops.loop_iter_iter_metamethod
TypeInferLoops.loop_iter_no_indexer_nonstrict
TypeInferLoops.loop_iter_trailing_nil
TypeInferLoops.loop_typecheck_crash_on_empty_optional
@ -631,7 +541,6 @@ TypeInferOOP.dont_suggest_using_colon_rather_than_dot_if_it_wont_help_2
TypeInferOOP.dont_suggest_using_colon_rather_than_dot_if_not_defined_with_colon
TypeInferOOP.inferred_methods_of_free_tables_have_the_same_level_as_the_enclosing_table
TypeInferOOP.inferring_hundreds_of_self_calls_should_not_suffocate_memory
TypeInferOOP.methods_are_topologically_sorted
TypeInferOperators.and_adds_boolean
TypeInferOperators.and_adds_boolean_no_superfluous_union
TypeInferOperators.and_binexps_dont_unify
@ -641,13 +550,10 @@ TypeInferOperators.cannot_compare_tables_that_do_not_have_the_same_metatable
TypeInferOperators.cannot_indirectly_compare_types_that_do_not_have_a_metatable
TypeInferOperators.cannot_indirectly_compare_types_that_do_not_offer_overloaded_ordering_operators
TypeInferOperators.cli_38355_recursive_union
TypeInferOperators.compare_numbers
TypeInferOperators.compare_strings
TypeInferOperators.compound_assign_mismatch_metatable
TypeInferOperators.compound_assign_mismatch_op
TypeInferOperators.compound_assign_mismatch_result
TypeInferOperators.concat_op_on_free_lhs_and_string_rhs
TypeInferOperators.concat_op_on_string_lhs_and_free_rhs
TypeInferOperators.disallow_string_and_types_without_metatables_from_arithmetic_binary_ops
TypeInferOperators.dont_strip_nil_from_rhs_or_operator
TypeInferOperators.equality_operations_succeed_if_any_union_branch_succeeds
@ -655,18 +561,12 @@ TypeInferOperators.error_on_invalid_operand_types_to_relational_operators
TypeInferOperators.error_on_invalid_operand_types_to_relational_operators2
TypeInferOperators.expected_types_through_binary_and
TypeInferOperators.expected_types_through_binary_or
TypeInferOperators.in_nonstrict_mode_strip_nil_from_intersections_when_considering_relational_operators
TypeInferOperators.infer_any_in_all_modes_when_lhs_is_unknown
TypeInferOperators.operator_eq_operands_are_not_subtypes_of_each_other_but_has_overlap
TypeInferOperators.operator_eq_verifies_types_do_intersect
TypeInferOperators.or_joins_types
TypeInferOperators.or_joins_types_with_no_extras
TypeInferOperators.primitive_arith_no_metatable
TypeInferOperators.primitive_arith_no_metatable_with_follows
TypeInferOperators.primitive_arith_possible_metatable
TypeInferOperators.produce_the_correct_error_message_when_comparing_a_table_with_a_metatable_with_one_that_does_not
TypeInferOperators.refine_and_or
TypeInferOperators.some_primitive_binary_ops
TypeInferOperators.strict_binary_op_where_lhs_unknown
TypeInferOperators.strip_nil_from_lhs_or_operator
TypeInferOperators.strip_nil_from_lhs_or_operator2
@ -676,13 +576,11 @@ TypeInferOperators.typecheck_unary_len_error
TypeInferOperators.typecheck_unary_minus
TypeInferOperators.typecheck_unary_minus_error
TypeInferOperators.unary_not_is_boolean
TypeInferOperators.unknown_type_in_comparison
TypeInferOperators.UnknownGlobalCompoundAssign
TypeInferPrimitives.CheckMethodsOfNumber
TypeInferPrimitives.singleton_types
TypeInferPrimitives.string_function_other
TypeInferPrimitives.string_index
TypeInferPrimitives.string_length
TypeInferPrimitives.string_method
TypeInferUnknownNever.assign_to_global_which_is_never
TypeInferUnknownNever.assign_to_local_which_is_never
@ -692,9 +590,7 @@ TypeInferUnknownNever.call_never
TypeInferUnknownNever.dont_unify_operands_if_one_of_the_operand_is_never_in_any_ordering_operators
TypeInferUnknownNever.index_on_union_of_tables_for_properties_that_is_never
TypeInferUnknownNever.index_on_union_of_tables_for_properties_that_is_sorta_never
TypeInferUnknownNever.length_of_never
TypeInferUnknownNever.math_operators_and_never
TypeInferUnknownNever.type_packs_containing_never_is_itself_uninhabitable
TypeInferUnknownNever.type_packs_containing_never_is_itself_uninhabitable2
TypeInferUnknownNever.unary_minus_of_never
TypePackTests.higher_order_function
@ -718,18 +614,17 @@ TypePackTests.type_alias_type_packs
TypePackTests.type_alias_type_packs_errors
TypePackTests.type_alias_type_packs_import
TypePackTests.type_alias_type_packs_nested
TypePackTests.type_pack_hidden_free_tail_infinite_growth
TypePackTests.type_pack_type_parameters
TypePackTests.unify_variadic_tails_in_arguments
TypePackTests.unify_variadic_tails_in_arguments_free
TypePackTests.varargs_inference_through_multiple_scopes
TypePackTests.variadic_packs
TypeSingletons.bool_singleton_subtype
TypeSingletons.bool_singletons
TypeSingletons.bool_singletons_mismatch
TypeSingletons.enums_using_singletons
TypeSingletons.enums_using_singletons_mismatch
TypeSingletons.enums_using_singletons_subtyping
TypeSingletons.error_detailed_tagged_union_mismatch_bool
TypeSingletons.error_detailed_tagged_union_mismatch_string
TypeSingletons.function_call_with_singletons
TypeSingletons.function_call_with_singletons_mismatch
TypeSingletons.if_then_else_expression_singleton_options
TypeSingletons.indexing_on_string_singletons
@ -752,7 +647,6 @@ TypeSingletons.widening_happens_almost_everywhere
TypeSingletons.widening_happens_almost_everywhere_except_for_tables
UnionTypes.error_detailed_optional
UnionTypes.error_detailed_union_all
UnionTypes.error_detailed_union_part
UnionTypes.error_takes_optional_arguments
UnionTypes.index_on_a_union_type_with_missing_property
UnionTypes.index_on_a_union_type_with_mixed_types
@ -771,6 +665,4 @@ UnionTypes.optional_union_follow
UnionTypes.optional_union_functions
UnionTypes.optional_union_members
UnionTypes.optional_union_methods
UnionTypes.return_types_can_be_disjoint
UnionTypes.table_union_write_indirect
UnionTypes.union_equality_comparisons

100
tools/lvmexecute_split.py Normal file
View File

@ -0,0 +1,100 @@
#!/usr/bin/python
# This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
# This code can be used to split the lvmexecute.cpp VM switch into separate functions for use as native code generation fallbacks
import sys
import re

input = sys.stdin.readlines()

inst = ""

header = """// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This file was generated by 'tools/lvmexecute_split.py' script, do not modify it by hand
#pragma once

#include <stdint.h>

struct lua_State;
struct Closure;
typedef uint32_t Instruction;
typedef struct lua_TValue TValue;
typedef TValue* StkId;

"""

source = """// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
// This file was generated by 'tools/lvmexecute_split.py' script, do not modify it by hand
#include "Fallbacks.h"
#include "FallbacksProlog.h"

"""

function = ""
state = 0

# parse with the state machine
for line in input:
    # state 0: find the start of an instruction
    if state == 0:
        match = re.match(r"\s+VM_CASE\((LOP_[A-Z_0-9]+)\)", line)

        if match:
            inst = match[1]
            signature = "const Instruction* execute_" + inst + "(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)"
            header += signature + ";\n"
            function = signature + "\n"
            state = 1

    # state 1: find the end of an instruction
    elif state == 1:
        # remove jumps back into the native code
        if line == "#if LUA_CUSTOM_EXECUTION\n":
            state = 2
            continue

        # strip one level of switch-case indentation from body lines
        if line[0] == ' ':
            finalline = line[12:-1] + "\n"
        else:
            finalline = line

        finalline = finalline.replace("VM_NEXT();", "return pc;")
        finalline = finalline.replace("goto exit;", "return NULL;")
        finalline = finalline.replace("return;", "return NULL;")

        function += finalline
        match = re.match(" }", line)

        if match:
            # break is not supported
            if inst == "LOP_BREAK":
                function = "const Instruction* execute_" + inst + "(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)\n"
                function += "{\n    LUAU_ASSERT(!\"Unsupported deprecated opcode\");\n    LUAU_UNREACHABLE();\n}\n"
            # handle fallthrough
            elif inst == "LOP_NAMECALL":
                function = function[:-len(finalline)]
                function += "    return pc;\n}\n"

            source += function + "\n"
            state = 0

    # state 2: skip LUA_CUSTOM_EXECUTION code blocks
    elif state == 2:
        if line == "#endif\n":
            state = 3
            continue

    # state 3: skip the extra line after the #endif
    elif state == 3:
        state = 1

# make sure we found the ending
assert state == 0

with open("Fallbacks.h", "w") as fp:
    fp.writelines(header)

with open("Fallbacks.cpp", "w") as fp:
    fp.writelines(source)
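
For reference, a sketch of the transformation this script performs. It is illustrative only: LOP_FOO is a hypothetical opcode name, and the invocation assumes the stock repo layout; what is grounded in the script itself is that it reads the interpreter source from stdin and writes Fallbacks.h and Fallbacks.cpp into the working directory:

    python tools/lvmexecute_split.py < VM/src/lvmexecute.cpp

Given a switch case in lvmexecute.cpp of the shape

    VM_CASE(LOP_FOO)
    {
        /* instruction body */
        VM_NEXT();
    }

the script emits a declaration into Fallbacks.h and a standalone fallback definition into Fallbacks.cpp:

    const Instruction* execute_LOP_FOO(lua_State* L, const Instruction* pc, Closure* cl, StkId base, TValue* k)
    {
        /* instruction body */
        return pc;
    }

VM_NEXT() is rewritten to return pc; so the native caller can resume from the returned program counter, while goto exit; and return; are rewritten to return NULL; to signal that execution should leave the interpreter loop entirely.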