Sync to upstream/release/584 (#977)

* Added support for async typechecking cancellation using a token passed
through frontend options
* Added luaC_enumheap for building debug tools that need a graph of the Luau
heap

In our new typechecker:
* Errors are now suppressed when checking property lookup of
error-suppressing unions

In our native code generation (JIT):
* Fixed unhandled value type in NOT_ANY lowering
* Fast-call tag checks will exit to the VM on failure, instead of relying on
a native fallback
* Added vector type to the type information
* Eliminated redundant direct jumps across dead blocks
* Debugger APIs are now disabled for call frames executing natively
* Implemented support for unwind registration on macOS 14
vegorov-rbx 2023-07-14 11:08:53 -07:00 committed by GitHub
parent e25de95445
commit 218159140c
71 changed files with 1854 additions and 527 deletions

View File

@ -0,0 +1,24 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include <atomic>
namespace Luau
{
struct FrontendCancellationToken
{
void cancel()
{
cancelled.store(true);
}
bool requested()
{
return cancelled.load();
}
std::atomic<bool> cancelled;
};
} // namespace Luau
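
A minimal sketch of how a host might wire this token into the frontend. It assumes an already configured Luau::Frontend named frontend and a hypothetical module name; the cancellationToken field on FrontendOptions and the empty CheckResult on cancellation are the behaviors added in this commit, gated behind FFlag::LuauTypecheckCancellation.

#include "Luau/Frontend.h"
#include <chrono>
#include <memory>
#include <thread>

void checkWithWatchdog(Luau::Frontend& frontend)
{
    auto token = std::make_shared<Luau::FrontendCancellationToken>();

    Luau::FrontendOptions options;
    options.cancellationToken = token;

    // Hypothetical watchdog: cancel typechecking if it runs longer than 100ms.
    std::thread watchdog([token] {
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        token->cancel(); // the type checker polls token->requested() and stops
    });

    // Returns an empty CheckResult rather than partial results if cancellation was observed.
    Luau::CheckResult result = frontend.check("game/main", options);
    watchdog.join();
}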

View File

@ -21,8 +21,8 @@ struct DiffPathNode
Kind kind;
// non-null when TableProperty
std::optional<Name> tableProperty;
// non-null when FunctionArgument, FunctionReturn, Union, or Intersection (i.e. anonymous fields)
std::optional<int> index;
// non-null when FunctionArgument (unless variadic arg), FunctionReturn (unless variadic arg), Union, or Intersection (i.e. anonymous fields)
std::optional<size_t> index;
/**
* Do not use for leaf nodes
@ -32,7 +32,7 @@ struct DiffPathNode
{
}
DiffPathNode(Kind kind, std::optional<Name> tableProperty, std::optional<int> index)
DiffPathNode(Kind kind, std::optional<Name> tableProperty, std::optional<size_t> index)
: kind(kind)
, tableProperty(tableProperty)
, index(index)
@ -42,19 +42,35 @@ struct DiffPathNode
std::string toString() const;
static DiffPathNode constructWithTableProperty(Name tableProperty);
static DiffPathNode constructWithKindAndIndex(Kind kind, size_t index);
static DiffPathNode constructWithKind(Kind kind);
};
struct DiffPathNodeLeaf
{
std::optional<TypeId> ty;
std::optional<Name> tableProperty;
DiffPathNodeLeaf(std::optional<TypeId> ty, std::optional<Name> tableProperty)
std::optional<int> minLength;
bool isVariadic;
DiffPathNodeLeaf(std::optional<TypeId> ty, std::optional<Name> tableProperty, std::optional<int> minLength, bool isVariadic)
: ty(ty)
, tableProperty(tableProperty)
, minLength(minLength)
, isVariadic(isVariadic)
{
}
static DiffPathNodeLeaf detailsNormal(TypeId ty);
static DiffPathNodeLeaf detailsTableProperty(TypeId ty, Name tableProperty);
static DiffPathNodeLeaf detailsLength(int minLength, bool isVariadic);
static DiffPathNodeLeaf nullopts();
};
struct DiffPath
{
std::vector<DiffPathNode> path;

View File

@ -357,13 +357,13 @@ struct PackWhereClauseNeeded
bool operator==(const PackWhereClauseNeeded& rhs) const;
};
using TypeErrorData =
Variant<TypeMismatch, UnknownSymbol, UnknownProperty, NotATable, CannotExtendTable, OnlyTablesCanHaveMethods, DuplicateTypeDefinition,
CountMismatch, FunctionDoesNotTakeSelf, FunctionRequiresSelf, OccursCheckFailed, UnknownRequire, IncorrectGenericParameterCount, SyntaxError,
CodeTooComplex, UnificationTooComplex, UnknownPropButFoundLikeProp, GenericError, InternalError, CannotCallNonFunction, ExtraInformation,
DeprecatedApiUsed, ModuleHasCyclicDependency, IllegalRequire, FunctionExitsWithoutReturning, DuplicateGenericParameter,
CannotInferBinaryOperation, MissingProperties, SwappedGenericTypeParameter, OptionalValueAccess, MissingUnionProperty, TypesAreUnrelated,
NormalizationTooComplex, TypePackMismatch, DynamicPropertyLookupOnClassesUnsafe, UninhabitedTypeFamily, UninhabitedTypePackFamily, WhereClauseNeeded, PackWhereClauseNeeded>;
using TypeErrorData = Variant<TypeMismatch, UnknownSymbol, UnknownProperty, NotATable, CannotExtendTable, OnlyTablesCanHaveMethods,
DuplicateTypeDefinition, CountMismatch, FunctionDoesNotTakeSelf, FunctionRequiresSelf, OccursCheckFailed, UnknownRequire,
IncorrectGenericParameterCount, SyntaxError, CodeTooComplex, UnificationTooComplex, UnknownPropButFoundLikeProp, GenericError, InternalError,
CannotCallNonFunction, ExtraInformation, DeprecatedApiUsed, ModuleHasCyclicDependency, IllegalRequire, FunctionExitsWithoutReturning,
DuplicateGenericParameter, CannotInferBinaryOperation, MissingProperties, SwappedGenericTypeParameter, OptionalValueAccess, MissingUnionProperty,
TypesAreUnrelated, NormalizationTooComplex, TypePackMismatch, DynamicPropertyLookupOnClassesUnsafe, UninhabitedTypeFamily,
UninhabitedTypePackFamily, WhereClauseNeeded, PackWhereClauseNeeded>;
struct TypeErrorSummary
{

View File

@ -29,6 +29,7 @@ struct ModuleResolver;
struct ParseResult;
struct HotComment;
struct BuildQueueItem;
struct FrontendCancellationToken;
struct LoadDefinitionFileResult
{
@ -96,6 +97,8 @@ struct FrontendOptions
std::optional<unsigned> randomizeConstraintResolutionSeed;
std::optional<LintOptions> enabledLintWarnings;
std::shared_ptr<FrontendCancellationToken> cancellationToken;
};
struct CheckResult
@ -191,6 +194,7 @@ private:
std::optional<double> finishTime;
std::optional<int> instantiationChildLimit;
std::optional<int> unifierIterationLimit;
std::shared_ptr<FrontendCancellationToken> cancellationToken;
};
ModulePtr check(const SourceModule& sourceModule, Mode mode, std::vector<RequireCycle> requireCycles, std::optional<ScopePtr> environmentScope,

View File

@ -16,10 +16,10 @@ struct InsertionOrderedMap
{
static_assert(std::is_trivially_copyable_v<K>, "key must be trivially copyable");
private:
private:
using vec = std::vector<std::pair<K, V>>;
public:
public:
using iterator = typename vec::iterator;
using const_iterator = typename vec::const_iterator;
@ -131,4 +131,4 @@ private:
std::unordered_map<K, size_t> indices;
};
}
} // namespace Luau

View File

@ -112,6 +112,7 @@ struct Module
Mode mode;
SourceCode::Type type;
bool timeout = false;
bool cancelled = false;
TypePackId returnType = nullptr;
std::unordered_map<Name, TypeFun> exportedTypeBindings;

View File

@ -139,6 +139,6 @@ std::string dump(const std::shared_ptr<Scope>& scope, const char* name);
std::string generateName(size_t n);
std::string toString(const Position& position);
std::string toString(const Location& location);
std::string toString(const Location& location, int offset = 0, bool useBegin = true);
} // namespace Luau

View File

@ -12,6 +12,7 @@ namespace Luau
struct DcrLogger;
struct BuiltinTypes;
void check(NotNull<BuiltinTypes> builtinTypes, NotNull<struct UnifierSharedState> sharedState, DcrLogger* logger, const SourceModule& sourceModule, Module* module);
void check(NotNull<BuiltinTypes> builtinTypes, NotNull<struct UnifierSharedState> sharedState, DcrLogger* logger, const SourceModule& sourceModule,
Module* module);
} // namespace Luau

View File

@ -25,6 +25,7 @@ namespace Luau
struct Scope;
struct TypeChecker;
struct ModuleResolver;
struct FrontendCancellationToken;
using Name = std::string;
using ScopePtr = std::shared_ptr<Scope>;
@ -64,6 +65,15 @@ public:
}
};
class UserCancelError : public InternalCompilerError
{
public:
explicit UserCancelError(const std::string& moduleName)
: InternalCompilerError("Analysis has been cancelled by user", moduleName)
{
}
};
struct GlobalTypes
{
GlobalTypes(NotNull<BuiltinTypes> builtinTypes);
@ -262,6 +272,7 @@ public:
[[noreturn]] void ice(const std::string& message, const Location& location);
[[noreturn]] void ice(const std::string& message);
[[noreturn]] void throwTimeLimitError();
[[noreturn]] void throwUserCancelError();
ScopePtr childFunctionScope(const ScopePtr& parent, const Location& location, int subLevel = 0);
ScopePtr childScope(const ScopePtr& parent, const Location& location);
@ -387,6 +398,8 @@ public:
std::optional<int> instantiationChildLimit;
std::optional<int> unifierIterationLimit;
std::shared_ptr<FrontendCancellationToken> cancellationToken;
public:
const TypeId nilType;
const TypeId numberType;

View File

@ -76,8 +76,7 @@ struct Unifier
std::vector<TypeId> blockedTypes;
std::vector<TypePackId> blockedTypePacks;
Unifier(
NotNull<Normalizer> normalizer, NotNull<Scope> scope, const Location& location, Variance variance, TxnLog* parentLog = nullptr);
Unifier(NotNull<Normalizer> normalizer, NotNull<Scope> scope, const Location& location, Variance variance, TxnLog* parentLog = nullptr);
// Configure the Unifier to test for scope subsumption via embedded Scope
// pointers rather than TypeLevels.

View File

@ -7,7 +7,6 @@
#include "Luau/Unifiable.h"
LUAU_FASTFLAG(DebugLuauCopyBeforeNormalizing)
LUAU_FASTFLAG(LuauClonePublicInterfaceLess2)
LUAU_FASTFLAG(DebugLuauReadWriteProperties)
LUAU_FASTINTVARIABLE(LuauTypeCloneRecursionLimit, 300)

View File

@ -3,7 +3,9 @@
#include "Luau/Error.h"
#include "Luau/ToString.h"
#include "Luau/Type.h"
#include "Luau/TypePack.h"
#include <optional>
#include <string>
namespace Luau
{
@ -18,6 +20,20 @@ std::string DiffPathNode::toString() const
return *tableProperty;
break;
}
case DiffPathNode::Kind::FunctionArgument:
{
if (!index.has_value())
return "Arg[Variadic]";
// Add 1 because Lua is 1-indexed
return "Arg[" + std::to_string(*index + 1) + "]";
}
case DiffPathNode::Kind::FunctionReturn:
{
if (!index.has_value())
return "Ret[Variadic]";
// Add 1 because Lua is 1-indexed
return "Ret[" + std::to_string(*index + 1) + "]";
}
default:
{
throw InternalCompilerError{"DiffPathNode::toString is not exhaustive"};
@ -30,9 +46,34 @@ DiffPathNode DiffPathNode::constructWithTableProperty(Name tableProperty)
return DiffPathNode{DiffPathNode::Kind::TableProperty, tableProperty, std::nullopt};
}
DiffPathNode DiffPathNode::constructWithKindAndIndex(Kind kind, size_t index)
{
return DiffPathNode{kind, std::nullopt, index};
}
DiffPathNode DiffPathNode::constructWithKind(Kind kind)
{
return DiffPathNode{kind, std::nullopt, std::nullopt};
}
DiffPathNodeLeaf DiffPathNodeLeaf::detailsNormal(TypeId ty)
{
return DiffPathNodeLeaf{ty, std::nullopt, std::nullopt, false};
}
DiffPathNodeLeaf DiffPathNodeLeaf::detailsTableProperty(TypeId ty, Name tableProperty)
{
return DiffPathNodeLeaf{ty, tableProperty, std::nullopt, false};
}
DiffPathNodeLeaf DiffPathNodeLeaf::detailsLength(int minLength, bool isVariadic)
{
return DiffPathNodeLeaf{std::nullopt, std::nullopt, minLength, isVariadic};
}
DiffPathNodeLeaf DiffPathNodeLeaf::nullopts()
{
return DiffPathNodeLeaf{std::nullopt, std::nullopt};
return DiffPathNodeLeaf{std::nullopt, std::nullopt, std::nullopt, false};
}
std::string DiffPath::toString(bool prependDot) const
@ -79,9 +120,21 @@ std::string DiffError::toStringALeaf(std::string rootName, const DiffPathNodeLea
}
throw InternalCompilerError{"Both leaf.ty and otherLeaf.ty is nullopt"};
}
case DiffError::Kind::LengthMismatchInFnArgs:
{
if (!leaf.minLength.has_value())
throw InternalCompilerError{"leaf.minLength is nullopt"};
return pathStr + " takes " + std::to_string(*leaf.minLength) + (leaf.isVariadic ? " or more" : "") + " arguments";
}
case DiffError::Kind::LengthMismatchInFnRets:
{
if (!leaf.minLength.has_value())
throw InternalCompilerError{"leaf.minLength is nullopt"};
return pathStr + " returns " + std::to_string(*leaf.minLength) + (leaf.isVariadic ? " or more" : "") + " values";
}
default:
{
throw InternalCompilerError{"DiffPath::toStringWithLeaf is not exhaustive"};
throw InternalCompilerError{"DiffPath::toStringALeaf is not exhaustive"};
}
}
}
@ -139,6 +192,14 @@ static DifferResult diffUsingEnv(DifferEnvironment& env, TypeId left, TypeId rig
static DifferResult diffTable(DifferEnvironment& env, TypeId left, TypeId right);
static DifferResult diffPrimitive(DifferEnvironment& env, TypeId left, TypeId right);
static DifferResult diffSingleton(DifferEnvironment& env, TypeId left, TypeId right);
static DifferResult diffFunction(DifferEnvironment& env, TypeId left, TypeId right);
/**
* The last argument gives context info on which complex type contained the TypePack.
*/
static DifferResult diffTpi(DifferEnvironment& env, DiffError::Kind possibleNonNormalErrorKind, TypePackId left, TypePackId right);
static DifferResult diffCanonicalTpShape(DifferEnvironment& env, DiffError::Kind possibleNonNormalErrorKind,
const std::pair<std::vector<TypeId>, std::optional<TypePackId>>& left, const std::pair<std::vector<TypeId>, std::optional<TypePackId>>& right);
static DifferResult diffHandleFlattenedTail(DifferEnvironment& env, DiffError::Kind possibleNonNormalErrorKind, TypePackId left, TypePackId right);
static DifferResult diffTable(DifferEnvironment& env, TypeId left, TypeId right)
{
@ -152,7 +213,7 @@ static DifferResult diffTable(DifferEnvironment& env, TypeId left, TypeId right)
// left has a field the right doesn't
return DifferResult{DiffError{
DiffError::Kind::MissingProperty,
DiffPathNodeLeaf{value.type(), field},
DiffPathNodeLeaf::detailsTableProperty(value.type(), field),
DiffPathNodeLeaf::nullopts(),
getDevFixFriendlyName(env.rootLeft),
getDevFixFriendlyName(env.rootRight),
@ -164,8 +225,9 @@ static DifferResult diffTable(DifferEnvironment& env, TypeId left, TypeId right)
if (leftTable->props.find(field) == leftTable->props.end())
{
// right has a field the left doesn't
return DifferResult{DiffError{DiffError::Kind::MissingProperty, DiffPathNodeLeaf::nullopts(), DiffPathNodeLeaf{value.type(), field},
getDevFixFriendlyName(env.rootLeft), getDevFixFriendlyName(env.rootRight)}};
return DifferResult{
DiffError{DiffError::Kind::MissingProperty, DiffPathNodeLeaf::nullopts(), DiffPathNodeLeaf::detailsTableProperty(value.type(), field),
getDevFixFriendlyName(env.rootLeft), getDevFixFriendlyName(env.rootRight)}};
}
}
// left and right have the same set of keys
@ -191,8 +253,8 @@ static DifferResult diffPrimitive(DifferEnvironment& env, TypeId left, TypeId ri
{
return DifferResult{DiffError{
DiffError::Kind::Normal,
DiffPathNodeLeaf{left, std::nullopt},
DiffPathNodeLeaf{right, std::nullopt},
DiffPathNodeLeaf::detailsNormal(left),
DiffPathNodeLeaf::detailsNormal(right),
getDevFixFriendlyName(env.rootLeft),
getDevFixFriendlyName(env.rootRight),
}};
@ -209,8 +271,8 @@ static DifferResult diffSingleton(DifferEnvironment& env, TypeId left, TypeId ri
{
return DifferResult{DiffError{
DiffError::Kind::Normal,
DiffPathNodeLeaf{left, std::nullopt},
DiffPathNodeLeaf{right, std::nullopt},
DiffPathNodeLeaf::detailsNormal(left),
DiffPathNodeLeaf::detailsNormal(right),
getDevFixFriendlyName(env.rootLeft),
getDevFixFriendlyName(env.rootRight),
}};
@ -218,6 +280,17 @@ static DifferResult diffSingleton(DifferEnvironment& env, TypeId left, TypeId ri
return DifferResult{};
}
static DifferResult diffFunction(DifferEnvironment& env, TypeId left, TypeId right)
{
const FunctionType* leftFunction = get<FunctionType>(left);
const FunctionType* rightFunction = get<FunctionType>(right);
DifferResult differResult = diffTpi(env, DiffError::Kind::LengthMismatchInFnArgs, leftFunction->argTypes, rightFunction->argTypes);
if (differResult.diffError.has_value())
return differResult;
return diffTpi(env, DiffError::Kind::LengthMismatchInFnRets, leftFunction->retTypes, rightFunction->retTypes);
}
static DifferResult diffUsingEnv(DifferEnvironment& env, TypeId left, TypeId right)
{
left = follow(left);
@ -227,8 +300,8 @@ static DifferResult diffUsingEnv(DifferEnvironment& env, TypeId left, TypeId rig
{
return DifferResult{DiffError{
DiffError::Kind::Normal,
DiffPathNodeLeaf{left, std::nullopt},
DiffPathNodeLeaf{right, std::nullopt},
DiffPathNodeLeaf::detailsNormal(left),
DiffPathNodeLeaf::detailsNormal(right),
getDevFixFriendlyName(env.rootLeft),
getDevFixFriendlyName(env.rootRight),
}};
@ -244,6 +317,11 @@ static DifferResult diffUsingEnv(DifferEnvironment& env, TypeId left, TypeId rig
{
return diffSingleton(env, left, right);
}
else if (auto la = get<AnyType>(left))
{
// Both left and right must be Any if either is Any for them to be equal!
return DifferResult{};
}
throw InternalCompilerError{"Unimplemented Simple TypeId variant for diffing"};
}
@ -254,9 +332,116 @@ static DifferResult diffUsingEnv(DifferEnvironment& env, TypeId left, TypeId rig
{
return diffTable(env, left, right);
}
if (auto lf = get<FunctionType>(left))
{
return diffFunction(env, left, right);
}
throw InternalCompilerError{"Unimplemented non-simple TypeId variant for diffing"};
}
static DifferResult diffTpi(DifferEnvironment& env, DiffError::Kind possibleNonNormalErrorKind, TypePackId left, TypePackId right)
{
left = follow(left);
right = follow(right);
// Canonicalize
std::pair<std::vector<TypeId>, std::optional<TypePackId>> leftFlatTpi = flatten(left);
std::pair<std::vector<TypeId>, std::optional<TypePackId>> rightFlatTpi = flatten(right);
// Check for shape equality
DifferResult diffResult = diffCanonicalTpShape(env, possibleNonNormalErrorKind, leftFlatTpi, rightFlatTpi);
if (diffResult.diffError.has_value())
{
return diffResult;
}
// Left and Right have the same shape
for (size_t i = 0; i < leftFlatTpi.first.size(); i++)
{
DifferResult differResult = diffUsingEnv(env, leftFlatTpi.first[i], rightFlatTpi.first[i]);
if (!differResult.diffError.has_value())
continue;
switch (possibleNonNormalErrorKind)
{
case DiffError::Kind::LengthMismatchInFnArgs:
{
differResult.wrapDiffPath(DiffPathNode::constructWithKindAndIndex(DiffPathNode::Kind::FunctionArgument, i));
return differResult;
}
case DiffError::Kind::LengthMismatchInFnRets:
{
differResult.wrapDiffPath(DiffPathNode::constructWithKindAndIndex(DiffPathNode::Kind::FunctionReturn, i));
return differResult;
}
default:
{
throw InternalCompilerError{"Unhandled Tpi diffing case with same shape"};
}
}
}
if (!leftFlatTpi.second.has_value())
return DifferResult{};
return diffHandleFlattenedTail(env, possibleNonNormalErrorKind, *leftFlatTpi.second, *rightFlatTpi.second);
}
static DifferResult diffCanonicalTpShape(DifferEnvironment& env, DiffError::Kind possibleNonNormalErrorKind,
const std::pair<std::vector<TypeId>, std::optional<TypePackId>>& left, const std::pair<std::vector<TypeId>, std::optional<TypePackId>>& right)
{
if (left.first.size() == right.first.size() && left.second.has_value() == right.second.has_value())
return DifferResult{};
return DifferResult{DiffError{
possibleNonNormalErrorKind,
DiffPathNodeLeaf::detailsLength(int(left.first.size()), left.second.has_value()),
DiffPathNodeLeaf::detailsLength(int(right.first.size()), right.second.has_value()),
getDevFixFriendlyName(env.rootLeft),
getDevFixFriendlyName(env.rootRight),
}};
}
static DifferResult diffHandleFlattenedTail(DifferEnvironment& env, DiffError::Kind possibleNonNormalErrorKind, TypePackId left, TypePackId right)
{
left = follow(left);
right = follow(right);
if (left->ty.index() != right->ty.index())
{
throw InternalCompilerError{"Unhandled case where the tail of 2 normalized typepacks have different variants"};
}
// Both left and right are the same variant
if (auto lv = get<VariadicTypePack>(left))
{
auto rv = get<VariadicTypePack>(right);
DifferResult differResult = diffUsingEnv(env, lv->ty, rv->ty);
if (!differResult.diffError.has_value())
return DifferResult{};
switch (possibleNonNormalErrorKind)
{
case DiffError::Kind::LengthMismatchInFnArgs:
{
differResult.wrapDiffPath(DiffPathNode::constructWithKind(DiffPathNode::Kind::FunctionArgument));
return differResult;
}
case DiffError::Kind::LengthMismatchInFnRets:
{
differResult.wrapDiffPath(DiffPathNode::constructWithKind(DiffPathNode::Kind::FunctionReturn));
return differResult;
}
default:
{
throw InternalCompilerError{"Unhandled flattened tail case for VariadicTypePack"};
}
}
}
throw InternalCompilerError{"Unhandled tail type pack variant for flattened tails"};
}
DifferResult diff(TypeId ty1, TypeId ty2)
{
DifferEnvironment differEnv{ty1, ty2};
@ -267,7 +452,7 @@ bool isSimple(TypeId ty)
{
ty = follow(ty);
// TODO: think about GenericType, etc.
return get<PrimitiveType>(ty) || get<SingletonType>(ty);
return get<PrimitiveType>(ty) || get<SingletonType>(ty) || get<AnyType>(ty);
}
} // namespace Luau
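
For context, a rough sketch of how the new function diffing can be exercised. The TypeArena/PrimitiveType/FunctionType construction below uses Luau's existing headers and is not taken from this commit's tests, so treat the exact calls as an approximation.

#include "Luau/Differ.h"
#include "Luau/Type.h"
#include "Luau/TypeArena.h"

void diffFunctionAritySketch()
{
    Luau::TypeArena arena;
    Luau::TypeId numberTy = arena.addType(Luau::PrimitiveType{Luau::PrimitiveType::Number});
    Luau::TypeId stringTy = arena.addType(Luau::PrimitiveType{Luau::PrimitiveType::String});
    Luau::TypePackId noRets = arena.addTypePack(Luau::TypePack{});

    // (number, string) -> () versus (number) -> ()
    Luau::TypeId takesTwo = arena.addType(Luau::FunctionType{arena.addTypePack({numberTy, stringTy}), noRets});
    Luau::TypeId takesOne = arena.addType(Luau::FunctionType{arena.addTypePack({numberTy}), noRets});

    // diffTpi flattens both argument packs, sees the shapes differ, and produces a
    // LengthMismatchInFnArgs error built from DiffPathNodeLeaf::detailsLength(2, false) and
    // detailsLength(1, false), which toStringALeaf renders as "... takes 2 arguments", etc.
    Luau::DifferResult result = Luau::diff(takesTwo, takesOne);
    (void)result;
}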

View File

@ -495,12 +495,16 @@ struct ErrorConverter
std::string operator()(const WhereClauseNeeded& e) const
{
return "Type family instance " + Luau::toString(e.ty) + " depends on generic function parameters but does not appear in the function signature; this construct cannot be type-checked at this time";
return "Type family instance " + Luau::toString(e.ty) +
" depends on generic function parameters but does not appear in the function signature; this construct cannot be type-checked at this "
"time";
}
std::string operator()(const PackWhereClauseNeeded& e) const
{
return "Type pack family instance " + Luau::toString(e.tp) + " depends on generic function parameters but does not appear in the function signature; this construct cannot be type-checked at this time";
return "Type pack family instance " + Luau::toString(e.tp) +
" depends on generic function parameters but does not appear in the function signature; this construct cannot be type-checked at this "
"time";
}
};

View File

@ -35,7 +35,7 @@ LUAU_FASTINTVARIABLE(LuauAutocompleteCheckTimeoutMs, 100)
LUAU_FASTFLAGVARIABLE(DebugLuauDeferredConstraintResolution, false)
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolverToJson, false)
LUAU_FASTFLAGVARIABLE(DebugLuauReadWriteProperties, false)
LUAU_FASTFLAGVARIABLE(LuauFixBuildQueueExceptionUnwrap, false)
LUAU_FASTFLAGVARIABLE(LuauTypecheckCancellation, false)
namespace Luau
{
@ -461,6 +461,10 @@ CheckResult Frontend::check(const ModuleName& name, std::optional<FrontendOption
if (item.module->timeout)
checkResult.timeoutHits.push_back(item.name);
// If check was manually cancelled, do not return partial results
if (FFlag::LuauTypecheckCancellation && item.module->cancelled)
return {};
checkResult.errors.insert(checkResult.errors.end(), item.module->errors.begin(), item.module->errors.end());
if (item.name == name)
@ -610,6 +614,7 @@ std::vector<ModuleName> Frontend::checkQueuedModules(std::optional<FrontendOptio
std::vector<size_t> nextItems;
std::optional<size_t> itemWithException;
bool cancelled = false;
while (remaining != 0)
{
@ -626,15 +631,15 @@ std::vector<ModuleName> Frontend::checkQueuedModules(std::optional<FrontendOptio
{
const BuildQueueItem& item = buildQueueItems[i];
if (FFlag::LuauFixBuildQueueExceptionUnwrap)
{
// If exception was thrown, stop adding new items and wait for processing items to complete
if (item.exception)
itemWithException = i;
// If exception was thrown, stop adding new items and wait for processing items to complete
if (item.exception)
itemWithException = i;
if (itemWithException)
break;
}
if (FFlag::LuauTypecheckCancellation && item.module && item.module->cancelled)
cancelled = true;
if (itemWithException || cancelled)
break;
recordItemResult(item);
@ -671,8 +676,12 @@ std::vector<ModuleName> Frontend::checkQueuedModules(std::optional<FrontendOptio
// If we aren't done, but don't have anything processing, we hit a cycle
if (remaining != 0 && processing == 0)
{
// Typechecking might have been cancelled by user, don't return partial results
if (FFlag::LuauTypecheckCancellation && cancelled)
return {};
// We might have stopped because of a pending exception
if (FFlag::LuauFixBuildQueueExceptionUnwrap && itemWithException)
if (itemWithException)
{
recordItemResult(buildQueueItems[*itemWithException]);
break;
@ -901,6 +910,9 @@ void Frontend::checkBuildQueueItem(BuildQueueItem& item)
else
typeCheckLimits.unifierIterationLimit = std::nullopt;
if (FFlag::LuauTypecheckCancellation)
typeCheckLimits.cancellationToken = item.options.cancellationToken;
ModulePtr moduleForAutocomplete = check(sourceModule, Mode::Strict, requireCycles, environmentScope, /*forAutocomplete*/ true,
/*recordJsonLog*/ false, typeCheckLimits);
@ -918,7 +930,12 @@ void Frontend::checkBuildQueueItem(BuildQueueItem& item)
return;
}
ModulePtr module = check(sourceModule, mode, requireCycles, environmentScope, /*forAutocomplete*/ false, item.recordJsonLog, {});
TypeCheckLimits typeCheckLimits;
if (FFlag::LuauTypecheckCancellation)
typeCheckLimits.cancellationToken = item.options.cancellationToken;
ModulePtr module = check(sourceModule, mode, requireCycles, environmentScope, /*forAutocomplete*/ false, item.recordJsonLog, typeCheckLimits);
item.stats.timeCheck += getTimestamp() - timestamp;
item.stats.filesStrict += mode == Mode::Strict;
@ -996,6 +1013,10 @@ void Frontend::checkBuildQueueItems(std::vector<BuildQueueItem>& items)
for (BuildQueueItem& item : items)
{
checkBuildQueueItem(item);
if (FFlag::LuauTypecheckCancellation && item.module && item.module->cancelled)
break;
recordItemResult(item);
}
}
@ -1232,8 +1253,8 @@ ModulePtr Frontend::check(const SourceModule& sourceModule, Mode mode, std::vect
catch (const InternalCompilerError& err)
{
InternalCompilerError augmented = err.location.has_value()
? InternalCompilerError{err.message, sourceModule.humanReadableName, *err.location}
: InternalCompilerError{err.message, sourceModule.humanReadableName};
? InternalCompilerError{err.message, sourceModule.humanReadableName, *err.location}
: InternalCompilerError{err.message, sourceModule.humanReadableName};
throw augmented;
}
}
@ -1254,6 +1275,9 @@ ModulePtr Frontend::check(const SourceModule& sourceModule, Mode mode, std::vect
typeChecker.instantiationChildLimit = typeCheckLimits.instantiationChildLimit;
typeChecker.unifierIterationLimit = typeCheckLimits.unifierIterationLimit;
if (FFlag::LuauTypecheckCancellation)
typeChecker.cancellationToken = typeCheckLimits.cancellationToken;
return typeChecker.check(sourceModule, mode, environmentScope);
}
}

View File

@ -15,8 +15,6 @@
#include <algorithm>
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAGVARIABLE(LuauClonePublicInterfaceLess2, false);
LUAU_FASTFLAGVARIABLE(LuauCloneSkipNonInternalVisit, false);
namespace Luau
{
@ -98,7 +96,7 @@ struct ClonePublicInterface : Substitution
bool ignoreChildrenVisit(TypeId ty) override
{
if (FFlag::LuauCloneSkipNonInternalVisit && ty->owningArena != &module->internalTypes)
if (ty->owningArena != &module->internalTypes)
return true;
return false;
@ -106,7 +104,7 @@ struct ClonePublicInterface : Substitution
bool ignoreChildrenVisit(TypePackId tp) override
{
if (FFlag::LuauCloneSkipNonInternalVisit && tp->owningArena != &module->internalTypes)
if (tp->owningArena != &module->internalTypes)
return true;
return false;
@ -211,35 +209,23 @@ void Module::clonePublicInterface(NotNull<BuiltinTypes> builtinTypes, InternalEr
TxnLog log;
ClonePublicInterface clonePublicInterface{&log, builtinTypes, this};
if (FFlag::LuauClonePublicInterfaceLess2)
returnType = clonePublicInterface.cloneTypePack(returnType);
else
returnType = clone(returnType, interfaceTypes, cloneState);
returnType = clonePublicInterface.cloneTypePack(returnType);
moduleScope->returnType = returnType;
if (varargPack)
{
if (FFlag::LuauClonePublicInterfaceLess2)
varargPack = clonePublicInterface.cloneTypePack(*varargPack);
else
varargPack = clone(*varargPack, interfaceTypes, cloneState);
varargPack = clonePublicInterface.cloneTypePack(*varargPack);
moduleScope->varargPack = varargPack;
}
for (auto& [name, tf] : moduleScope->exportedTypeBindings)
{
if (FFlag::LuauClonePublicInterfaceLess2)
tf = clonePublicInterface.cloneTypeFun(tf);
else
tf = clone(tf, interfaceTypes, cloneState);
tf = clonePublicInterface.cloneTypeFun(tf);
}
for (auto& [name, ty] : declaredGlobals)
{
if (FFlag::LuauClonePublicInterfaceLess2)
ty = clonePublicInterface.cloneType(ty);
else
ty = clone(ty, interfaceTypes, cloneState);
ty = clonePublicInterface.cloneType(ty);
}
// Copy external stuff over to Module itself

View File

@ -8,93 +8,15 @@
#include <algorithm>
#include <stdexcept>
LUAU_FASTFLAG(LuauClonePublicInterfaceLess2)
LUAU_FASTINTVARIABLE(LuauTarjanChildLimit, 10000)
LUAU_FASTFLAG(DebugLuauReadWriteProperties)
LUAU_FASTFLAG(LuauCloneSkipNonInternalVisit)
LUAU_FASTFLAGVARIABLE(LuauTarjanSingleArr, false)
namespace Luau
{
static TypeId DEPRECATED_shallowClone(TypeId ty, TypeArena& dest, const TxnLog* log, bool alwaysClone)
{
ty = log->follow(ty);
TypeId result = ty;
if (auto pty = log->pending(ty))
ty = &pty->pending;
if (const FunctionType* ftv = get<FunctionType>(ty))
{
FunctionType clone = FunctionType{ftv->level, ftv->scope, ftv->argTypes, ftv->retTypes, ftv->definition, ftv->hasSelf};
clone.generics = ftv->generics;
clone.genericPacks = ftv->genericPacks;
clone.magicFunction = ftv->magicFunction;
clone.dcrMagicFunction = ftv->dcrMagicFunction;
clone.dcrMagicRefinement = ftv->dcrMagicRefinement;
clone.tags = ftv->tags;
clone.argNames = ftv->argNames;
result = dest.addType(std::move(clone));
}
else if (const TableType* ttv = get<TableType>(ty))
{
LUAU_ASSERT(!ttv->boundTo);
TableType clone = TableType{ttv->props, ttv->indexer, ttv->level, ttv->scope, ttv->state};
clone.definitionModuleName = ttv->definitionModuleName;
clone.definitionLocation = ttv->definitionLocation;
clone.name = ttv->name;
clone.syntheticName = ttv->syntheticName;
clone.instantiatedTypeParams = ttv->instantiatedTypeParams;
clone.instantiatedTypePackParams = ttv->instantiatedTypePackParams;
clone.tags = ttv->tags;
result = dest.addType(std::move(clone));
}
else if (const MetatableType* mtv = get<MetatableType>(ty))
{
MetatableType clone = MetatableType{mtv->table, mtv->metatable};
clone.syntheticName = mtv->syntheticName;
result = dest.addType(std::move(clone));
}
else if (const UnionType* utv = get<UnionType>(ty))
{
UnionType clone;
clone.options = utv->options;
result = dest.addType(std::move(clone));
}
else if (const IntersectionType* itv = get<IntersectionType>(ty))
{
IntersectionType clone;
clone.parts = itv->parts;
result = dest.addType(std::move(clone));
}
else if (const PendingExpansionType* petv = get<PendingExpansionType>(ty))
{
PendingExpansionType clone{petv->prefix, petv->name, petv->typeArguments, petv->packArguments};
result = dest.addType(std::move(clone));
}
else if (const NegationType* ntv = get<NegationType>(ty))
{
result = dest.addType(NegationType{ntv->ty});
}
else if (const TypeFamilyInstanceType* tfit = get<TypeFamilyInstanceType>(ty))
{
TypeFamilyInstanceType clone{tfit->family, tfit->typeArguments, tfit->packArguments};
result = dest.addType(std::move(clone));
}
else
return result;
asMutable(result)->documentationSymbol = ty->documentationSymbol;
return result;
}
static TypeId shallowClone(TypeId ty, TypeArena& dest, const TxnLog* log, bool alwaysClone)
{
if (!FFlag::LuauClonePublicInterfaceLess2)
return DEPRECATED_shallowClone(ty, dest, log, alwaysClone);
auto go = [ty, &dest, alwaysClone](auto&& a) {
using T = std::decay_t<decltype(a)>;
@ -224,7 +146,7 @@ void Tarjan::visitChildren(TypeId ty, int index)
{
LUAU_ASSERT(ty == log->follow(ty));
if (FFlag::LuauCloneSkipNonInternalVisit ? ignoreChildrenVisit(ty) : ignoreChildren(ty))
if (ignoreChildrenVisit(ty))
return;
if (auto pty = log->pending(ty))
@ -324,7 +246,7 @@ void Tarjan::visitChildren(TypePackId tp, int index)
{
LUAU_ASSERT(tp == log->follow(tp));
if (FFlag::LuauCloneSkipNonInternalVisit ? ignoreChildrenVisit(tp) : ignoreChildren(tp))
if (ignoreChildrenVisit(tp))
return;
if (auto ptp = log->pending(tp))
@ -856,7 +778,7 @@ std::optional<TypePackId> Substitution::substitute(TypePackId tp)
TypeId Substitution::clone(TypeId ty)
{
return shallowClone(ty, *arena, log, /* alwaysClone */ FFlag::LuauClonePublicInterfaceLess2);
return shallowClone(ty, *arena, log, /* alwaysClone */ true);
}
TypePackId Substitution::clone(TypePackId tp)
@ -888,12 +810,8 @@ TypePackId Substitution::clone(TypePackId tp)
clone.packArguments.assign(tfitp->packArguments.begin(), tfitp->packArguments.end());
return addTypePack(std::move(clone));
}
else if (FFlag::LuauClonePublicInterfaceLess2)
{
return addTypePack(*tp);
}
else
return tp;
return addTypePack(*tp);
}
void Substitution::foundDirty(TypeId ty)

View File

@ -13,8 +13,10 @@
#include <algorithm>
#include <stdexcept>
#include <string>
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_FASTFLAGVARIABLE(LuauToStringPrettifyLocation, false)
/*
* Enables increasing levels of verbosity for Luau type names when stringifying.
@ -1739,9 +1741,17 @@ std::string toString(const Position& position)
return "{ line = " + std::to_string(position.line) + ", col = " + std::to_string(position.column) + " }";
}
std::string toString(const Location& location)
std::string toString(const Location& location, int offset, bool useBegin)
{
return "Location { " + toString(location.begin) + ", " + toString(location.end) + " }";
if (FFlag::LuauToStringPrettifyLocation)
{
return "(" + std::to_string(location.begin.line + offset) + ", " + std::to_string(location.begin.column + offset) + ") - (" +
std::to_string(location.end.line + offset) + ", " + std::to_string(location.end.column + offset) + ")";
}
else
{
return "Location { " + toString(location.begin) + ", " + toString(location.end) + " }";
}
}
} // namespace Luau
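
To illustrate the new signature, a small hedged example with made-up location values; it assumes FFlag::LuauToStringPrettifyLocation is enabled and follows the removed header comment's guidance to pass offset=1 for user-facing display.

// Location spanning columns 4..10 on the first (0-based) line.
Luau::Location loc{Luau::Position{0, 4}, Luau::Position{0, 10}};
std::string pretty = Luau::toString(loc, /*offset*/ 1); // "(1, 5) - (1, 11)"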

View File

@ -1239,7 +1239,8 @@ struct TypeChecker2
return std::move(u.errors);
}
std::pair<Analysis, ErrorVec> checkOverload(TypeId fnTy, const TypePack* args, Location fnLoc, const std::vector<Location>* argLocs, bool callMetamethodOk = true)
std::pair<Analysis, ErrorVec> checkOverload(
TypeId fnTy, const TypePack* args, Location fnLoc, const std::vector<Location>* argLocs, bool callMetamethodOk = true)
{
fnTy = follow(fnTy);
@ -1257,17 +1258,18 @@ struct TypeChecker2
std::vector<Location> withSelfLocs = *argLocs;
withSelfLocs.insert(withSelfLocs.begin(), fnLoc);
return checkOverload(*callMm, &withSelf, fnLoc, &withSelfLocs, /*callMetamethodOk=*/ false);
return checkOverload(*callMm, &withSelf, fnLoc, &withSelfLocs, /*callMetamethodOk=*/false);
}
else
return {TypeIsNotAFunction, {}}; // Intentionally empty. We can just fabricate the type error later on.
}
LUAU_NOINLINE
std::pair<Analysis, ErrorVec> checkOverload_(TypeId fnTy, const FunctionType* fn, const TypePack* args, Location fnLoc, const std::vector<Location>* argLocs)
std::pair<Analysis, ErrorVec> checkOverload_(
TypeId fnTy, const FunctionType* fn, const TypePack* args, Location fnLoc, const std::vector<Location>* argLocs)
{
TxnLog fake;
FamilyGraphReductionResult result = reduceFamilies(fnTy, callLoc, arena, builtinTypes, scope, normalizer, &fake, /*force=*/ true);
FamilyGraphReductionResult result = reduceFamilies(fnTy, callLoc, arena, builtinTypes, scope, normalizer, &fake, /*force=*/true);
if (!result.errors.empty())
return {OverloadIsNonviable, result.errors};
@ -2374,6 +2376,9 @@ struct TypeChecker2
return;
}
if (norm->shouldSuppressErrors())
return;
bool foundOneProp = false;
std::vector<TypeId> typesMissingTheProp;
@ -2539,7 +2544,8 @@ struct TypeChecker2
}
};
void check(NotNull<BuiltinTypes> builtinTypes, NotNull<UnifierSharedState> unifierState, DcrLogger* logger, const SourceModule& sourceModule, Module* module)
void check(
NotNull<BuiltinTypes> builtinTypes, NotNull<UnifierSharedState> unifierState, DcrLogger* logger, const SourceModule& sourceModule, Module* module)
{
TypeChecker2 typeChecker{builtinTypes, unifierState, logger, &sourceModule, module};

View File

@ -2,6 +2,7 @@
#include "Luau/TypeInfer.h"
#include "Luau/ApplyTypeFunction.h"
#include "Luau/Cancellation.h"
#include "Luau/Clone.h"
#include "Luau/Common.h"
#include "Luau/Instantiation.h"
@ -302,6 +303,10 @@ ModulePtr TypeChecker::checkWithoutRecursionCheck(const SourceModule& module, Mo
{
currentModule->timeout = true;
}
catch (const UserCancelError&)
{
currentModule->cancelled = true;
}
if (FFlag::DebugLuauSharedSelf)
{
@ -345,7 +350,9 @@ ModulePtr TypeChecker::checkWithoutRecursionCheck(const SourceModule& module, Mo
ControlFlow TypeChecker::check(const ScopePtr& scope, const AstStat& program)
{
if (finishTime && TimeTrace::getClock() > *finishTime)
throw TimeLimitError(iceHandler->moduleName);
throwTimeLimitError();
if (cancellationToken && cancellationToken->requested())
throwUserCancelError();
if (auto block = program.as<AstStatBlock>())
return check(scope, *block);
@ -4929,16 +4936,26 @@ void TypeChecker::reportErrors(const ErrorVec& errors)
reportError(err);
}
void TypeChecker::ice(const std::string& message, const Location& location)
LUAU_NOINLINE void TypeChecker::ice(const std::string& message, const Location& location)
{
iceHandler->ice(message, location);
}
void TypeChecker::ice(const std::string& message)
LUAU_NOINLINE void TypeChecker::ice(const std::string& message)
{
iceHandler->ice(message);
}
LUAU_NOINLINE void TypeChecker::throwTimeLimitError()
{
throw TimeLimitError(iceHandler->moduleName);
}
LUAU_NOINLINE void TypeChecker::throwUserCancelError()
{
throw UserCancelError(iceHandler->moduleName);
}
void TypeChecker::prepareErrorsForDisplay(ErrorVec& errVec)
{
// Remove errors with names that were generated by recovery from a parse error

View File

@ -19,7 +19,6 @@
LUAU_FASTINT(LuauTypeInferTypePackLoopLimit)
LUAU_FASTFLAG(LuauErrorRecoveryType)
LUAU_FASTFLAGVARIABLE(LuauInstantiateInSubtyping, false)
LUAU_FASTFLAGVARIABLE(LuauVariadicAnyCanBeGeneric, false)
LUAU_FASTFLAGVARIABLE(LuauMaintainScopesInUnifier, false)
LUAU_FASTFLAGVARIABLE(LuauTransitiveSubtyping, false)
LUAU_FASTFLAGVARIABLE(LuauOccursIsntAlwaysFailure, false)

View File

@ -39,11 +39,6 @@ struct Location
bool containsClosed(const Position& p) const;
void extend(const Location& other);
void shift(const Position& start, const Position& oldEnd, const Position& newEnd);
/**
* Use offset=1 when displaying for the user.
*/
std::string toString(int offset = 0, bool useBegin = true) const;
};
} // namespace Luau

View File

@ -129,12 +129,4 @@ void Location::shift(const Position& start, const Position& oldEnd, const Positi
end.shift(start, oldEnd, newEnd);
}
std::string Location::toString(int offset, bool useBegin) const
{
const Position& pos = useBegin ? this->begin : this->end;
std::string line{std::to_string(pos.line + offset)};
std::string column{std::to_string(pos.column + offset)};
return "(" + line + ", " + column + ")";
}
} // namespace Luau

View File

@ -92,14 +92,6 @@ struct GlobalContext
{
~GlobalContext()
{
// Ideally we would want all ThreadContext destructors to run
// But in VS, not all thread_local object instances are destroyed
for (ThreadContext* context : threads)
{
if (!context->events.empty())
context->flushEvents();
}
if (traceFile)
fclose(traceFile);
}
@ -109,7 +101,7 @@ struct GlobalContext
uint32_t nextThreadId = 0;
std::vector<Token> tokens;
FILE* traceFile = nullptr;
private:
friend std::shared_ptr<GlobalContext> getGlobalContext();
GlobalContext() = default;

View File

@ -429,8 +429,7 @@ struct Reducer
}
}
void run(const std::string scriptName, const std::string command, std::string_view source,
std::string_view searchText)
void run(const std::string scriptName, const std::string command, std::string_view source, std::string_view searchText)
{
this->scriptName = scriptName;

View File

@ -26,6 +26,7 @@ struct IrBuilder
void rebuildBytecodeBasicBlocks(Proto* proto);
void translateInst(LuauOpcode op, const Instruction* pc, int i);
void handleFastcallFallback(IrOp fallbackOrUndef, const Instruction* pc, int i);
bool isInternalBlock(IrOp block);
void beginBlock(IrOp block);
@ -61,10 +62,13 @@ struct IrBuilder
IrOp vmConst(uint32_t index);
IrOp vmUpvalue(uint8_t index);
IrOp vmExit(uint32_t pcpos);
bool inTerminatedBlock = false;
bool activeFastcallFallback = false;
IrOp fastcallFallbackReturn;
int fastcallSkipTarget = -1;
IrFunction function;

View File

@ -165,7 +165,7 @@ enum class IrCmd : uint8_t
NOT_ANY, // TODO: boolean specialization will be useful
// Unconditional jump
// A: block
// A: block/vmexit
JUMP,
// Jump if TValue is truthy
@ -364,7 +364,7 @@ enum class IrCmd : uint8_t
// Guard against tag mismatch
// A, B: tag
// C: block/undef
// C: block/vmexit/undef
// D: bool (finish execution in VM on failure)
// In final x64 lowering, A can also be Rn
// When undef is specified instead of a block, execution is aborted on check failure; if D is true, execution is continued in VM interpreter
@ -384,7 +384,7 @@ enum class IrCmd : uint8_t
CHECK_NO_METATABLE,
// Guard against executing in unsafe environment, exits to VM on check failure
// A: unsigned int (pcpos)/undef
// A: vmexit/undef
// When undef is specified, execution is aborted on check failure
CHECK_SAFE_ENV,
@ -670,6 +670,9 @@ enum class IrOpKind : uint32_t
// To reference a VM upvalue
VmUpvalue,
// To reference an exit to VM at specific PC pos
VmExit,
};
struct IrOp

View File

@ -24,11 +24,23 @@ extern "C" void __register_frame(const void*);
extern "C" void __deregister_frame(const void*);
extern "C" void __unw_add_dynamic_fde() __attribute__((weak));
#endif
#if defined(__APPLE__) && defined(__aarch64__)
#include <sys/sysctl.h>
#include <mach-o/loader.h>
#include <dlfcn.h>
struct unw_dynamic_unwind_sections_t
{
uintptr_t dso_base;
uintptr_t dwarf_section;
size_t dwarf_section_length;
uintptr_t compact_unwind_section;
size_t compact_unwind_section_length;
};
typedef int (*unw_add_find_dynamic_unwind_sections_t)(int (*)(uintptr_t addr, unw_dynamic_unwind_sections_t* info));
#endif
namespace Luau
@ -36,6 +48,26 @@ namespace Luau
namespace CodeGen
{
#if defined(__APPLE__) && defined(__aarch64__)
static int findDynamicUnwindSections(uintptr_t addr, unw_dynamic_unwind_sections_t* info)
{
// Define a minimal mach header for JIT'd code.
static const mach_header_64 kFakeMachHeader = {
MH_MAGIC_64,
CPU_TYPE_ARM64,
CPU_SUBTYPE_ARM64_ALL,
MH_DYLIB,
};
info->dso_base = (uintptr_t)&kFakeMachHeader;
info->dwarf_section = 0;
info->dwarf_section_length = 0;
info->compact_unwind_section = 0;
info->compact_unwind_section_length = 0;
return 1;
}
#endif
#if defined(__linux__) || defined(__APPLE__)
static void visitFdeEntries(char* pos, void (*cb)(const void*))
{
@ -87,6 +119,15 @@ void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, siz
visitFdeEntries(unwindData, __register_frame);
#endif
#if defined(__APPLE__) && defined(__aarch64__)
// Starting from macOS 14, we need to register unwind section callback to state that our ABI doesn't require pointer authentication
// This might conflict with other JITs that do the same; unfortunately this is the best we can do for now.
static unw_add_find_dynamic_unwind_sections_t unw_add_find_dynamic_unwind_sections =
unw_add_find_dynamic_unwind_sections_t(dlsym(RTLD_DEFAULT, "__unw_add_find_dynamic_unwind_sections"));
static int regonce = unw_add_find_dynamic_unwind_sections ? unw_add_find_dynamic_unwind_sections(findDynamicUnwindSections) : 0;
LUAU_ASSERT(regonce == 0);
#endif
beginOffset = unwindSize + unwind->getBeginOffset();
return block;
}

View File

@ -141,14 +141,6 @@ static int onEnter(lua_State* L, Proto* proto)
return GateFn(data->context.gateEntry)(L, proto, target, &data->context);
}
static void onSetBreakpoint(lua_State* L, Proto* proto, int instruction)
{
if (!proto->execdata)
return;
LUAU_ASSERT(!"Native breakpoints are not implemented");
}
#if defined(__aarch64__)
unsigned int getCpuFeaturesA64()
{
@ -245,7 +237,6 @@ void create(lua_State* L)
ecb->close = onCloseState;
ecb->destroy = onDestroyFunction;
ecb->enter = onEnter;
ecb->setbreakpoint = onSetBreakpoint;
}
void compile(lua_State* L, int idx)
@ -259,7 +250,8 @@ void compile(lua_State* L, int idx)
return;
#if defined(__aarch64__)
A64::AssemblyBuilderA64 build(/* logText= */ false, getCpuFeaturesA64());
static unsigned int cpuFeatures = getCpuFeaturesA64();
A64::AssemblyBuilderA64 build(/* logText= */ false, cpuFeatures);
#else
X64::AssemblyBuilderX64 build(/* logText= */ false);
#endif

View File

@ -100,7 +100,8 @@ std::string getAssembly(lua_State* L, int idx, AssemblyOptions options)
case AssemblyOptions::Host:
{
#if defined(__aarch64__)
A64::AssemblyBuilderA64 build(/* logText= */ options.includeAssembly, getCpuFeaturesA64());
static unsigned int cpuFeatures = getCpuFeaturesA64();
A64::AssemblyBuilderA64 build(/* logText= */ options.includeAssembly, cpuFeatures);
#else
X64::AssemblyBuilderX64 build(/* logText= */ options.includeAssembly);
#endif

View File

@ -44,6 +44,18 @@ inline void gatherFunctions(std::vector<Proto*>& results, Proto* proto)
gatherFunctions(results, proto->p[i]);
}
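// Finds the next block that will actually be emitted, skipping Dead blocks, so that lowering can
// treat it as the fallthrough target and avoid emitting direct jumps that only cross dead blocks.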
inline IrBlock& getNextBlock(IrFunction& function, std::vector<uint32_t>& sortedBlocks, IrBlock& dummy, size_t i)
{
for (size_t j = i + 1; j < sortedBlocks.size(); ++j)
{
IrBlock& block = function.blocks[sortedBlocks[j]];
if (block.kind != IrBlockKind::Dead)
return block;
}
return dummy;
}
template<typename AssemblyBuilder, typename IrLowering>
inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction& function, int bytecodeid, AssemblyOptions options)
{
@ -118,6 +130,8 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
build.setLabel(block.label);
IrBlock& nextBlock = getNextBlock(function, sortedBlocks, dummy, i);
for (uint32_t index = block.start; index <= block.finish; index++)
{
LUAU_ASSERT(index < function.instructions.size());
@ -156,9 +170,7 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
toStringDetailed(ctx, block, blockIndex, inst, index, /* includeUseInfo */ true);
}
IrBlock& next = i + 1 < sortedBlocks.size() ? function.blocks[sortedBlocks[i + 1]] : dummy;
lowering.lowerInst(inst, index, next);
lowering.lowerInst(inst, index, nextBlock);
if (lowering.hasError())
{

View File

@ -12,7 +12,7 @@ constexpr unsigned kTValueSizeLog2 = 4;
constexpr unsigned kLuaNodeSizeLog2 = 5;
// TKey.tt and TKey.next are packed together in a bitfield
constexpr unsigned kOffsetOfTKeyTagNext = 12; // offsetof cannot be used on a bit field
constexpr unsigned kOffsetOfTKeyTagNext = 12; // offsetof cannot be used on a bit field
constexpr unsigned kTKeyTagBits = 4;
constexpr unsigned kTKeyTagMask = (1 << kTKeyTagBits) - 1;
@ -33,7 +33,7 @@ struct ModuleHelpers
Label continueCallInVm;
// A64
Label reentry; // x0: closure
Label reentry; // x0: closure
};
} // namespace CodeGen

View File

@ -268,7 +268,6 @@ void callStepGc(IrRegAllocX64& regs, AssemblyBuilderX64& build)
build.setLabel(skip);
}
void emitClearNativeFlag(AssemblyBuilderX64& build)
{
build.mov(rax, qword[rState + offsetof(lua_State, ci)]);

View File

@ -128,8 +128,16 @@ void IrBuilder::buildFunctionIr(Proto* proto)
// We skip dead bytecode instructions when they appear after block was already terminated
if (!inTerminatedBlock)
{
translateInst(op, pc, i);
if (fastcallSkipTarget != -1)
{
nexti = fastcallSkipTarget;
fastcallSkipTarget = -1;
}
}
i = nexti;
LUAU_ASSERT(i <= proto->sizecode);
@ -357,49 +365,17 @@ void IrBuilder::translateInst(LuauOpcode op, const Instruction* pc, int i)
translateInstCloseUpvals(*this, pc);
break;
case LOP_FASTCALL:
{
int skip = LUAU_INSN_C(*pc);
IrOp next = blockAtInst(i + skip + 2);
translateFastCallN(*this, pc, i, false, 0, {}, next);
activeFastcallFallback = true;
fastcallFallbackReturn = next;
handleFastcallFallback(translateFastCallN(*this, pc, i, false, 0, {}), pc, i);
break;
}
case LOP_FASTCALL1:
{
int skip = LUAU_INSN_C(*pc);
IrOp next = blockAtInst(i + skip + 2);
translateFastCallN(*this, pc, i, true, 1, undef(), next);
activeFastcallFallback = true;
fastcallFallbackReturn = next;
handleFastcallFallback(translateFastCallN(*this, pc, i, true, 1, undef()), pc, i);
break;
}
case LOP_FASTCALL2:
{
int skip = LUAU_INSN_C(*pc);
IrOp next = blockAtInst(i + skip + 2);
translateFastCallN(*this, pc, i, true, 2, vmReg(pc[1]), next);
activeFastcallFallback = true;
fastcallFallbackReturn = next;
handleFastcallFallback(translateFastCallN(*this, pc, i, true, 2, vmReg(pc[1])), pc, i);
break;
}
case LOP_FASTCALL2K:
{
int skip = LUAU_INSN_C(*pc);
IrOp next = blockAtInst(i + skip + 2);
translateFastCallN(*this, pc, i, true, 2, vmConst(pc[1]), next);
activeFastcallFallback = true;
fastcallFallbackReturn = next;
handleFastcallFallback(translateFastCallN(*this, pc, i, true, 2, vmConst(pc[1])), pc, i);
break;
}
case LOP_FORNPREP:
translateInstForNPrep(*this, pc, i);
break;
@ -493,6 +469,25 @@ void IrBuilder::translateInst(LuauOpcode op, const Instruction* pc, int i)
}
}
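// When translation returns a real fallback block, jump over it to the instruction after the
// fastcall pair and start lowering the fallback; when it returns undef, the builtin was fully
// translated, so skip ahead past the paired CALL (fastcallSkipTarget = i + skip + 2).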
void IrBuilder::handleFastcallFallback(IrOp fallbackOrUndef, const Instruction* pc, int i)
{
int skip = LUAU_INSN_C(*pc);
if (fallbackOrUndef.kind != IrOpKind::Undef)
{
IrOp next = blockAtInst(i + skip + 2);
inst(IrCmd::JUMP, next);
beginBlock(fallbackOrUndef);
activeFastcallFallback = true;
fastcallFallbackReturn = next;
}
else
{
fastcallSkipTarget = i + skip + 2;
}
}
bool IrBuilder::isInternalBlock(IrOp block)
{
IrBlock& target = function.blocks[block.index];
@ -718,5 +713,10 @@ IrOp IrBuilder::vmUpvalue(uint8_t index)
return {IrOpKind::VmUpvalue, index};
}
IrOp IrBuilder::vmExit(uint32_t pcpos)
{
return {IrOpKind::VmExit, pcpos};
}
} // namespace CodeGen
} // namespace Luau

View File

@ -389,6 +389,9 @@ void toString(IrToStringContext& ctx, IrOp op)
case IrOpKind::VmUpvalue:
append(ctx.result, "U%d", vmUpvalueOp(op));
break;
case IrOpKind::VmExit:
append(ctx.result, "exit(%d)", op.index);
break;
}
}

View File

@ -178,6 +178,7 @@ IrLoweringA64::IrLoweringA64(AssemblyBuilderA64& build, ModuleHelpers& helpers,
, function(function)
, regs(function, {{x0, x15}, {x16, x17}, {q0, q7}, {q16, q31}})
, valueTracker(function)
, exitHandlerMap(~0u)
{
// In order to allocate registers during lowering, we need to know where instruction results are last used
updateLastUseLocations(function);
@ -514,8 +515,11 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
build.cmp(regOp(inst.a), LUA_TBOOLEAN);
build.b(ConditionA64::NotEqual, notbool);
// boolean => invert value
build.eor(inst.regA64, regOp(inst.b), 1);
if (inst.b.kind == IrOpKind::Constant)
build.mov(inst.regA64, intOp(inst.b) == 0 ? 1 : 0);
else
build.eor(inst.regA64, regOp(inst.b), 1); // boolean => invert value
build.b(exit);
// not boolean => result is true iff tag was nil
@ -527,7 +531,16 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
break;
}
case IrCmd::JUMP:
jumpOrFallthrough(blockOp(inst.a), next);
if (inst.a.kind == IrOpKind::VmExit)
{
Label fresh;
build.b(getTargetLabel(inst.a, fresh));
finalizeTargetLabel(inst.a, fresh);
}
else
{
jumpOrFallthrough(blockOp(inst.a), next);
}
break;
case IrCmd::JUMP_IF_TRUTHY:
{
@ -1029,8 +1042,8 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
case IrCmd::CHECK_TAG:
{
bool continueInVm = (inst.d.kind == IrOpKind::Constant && intOp(inst.d));
Label abort; // used when guard aborts execution
Label& fail = inst.c.kind == IrOpKind::Undef ? (continueInVm ? helpers.exitContinueVmClearNativeFlag : abort) : labelOp(inst.c);
Label fresh; // used when guard aborts execution or jumps to a VM exit
Label& fail = continueInVm ? helpers.exitContinueVmClearNativeFlag : getTargetLabel(inst.c, fresh);
if (tagOp(inst.b) == 0)
{
build.cbnz(regOp(inst.a), fail);
@ -1040,55 +1053,43 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
build.cmp(regOp(inst.a), tagOp(inst.b));
build.b(ConditionA64::NotEqual, fail);
}
if (abort.id && !continueInVm)
emitAbort(build, abort);
if (!continueInVm)
finalizeTargetLabel(inst.c, fresh);
break;
}
case IrCmd::CHECK_READONLY:
{
Label abort; // used when guard aborts execution
Label fresh; // used when guard aborts execution or jumps to a VM exit
RegisterA64 temp = regs.allocTemp(KindA64::w);
build.ldrb(temp, mem(regOp(inst.a), offsetof(Table, readonly)));
build.cbnz(temp, inst.b.kind == IrOpKind::Undef ? abort : labelOp(inst.b));
if (abort.id)
emitAbort(build, abort);
build.cbnz(temp, getTargetLabel(inst.b, fresh));
finalizeTargetLabel(inst.b, fresh);
break;
}
case IrCmd::CHECK_NO_METATABLE:
{
Label abort; // used when guard aborts execution
Label fresh; // used when guard aborts execution or jumps to a VM exit
RegisterA64 temp = regs.allocTemp(KindA64::x);
build.ldr(temp, mem(regOp(inst.a), offsetof(Table, metatable)));
build.cbnz(temp, inst.b.kind == IrOpKind::Undef ? abort : labelOp(inst.b));
if (abort.id)
emitAbort(build, abort);
build.cbnz(temp, getTargetLabel(inst.b, fresh));
finalizeTargetLabel(inst.b, fresh);
break;
}
case IrCmd::CHECK_SAFE_ENV:
{
Label abort; // used when guard aborts execution
Label fresh; // used when guard aborts execution or jumps to a VM exit
RegisterA64 temp = regs.allocTemp(KindA64::x);
RegisterA64 tempw = castReg(KindA64::w, temp);
build.ldr(temp, mem(rClosure, offsetof(Closure, env)));
build.ldrb(tempw, mem(temp, offsetof(Table, safeenv)));
if (inst.a.kind == IrOpKind::Undef)
{
build.cbz(tempw, abort);
emitAbort(build, abort);
}
else
{
Label self;
build.cbz(tempw, self);
exitHandlers.push_back({self, uintOp(inst.a)});
}
build.cbz(tempw, getTargetLabel(inst.a, fresh));
finalizeTargetLabel(inst.a, fresh);
break;
}
case IrCmd::CHECK_ARRAY_SIZE:
{
Label abort; // used when guard aborts execution
Label& fail = inst.c.kind == IrOpKind::Undef ? abort : labelOp(inst.c);
Label fresh; // used when guard aborts execution or jumps to a VM exit
Label& fail = getTargetLabel(inst.c, fresh);
RegisterA64 temp = regs.allocTemp(KindA64::w);
build.ldr(temp, mem(regOp(inst.a), offsetof(Table, sizearray)));
@ -1120,8 +1121,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
else
LUAU_ASSERT(!"Unsupported instruction form");
if (abort.id)
emitAbort(build, abort);
finalizeTargetLabel(inst.c, fresh);
break;
}
case IrCmd::JUMP_SLOT_MATCH:
@ -1158,15 +1158,13 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
}
case IrCmd::CHECK_NODE_NO_NEXT:
{
Label abort; // used when guard aborts execution
Label fresh; // used when guard aborts execution or jumps to a VM exit
RegisterA64 temp = regs.allocTemp(KindA64::w);
build.ldr(temp, mem(regOp(inst.a), offsetof(LuaNode, key) + kOffsetOfTKeyTagNext));
build.lsr(temp, temp, kTKeyTagBits);
build.cbnz(temp, inst.b.kind == IrOpKind::Undef ? abort : labelOp(inst.b));
if (abort.id)
emitAbort(build, abort);
build.cbnz(temp, getTargetLabel(inst.b, fresh));
finalizeTargetLabel(inst.b, fresh);
break;
}
case IrCmd::INTERRUPT:
@ -1799,6 +1797,35 @@ void IrLoweringA64::jumpOrFallthrough(IrBlock& target, IrBlock& next)
build.b(target.label);
}
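// Guards and jumps share these helpers: Undef targets and not-yet-seen VmExit targets resolve to
// the caller-provided 'fresh' label; finalizeTargetLabel then emits an abort stub for undef or,
// if the fresh label was actually used, records a VM exit handler for that bytecode position.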
Label& IrLoweringA64::getTargetLabel(IrOp op, Label& fresh)
{
if (op.kind == IrOpKind::Undef)
return fresh;
if (op.kind == IrOpKind::VmExit)
{
if (uint32_t* index = exitHandlerMap.find(op.index))
return exitHandlers[*index].self;
return fresh;
}
return labelOp(op);
}
void IrLoweringA64::finalizeTargetLabel(IrOp op, Label& fresh)
{
if (op.kind == IrOpKind::Undef)
{
emitAbort(build, fresh);
}
else if (op.kind == IrOpKind::VmExit && fresh.id != 0)
{
exitHandlerMap[op.index] = uint32_t(exitHandlers.size());
exitHandlers.push_back({fresh, op.index});
}
}
RegisterA64 IrLoweringA64::tempDouble(IrOp op)
{
if (op.kind == IrOpKind::Inst)

View File

@ -2,6 +2,7 @@
#pragma once
#include "Luau/AssemblyBuilderA64.h"
#include "Luau/DenseHash.h"
#include "Luau/IrData.h"
#include "IrRegAllocA64.h"
@ -33,6 +34,9 @@ struct IrLoweringA64
bool isFallthroughBlock(IrBlock target, IrBlock next);
void jumpOrFallthrough(IrBlock& target, IrBlock& next);
Label& getTargetLabel(IrOp op, Label& fresh);
void finalizeTargetLabel(IrOp op, Label& fresh);
// Operand data build helpers
// May emit data/address synthesis instructions
RegisterA64 tempDouble(IrOp op);
@ -77,6 +81,7 @@ struct IrLoweringA64
std::vector<InterruptHandler> interruptHandlers;
std::vector<ExitHandler> exitHandlers;
DenseHashMap<uint32_t, uint32_t> exitHandlerMap;
bool error = false;
};

View File

@ -28,6 +28,7 @@ IrLoweringX64::IrLoweringX64(AssemblyBuilderX64& build, ModuleHelpers& helpers,
, function(function)
, regs(build, function)
, valueTracker(function)
, exitHandlerMap(~0u)
{
// In order to allocate registers during lowering, we need to know where instruction results are last used
updateLastUseLocations(function);
@ -492,8 +493,17 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
build.jcc(ConditionX64::NotEqual, savezero);
}
build.cmp(regOp(inst.b), 0);
build.jcc(ConditionX64::Equal, saveone);
if (inst.b.kind == IrOpKind::Constant)
{
// If value is 1, we fallthrough to storing 0
if (intOp(inst.b) == 0)
build.jmp(saveone);
}
else
{
build.cmp(regOp(inst.b), 0);
build.jcc(ConditionX64::Equal, saveone);
}
build.setLabel(savezero);
build.mov(inst.regX64, 0);
@ -506,7 +516,24 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
break;
}
case IrCmd::JUMP:
jumpOrFallthrough(blockOp(inst.a), next);
if (inst.a.kind == IrOpKind::VmExit)
{
if (uint32_t* index = exitHandlerMap.find(inst.a.index))
{
build.jmp(exitHandlers[*index].self);
}
else
{
Label self;
build.jmp(self);
exitHandlerMap[inst.a.index] = uint32_t(exitHandlers.size());
exitHandlers.push_back({self, inst.a.index});
}
}
else
{
jumpOrFallthrough(blockOp(inst.a), next);
}
break;
case IrCmd::JUMP_IF_TRUTHY:
jumpIfTruthy(build, vmRegOp(inst.a), labelOp(inst.b), labelOp(inst.c));
@ -907,19 +934,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
build.mov(tmp.reg, qword[tmp.reg + offsetof(Closure, env)]);
build.cmp(byte[tmp.reg + offsetof(Table, safeenv)], 0);
if (inst.a.kind == IrOpKind::Undef)
{
Label skip;
build.jcc(ConditionX64::NotEqual, skip);
build.ud2();
build.setLabel(skip);
}
else
{
Label self;
build.jcc(ConditionX64::Equal, self);
exitHandlers.push_back({self, uintOp(inst.a)});
}
jumpOrAbortOnUndef(ConditionX64::Equal, ConditionX64::NotEqual, inst.a);
break;
}
case IrCmd::CHECK_ARRAY_SIZE:
@ -1473,6 +1488,20 @@ void IrLoweringX64::jumpOrAbortOnUndef(ConditionX64 cond, ConditionX64 condInver
build.ud2();
build.setLabel(skip);
}
else if (targetOrUndef.kind == IrOpKind::VmExit)
{
if (uint32_t* index = exitHandlerMap.find(targetOrUndef.index))
{
build.jcc(cond, exitHandlers[*index].self);
}
else
{
Label self;
build.jcc(cond, self);
exitHandlerMap[targetOrUndef.index] = uint32_t(exitHandlers.size());
exitHandlers.push_back({self, targetOrUndef.index});
}
}
else
{
build.jcc(cond, labelOp(targetOrUndef));

View File

@ -2,6 +2,7 @@
#pragma once
#include "Luau/AssemblyBuilderX64.h"
#include "Luau/DenseHash.h"
#include "Luau/IrData.h"
#include "Luau/IrRegAllocX64.h"
@ -77,6 +78,7 @@ struct IrLoweringX64
std::vector<InterruptHandler> interruptHandlers;
std::vector<ExitHandler> exitHandlers;
DenseHashMap<uint32_t, uint32_t> exitHandlerMap;
};
} // namespace X64

View File

@ -18,12 +18,12 @@ namespace Luau
namespace CodeGen
{
static void builtinCheckDouble(IrBuilder& build, IrOp arg, IrOp fallback)
static void builtinCheckDouble(IrBuilder& build, IrOp arg, int pcpos)
{
if (arg.kind == IrOpKind::Constant)
LUAU_ASSERT(build.function.constOp(arg).kind == IrConstKind::Double);
else
build.loadAndCheckTag(arg, LUA_TNUMBER, fallback);
build.loadAndCheckTag(arg, LUA_TNUMBER, build.vmExit(pcpos));
}
static IrOp builtinLoadDouble(IrBuilder& build, IrOp arg)
@ -38,27 +38,27 @@ static IrOp builtinLoadDouble(IrBuilder& build, IrOp arg)
// (number, ...) -> number
static BuiltinImplResult translateBuiltinNumberToNumber(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
build.inst(IrCmd::FASTCALL, build.constUint(bfid), build.vmReg(ra), build.vmReg(arg), args, build.constInt(1), build.constInt(1));
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinNumberToNumberLibm(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp res = build.inst(IrCmd::INVOKE_LIBM, build.constUint(bfid), va);
@ -68,17 +68,17 @@ static BuiltinImplResult translateBuiltinNumberToNumberLibm(
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltin2NumberToNumberLibm(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
builtinCheckDouble(build, args, pcpos);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
@ -90,17 +90,17 @@ static BuiltinImplResult translateBuiltin2NumberToNumberLibm(
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinMathLdexp(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
builtinCheckDouble(build, args, pcpos);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
@ -114,17 +114,17 @@ static BuiltinImplResult translateBuiltinMathLdexp(
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
// (number, ...) -> (number, number)
static BuiltinImplResult translateBuiltinNumberTo2Number(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 1 || nresults > 2)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
build.inst(
IrCmd::FASTCALL, build.constUint(bfid), build.vmReg(ra), build.vmReg(arg), args, build.constInt(1), build.constInt(nresults == 1 ? 1 : 2));
@ -134,7 +134,7 @@ static BuiltinImplResult translateBuiltinNumberTo2Number(
if (nresults != 1)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra + 1), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 2};
return {BuiltinImplType::Full, 2};
}
static BuiltinImplResult translateBuiltinAssert(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
@ -151,12 +151,12 @@ static BuiltinImplResult translateBuiltinAssert(IrBuilder& build, int nparams, i
return {BuiltinImplType::UsesFallback, 0};
}
static BuiltinImplResult translateBuiltinMathDeg(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
static BuiltinImplResult translateBuiltinMathDeg(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
const double rpd = (3.14159265358979323846 / 180.0);
@ -167,15 +167,15 @@ static BuiltinImplResult translateBuiltinMathDeg(IrBuilder& build, int nparams,
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinMathRad(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
static BuiltinImplResult translateBuiltinMathRad(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
const double rpd = (3.14159265358979323846 / 180.0);
@ -186,11 +186,11 @@ static BuiltinImplResult translateBuiltinMathRad(IrBuilder& build, int nparams,
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinMathLog(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
@ -213,7 +213,7 @@ static BuiltinImplResult translateBuiltinMathLog(
denom = log(*y);
}
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
@ -227,19 +227,19 @@ static BuiltinImplResult translateBuiltinMathLog(
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinMathMin(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
static BuiltinImplResult translateBuiltinMathMin(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 2 || nparams > kMinMaxUnrolledParams || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
builtinCheckDouble(build, args, pcpos);
for (int i = 3; i <= nparams; ++i)
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + (i - 2)), fallback);
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + (i - 2)), pcpos);
IrOp varg1 = builtinLoadDouble(build, build.vmReg(arg));
IrOp varg2 = builtinLoadDouble(build, args);
@ -257,19 +257,19 @@ static BuiltinImplResult translateBuiltinMathMin(IrBuilder& build, int nparams,
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinMathMax(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
static BuiltinImplResult translateBuiltinMathMax(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 2 || nparams > kMinMaxUnrolledParams || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
builtinCheckDouble(build, args, pcpos);
for (int i = 3; i <= nparams; ++i)
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + (i - 2)), fallback);
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + (i - 2)), pcpos);
IrOp varg1 = builtinLoadDouble(build, build.vmReg(arg));
IrOp varg2 = builtinLoadDouble(build, args);
@ -287,10 +287,10 @@ static BuiltinImplResult translateBuiltinMathMax(IrBuilder& build, int nparams,
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinMathClamp(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
static BuiltinImplResult translateBuiltinMathClamp(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback, int pcpos)
{
if (nparams < 3 || nresults > 1)
return {BuiltinImplType::None, -1};
@ -299,9 +299,9 @@ static BuiltinImplResult translateBuiltinMathClamp(IrBuilder& build, int nparams
LUAU_ASSERT(args.kind == IrOpKind::VmReg);
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + 1), fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
builtinCheckDouble(build, args, pcpos);
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + 1), pcpos);
IrOp min = builtinLoadDouble(build, args);
IrOp max = builtinLoadDouble(build, build.vmReg(vmRegOp(args) + 1));
@ -321,12 +321,12 @@ static BuiltinImplResult translateBuiltinMathClamp(IrBuilder& build, int nparams
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinMathUnary(IrBuilder& build, IrCmd cmd, int nparams, int ra, int arg, int nresults, IrOp fallback)
static BuiltinImplResult translateBuiltinMathUnary(IrBuilder& build, IrCmd cmd, int nparams, int ra, int arg, int nresults, int pcpos)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
IrOp varg = builtinLoadDouble(build, build.vmReg(arg));
IrOp result = build.inst(cmd, varg);
@ -336,10 +336,10 @@ static BuiltinImplResult translateBuiltinMathUnary(IrBuilder& build, IrCmd cmd,
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinType(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
static BuiltinImplResult translateBuiltinType(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
@ -350,10 +350,10 @@ static BuiltinImplResult translateBuiltinType(IrBuilder& build, int nparams, int
build.inst(IrCmd::STORE_POINTER, build.vmReg(ra), name);
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TSTRING));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinTypeof(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
static BuiltinImplResult translateBuiltinTypeof(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
@ -363,20 +363,20 @@ static BuiltinImplResult translateBuiltinTypeof(IrBuilder& build, int nparams, i
build.inst(IrCmd::STORE_POINTER, build.vmReg(ra), name);
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TSTRING));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinBit32BinaryOp(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 2 || nparams > kBit32BinaryOpUnrolledParams || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
builtinCheckDouble(build, args, pcpos);
for (int i = 3; i <= nparams; ++i)
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + (i - 2)), fallback);
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + (i - 2)), pcpos);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
@ -433,16 +433,16 @@ static BuiltinImplResult translateBuiltinBit32BinaryOp(
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
}
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinBit32Bnot(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vaui = build.inst(IrCmd::NUM_TO_UINT, va);
@ -454,19 +454,19 @@ static BuiltinImplResult translateBuiltinBit32Bnot(
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinBit32Shift(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback, int pcpos)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
IrOp block = build.block(IrBlockKind::Internal);
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
builtinCheckDouble(build, args, pcpos);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
@ -499,13 +499,13 @@ static BuiltinImplResult translateBuiltinBit32Shift(
}
static BuiltinImplResult translateBuiltinBit32Rotate(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
builtinCheckDouble(build, args, pcpos);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
@ -522,17 +522,17 @@ static BuiltinImplResult translateBuiltinBit32Rotate(
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinBit32Extract(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback, int pcpos)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
builtinCheckDouble(build, args, pcpos);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
@ -553,7 +553,7 @@ static BuiltinImplResult translateBuiltinBit32Extract(
}
else
{
builtinCheckDouble(build, build.vmReg(args.index + 1), fallback);
builtinCheckDouble(build, build.vmReg(args.index + 1), pcpos);
IrOp vc = builtinLoadDouble(build, build.vmReg(args.index + 1));
IrOp w = build.inst(IrCmd::NUM_TO_INT, vc);
@ -586,12 +586,12 @@ static BuiltinImplResult translateBuiltinBit32Extract(
}
static BuiltinImplResult translateBuiltinBit32ExtractK(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 2 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp n = build.inst(IrCmd::NUM_TO_UINT, va);
@ -613,16 +613,16 @@ static BuiltinImplResult translateBuiltinBit32ExtractK(
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinBit32Countz(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vaui = build.inst(IrCmd::NUM_TO_UINT, va);
@ -637,18 +637,18 @@ static BuiltinImplResult translateBuiltinBit32Countz(
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinBit32Replace(
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
IrBuilder& build, LuauBuiltinFunction bfid, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback, int pcpos)
{
if (nparams < 3 || nresults > 1)
return {BuiltinImplType::None, -1};
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(args.index + 1), fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
builtinCheckDouble(build, args, pcpos);
builtinCheckDouble(build, build.vmReg(args.index + 1), pcpos);
IrOp va = builtinLoadDouble(build, build.vmReg(arg));
IrOp vb = builtinLoadDouble(build, args);
@ -678,7 +678,7 @@ static BuiltinImplResult translateBuiltinBit32Replace(
}
else
{
builtinCheckDouble(build, build.vmReg(args.index + 2), fallback);
builtinCheckDouble(build, build.vmReg(args.index + 2), pcpos);
IrOp vd = builtinLoadDouble(build, build.vmReg(args.index + 2));
IrOp w = build.inst(IrCmd::NUM_TO_INT, vd);
@ -716,16 +716,16 @@ static BuiltinImplResult translateBuiltinBit32Replace(
return {BuiltinImplType::UsesFallback, 1};
}
static BuiltinImplResult translateBuiltinVector(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
static BuiltinImplResult translateBuiltinVector(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 3 || nresults > 1)
return {BuiltinImplType::None, -1};
LUAU_ASSERT(LUA_VECTOR_SIZE == 3);
builtinCheckDouble(build, build.vmReg(arg), fallback);
builtinCheckDouble(build, args, fallback);
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + 1), fallback);
builtinCheckDouble(build, build.vmReg(arg), pcpos);
builtinCheckDouble(build, args, pcpos);
builtinCheckDouble(build, build.vmReg(vmRegOp(args) + 1), pcpos);
IrOp x = builtinLoadDouble(build, build.vmReg(arg));
IrOp y = builtinLoadDouble(build, args);
@ -734,15 +734,15 @@ static BuiltinImplResult translateBuiltinVector(IrBuilder& build, int nparams, i
build.inst(IrCmd::STORE_VECTOR, build.vmReg(ra), x, y, z);
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TVECTOR));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
static BuiltinImplResult translateBuiltinStringLen(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, IrOp fallback)
static BuiltinImplResult translateBuiltinStringLen(IrBuilder& build, int nparams, int ra, int arg, IrOp args, int nresults, int pcpos)
{
if (nparams < 1 || nresults > 1)
return {BuiltinImplType::None, -1};
build.loadAndCheckTag(build.vmReg(arg), LUA_TSTRING, fallback);
build.loadAndCheckTag(build.vmReg(arg), LUA_TSTRING, build.vmExit(pcpos));
IrOp ts = build.inst(IrCmd::LOAD_POINTER, build.vmReg(arg));
@ -751,10 +751,10 @@ static BuiltinImplResult translateBuiltinStringLen(IrBuilder& build, int nparams
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(ra), build.inst(IrCmd::INT_TO_NUM, len));
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
return {BuiltinImplType::UsesFallback, 1};
return {BuiltinImplType::Full, 1};
}
BuiltinImplResult translateBuiltin(IrBuilder& build, int bfid, int ra, int arg, IrOp args, int nparams, int nresults, IrOp fallback)
BuiltinImplResult translateBuiltin(IrBuilder& build, int bfid, int ra, int arg, IrOp args, int nparams, int nresults, IrOp fallback, int pcpos)
{
// Builtins are not allowed to handle variadic arguments
if (nparams == LUA_MULTRET)
@ -765,27 +765,27 @@ BuiltinImplResult translateBuiltin(IrBuilder& build, int bfid, int ra, int arg,
case LBF_ASSERT:
return translateBuiltinAssert(build, nparams, ra, arg, args, nresults, fallback);
case LBF_MATH_DEG:
return translateBuiltinMathDeg(build, nparams, ra, arg, args, nresults, fallback);
return translateBuiltinMathDeg(build, nparams, ra, arg, args, nresults, pcpos);
case LBF_MATH_RAD:
return translateBuiltinMathRad(build, nparams, ra, arg, args, nresults, fallback);
return translateBuiltinMathRad(build, nparams, ra, arg, args, nresults, pcpos);
case LBF_MATH_LOG:
return translateBuiltinMathLog(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltinMathLog(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, pcpos);
case LBF_MATH_MIN:
return translateBuiltinMathMin(build, nparams, ra, arg, args, nresults, fallback);
return translateBuiltinMathMin(build, nparams, ra, arg, args, nresults, pcpos);
case LBF_MATH_MAX:
return translateBuiltinMathMax(build, nparams, ra, arg, args, nresults, fallback);
return translateBuiltinMathMax(build, nparams, ra, arg, args, nresults, pcpos);
case LBF_MATH_CLAMP:
return translateBuiltinMathClamp(build, nparams, ra, arg, args, nresults, fallback);
return translateBuiltinMathClamp(build, nparams, ra, arg, args, nresults, fallback, pcpos);
case LBF_MATH_FLOOR:
return translateBuiltinMathUnary(build, IrCmd::FLOOR_NUM, nparams, ra, arg, nresults, fallback);
return translateBuiltinMathUnary(build, IrCmd::FLOOR_NUM, nparams, ra, arg, nresults, pcpos);
case LBF_MATH_CEIL:
return translateBuiltinMathUnary(build, IrCmd::CEIL_NUM, nparams, ra, arg, nresults, fallback);
return translateBuiltinMathUnary(build, IrCmd::CEIL_NUM, nparams, ra, arg, nresults, pcpos);
case LBF_MATH_SQRT:
return translateBuiltinMathUnary(build, IrCmd::SQRT_NUM, nparams, ra, arg, nresults, fallback);
return translateBuiltinMathUnary(build, IrCmd::SQRT_NUM, nparams, ra, arg, nresults, pcpos);
case LBF_MATH_ABS:
return translateBuiltinMathUnary(build, IrCmd::ABS_NUM, nparams, ra, arg, nresults, fallback);
return translateBuiltinMathUnary(build, IrCmd::ABS_NUM, nparams, ra, arg, nresults, pcpos);
case LBF_MATH_ROUND:
return translateBuiltinMathUnary(build, IrCmd::ROUND_NUM, nparams, ra, arg, nresults, fallback);
return translateBuiltinMathUnary(build, IrCmd::ROUND_NUM, nparams, ra, arg, nresults, pcpos);
case LBF_MATH_EXP:
case LBF_MATH_ASIN:
case LBF_MATH_SIN:
@ -797,49 +797,49 @@ BuiltinImplResult translateBuiltin(IrBuilder& build, int bfid, int ra, int arg,
case LBF_MATH_TAN:
case LBF_MATH_TANH:
case LBF_MATH_LOG10:
return translateBuiltinNumberToNumberLibm(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltinNumberToNumberLibm(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, pcpos);
case LBF_MATH_SIGN:
return translateBuiltinNumberToNumber(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltinNumberToNumber(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, pcpos);
case LBF_MATH_POW:
case LBF_MATH_FMOD:
case LBF_MATH_ATAN2:
return translateBuiltin2NumberToNumberLibm(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltin2NumberToNumberLibm(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, pcpos);
case LBF_MATH_LDEXP:
return translateBuiltinMathLdexp(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltinMathLdexp(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, pcpos);
case LBF_MATH_FREXP:
case LBF_MATH_MODF:
return translateBuiltinNumberTo2Number(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltinNumberTo2Number(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, pcpos);
case LBF_BIT32_BAND:
case LBF_BIT32_BOR:
case LBF_BIT32_BXOR:
case LBF_BIT32_BTEST:
return translateBuiltinBit32BinaryOp(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltinBit32BinaryOp(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, pcpos);
case LBF_BIT32_BNOT:
return translateBuiltinBit32Bnot(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltinBit32Bnot(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, pcpos);
case LBF_BIT32_LSHIFT:
case LBF_BIT32_RSHIFT:
case LBF_BIT32_ARSHIFT:
return translateBuiltinBit32Shift(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltinBit32Shift(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback, pcpos);
case LBF_BIT32_LROTATE:
case LBF_BIT32_RROTATE:
return translateBuiltinBit32Rotate(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltinBit32Rotate(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, pcpos);
case LBF_BIT32_EXTRACT:
return translateBuiltinBit32Extract(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltinBit32Extract(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback, pcpos);
case LBF_BIT32_EXTRACTK:
return translateBuiltinBit32ExtractK(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltinBit32ExtractK(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, pcpos);
case LBF_BIT32_COUNTLZ:
case LBF_BIT32_COUNTRZ:
return translateBuiltinBit32Countz(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltinBit32Countz(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, pcpos);
case LBF_BIT32_REPLACE:
return translateBuiltinBit32Replace(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback);
return translateBuiltinBit32Replace(build, LuauBuiltinFunction(bfid), nparams, ra, arg, args, nresults, fallback, pcpos);
case LBF_TYPE:
return translateBuiltinType(build, nparams, ra, arg, args, nresults, fallback);
return translateBuiltinType(build, nparams, ra, arg, args, nresults);
case LBF_TYPEOF:
return translateBuiltinTypeof(build, nparams, ra, arg, args, nresults, fallback);
return translateBuiltinTypeof(build, nparams, ra, arg, args, nresults);
case LBF_VECTOR:
return translateBuiltinVector(build, nparams, ra, arg, args, nresults, fallback);
return translateBuiltinVector(build, nparams, ra, arg, args, nresults, pcpos);
case LBF_STRING_LEN:
return translateBuiltinStringLen(build, nparams, ra, arg, args, nresults, fallback);
return translateBuiltinStringLen(build, nparams, ra, arg, args, nresults, pcpos);
default:
return {BuiltinImplType::None, -1};
}

View File

@ -13,6 +13,7 @@ enum class BuiltinImplType
{
None,
UsesFallback, // Uses fallback for unsupported cases
Full, // Is either implemented in full, or exits to VM
};
struct BuiltinImplResult
@ -21,7 +22,7 @@ struct BuiltinImplResult
int actualResultCount;
};
BuiltinImplResult translateBuiltin(IrBuilder& build, int bfid, int ra, int arg, IrOp args, int nparams, int nresults, IrOp fallback);
BuiltinImplResult translateBuiltin(IrBuilder& build, int bfid, int ra, int arg, IrOp args, int nparams, int nresults, IrOp fallback, int pcpos);
} // namespace CodeGen
} // namespace Luau

View File

@ -514,7 +514,7 @@ void translateInstCloseUpvals(IrBuilder& build, const Instruction* pc)
build.inst(IrCmd::CLOSE_UPVALS, build.vmReg(ra));
}
void translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool customParams, int customParamCount, IrOp customArgs, IrOp next)
IrOp translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool customParams, int customParamCount, IrOp customArgs)
{
LuauOpcode opcode = LuauOpcode(LUAU_INSN_OP(*pc));
int bfid = LUAU_INSN_A(*pc);
@ -542,16 +542,25 @@ void translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool
IrOp fallback = build.block(IrBlockKind::Fallback);
// In unsafe environment, instead of retrying fastcall at 'pcpos' we side-exit directly to fallback sequence
build.inst(IrCmd::CHECK_SAFE_ENV, build.constUint(pcpos + getOpLength(opcode)));
build.inst(IrCmd::CHECK_SAFE_ENV, build.vmExit(pcpos + getOpLength(opcode)));
BuiltinImplResult br = translateBuiltin(build, LuauBuiltinFunction(bfid), ra, arg, builtinArgs, nparams, nresults, fallback);
BuiltinImplResult br =
translateBuiltin(build, LuauBuiltinFunction(bfid), ra, arg, builtinArgs, nparams, nresults, fallback, pcpos + getOpLength(opcode));
if (br.type == BuiltinImplType::UsesFallback)
if (br.type != BuiltinImplType::None)
{
LUAU_ASSERT(nparams != LUA_MULTRET && "builtins are not allowed to handle variadic arguments");
if (nresults == LUA_MULTRET)
build.inst(IrCmd::ADJUST_STACK_TO_REG, build.vmReg(ra), build.constInt(br.actualResultCount));
if (br.type != BuiltinImplType::UsesFallback)
{
// We ended up not using the fallback block, kill it
build.function.blockOp(fallback).kind = IrBlockKind::Dead;
return build.undef();
}
}
else
{
@ -568,10 +577,7 @@ void translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool
build.inst(IrCmd::ADJUST_STACK_TO_TOP);
}
build.inst(IrCmd::JUMP, next);
// this will be filled with IR corresponding to instructions after FASTCALL until skip+1
build.beginBlock(fallback);
return fallback;
}
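Since translateFastCallN now returns the fallback block to its caller instead of opening it itself, the call site decides whether any fallback IR has to be emitted at all. A hedged sketch of how that return value might be consumed (the real call site lives in the IR builder and may differ in detail):

// Sketch only: an undef result means the builtin was translated in full (or exits to the VM),
// so no fallback block needs to be populated.
IrOp fallback = translateFastCallN(build, pc, pcpos, /* customParams= */ false, 0, {});

if (fallback.kind != IrOpKind::Undef)
    build.beginBlock(fallback); // IR for the instructions skipped by FASTCALL goes here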
void translateInstForNPrep(IrBuilder& build, const Instruction* pc, int pcpos)
@ -670,7 +676,7 @@ void translateInstForGPrepNext(IrBuilder& build, const Instruction* pc, int pcpo
IrOp fallback = build.block(IrBlockKind::Fallback);
// fast-path: pairs/next
build.inst(IrCmd::CHECK_SAFE_ENV, build.constUint(pcpos));
build.inst(IrCmd::CHECK_SAFE_ENV, build.vmExit(pcpos));
IrOp tagB = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra + 1));
build.inst(IrCmd::CHECK_TAG, tagB, build.constTag(LUA_TTABLE), fallback);
IrOp tagC = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra + 2));
@ -697,7 +703,7 @@ void translateInstForGPrepInext(IrBuilder& build, const Instruction* pc, int pcp
IrOp finish = build.block(IrBlockKind::Internal);
// fast-path: ipairs/inext
build.inst(IrCmd::CHECK_SAFE_ENV, build.constUint(pcpos));
build.inst(IrCmd::CHECK_SAFE_ENV, build.vmExit(pcpos));
IrOp tagB = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra + 1));
build.inst(IrCmd::CHECK_TAG, tagB, build.constTag(LUA_TTABLE), fallback);
IrOp tagC = build.inst(IrCmd::LOAD_TAG, build.vmReg(ra + 2));
@ -923,7 +929,7 @@ void translateInstGetImport(IrBuilder& build, const Instruction* pc, int pcpos)
IrOp fastPath = build.block(IrBlockKind::Internal);
IrOp fallback = build.block(IrBlockKind::Fallback);
build.inst(IrCmd::CHECK_SAFE_ENV, build.constUint(pcpos));
build.inst(IrCmd::CHECK_SAFE_ENV, build.vmExit(pcpos));
// note: if import failed, k[] is nil; we could check this during codegen, but we instead use runtime fallback
// this allows us to handle ahead-of-time codegen smoothly when an import fails to resolve at runtime

View File

@ -45,7 +45,7 @@ void translateInstDupTable(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstGetUpval(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstSetUpval(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstCloseUpvals(IrBuilder& build, const Instruction* pc);
void translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool customParams, int customParamCount, IrOp customArgs, IrOp next);
IrOp translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool customParams, int customParamCount, IrOp customArgs);
void translateInstForNPrep(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstForNLoop(IrBuilder& build, const Instruction* pc, int pcpos);
void translateInstForGPrepNext(IrBuilder& build, const Instruction* pc, int pcpos);

View File

@ -977,7 +977,7 @@ static void constPropInBlockChain(IrBuilder& build, std::vector<uint8_t>& visite
// Unconditional jump into a block with a single user (current block) allows us to continue optimization
// with the information we have gathered so far (unless we have already visited that block earlier)
if (termInst.cmd == IrCmd::JUMP)
if (termInst.cmd == IrCmd::JUMP && termInst.a.kind != IrOpKind::VmExit)
{
IrBlock& target = function.blockOp(termInst.a);
uint32_t targetIdx = function.getBlockIndex(target);
@ -1011,7 +1011,7 @@ static std::vector<uint32_t> collectDirectBlockJumpPath(IrFunction& function, st
IrBlock* nextBlock = nullptr;
// A chain is made from internal blocks that were not a part of bytecode CFG
if (termInst.cmd == IrCmd::JUMP)
if (termInst.cmd == IrCmd::JUMP && termInst.a.kind != IrOpKind::VmExit)
{
IrBlock& target = function.blockOp(termInst.a);
uint32_t targetIdx = function.getBlockIndex(target);
@ -1052,6 +1052,10 @@ static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited
if (termInst.cmd != IrCmd::JUMP)
return;
// And it can't be jump to a VM exit
if (termInst.a.kind == IrOpKind::VmExit)
return;
// And it has to jump to a block with more than one user
// If there's only one use, it should already be optimized by constPropInBlockChain
if (function.blockOp(termInst.a).useCount == 1)
@ -1084,7 +1088,8 @@ static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited
build.beginBlock(newBlock);
// By default, blocks are ordered according to start instruction; we alter sort order to make sure linearized block is placed right after the starting block
// By default, blocks are ordered according to start instruction; we alter sort order to make sure linearized block is placed right after the
// starting block
function.blocks[newBlock.index].sortkey = startingInsn + 1;
replace(function, termInst.a, newBlock);

View File

@ -428,7 +428,7 @@ enum LuauBytecodeTag
};
// Type table tags
enum LuauBytecodeEncodedType
enum LuauBytecodeType
{
LBC_TYPE_NIL = 0,
LBC_TYPE_BOOLEAN,

View File

@ -47,7 +47,7 @@ public:
BytecodeBuilder(BytecodeEncoder* encoder = 0);
uint32_t beginFunction(uint8_t numparams, bool isvararg = false);
void endFunction(uint8_t maxstacksize, uint8_t numupvalues);
void endFunction(uint8_t maxstacksize, uint8_t numupvalues, uint8_t flags = 0);
void setMainFunction(uint32_t fid);
@ -274,7 +274,7 @@ private:
void dumpConstant(std::string& result, int k) const;
void dumpInstruction(const uint32_t* opcode, std::string& output, int targetLabel) const;
void writeFunction(std::string& ss, uint32_t id) const;
void writeFunction(std::string& ss, uint32_t id, uint8_t flags) const;
void writeLineInfo(std::string& ss) const;
void writeStringTable(std::string& ss) const;

View File

@ -35,6 +35,9 @@ struct CompileOptions
const char* vectorLib = nullptr;
const char* vectorCtor = nullptr;
// vector type name for type tables; disabled by default
const char* vectorType = nullptr;
// null-terminated array of globals that are mutable; disables the import optimization for fields accessed through these
const char** mutableGlobals = nullptr;
};

View File

@ -31,6 +31,9 @@ struct lua_CompileOptions
const char* vectorLib;
const char* vectorCtor;
// vector type name for type tables; disabled by default
const char* vectorType;
// null-terminated array of globals that are mutable; disables the import optimization for fields accessed through these
const char** mutableGlobals;
};
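A minimal host-side sketch of wiring up the new field, assuming the host exposes its vector type to scripts under the name "Vector3" (the name and the surrounding load/free handling are illustrative); luau_compile comes from luacode.h:

// Sketch only: enable vector type info in bytecode type tables for a host vector type.
lua_CompileOptions opts = {};
opts.optimizationLevel = 1;
opts.debugLevel = 1;
opts.vectorLib = "Vector3";
opts.vectorCtor = "new";
opts.vectorType = "Vector3"; // new field: annotated Vector3 parameters encode as the vector type

size_t bytecodeSize = 0;
char* bytecode = luau_compile(source, strlen(source), &opts, &bytecodeSize);
// ... luau_load(L, chunkname, bytecode, bytecodeSize, 0); free(bytecode);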

View File

@ -249,7 +249,7 @@ uint32_t BytecodeBuilder::beginFunction(uint8_t numparams, bool isvararg)
return id;
}
void BytecodeBuilder::endFunction(uint8_t maxstacksize, uint8_t numupvalues)
void BytecodeBuilder::endFunction(uint8_t maxstacksize, uint8_t numupvalues, uint8_t flags)
{
LUAU_ASSERT(currentFunction != ~0u);
@ -265,7 +265,7 @@ void BytecodeBuilder::endFunction(uint8_t maxstacksize, uint8_t numupvalues)
// very approximate: 4 bytes per instruction for code, 1 byte for debug line, and 1-2 bytes for aux data like constants plus overhead
func.data.reserve(32 + insns.size() * 7);
writeFunction(func.data, currentFunction);
writeFunction(func.data, currentFunction, flags);
currentFunction = ~0u;
@ -631,7 +631,7 @@ void BytecodeBuilder::finalize()
writeVarInt(bytecode, mainFunction);
}
void BytecodeBuilder::writeFunction(std::string& ss, uint32_t id) const
void BytecodeBuilder::writeFunction(std::string& ss, uint32_t id, uint8_t flags) const
{
LUAU_ASSERT(id < functions.size());
const Function& func = functions[id];
@ -644,7 +644,7 @@ void BytecodeBuilder::writeFunction(std::string& ss, uint32_t id) const
if (FFlag::BytecodeVersion4)
{
writeByte(ss, 0); // Reserved for cgflags
writeByte(ss, flags);
writeVarInt(ss, uint32_t(func.typeinfo.size()));
ss.append(func.typeinfo);
@ -1213,10 +1213,15 @@ void BytecodeBuilder::validateInstructions() const
break;
case LOP_GETIMPORT:
{
VREG(LUAU_INSN_A(insn));
VCONST(LUAU_INSN_D(insn), Import);
// TODO: check insn[i + 1] for conformance with 10-bit import encoding
break;
uint32_t id = insns[i + 1];
LUAU_ASSERT((id >> 30) != 0); // import chain with length 1-3
for (unsigned int j = 0; j < (id >> 30); ++j)
VCONST((id >> (20 - 10 * j)) & 1023, String);
}
break;
case LOP_GETTABLE:
case LOP_SETTABLE:
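The GETIMPORT validation above decodes the auxiliary word the same way the VM does at runtime. A worked example of the 32-bit import id layout (the constant indices are made up for illustration):

// Top 2 bits: chain length (1-3); below that, up to three 10-bit string-constant indices.
uint32_t id = (2u << 30) | (5u << 20) | (9u << 10); // hypothetical import "math.pi": "math" at k=5, "pi" at k=9
uint32_t count = id >> 30;                          // 2
for (uint32_t j = 0; j < count; ++j)
{
    uint32_t k = (id >> (20 - 10 * j)) & 1023;      // yields 5, then 9
    // each k must index a string constant, which is exactly what VCONST(..., String) asserts
}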

View File

@ -3874,7 +3874,7 @@ void compileOrThrow(BytecodeBuilder& bytecode, const ParseResult& parseResult, c
if (FFlag::LuauCompileFunctionType)
{
buildTypeMap(compiler.typeMap, root);
buildTypeMap(compiler.typeMap, root, options.vectorType);
}
// gathers all functions with the invariant that all function references are to functions earlier in the list

View File

@ -15,7 +15,7 @@ static bool isGeneric(AstName name, const AstArray<AstGenericType>& generics)
return false;
}
static LuauBytecodeEncodedType getPrimitiveType(AstName name)
static LuauBytecodeType getPrimitiveType(AstName name)
{
if (name == "nil")
return LBC_TYPE_NIL;
@ -33,8 +33,8 @@ static LuauBytecodeEncodedType getPrimitiveType(AstName name)
return LBC_TYPE_INVALID;
}
static LuauBytecodeEncodedType getType(
AstType* ty, const AstArray<AstGenericType>& generics, const DenseHashMap<AstName, AstStatTypeAlias*>& typeAliases, bool resolveAliases)
static LuauBytecodeType getType(AstType* ty, const AstArray<AstGenericType>& generics, const DenseHashMap<AstName, AstStatTypeAlias*>& typeAliases,
bool resolveAliases, const char* vectorType)
{
if (AstTypeReference* ref = ty->as<AstTypeReference>())
{
@ -45,7 +45,7 @@ static LuauBytecodeEncodedType getType(
{
// note: we only resolve aliases to the depth of 1 to avoid dealing with recursive aliases
if (resolveAliases)
return getType((*alias)->type, (*alias)->generics, typeAliases, /* resolveAliases= */ false);
return getType((*alias)->type, (*alias)->generics, typeAliases, /* resolveAliases= */ false, vectorType);
else
return LBC_TYPE_ANY;
}
@ -53,7 +53,10 @@ static LuauBytecodeEncodedType getType(
if (isGeneric(ref->name, generics))
return LBC_TYPE_ANY;
if (LuauBytecodeEncodedType prim = getPrimitiveType(ref->name); prim != LBC_TYPE_INVALID)
if (vectorType && ref->name == vectorType)
return LBC_TYPE_VECTOR;
if (LuauBytecodeType prim = getPrimitiveType(ref->name); prim != LBC_TYPE_INVALID)
return prim;
// not primitive or alias or generic => host-provided, we assume userdata for now
@ -70,11 +73,11 @@ static LuauBytecodeEncodedType getType(
else if (AstTypeUnion* un = ty->as<AstTypeUnion>())
{
bool optional = false;
LuauBytecodeEncodedType type = LBC_TYPE_INVALID;
LuauBytecodeType type = LBC_TYPE_INVALID;
for (AstType* ty : un->types)
{
LuauBytecodeEncodedType et = getType(ty, generics, typeAliases, resolveAliases);
LuauBytecodeType et = getType(ty, generics, typeAliases, resolveAliases, vectorType);
if (et == LBC_TYPE_NIL)
{
@ -95,7 +98,7 @@ static LuauBytecodeEncodedType getType(
if (type == LBC_TYPE_INVALID)
return LBC_TYPE_ANY;
return LuauBytecodeEncodedType(type | (optional && (type != LBC_TYPE_ANY) ? LBC_TYPE_OPTIONAL_BIT : 0));
return LuauBytecodeType(type | (optional && (type != LBC_TYPE_ANY) ? LBC_TYPE_OPTIONAL_BIT : 0));
}
else if (AstTypeIntersection* inter = ty->as<AstTypeIntersection>())
{
@ -105,7 +108,7 @@ static LuauBytecodeEncodedType getType(
return LBC_TYPE_ANY;
}
static std::string getFunctionType(const AstExprFunction* func, const DenseHashMap<AstName, AstStatTypeAlias*>& typeAliases)
static std::string getFunctionType(const AstExprFunction* func, const DenseHashMap<AstName, AstStatTypeAlias*>& typeAliases, const char* vectorType)
{
bool self = func->self != 0;
@ -121,8 +124,8 @@ static std::string getFunctionType(const AstExprFunction* func, const DenseHashM
bool haveNonAnyParam = false;
for (AstLocal* arg : func->args)
{
LuauBytecodeEncodedType ty =
arg->annotation ? getType(arg->annotation, func->generics, typeAliases, /* resolveAliases= */ true) : LBC_TYPE_ANY;
LuauBytecodeType ty =
arg->annotation ? getType(arg->annotation, func->generics, typeAliases, /* resolveAliases= */ true, vectorType) : LBC_TYPE_ANY;
if (ty != LBC_TYPE_ANY)
haveNonAnyParam = true;
@ -140,12 +143,14 @@ static std::string getFunctionType(const AstExprFunction* func, const DenseHashM
struct TypeMapVisitor : AstVisitor
{
DenseHashMap<AstExprFunction*, std::string>& typeMap;
const char* vectorType;
DenseHashMap<AstName, AstStatTypeAlias*> typeAliases;
std::vector<std::pair<AstName, AstStatTypeAlias*>> typeAliasStack;
TypeMapVisitor(DenseHashMap<AstExprFunction*, std::string>& typeMap)
TypeMapVisitor(DenseHashMap<AstExprFunction*, std::string>& typeMap, const char* vectorType)
: typeMap(typeMap)
, vectorType(vectorType)
, typeAliases(AstName())
{
}
@ -206,7 +211,7 @@ struct TypeMapVisitor : AstVisitor
bool visit(AstExprFunction* node) override
{
std::string type = getFunctionType(node, typeAliases);
std::string type = getFunctionType(node, typeAliases, vectorType);
if (!type.empty())
typeMap[node] = std::move(type);
@ -215,9 +220,9 @@ struct TypeMapVisitor : AstVisitor
}
};
void buildTypeMap(DenseHashMap<AstExprFunction*, std::string>& typeMap, AstNode* root)
void buildTypeMap(DenseHashMap<AstExprFunction*, std::string>& typeMap, AstNode* root, const char* vectorType)
{
TypeMapVisitor visitor(typeMap);
TypeMapVisitor visitor(typeMap, vectorType);
root->visit(&visitor);
}
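For reference, a hedged worked example of how annotated parameter types fold into single-byte type-table entries under the rules above, assuming the host passes vectorType = "Vector3" (everything except the LBC_TYPE_* constants is illustrative):

// number            -> LBC_TYPE_NUMBER
// Vector3           -> LBC_TYPE_VECTOR                           (matches the configured vectorType)
// number?           -> LBC_TYPE_NUMBER | LBC_TYPE_OPTIONAL_BIT
// number | string   -> LBC_TYPE_ANY                              (mixed non-nil union members collapse to any)
// SomeHostClass     -> LBC_TYPE_USERDATA                         (unresolved references are assumed host-provided)
// no annotation     -> LBC_TYPE_ANY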

View File

@ -8,6 +8,6 @@
namespace Luau
{
void buildTypeMap(DenseHashMap<AstExprFunction*, std::string>& typeMap, AstNode* root);
void buildTypeMap(DenseHashMap<AstExprFunction*, std::string>& typeMap, AstNode* root, const char* vectorType);
} // namespace Luau

View File

@ -143,6 +143,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/include/Luau/Autocomplete.h
Analysis/include/Luau/Breadcrumb.h
Analysis/include/Luau/BuiltinDefinitions.h
Analysis/include/Luau/Cancellation.h
Analysis/include/Luau/Clone.h
Analysis/include/Luau/Config.h
Analysis/include/Luau/Constraint.h

View File

@ -35,6 +35,10 @@ int lua_getargument(lua_State* L, int level, int n)
return 0;
CallInfo* ci = L->ci - level;
// changing tables in native functions externally may invalidate safety contracts wrt table state (metatable/size/readonly)
if (ci->flags & LUA_CALLINFO_NATIVE)
return 0;
Proto* fp = getluaproto(ci);
int res = 0;
@ -60,9 +64,13 @@ int lua_getargument(lua_State* L, int level, int n)
const char* lua_getlocal(lua_State* L, int level, int n)
{
if (unsigned(level) >= unsigned(L->ci - L->base_ci))
return 0;
return NULL;
CallInfo* ci = L->ci - level;
// changing tables in native functions externally may invalidate safety contracts wrt table state (metatable/size/readonly)
if (ci->flags & LUA_CALLINFO_NATIVE)
return NULL;
Proto* fp = getluaproto(ci);
const LocVar* var = fp ? luaF_getlocal(fp, n, currentpc(L, ci)) : NULL;
if (var)
@ -77,9 +85,13 @@ const char* lua_getlocal(lua_State* L, int level, int n)
const char* lua_setlocal(lua_State* L, int level, int n)
{
if (unsigned(level) >= unsigned(L->ci - L->base_ci))
return 0;
return NULL;
CallInfo* ci = L->ci - level;
// changing registers in native functions externally may invalidate safety contracts wrt register type tags
if (ci->flags & LUA_CALLINFO_NATIVE)
return NULL;
Proto* fp = getluaproto(ci);
const LocVar* var = fp ? luaF_getlocal(fp, n, currentpc(L, ci)) : NULL;
if (var)
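For hosts with debugger integrations, the practical effect of the checks above is that local/argument introspection now declines to touch natively executing frames. A small hedged sketch of caller-side handling:

// Sketch only: NULL now also covers "the frame is running native code", not just "no such local";
// in either case nothing was pushed onto the stack.
const char* name = lua_getlocal(L, level, n);
if (name)
{
    // the value of the local is on top of the stack
    lua_pop(L, 1);
}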
@ -321,7 +333,8 @@ void luaG_pusherror(lua_State* L, const char* error)
void luaG_breakpoint(lua_State* L, Proto* p, int line, bool enable)
{
if (p->lineinfo)
// since native code doesn't support breakpoints, we would need to update all call frames with LUA_CALLINFO_NATIVE that refer to p
if (p->lineinfo && !p->execdata)
{
for (int i = 0; i < p->sizecode; ++i)
{
@ -347,11 +360,6 @@ void luaG_breakpoint(lua_State* L, Proto* p, int line, bool enable)
p->code[i] |= op;
LUAU_ASSERT(LUAU_INSN_OP(p->code[i]) == op);
#if LUA_CUSTOM_EXECUTION
if (L->global->ecb.setbreakpoint)
L->global->ecb.setbreakpoint(L, p, i);
#endif
// note: this is important!
// we only patch the *first* instruction in each proto that's attributed to a given line
// this can be changed, but it requires making patching a bit more nuanced so that we don't patch AUX words
@ -410,11 +418,11 @@ static int getmaxline(Proto* p)
return result;
}
// Find the line number with instructions. If the provided line doesn't have any instruction, it should return the next line number with
// instructions.
// Find the line number with instructions. If the provided line doesn't have any instruction, it should return the next valid line number.
static int getnextline(Proto* p, int line)
{
int closest = -1;
if (p->lineinfo)
{
for (int i = 0; i < p->sizecode; ++i)
@ -435,7 +443,6 @@ static int getnextline(Proto* p, int line)
for (int i = 0; i < p->sizep; ++i)
{
// Find the closest line number to the intended one.
int candidate = getnextline(p->p[i], line);
if (candidate == line)
@ -454,14 +461,12 @@ int lua_breakpoint(lua_State* L, int funcindex, int line, int enabled)
api_check(L, ttisfunction(func) && !clvalue(func)->isC);
Proto* p = clvalue(func)->l.p;
// Find line number to add the breakpoint to.
// set the breakpoint to the next closest line with valid instructions
int target = getnextline(p, line);
if (target != -1)
{
// Add breakpoint on the exact line
luaG_breakpoint(L, p, target, bool(enabled));
}
return target;
}
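Host-side, the return value makes the line-snapping behavior observable. A small sketch, assuming a Lua function sits at stack index -1:

// Sketch only: lua_breakpoint returns the line the breakpoint actually attached to,
// or -1 when no executable line exists at or after the requested one.
int actual = lua_breakpoint(L, -1, 10, /* enabled= */ 1);
if (actual == -1)
    fprintf(stderr, "no executable code at or after line 10\n");
else if (actual != 10)
    fprintf(stderr, "breakpoint snapped to line %d\n", actual);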

View File

@ -22,6 +22,7 @@ Proto* luaF_newproto(lua_State* L)
f->numparams = 0;
f->is_vararg = 0;
f->maxstacksize = 0;
f->flags = 0;
f->sizelineinfo = 0;
f->linegaplog2 = 0;
f->lineinfo = NULL;
@ -155,13 +156,8 @@ void luaF_freeproto(lua_State* L, Proto* f, lua_Page* page)
if (f->debuginsn)
luaM_freearray(L, f->debuginsn, f->sizecode, uint8_t, f->memcat);
#if LUA_CUSTOM_EXECUTION
if (f->execdata)
{
LUAU_ASSERT(L->global->ecb.destroy);
L->global->ecb.destroy(L, f);
}
#endif
if (f->typeinfo)
luaM_freearray(L, f->typeinfo, f->numparams + 2, uint8_t, f->memcat);

View File

@ -134,5 +134,7 @@ LUAI_FUNC void luaC_barriertable(lua_State* L, Table* t, GCObject* v);
LUAI_FUNC void luaC_barrierback(lua_State* L, GCObject* o, GCObject** gclist);
LUAI_FUNC void luaC_validate(lua_State* L);
LUAI_FUNC void luaC_dump(lua_State* L, void* file, const char* (*categoryName)(lua_State* L, uint8_t memcat));
LUAI_FUNC void luaC_enumheap(lua_State* L, void* context, void (*node)(void* context, void* ptr, uint8_t tt, uint8_t memcat, const char* name),
void (*edge)(void* context, void* from, void* to, const char* name));
LUAI_FUNC int64_t luaC_allocationrate(lua_State* L);
LUAI_FUNC const char* luaC_statename(int state);

View File

@ -602,3 +602,229 @@ void luaC_dump(lua_State* L, void* file, const char* (*categoryName)(lua_State*
fprintf(f, "}\n");
fprintf(f, "}}\n");
}
struct EnumContext
{
lua_State* L;
void* context;
void (*node)(void* context, void* ptr, uint8_t tt, uint8_t memcat, const char* name);
void (*edge)(void* context, void* from, void* to, const char* name);
};
static void* enumtopointer(GCObject* gco)
{
// To match lua_topointer, userdata pointer is represented as a pointer to internal data
return gco->gch.tt == LUA_TUSERDATA ? (void*)gco2u(gco)->data : (void*)gco;
}
static void enumnode(EnumContext* ctx, GCObject* gco, const char* objname)
{
ctx->node(ctx->context, enumtopointer(gco), gco->gch.tt, gco->gch.memcat, objname);
}
static void enumedge(EnumContext* ctx, GCObject* from, GCObject* to, const char* edgename)
{
ctx->edge(ctx->context, enumtopointer(from), enumtopointer(to), edgename);
}
static void enumedges(EnumContext* ctx, GCObject* from, TValue* data, size_t size, const char* edgename)
{
for (size_t i = 0; i < size; ++i)
{
if (iscollectable(&data[i]))
enumedge(ctx, from, gcvalue(&data[i]), edgename);
}
}
static void enumstring(EnumContext* ctx, TString* ts)
{
enumnode(ctx, obj2gco(ts), NULL);
}
static void enumtable(EnumContext* ctx, Table* h)
{
// Provide a name for a special registry table
enumnode(ctx, obj2gco(h), h == hvalue(registry(ctx->L)) ? "registry" : NULL);
if (h->node != &luaH_dummynode)
{
for (int i = 0; i < sizenode(h); ++i)
{
const LuaNode& n = h->node[i];
if (!ttisnil(&n.val) && (iscollectable(&n.key) || iscollectable(&n.val)))
{
if (iscollectable(&n.key))
enumedge(ctx, obj2gco(h), gcvalue(&n.key), "[key]");
if (iscollectable(&n.val))
{
if (ttisstring(&n.key))
{
enumedge(ctx, obj2gco(h), gcvalue(&n.val), svalue(&n.key));
}
else if (ttisnumber(&n.key))
{
char buf[32];
snprintf(buf, sizeof(buf), "%.14g", nvalue(&n.key));
enumedge(ctx, obj2gco(h), gcvalue(&n.val), buf);
}
else
{
enumedge(ctx, obj2gco(h), gcvalue(&n.val), NULL);
}
}
}
}
}
if (h->sizearray)
enumedges(ctx, obj2gco(h), h->array, h->sizearray, "array");
if (h->metatable)
enumedge(ctx, obj2gco(h), obj2gco(h->metatable), "metatable");
}
static void enumclosure(EnumContext* ctx, Closure* cl)
{
if (cl->isC)
{
enumnode(ctx, obj2gco(cl), cl->c.debugname);
}
else
{
Proto* p = cl->l.p;
char buf[LUA_IDSIZE];
if (p->source)
snprintf(buf, sizeof(buf), "%s:%d %s", p->debugname ? getstr(p->debugname) : "", p->linedefined, getstr(p->source));
else
snprintf(buf, sizeof(buf), "%s:%d", p->debugname ? getstr(p->debugname) : "", p->linedefined);
enumnode(ctx, obj2gco(cl), buf);
}
enumedge(ctx, obj2gco(cl), obj2gco(cl->env), "env");
if (cl->isC)
{
if (cl->nupvalues)
enumedges(ctx, obj2gco(cl), cl->c.upvals, cl->nupvalues, "upvalue");
}
else
{
enumedge(ctx, obj2gco(cl), obj2gco(cl->l.p), "proto");
if (cl->nupvalues)
enumedges(ctx, obj2gco(cl), cl->l.uprefs, cl->nupvalues, "upvalue");
}
}
static void enumudata(EnumContext* ctx, Udata* u)
{
enumnode(ctx, obj2gco(u), NULL);
if (u->metatable)
enumedge(ctx, obj2gco(u), obj2gco(u->metatable), "metatable");
}
static void enumthread(EnumContext* ctx, lua_State* th)
{
Closure* tcl = NULL;
for (CallInfo* ci = th->base_ci; ci <= th->ci; ++ci)
{
if (ttisfunction(ci->func))
{
tcl = clvalue(ci->func);
break;
}
}
if (tcl && !tcl->isC && tcl->l.p->source)
{
Proto* p = tcl->l.p;
enumnode(ctx, obj2gco(th), getstr(p->source));
}
else
{
enumnode(ctx, obj2gco(th), NULL);
}
enumedge(ctx, obj2gco(th), obj2gco(th->gt), "globals");
if (th->top > th->stack)
enumedges(ctx, obj2gco(th), th->stack, th->top - th->stack, "stack");
}
static void enumproto(EnumContext* ctx, Proto* p)
{
enumnode(ctx, obj2gco(p), p->source ? getstr(p->source) : NULL);
if (p->sizek)
enumedges(ctx, obj2gco(p), p->k, p->sizek, "constants");
for (int i = 0; i < p->sizep; ++i)
enumedge(ctx, obj2gco(p), obj2gco(p->p[i]), "protos");
}
static void enumupval(EnumContext* ctx, UpVal* uv)
{
enumnode(ctx, obj2gco(uv), NULL);
if (iscollectable(uv->v))
enumedge(ctx, obj2gco(uv), gcvalue(uv->v), "value");
}
static void enumobj(EnumContext* ctx, GCObject* o)
{
switch (o->gch.tt)
{
case LUA_TSTRING:
return enumstring(ctx, gco2ts(o));
case LUA_TTABLE:
return enumtable(ctx, gco2h(o));
case LUA_TFUNCTION:
return enumclosure(ctx, gco2cl(o));
case LUA_TUSERDATA:
return enumudata(ctx, gco2u(o));
case LUA_TTHREAD:
return enumthread(ctx, gco2th(o));
case LUA_TPROTO:
return enumproto(ctx, gco2p(o));
case LUA_TUPVAL:
return enumupval(ctx, gco2uv(o));
default:
LUAU_ASSERT(!"Unknown object tag");
}
}
static bool enumgco(void* context, lua_Page* page, GCObject* gco)
{
enumobj((EnumContext*)context, gco);
return false;
}
void luaC_enumheap(lua_State* L, void* context, void (*node)(void* context, void* ptr, uint8_t tt, uint8_t memcat, const char* name),
void (*edge)(void* context, void* from, void* to, const char* name))
{
global_State* g = L->global;
EnumContext ctx;
ctx.L = L;
ctx.context = context;
ctx.node = node;
ctx.edge = edge;
enumgco(&ctx, NULL, obj2gco(g->mainthread));
luaM_visitgco(L, &ctx, enumgco);
}
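A minimal sketch of a heap-graph dump built on top of luaC_enumheap; the output format and the FILE* context are illustrative, and the pointers handed to the callbacks only serve as stable node identities for the duration of the call:

static void dumpNode(void* context, void* ptr, uint8_t tt, uint8_t memcat, const char* name)
{
    fprintf((FILE*)context, "node %p tag=%d memcat=%d name=%s\n", ptr, tt, memcat, name ? name : "");
}

static void dumpEdge(void* context, void* from, void* to, const char* name)
{
    fprintf((FILE*)context, "edge %p -> %p label=%s\n", from, to, name ? name : "");
}

// usage from a debug tool, e.g.: luaC_enumheap(L, stderr, dumpNode, dumpEdge);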

View File

@ -263,9 +263,22 @@ typedef struct Proto
CommonHeader;
uint8_t nups; // number of upvalues
uint8_t numparams;
uint8_t is_vararg;
uint8_t maxstacksize;
uint8_t flags;
TValue* k; // constants used by the function
Instruction* code; // function bytecode
struct Proto** p; // functions defined inside the function
const Instruction* codeentry;
void* execdata;
uintptr_t exectarget;
uint8_t* lineinfo; // for each instruction, line number as a delta from baseline
int* abslineinfo; // baseline line info, one entry for each 1<<linegaplog2 instructions; allocated after lineinfo
struct LocVar* locvars; // information about local variables
@ -275,10 +288,6 @@ typedef struct Proto
TString* debugname;
uint8_t* debuginsn; // a copy of code[] array with just opcodes
const Instruction* codeentry;
void* execdata;
uintptr_t exectarget;
uint8_t* typeinfo;
GCObject* gclist;
@ -293,12 +302,6 @@ typedef struct Proto
int linegaplog2;
int linedefined;
int bytecodeid;
uint8_t nups; // number of upvalues
uint8_t numparams;
uint8_t is_vararg;
uint8_t maxstacksize;
} Proto;
// clang-format on

View File

@ -101,10 +101,8 @@ static void close_state(lua_State* L)
for (int i = 1; i < LUA_MEMORY_CATEGORIES; i++)
LUAU_ASSERT(g->memcatbytes[i] == 0);
#if LUA_CUSTOM_EXECUTION
if (L->global->ecb.close)
L->global->ecb.close(L);
#endif
(*g->frealloc)(g->ud, L, sizeof(LG), 0);
}

View File

@ -154,7 +154,6 @@ struct lua_ExecutionCallbacks
void (*close)(lua_State* L); // called when global VM state is closed
void (*destroy)(lua_State* L, Proto* proto); // called when function is destroyed
int (*enter)(lua_State* L, Proto* proto); // called when function is about to start/resume (when execdata is present), return 0 to exit VM
void (*setbreakpoint)(lua_State* L, Proto* proto, int line); // called when a breakpoint is set in a function
};
/*

View File

@ -230,8 +230,7 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
if (version >= 4)
{
uint8_t cgflags = read<uint8_t>(data, size, offset);
LUAU_ASSERT(cgflags == 0);
p->flags = read<uint8_t>(data, size, offset);
uint32_t typesize = readVarInt(data, size, offset);

View File

@ -26,7 +26,7 @@ const bool kFuzzLinter = true;
const bool kFuzzTypeck = true;
const bool kFuzzVM = true;
const bool kFuzzTranspile = true;
const bool kFuzzCodegen = true;
const bool kFuzzCodegenVM = true;
const bool kFuzzCodegenAssembly = true;
// Should we generate type annotations?
@ -35,7 +35,7 @@ const bool kFuzzTypes = true;
const Luau::CodeGen::AssemblyOptions::Target kFuzzCodegenTarget = Luau::CodeGen::AssemblyOptions::A64;
static_assert(!(kFuzzVM && !kFuzzCompiler), "VM requires the compiler!");
static_assert(!(kFuzzCodegen && !kFuzzVM), "Codegen requires the VM!");
static_assert(!(kFuzzCodegenVM && !kFuzzCompiler), "Codegen requires the compiler!");
static_assert(!(kFuzzCodegenAssembly && !kFuzzCompiler), "Codegen requires the compiler!");
std::vector<std::string> protoprint(const luau::ModuleSet& stat, bool types);
@ -47,6 +47,7 @@ LUAU_FASTINT(LuauTableTypeMaximumStringifierLength)
LUAU_FASTINT(LuauTypeInferIterationLimit)
LUAU_FASTINT(LuauTarjanChildLimit)
LUAU_FASTFLAG(DebugLuauFreezeArena)
LUAU_FASTFLAG(DebugLuauAbortingChecks)
std::chrono::milliseconds kInterruptTimeout(10);
std::chrono::time_point<std::chrono::system_clock> interruptDeadline;
@ -90,7 +91,7 @@ lua_State* createGlobalState()
{
lua_State* L = lua_newstate(allocate, NULL);
if (kFuzzCodegen && Luau::CodeGen::isSupported())
if (kFuzzCodegenVM && Luau::CodeGen::isSupported())
Luau::CodeGen::create(L);
lua_callbacks(L)->interrupt = interrupt;
@ -228,6 +229,7 @@ DEFINE_PROTO_FUZZER(const luau::ModuleSet& message)
flag->value = true;
FFlag::DebugLuauFreezeArena.value = true;
FFlag::DebugLuauAbortingChecks.value = true;
std::vector<std::string> sources = protoprint(message, kFuzzTypes);
@ -370,7 +372,7 @@ DEFINE_PROTO_FUZZER(const luau::ModuleSet& message)
}
// run resulting bytecode (from last successfully compiled module)
if (kFuzzVM && bytecode.size())
if ((kFuzzVM || kFuzzCodegenVM) && bytecode.size())
{
static lua_State* globalState = createGlobalState();
@ -395,9 +397,10 @@ DEFINE_PROTO_FUZZER(const luau::ModuleSet& message)
LUAU_ASSERT(heapSize < 256 * 1024);
};
runCode(bytecode, false);
if (kFuzzVM)
runCode(bytecode, false);
if (kFuzzCodegen && Luau::CodeGen::isSupported())
if (kFuzzCodegenVM && Luau::CodeGen::isSupported())
runCode(bytecode, true);
}
}

View File

@ -53,7 +53,10 @@ static std::string compileTypeTable(const char* source)
{
Luau::BytecodeBuilder bcb;
bcb.setDumpFlags(Luau::BytecodeBuilder::Dump_Code);
Luau::compileOrThrow(bcb, source);
Luau::CompileOptions opts;
opts.vectorType = "Vector3";
Luau::compileOrThrow(bcb, source, opts);
return bcb.dumpTypeInfo();
}
@ -7159,6 +7162,31 @@ end
)");
}
TEST_CASE("HostTypesVector")
{
ScopedFastFlag sff("LuauCompileFunctionType", true);
CHECK_EQ("\n" + compileTypeTable(R"(
function myfunc(test: Instance, pos: Vector3)
end
function myfunc2<Vector3>(test: Instance, pos: Vector3)
end
do
type Vector3 = number
function myfunc3(test: Instance, pos: Vector3)
end
end
)"),
R"(
0: function(userdata, vector)
1: function(userdata, any)
2: function(userdata, number)
)");
}
TEST_CASE("TypeAliasScoping")
{
ScopedFastFlag sff("LuauCompileFunctionType", true);

View File

@ -5,6 +5,7 @@
#include "luacodegen.h"
#include "Luau/BuiltinDefinitions.h"
#include "Luau/DenseHash.h"
#include "Luau/ModuleResolver.h"
#include "Luau/TypeInfer.h"
#include "Luau/StringUtils.h"
@ -15,6 +16,7 @@
#include "ScopedFlags.h"
#include <fstream>
#include <string>
#include <vector>
#include <math.h>
@ -1244,6 +1246,8 @@ TEST_CASE("GCDump")
{
// internal function, declared in lgc.h - not exposed via lua.h
extern void luaC_dump(lua_State * L, void* file, const char* (*categoryName)(lua_State * L, uint8_t memcat));
extern void luaC_enumheap(lua_State * L, void* context, void (*node)(void* context, void* ptr, uint8_t tt, uint8_t memcat, const char* name),
void (*edge)(void* context, void* from, void* to, const char* name));
StateRef globalState(luaL_newstate(), lua_close);
lua_State* L = globalState.get();
@ -1287,6 +1291,40 @@ TEST_CASE("GCDump")
luaC_dump(L, f, nullptr);
fclose(f);
struct Node
{
void* ptr;
uint8_t tag;
uint8_t memcat;
std::string name;
};
struct EnumContext
{
EnumContext()
: nodes{nullptr}
, edges{nullptr}
{
}
Luau::DenseHashMap<void*, Node> nodes;
Luau::DenseHashMap<void*, void*> edges;
} ctx;
luaC_enumheap(
L, &ctx,
[](void* ctx, void* gco, uint8_t tt, uint8_t memcat, const char* name) {
EnumContext& context = *(EnumContext*)ctx;
context.nodes[gco] = {gco, tt, memcat, name ? name : ""};
},
[](void* ctx, void* s, void* t, const char*) {
EnumContext& context = *(EnumContext*)ctx;
context.edges[s] = t;
});
CHECK(!ctx.nodes.empty());
CHECK(!ctx.edges.empty());
}
TEST_CASE("Interrupt")

View File

@ -1,15 +1,20 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/Differ.h"
#include "Luau/Common.h"
#include "Luau/Error.h"
#include "Luau/Frontend.h"
#include "Fixture.h"
#include "Luau/Symbol.h"
#include "ScopedFlags.h"
#include "doctest.h"
#include <iostream>
using namespace Luau;
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
TEST_SUITE_BEGIN("Differ");
TEST_CASE_FIXTURE(Fixture, "equal_numbers")
@ -313,4 +318,685 @@ TEST_CASE_FIXTURE(Fixture, "singleton_string")
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "equal_function")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo(x: number)
return x
end
function almostFoo(y: number)
return y + 10
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
try
{
DifferResult diffRes = diff(foo, almostFoo);
INFO(diffRes.diffError->toString());
CHECK(!diffRes.diffError.has_value());
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
}
TEST_CASE_FIXTURE(Fixture, "equal_function_inferred_ret_length")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function bar(x: number, y: string)
return x, y
end
function almostBar(a: number, b: string)
return a, b
end
function foo(x: number, y: string, z: boolean)
return z, bar(x, y)
end
function almostFoo(a: number, b: string, c: boolean)
return c, almostBar(a, b)
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
try
{
DifferResult diffRes = diff(foo, almostFoo);
INFO(diffRes.diffError->toString());
CHECK(!diffRes.diffError.has_value());
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
}
TEST_CASE_FIXTURE(Fixture, "equal_function_inferred_ret_length_2")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function bar(x: number, y: string)
return x, y
end
function foo(x: number, y: string, z: boolean)
return bar(x, y), z
end
function almostFoo(a: number, b: string, c: boolean)
return a, c
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
try
{
DifferResult diffRes = diff(foo, almostFoo);
INFO(diffRes.diffError->toString());
CHECK(!diffRes.diffError.has_value());
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
}
TEST_CASE_FIXTURE(Fixture, "function_arg_normal")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo(x: number, y: number, z: number)
return x * y * z
end
function almostFoo(a: number, b: number, msg: string)
return a
end
almostFoo = foo
)");
LUAU_REQUIRE_ERRORS(result);
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
diffMessage = diff(foo, almostFoo).diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol>.Arg[3] has type number, while the right type at <unlabeled-symbol>.Arg[3] has type string)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_arg_normal_2")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo(x: number, y: number, z: string)
return x * y
end
function almostFoo(a: number, y: string, msg: string)
return a
end
almostFoo = foo
)");
LUAU_REQUIRE_ERRORS(result);
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
diffMessage = diff(foo, almostFoo).diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol>.Arg[2] has type number, while the right type at <unlabeled-symbol>.Arg[2] has type string)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_ret_normal")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo(x: number, y: number, z: string)
return x
end
function almostFoo(a: number, b: number, msg: string)
return msg
end
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol>.Ret[1] has type number, while the right type at <unlabeled-symbol>.Ret[1] has type string)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_arg_length")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo(x: number, y: number)
return x
end
function almostFoo(x: number, y: number, c: number)
return x
end
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol> takes 2 or more arguments, while the right type at <unlabeled-symbol> takes 3 or more arguments)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_arg_length_2")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo(x: number, y: string, z: number)
return z
end
function almostFoo(x: number, y: string)
return x
end
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol> takes 3 or more arguments, while the right type at <unlabeled-symbol> takes 2 or more arguments)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_arg_length_none")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo()
return 5
end
function almostFoo(x: number, y: string)
return x
end
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol> takes 0 or more arguments, while the right type at <unlabeled-symbol> takes 2 or more arguments)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_arg_length_none_2")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo(x: number)
return x
end
function almostFoo()
return 5
end
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol> takes 1 or more arguments, while the right type at <unlabeled-symbol> takes 0 or more arguments)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_ret_length")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo(x: number, y: number)
return x
end
function almostFoo(x: number, y: number)
return x, y
end
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol> returns 1 values, while the right type at <unlabeled-symbol> returns 2 values)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_ret_length_2")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo(x: number, y: string, z: number)
return y, x, z
end
function almostFoo(x: number, y: string, z: number)
return y, x
end
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol> returns 3 values, while the right type at <unlabeled-symbol> returns 2 values)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_ret_length_none")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo(x: number, y: string)
return
end
function almostFoo(x: number, y: string)
return x
end
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol> returns 0 values, while the right type at <unlabeled-symbol> returns 1 values)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_ret_length_none_2")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo()
return 5
end
function almostFoo()
return
end
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol> returns 1 values, while the right type at <unlabeled-symbol> returns 0 values)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_variadic_arg_normal")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo(x: number, y: string, ...: number)
return x, y
end
function almostFoo(a: number, b: string, ...: string)
return a, b
end
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol>.Arg[Variadic] has type number, while the right type at <unlabeled-symbol>.Arg[Variadic] has type string)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_variadic_arg_missing")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo(x: number, y: string, ...: number)
return x, y
end
function almostFoo(a: number, b: string)
return a, b
end
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol>.Arg[Variadic] has type number, while the right type at <unlabeled-symbol>.Arg[Variadic] has type any)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_variadic_arg_missing_2")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
function foo(x: number, y: string)
return x, y
end
function almostFoo(a: number, b: string, ...: string)
return a, b
end
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol>.Arg[Variadic] has type any, while the right type at <unlabeled-symbol>.Arg[Variadic] has type string)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_variadic_oversaturation")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
-- allowed to be oversaturated
function foo(x: number, y: string)
return x, y
end
-- must not be oversaturated
local almostFoo: (number, string) -> (number, string) = foo
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol> takes 2 or more arguments, while the right type at <unlabeled-symbol> takes 2 arguments)",
diffMessage);
}
TEST_CASE_FIXTURE(Fixture, "function_variadic_oversaturation_2")
{
// Old solver does not correctly infer function typepacks
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
CheckResult result = check(R"(
-- must not be oversaturated
local foo: (number, string) -> (number, string)
-- allowed to be oversaturated
function almostFoo(x: number, y: string)
return x, y
end
)");
TypeId foo = requireType("foo");
TypeId almostFoo = requireType("almostFoo");
std::string diffMessage;
try
{
DifferResult diffRes = diff(foo, almostFoo);
if (!diffRes.diffError.has_value())
{
INFO("Differ did not report type error, even though types are unequal");
CHECK(false);
}
diffMessage = diffRes.diffError->toString();
}
catch (const InternalCompilerError& e)
{
INFO(("InternalCompilerError: " + e.message));
CHECK(false);
}
CHECK_EQ(
R"(DiffError: these two types are not equal because the left type at <unlabeled-symbol> takes 2 arguments, while the right type at <unlabeled-symbol> takes 2 or more arguments)",
diffMessage);
}
TEST_SUITE_END();

View File

@ -407,19 +407,15 @@ type B = A
TEST_CASE_FIXTURE(BuiltinsFixture, "do_not_clone_reexports")
{
ScopedFastFlag flags[] = {
{"LuauClonePublicInterfaceLess2", true},
};
fileResolver.source["Module/A"] = R"(
export type A = {p : number}
return {}
export type A = {p : number}
return {}
)";
fileResolver.source["Module/B"] = R"(
local a = require(script.Parent.A)
export type B = {q : a.A}
return {}
local a = require(script.Parent.A)
export type B = {q : a.A}
return {}
)";
CheckResult result = frontend.check("Module/B");
@ -442,19 +438,15 @@ return {}
TEST_CASE_FIXTURE(BuiltinsFixture, "do_not_clone_types_of_reexported_values")
{
ScopedFastFlag flags[] = {
{"LuauClonePublicInterfaceLess2", true},
};
fileResolver.source["Module/A"] = R"(
local exports = {a={p=5}}
return exports
local exports = {a={p=5}}
return exports
)";
fileResolver.source["Module/B"] = R"(
local a = require(script.Parent.A)
local exports = {b=a.a}
return exports
local a = require(script.Parent.A)
local exports = {b=a.a}
return exports
)";
CheckResult result = frontend.check("Module/B");

View File

@ -54,8 +54,7 @@ TEST_SUITE_BEGIN("AllocatorTests");
TEST_CASE("allocator_can_be_moved")
{
Counter* c = nullptr;
auto inner = [&]()
{
auto inner = [&]() {
Luau::Allocator allocator;
c = allocator.alloc<Counter>();
Luau::Allocator moved{std::move(allocator)};
@ -922,8 +921,7 @@ TEST_CASE_FIXTURE(Fixture, "parse_interpolated_string_double_brace_mid")
TEST_CASE_FIXTURE(Fixture, "parse_interpolated_string_without_end_brace")
{
auto columnOfEndBraceError = [this](const char* code)
{
auto columnOfEndBraceError = [this](const char* code) {
try
{
parse(code);
@ -2387,8 +2385,7 @@ public:
TEST_CASE_FIXTURE(Fixture, "recovery_of_parenthesized_expressions")
{
auto checkAstEquivalence = [this](const char* codeWithErrors, const char* code)
{
auto checkAstEquivalence = [this](const char* codeWithErrors, const char* code) {
try
{
parse(codeWithErrors);
@ -2408,8 +2405,7 @@ TEST_CASE_FIXTURE(Fixture, "recovery_of_parenthesized_expressions")
CHECK_EQ(counterWithErrors.count, counter.count);
};
auto checkRecovery = [this, checkAstEquivalence](const char* codeWithErrors, const char* code, unsigned expectedErrorCount)
{
auto checkRecovery = [this, checkAstEquivalence](const char* codeWithErrors, const char* code, unsigned expectedErrorCount) {
try
{
parse(codeWithErrors);

View File

@ -225,7 +225,8 @@ TEST_CASE_FIXTURE(Fixture, "internal_families_raise_errors")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK(toString(result.errors[0]) == "Type family instance Add<a, b> depends on generic function parameters but does not appear in the function signature; this construct cannot be type-checked at this time");
CHECK(toString(result.errors[0]) == "Type family instance Add<a, b> depends on generic function parameters but does not appear in the function "
"signature; this construct cannot be type-checked at this time");
}
TEST_CASE_FIXTURE(BuiltinsFixture, "type_families_inhabited_with_normalization")

View File

@ -1913,8 +1913,6 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "dont_assert_when_the_tarjan_limit_is_exceede
ScopedFastInt sfi{"LuauTarjanChildLimit", 2};
ScopedFastFlag sff[] = {
{"DebugLuauDeferredConstraintResolution", true},
{"LuauClonePublicInterfaceLess2", true},
{"LuauCloneSkipNonInternalVisit", true},
};
CheckResult result = check(R"(

View File

@ -670,7 +670,9 @@ TEST_CASE_FIXTURE(Fixture, "strict_binary_op_where_lhs_unknown")
if (FFlag::DebugLuauDeferredConstraintResolution)
{
LUAU_REQUIRE_ERROR_COUNT(ops.size(), result);
CHECK_EQ("Type family instance Add<a, b> depends on generic function parameters but does not appear in the function signature; this construct cannot be type-checked at this time", toString(result.errors[0]));
CHECK_EQ("Type family instance Add<a, b> depends on generic function parameters but does not appear in the function signature; this "
"construct cannot be type-checked at this time",
toString(result.errors[0]));
CHECK_EQ("Unknown type used in - operation; consider adding a type annotation to 'a'", toString(result.errors[1]));
}
else

View File

@ -789,4 +789,19 @@ TEST_CASE_FIXTURE(Fixture, "lookup_prop_of_intersection_containing_unions")
CHECK("variables" == unknownProp->key);
}
TEST_CASE_FIXTURE(Fixture, "suppress_errors_for_prop_lookup_of_a_union_that_includes_error")
{
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
registerHiddenTypes(&frontend);
CheckResult result = check(R"(
local a : err | Not<nil>
local b = a.foo
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_SUITE_END();

View File

@ -92,4 +92,13 @@ end
assert(pcall(fuzzfail9) == false)
local function fuzzfail10()
local _
_ = false,if _ then _ else _
_ = not _
l0,_[l0] = not _
end
assert(pcall(fuzzfail10) == false)
return('OK')