Mirror of https://github.com/luau-lang/luau.git (synced 2024-11-15)
Sync to upstream/release/577 (#934)
Lots of things going on this week:

* Fix a crash that could occur in the presence of a cyclic union. We shouldn't be creating cyclic unions, but we shouldn't be crashing when they arise either.
* Minor cleanup of `luau_precall`
* Internal change to make L->top handling slightly more uniform
* Optimize SETGLOBAL & GETGLOBAL fallback C functions.
* https://github.com/Roblox/luau/pull/929
* The syntax to the `luau-reduce` commandline tool has changed. It now accepts a script, a command to execute, and an error to search for. It no longer automatically passes the script to the command, which makes it a lot more flexible. Also be warned that it edits the script it is passed **in place**. Do not point it at something that is not in source control!

New solver

* Switch to a greedier but more fallible algorithm for simplifying union and intersection types that are created as part of refinement calculation. This has much better and more predictable performance.
* Fix a constraint cycle in recursive function calls.
* Much improved inference of binary addition. Functions like `function add(x, y) return x + y end` can now be inferred without annotations. We also accurately typecheck calls to functions like this.
* Many small bugfixes surrounding things like table indexers
* Add support for indexers on class types. This was previously added to the old solver; we now add it to the new one for feature parity.

JIT

* https://github.com/Roblox/luau/pull/931
* Fuse key.value and key.tt loads for CHECK_SLOT_MATCH in A64
* Implement remaining aliases of BFM for A64
* Implement new callinfo flag for A64
* Add instruction simplification for int->num->int conversion chains
* Don't even load execdata for X64 calls
* Treat opcode fallbacks the same as manually written fallbacks

---------

Co-authored-by: Arseny Kapoulkine <arseny.kapoulkine@gmail.com>
Co-authored-by: Vyacheslav Egorov <vegorov@roblox.com>
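For a concrete sense of how the improved binary-addition inference is wired up, here is a condensed sketch assembled from the ConstraintGraphBuilder change further down in this diff (surrounding plumbing elided): `x + y` now mints a reducible `add` type family instance instead of a plain BinaryConstraint.

```cpp
// Condensed from the AstExprBinary change in this diff: `+` constructs a
// TypeFamilyInstanceType over the built-in `add` family and queues a
// ReduceConstraint, so `function add(x, y) return x + y end` can be inferred
// without annotations once the family reduces.
if (binary->op == AstExprBinary::Op::Add)
{
    TypeId resultType = arena->addType(TypeFamilyInstanceType{
        NotNull{&kBuiltinTypeFamilies.addFamily}, // the only built-in family in this diff
        {leftType, rightType},                    // type arguments
        {},                                       // no pack arguments
    });
    addConstraint(scope, binary->location, ReduceConstraint{resultType});
    return Inference{resultType, std::move(refinement)};
}
```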
This commit is contained in: parent da0458bf6e, commit 721f6e10fb
@@ -144,6 +144,24 @@ struct HasPropConstraint
     TypeId resultType;
     TypeId subjectType;
     std::string prop;
+
+    // HACK: We presently need types like true|false or string|"hello" when
+    // deciding whether a particular literal expression should have a singleton
+    // type. This boolean is set to true when extracting the property type of a
+    // value that may be a union of tables.
+    //
+    // For example, in the following code fragment, we want the lookup of the
+    // success property to yield true|false when extracting an expectedType in
+    // this expression:
+    //
+    // type Result<T, E> = {success:true, result: T} | {success:false, error: E}
+    //
+    // local r: Result<number, string> = {success=true, result=9}
+    //
+    // If we naively simplify the expectedType to boolean, we will erroneously
+    // compute the type boolean for the success property of the table literal.
+    // This causes type checking to fail.
+    bool suppressSimplification = false;
 };

 // result ~ setProp subjectType ["prop", "prop2", ...] propType
@@ -198,6 +216,24 @@ struct UnpackConstraint
     TypePackId sourcePack;
 };

+// resultType ~ refine type mode discriminant
+//
+// Compute type & discriminant (or type | discriminant) as soon as possible (but
+// no sooner), simplify, and bind resultType to that type.
+struct RefineConstraint
+{
+    enum
+    {
+        Intersection,
+        Union
+    } mode;
+
+    TypeId resultType;
+
+    TypeId type;
+    TypeId discriminant;
+};
+
 // ty ~ reduce ty
 //
 // Try to reduce ty, if it is a TypeFamilyInstanceType. Otherwise, do nothing.
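A minimal sketch of how the builder discharges one of these, taken directly from the applyRefinements change later in this diff: the result is a BlockedType that the solver fills in once the operands are solved enough to simplify.

```cpp
// From applyRefinements below: defer `ty & dt` until both sides are solved.
TypeId r = arena->addType(BlockedType{});
addConstraint(scope, location, RefineConstraint{RefineConstraint::Intersection, r, ty, dt});
ty = r; // downstream constraints see the blocked result until the solver binds it
```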
@@ -214,10 +250,10 @@ struct ReducePackConstraint
     TypePackId tp;
 };

-using ConstraintV =
-    Variant<SubtypeConstraint, PackSubtypeConstraint, GeneralizationConstraint, InstantiationConstraint, UnaryConstraint, BinaryConstraint,
-        IterableConstraint, NameConstraint, TypeAliasExpansionConstraint, FunctionCallConstraint, PrimitiveTypeConstraint, HasPropConstraint,
-        SetPropConstraint, SetIndexerConstraint, SingletonOrTopTypeConstraint, UnpackConstraint, ReduceConstraint, ReducePackConstraint>;
+using ConstraintV = Variant<SubtypeConstraint, PackSubtypeConstraint, GeneralizationConstraint, InstantiationConstraint, UnaryConstraint,
+    BinaryConstraint, IterableConstraint, NameConstraint, TypeAliasExpansionConstraint, FunctionCallConstraint, PrimitiveTypeConstraint,
+    HasPropConstraint, SetPropConstraint, SetIndexerConstraint, SingletonOrTopTypeConstraint, UnpackConstraint, RefineConstraint, ReduceConstraint,
+    ReducePackConstraint>;

 struct Constraint
 {

@@ -188,6 +188,7 @@ struct ConstraintGraphBuilder
     Inference check(const ScopePtr& scope, AstExprGlobal* global);
     Inference check(const ScopePtr& scope, AstExprIndexName* indexName);
     Inference check(const ScopePtr& scope, AstExprIndexExpr* indexExpr);
+    Inference check(const ScopePtr& scope, AstExprFunction* func, std::optional<TypeId> expectedType);
     Inference check(const ScopePtr& scope, AstExprUnary* unary);
     Inference check(const ScopePtr& scope, AstExprBinary* binary, std::optional<TypeId> expectedType);
     Inference check(const ScopePtr& scope, AstExprIfElse* ifElse, std::optional<TypeId> expectedType);

@@ -213,7 +214,8 @@ struct ConstraintGraphBuilder
         ScopePtr bodyScope;
     };

-    FunctionSignature checkFunctionSignature(const ScopePtr& parent, AstExprFunction* fn, std::optional<TypeId> expectedType = {});
+    FunctionSignature checkFunctionSignature(
+        const ScopePtr& parent, AstExprFunction* fn, std::optional<TypeId> expectedType = {}, std::optional<Location> originalName = {});

     /**
     * Checks the body of a function expression.

@@ -8,7 +8,6 @@
 #include "Luau/Normalize.h"
 #include "Luau/ToString.h"
 #include "Luau/Type.h"
-#include "Luau/TypeReduction.h"
 #include "Luau/Variant.h"

 #include <vector>

@@ -121,6 +120,7 @@ struct ConstraintSolver
     bool tryDispatch(const SetIndexerConstraint& c, NotNull<const Constraint> constraint, bool force);
     bool tryDispatch(const SingletonOrTopTypeConstraint& c, NotNull<const Constraint> constraint);
     bool tryDispatch(const UnpackConstraint& c, NotNull<const Constraint> constraint);
+    bool tryDispatch(const RefineConstraint& c, NotNull<const Constraint> constraint, bool force);
     bool tryDispatch(const ReduceConstraint& c, NotNull<const Constraint> constraint, bool force);
     bool tryDispatch(const ReducePackConstraint& c, NotNull<const Constraint> constraint, bool force);

@@ -132,8 +132,10 @@ struct ConstraintSolver
     bool tryDispatchIterableFunction(
         TypeId nextTy, TypeId tableTy, TypeId firstIndexTy, const IterableConstraint& c, NotNull<const Constraint> constraint, bool force);

-    std::pair<std::vector<TypeId>, std::optional<TypeId>> lookupTableProp(TypeId subjectType, const std::string& propName);
-    std::pair<std::vector<TypeId>, std::optional<TypeId>> lookupTableProp(TypeId subjectType, const std::string& propName, std::unordered_set<TypeId>& seen);
+    std::pair<std::vector<TypeId>, std::optional<TypeId>> lookupTableProp(
+        TypeId subjectType, const std::string& propName, bool suppressSimplification = false);
+    std::pair<std::vector<TypeId>, std::optional<TypeId>> lookupTableProp(
+        TypeId subjectType, const std::string& propName, bool suppressSimplification, std::unordered_set<TypeId>& seen);

     void block(NotNull<const Constraint> target, NotNull<const Constraint> constraint);
     /**
@@ -143,6 +145,16 @@ struct ConstraintSolver
     bool block(TypeId target, NotNull<const Constraint> constraint);
     bool block(TypePackId target, NotNull<const Constraint> constraint);

+    // Block on every target
+    template<typename T>
+    bool block(const T& targets, NotNull<const Constraint> constraint)
+    {
+        for (TypeId target : targets)
+            block(target, constraint);
+
+        return false;
+    }
+
     /**
     * For all constraints that are blocked on one constraint, make them block
     * on a new constraint.
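The `return false` matches the solver's dispatch convention documented below ("Returns false if a type blocks the constraint"): a `tryDispatch` overload registers its blocks and then signals that the constraint is not yet satisfiable. The RefineConstraint dispatcher later in this diff uses the new overload exactly that way:

```cpp
// From tryDispatch(const RefineConstraint&, ...) below: block on every type
// the simplifier reported as unsolved, and bail out of this dispatch attempt.
if (!force && !blockedTypes.empty())
    return block(blockedTypes, constraint);
```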
@@ -151,15 +163,15 @@ struct ConstraintSolver
     */
     void inheritBlocks(NotNull<const Constraint> source, NotNull<const Constraint> addition);

-    // Traverse the type. If any blocked or pending types are found, block
-    // the constraint on them.
+    // Traverse the type. If any pending types are found, block the constraint
+    // on them.
     //
     // Returns false if a type blocks the constraint.
     //
     // FIXME: This use of a boolean for the return result is an appalling
     // interface.
-    bool recursiveBlock(TypeId target, NotNull<const Constraint> constraint);
-    bool recursiveBlock(TypePackId target, NotNull<const Constraint> constraint);
+    bool blockOnPendingTypes(TypeId target, NotNull<const Constraint> constraint);
+    bool blockOnPendingTypes(TypePackId target, NotNull<const Constraint> constraint);

     void unblock(NotNull<const Constraint> progressed);
     void unblock(TypeId progressed);

@@ -255,6 +267,8 @@ private:
     TypeId unionOfTypes(TypeId a, TypeId b, NotNull<Scope> scope, bool unifyFreeTypes);

+    TypePackId anyifyModuleReturnTypePackGenerics(TypePackId tp);
+
     ToStringOptions opts;
 };

@@ -85,14 +85,11 @@ struct Module
     DenseHashMap<const AstNode*, TypeId> astOverloadResolvedTypes{nullptr};

     DenseHashMap<const AstType*, TypeId> astResolvedTypes{nullptr};
-    DenseHashMap<const AstType*, TypeId> astOriginalResolvedTypes{nullptr};
     DenseHashMap<const AstTypePack*, TypePackId> astResolvedTypePacks{nullptr};

     // Map AST nodes to the scope they create. Cannot be NotNull<Scope> because we need a sentinel value for the map.
     DenseHashMap<const AstNode*, Scope*> astScopes{nullptr};

-    std::unique_ptr<struct TypeReduction> reduction;
-
     std::unordered_map<Name, TypeId> declaredGlobals;
     ErrorVec errors;
     LintResult lintResult;

@@ -267,8 +267,18 @@ struct NormalizedType

     NormalizedType(NormalizedType&&) = default;
     NormalizedType& operator=(NormalizedType&&) = default;

+    // IsType functions
+
+    /// Returns true if the type is a subtype of function. This includes any and unknown.
+    bool isFunction() const;
+
+    /// Returns true if the type is a subtype of number. This includes any and unknown.
+    bool isNumber() const;
 };


 class Normalizer
 {
     std::unordered_map<TypeId, std::unique_ptr<NormalizedType>> cachedNormals;
Analysis/include/Luau/Simplify.h (new file, 36 lines)
@@ -0,0 +1,36 @@
+// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
+
+#pragma once
+
+#include "Luau/Type.h"
+
+#include <set>
+
+namespace Luau
+{
+
+struct TypeArena;
+struct BuiltinTypes;
+
+struct SimplifyResult
+{
+    TypeId result;
+
+    std::set<TypeId> blockedTypes;
+};
+
+SimplifyResult simplifyIntersection(NotNull<BuiltinTypes> builtinTypes, NotNull<TypeArena> arena, TypeId ty, TypeId discriminant);
+SimplifyResult simplifyUnion(NotNull<BuiltinTypes> builtinTypes, NotNull<TypeArena> arena, TypeId ty, TypeId discriminant);
+
+enum class Relation
+{
+    Disjoint,   // No A is a B or vice versa
+    Coincident, // Every A is in B and vice versa
+    Intersects, // Some As are in B and some Bs are in A. ex (number | string) <-> (string | boolean)
+    Subset,     // Every A is in B
+    Superset,   // Every B is in A
+};
+
+Relation relate(TypeId left, TypeId right);
+
+} // namespace Luau
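A short usage sketch, mirroring how the `intersect` lambda in unionRefinements (later in this diff) consumes the result: a non-empty `blockedTypes` means the inputs were not solved enough to simplify eagerly, so the caller falls back to a raw type node or defers via a RefineConstraint.

```cpp
// Mirrors the `intersect` lambda in unionRefinements below.
SimplifyResult sr = simplifyIntersection(builtinTypes, arena, types[0], types[1]);
if (sr.blockedTypes.empty())
    return sr.result;                            // eagerly simplified
return arena->addType(IntersectionType{types});  // defer: leave the raw intersection
```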
@@ -99,10 +99,7 @@ inline std::string toString(const Constraint& c, ToStringOptions&& opts)
     return toString(c, opts);
 }

-inline std::string toString(const Constraint& c)
-{
-    return toString(c, ToStringOptions{});
-}
+std::string toString(const Constraint& c);

 std::string toString(const Type& tv, ToStringOptions& opts);
 std::string toString(const TypePackVar& tp, ToStringOptions& opts);

@@ -308,6 +308,12 @@ public:
     // used. Else we use the embedded Scope*.
     bool useScopes = false;

+    // It is sometimes the case under DCR that we speculatively rebind
+    // GenericTypes to other types as though they were free. We mark logs that
+    // contain these kinds of substitutions as radioactive so that we know that
+    // we must never commit one.
+    bool radioactive = false;
+
     // Used to avoid infinite recursion when types are cyclic.
     // Shared with all the descendent TxnLogs.
     std::vector<std::pair<TypeOrPackId, TypeOrPackId>>* sharedSeen;

@@ -349,7 +349,9 @@ struct FunctionType
     DcrMagicFunction dcrMagicFunction = nullptr;
     DcrMagicRefinement dcrMagicRefinement = nullptr;
     bool hasSelf;
-    bool hasNoGenerics = false;
+    // `hasNoFreeOrGenericTypes` should be true if and only if the type does not have any free or generic types present inside it.
+    // this flag is used as an optimization to exit early from procedures that manipulate free or generic types.
+    bool hasNoFreeOrGenericTypes = false;
 };

 enum class TableState

@@ -530,7 +532,7 @@ struct ClassType
 */
 struct TypeFamilyInstanceType
 {
-    NotNull<TypeFamily> family;
+    NotNull<const TypeFamily> family;

     std::vector<TypeId> typeArguments;
     std::vector<TypePackId> packArguments;

@@ -21,6 +21,7 @@ using TypePackId = const TypePackVar*;
 struct TypeArena;
 struct BuiltinTypes;
 struct TxnLog;
+class Normalizer;

 /// Represents a reduction result, which may have successfully reduced the type,
 /// may have concretely failed to reduce the type, or may simply be stuck

@@ -52,8 +53,8 @@ struct TypeFamily
     std::string name;

     /// The reducer function for the type family.
-    std::function<TypeFamilyReductionResult<TypeId>(
-        std::vector<TypeId>, std::vector<TypePackId>, NotNull<TypeArena>, NotNull<BuiltinTypes>, NotNull<const TxnLog> log)>
+    std::function<TypeFamilyReductionResult<TypeId>(std::vector<TypeId>, std::vector<TypePackId>, NotNull<TypeArena>, NotNull<BuiltinTypes>,
+        NotNull<TxnLog>, NotNull<Scope>, NotNull<Normalizer>)>
         reducer;
 };

@@ -66,8 +67,8 @@ struct TypePackFamily
     std::string name;

     /// The reducer function for the type pack family.
-    std::function<TypeFamilyReductionResult<TypePackId>(
-        std::vector<TypeId>, std::vector<TypePackId>, NotNull<TypeArena>, NotNull<BuiltinTypes>, NotNull<const TxnLog> log)>
+    std::function<TypeFamilyReductionResult<TypePackId>(std::vector<TypeId>, std::vector<TypePackId>, NotNull<TypeArena>, NotNull<BuiltinTypes>,
+        NotNull<TxnLog>, NotNull<Scope>, NotNull<Normalizer>)>
         reducer;
 };

@@ -93,8 +94,8 @@ struct FamilyGraphReductionResult
 * against the TxnLog, otherwise substitutions will directly mutate the type
 * graph. Do not provide the empty TxnLog, as a result.
 */
-FamilyGraphReductionResult reduceFamilies(
-    TypeId entrypoint, Location location, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins, TxnLog* log = nullptr, bool force = false);
+FamilyGraphReductionResult reduceFamilies(TypeId entrypoint, Location location, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins,
+    NotNull<Scope> scope, NotNull<Normalizer> normalizer, TxnLog* log = nullptr, bool force = false);

 /**
 * Attempt to reduce all instances of any type or type pack family in the type

@@ -109,7 +110,16 @@ FamilyGraphReductionResult reduceFamilies(
 * against the TxnLog, otherwise substitutions will directly mutate the type
 * graph. Do not provide the empty TxnLog, as a result.
 */
-FamilyGraphReductionResult reduceFamilies(
-    TypePackId entrypoint, Location location, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins, TxnLog* log = nullptr, bool force = false);
+FamilyGraphReductionResult reduceFamilies(TypePackId entrypoint, Location location, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins,
+    NotNull<Scope> scope, NotNull<Normalizer> normalizer, TxnLog* log = nullptr, bool force = false);
+
+struct BuiltinTypeFamilies
+{
+    BuiltinTypeFamilies();
+
+    TypeFamily addFamily;
+};
+
+const BuiltinTypeFamilies kBuiltinTypeFamilies{};

 } // namespace Luau
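For orientation, a reducer matching the new signature might look like the following. This is an illustrative stub only, not the real `addFamily` reducer (which is defined elsewhere and is not part of these hunks); it assumes, as the comment above describes, that `TypeFamilyReductionResult` carries the reduced type as its primary member.

```cpp
// Illustrative stub: the shape of a reducer under the new signature.
auto exampleReducer = [](std::vector<TypeId> types, std::vector<TypePackId> packs,
    NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins, NotNull<TxnLog> log,
    NotNull<Scope> scope, NotNull<Normalizer> normalizer) -> TypeFamilyReductionResult<TypeId> {
    // A real reducer inspects `types` (possibly via the normalizer) and either
    // returns a reduced type or reports itself blocked/stuck on unsolved inputs.
    return TypeFamilyReductionResult<TypeId>{types.at(0)};
};
```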
@@ -1,85 +0,0 @@
-// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
-#pragma once
-
-#include "Luau/Type.h"
-#include "Luau/TypeArena.h"
-#include "Luau/TypePack.h"
-#include "Luau/Variant.h"
-
-namespace Luau
-{
-
-namespace detail
-{
-template<typename T>
-struct ReductionEdge
-{
-    T type = nullptr;
-    bool irreducible = false;
-};
-
-struct TypeReductionMemoization
-{
-    TypeReductionMemoization() = default;
-
-    TypeReductionMemoization(const TypeReductionMemoization&) = delete;
-    TypeReductionMemoization& operator=(const TypeReductionMemoization&) = delete;
-
-    TypeReductionMemoization(TypeReductionMemoization&&) = default;
-    TypeReductionMemoization& operator=(TypeReductionMemoization&&) = default;
-
-    DenseHashMap<TypeId, ReductionEdge<TypeId>> types{nullptr};
-    DenseHashMap<TypePackId, ReductionEdge<TypePackId>> typePacks{nullptr};
-
-    bool isIrreducible(TypeId ty);
-    bool isIrreducible(TypePackId tp);
-
-    TypeId memoize(TypeId ty, TypeId reducedTy);
-    TypePackId memoize(TypePackId tp, TypePackId reducedTp);
-
-    // Reducing A into B may have a non-irreducible edge A to B for which B is not irreducible, which means B could be reduced into C.
-    // Because reduction should always be transitive, A should point to C if A points to B and B points to C.
-    std::optional<ReductionEdge<TypeId>> memoizedof(TypeId ty) const;
-    std::optional<ReductionEdge<TypePackId>> memoizedof(TypePackId tp) const;
-};
-} // namespace detail
-
-struct TypeReductionOptions
-{
-    /// If it's desirable for type reduction to allocate into a different arena than the TypeReduction instance you have, you will need
-    /// to create a temporary TypeReduction in that case, and set [`TypeReductionOptions::allowTypeReductionsFromOtherArenas`] to true.
-    /// This is because TypeReduction caches the reduced type.
-    bool allowTypeReductionsFromOtherArenas = false;
-};
-
-struct TypeReduction
-{
-    explicit TypeReduction(NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtinTypes, NotNull<struct InternalErrorReporter> handle,
-        const TypeReductionOptions& opts = {});
-
-    TypeReduction(const TypeReduction&) = delete;
-    TypeReduction& operator=(const TypeReduction&) = delete;
-
-    TypeReduction(TypeReduction&&) = default;
-    TypeReduction& operator=(TypeReduction&&) = default;
-
-    std::optional<TypeId> reduce(TypeId ty);
-    std::optional<TypePackId> reduce(TypePackId tp);
-    std::optional<TypeFun> reduce(const TypeFun& fun);
-
-private:
-    NotNull<TypeArena> arena;
-    NotNull<BuiltinTypes> builtinTypes;
-    NotNull<struct InternalErrorReporter> handle;
-
-    TypeReductionOptions options;
-    detail::TypeReductionMemoization memoization;
-
-    // Computes an *estimated length* of the cartesian product of the given type.
-    size_t cartesianProductSize(TypeId ty) const;
-
-    bool hasExceededCartesianProductLimit(TypeId ty) const;
-    bool hasExceededCartesianProductLimit(TypePackId tp) const;
-};
-
-} // namespace Luau
@@ -7,7 +7,6 @@
 #include "Luau/ToString.h"
 #include "Luau/TypeInfer.h"
 #include "Luau/TypePack.h"
-#include "Luau/TypeReduction.h"

 #include <algorithm>
 #include <unordered_set>

@@ -11,6 +11,7 @@ LUAU_FASTFLAG(LuauClonePublicInterfaceLess2)
 LUAU_FASTFLAG(DebugLuauReadWriteProperties)

 LUAU_FASTINTVARIABLE(LuauTypeCloneRecursionLimit, 300)
+LUAU_FASTFLAGVARIABLE(LuauCloneCyclicUnions, false)

 namespace Luau
 {

@@ -282,7 +283,7 @@ void TypeCloner::operator()(const FunctionType& t)
     ftv->argTypes = clone(t.argTypes, dest, cloneState);
     ftv->argNames = t.argNames;
     ftv->retTypes = clone(t.retTypes, dest, cloneState);
-    ftv->hasNoGenerics = t.hasNoGenerics;
+    ftv->hasNoFreeOrGenericTypes = t.hasNoFreeOrGenericTypes;
 }

 void TypeCloner::operator()(const TableType& t)
@@ -373,14 +374,30 @@ void TypeCloner::operator()(const AnyType& t)

 void TypeCloner::operator()(const UnionType& t)
 {
-    std::vector<TypeId> options;
-    options.reserve(t.options.size());
+    if (FFlag::LuauCloneCyclicUnions)
+    {
+        TypeId result = dest.addType(FreeType{nullptr});
+        seenTypes[typeId] = result;

-    for (TypeId ty : t.options)
-        options.push_back(clone(ty, dest, cloneState));
+        std::vector<TypeId> options;
+        options.reserve(t.options.size());

-    TypeId result = dest.addType(UnionType{std::move(options)});
-    seenTypes[typeId] = result;
+        for (TypeId ty : t.options)
+            options.push_back(clone(ty, dest, cloneState));
+
+        asMutable(result)->ty.emplace<UnionType>(std::move(options));
+    }
+    else
+    {
+        std::vector<TypeId> options;
+        options.reserve(t.options.size());
+
+        for (TypeId ty : t.options)
+            options.push_back(clone(ty, dest, cloneState));
+
+        TypeId result = dest.addType(UnionType{std::move(options)});
+        seenTypes[typeId] = result;
+    }
 }

 void TypeCloner::operator()(const IntersectionType& t)
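This is the cyclic-union crash fix from the commit message. The key is ordering: the flagged branch registers a placeholder in `seenTypes` *before* cloning the options, so an option that refers back to the union resolves to the placeholder instead of recursing without bound; the real `UnionType` payload is then emplaced over the placeholder in place. A one-line illustration of the kind of type involved (Luau surface syntax, for illustration only):

```cpp
// type T = number | (T) -> ()   -- an option of the union refers back to T.
// Old order: clone options first, record result last  => unbounded recursion.
// New order: record placeholder, clone options (cycles hit the placeholder
//            via seenTypes), then emplace the UnionType payload in place.
```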
@@ -13,6 +13,9 @@
 #include "Luau/Scope.h"
 #include "Luau/TypeUtils.h"
 #include "Luau/Type.h"
+#include "Luau/TypeFamily.h"
+#include "Luau/Simplify.h"
 #include "Luau/VisitType.h"

 #include <algorithm>

@@ -195,8 +198,23 @@ struct RefinementPartition

 using RefinementContext = std::unordered_map<DefId, RefinementPartition>;

-static void unionRefinements(const RefinementContext& lhs, const RefinementContext& rhs, RefinementContext& dest, NotNull<TypeArena> arena)
+static void unionRefinements(NotNull<BuiltinTypes> builtinTypes, NotNull<TypeArena> arena, const RefinementContext& lhs, const RefinementContext& rhs,
+    RefinementContext& dest, std::vector<ConstraintV>* constraints)
 {
+    const auto intersect = [&](const std::vector<TypeId>& types) {
+        if (1 == types.size())
+            return types[0];
+        else if (2 == types.size())
+        {
+            // TODO: It may be advantageous to create a RefineConstraint here when there are blockedTypes.
+            SimplifyResult sr = simplifyIntersection(builtinTypes, arena, types[0], types[1]);
+            if (sr.blockedTypes.empty())
+                return sr.result;
+        }
+
+        return arena->addType(IntersectionType{types});
+    };
+
     for (auto& [def, partition] : lhs)
     {
         auto rhsIt = rhs.find(def);

@@ -206,55 +224,54 @@ static void unionRefinements(NotNull<BuiltinTypes> builtinTypes, NotNull<TypeArena> arena, ...)
         LUAU_ASSERT(!partition.discriminantTypes.empty());
         LUAU_ASSERT(!rhsIt->second.discriminantTypes.empty());

-        TypeId leftDiscriminantTy =
-            partition.discriminantTypes.size() == 1 ? partition.discriminantTypes[0] : arena->addType(IntersectionType{partition.discriminantTypes});
+        TypeId leftDiscriminantTy = partition.discriminantTypes.size() == 1 ? partition.discriminantTypes[0] : intersect(partition.discriminantTypes);

-        TypeId rightDiscriminantTy = rhsIt->second.discriminantTypes.size() == 1 ? rhsIt->second.discriminantTypes[0]
-                                                                                 : arena->addType(IntersectionType{rhsIt->second.discriminantTypes});
+        TypeId rightDiscriminantTy =
+            rhsIt->second.discriminantTypes.size() == 1 ? rhsIt->second.discriminantTypes[0] : intersect(rhsIt->second.discriminantTypes);

-        dest[def].discriminantTypes.push_back(arena->addType(UnionType{{leftDiscriminantTy, rightDiscriminantTy}}));
+        dest[def].discriminantTypes.push_back(simplifyUnion(builtinTypes, arena, leftDiscriminantTy, rightDiscriminantTy).result);
         dest[def].shouldAppendNilType |= partition.shouldAppendNilType || rhsIt->second.shouldAppendNilType;
     }
 }

-static void computeRefinement(const ScopePtr& scope, RefinementId refinement, RefinementContext* refis, bool sense, NotNull<TypeArena> arena, bool eq,
-    std::vector<ConstraintV>* constraints)
+static void computeRefinement(NotNull<BuiltinTypes> builtinTypes, NotNull<TypeArena> arena, const ScopePtr& scope, RefinementId refinement,
+    RefinementContext* refis, bool sense, bool eq, std::vector<ConstraintV>* constraints)
 {
     if (!refinement)
         return;
     else if (auto variadic = get<Variadic>(refinement))
     {
         for (RefinementId refi : variadic->refinements)
-            computeRefinement(scope, refi, refis, sense, arena, eq, constraints);
+            computeRefinement(builtinTypes, arena, scope, refi, refis, sense, eq, constraints);
     }
     else if (auto negation = get<Negation>(refinement))
-        return computeRefinement(scope, negation->refinement, refis, !sense, arena, eq, constraints);
+        return computeRefinement(builtinTypes, arena, scope, negation->refinement, refis, !sense, eq, constraints);
     else if (auto conjunction = get<Conjunction>(refinement))
     {
         RefinementContext lhsRefis;
         RefinementContext rhsRefis;

-        computeRefinement(scope, conjunction->lhs, sense ? refis : &lhsRefis, sense, arena, eq, constraints);
-        computeRefinement(scope, conjunction->rhs, sense ? refis : &rhsRefis, sense, arena, eq, constraints);
+        computeRefinement(builtinTypes, arena, scope, conjunction->lhs, sense ? refis : &lhsRefis, sense, eq, constraints);
+        computeRefinement(builtinTypes, arena, scope, conjunction->rhs, sense ? refis : &rhsRefis, sense, eq, constraints);

         if (!sense)
-            unionRefinements(lhsRefis, rhsRefis, *refis, arena);
+            unionRefinements(builtinTypes, arena, lhsRefis, rhsRefis, *refis, constraints);
     }
     else if (auto disjunction = get<Disjunction>(refinement))
     {
         RefinementContext lhsRefis;
         RefinementContext rhsRefis;

-        computeRefinement(scope, disjunction->lhs, sense ? &lhsRefis : refis, sense, arena, eq, constraints);
-        computeRefinement(scope, disjunction->rhs, sense ? &rhsRefis : refis, sense, arena, eq, constraints);
+        computeRefinement(builtinTypes, arena, scope, disjunction->lhs, sense ? &lhsRefis : refis, sense, eq, constraints);
+        computeRefinement(builtinTypes, arena, scope, disjunction->rhs, sense ? &rhsRefis : refis, sense, eq, constraints);

         if (sense)
-            unionRefinements(lhsRefis, rhsRefis, *refis, arena);
+            unionRefinements(builtinTypes, arena, lhsRefis, rhsRefis, *refis, constraints);
     }
     else if (auto equivalence = get<Equivalence>(refinement))
     {
-        computeRefinement(scope, equivalence->lhs, refis, sense, arena, true, constraints);
-        computeRefinement(scope, equivalence->rhs, refis, sense, arena, true, constraints);
+        computeRefinement(builtinTypes, arena, scope, equivalence->lhs, refis, sense, true, constraints);
+        computeRefinement(builtinTypes, arena, scope, equivalence->rhs, refis, sense, true, constraints);
     }
     else if (auto proposition = get<Proposition>(refinement))
     {
@@ -300,6 +317,63 @@ static void computeRefinement(NotNull<BuiltinTypes> builtinTypes, NotNull<TypeArena> arena, ...)
     }
 }

+namespace
+{
+
+/*
+ * Constraint generation may be called upon to simplify an intersection or union
+ * of types that are not sufficiently solved yet. We use
+ * FindSimplificationBlockers to recognize these types and defer the
+ * simplification until constraint solution.
+ */
+struct FindSimplificationBlockers : TypeOnceVisitor
+{
+    bool found = false;
+
+    bool visit(TypeId) override
+    {
+        return !found;
+    }
+
+    bool visit(TypeId, const BlockedType&) override
+    {
+        found = true;
+        return false;
+    }
+
+    bool visit(TypeId, const FreeType&) override
+    {
+        found = true;
+        return false;
+    }
+
+    bool visit(TypeId, const PendingExpansionType&) override
+    {
+        found = true;
+        return false;
+    }
+
+    // We do not need to know anything at all about a function's argument or
+    // return types in order to simplify it in an intersection or union.
+    bool visit(TypeId, const FunctionType&) override
+    {
+        return false;
+    }
+
+    bool visit(TypeId, const ClassType&) override
+    {
+        return false;
+    }
+};
+
+bool mustDeferIntersection(TypeId ty)
+{
+    FindSimplificationBlockers bts;
+    bts.traverse(ty);
+    return bts.found;
+}
+} // namespace
+
 void ConstraintGraphBuilder::applyRefinements(const ScopePtr& scope, Location location, RefinementId refinement)
 {
     if (!refinement)
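applyRefinements, in the hunks that follow, is the consumer of this visitor in this diff: it calls mustDeferIntersection on both operands and only falls back to a deferred RefineConstraint when one side is still unsolved; otherwise it simplifies eagerly.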
@@ -307,7 +381,7 @@ void ConstraintGraphBuilder::applyRefinements(const ScopePtr& scope, Location location, RefinementId refinement)

     RefinementContext refinements;
     std::vector<ConstraintV> constraints;
-    computeRefinement(scope, refinement, &refinements, /*sense*/ true, arena, /*eq*/ false, &constraints);
+    computeRefinement(builtinTypes, arena, scope, refinement, &refinements, /*sense*/ true, /*eq*/ false, &constraints);

     for (auto& [def, partition] : refinements)
     {

@@ -317,8 +391,24 @@ void ConstraintGraphBuilder::applyRefinements(const ScopePtr& scope, Location location, RefinementId refinement)
         if (partition.shouldAppendNilType)
             ty = arena->addType(UnionType{{ty, builtinTypes->nilType}});

-        partition.discriminantTypes.push_back(ty);
-        scope->dcrRefinements[def] = arena->addType(IntersectionType{std::move(partition.discriminantTypes)});
+        // Intersect ty with every discriminant type. If either type is not
+        // sufficiently solved, we queue the intersection up via an
+        // IntersectConstraint.
+
+        for (TypeId dt : partition.discriminantTypes)
+        {
+            if (mustDeferIntersection(ty) || mustDeferIntersection(dt))
+            {
+                TypeId r = arena->addType(BlockedType{});
+                addConstraint(scope, location, RefineConstraint{RefineConstraint::Intersection, r, ty, dt});
+
+                ty = r;
+            }
+            else
+                ty = simplifyIntersection(builtinTypes, arena, ty, dt).result;
+        }
+
+        scope->dcrRefinements[def] = ty;
     }
 }
@@ -708,7 +798,7 @@ ControlFlow ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatLocalFunction* function)
     functionType = arena->addType(BlockedType{});
     scope->bindings[function->name] = Binding{functionType, function->name->location};

-    FunctionSignature sig = checkFunctionSignature(scope, function->func);
+    FunctionSignature sig = checkFunctionSignature(scope, function->func, /* expectedType */ std::nullopt, function->name->location);
     sig.bodyScope->bindings[function->name] = Binding{sig.signature, function->func->location};

     BreadcrumbId bc = dfg->getBreadcrumb(function->name);

@@ -741,10 +831,12 @@ ControlFlow ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatFunction* function)
     TypeId generalizedType = arena->addType(BlockedType{});

     Checkpoint start = checkpoint(this);
-    FunctionSignature sig = checkFunctionSignature(scope, function->func);
+    FunctionSignature sig = checkFunctionSignature(scope, function->func, /* expectedType */ std::nullopt, function->name->location);

     std::unordered_set<Constraint*> excludeList;

+    const NullableBreadcrumbId functionBreadcrumb = dfg->getBreadcrumb(function->name);
+
     if (AstExprLocal* localName = function->name->as<AstExprLocal>())
     {
         std::optional<TypeId> existingFunctionTy = scope->lookup(localName->local);

@@ -759,6 +851,9 @@ ControlFlow ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatFunction* function)
         scope->bindings[localName->local] = Binding{generalizedType, localName->location};

         sig.bodyScope->bindings[localName->local] = Binding{sig.signature, localName->location};
+
+        if (functionBreadcrumb)
+            sig.bodyScope->dcrRefinements[functionBreadcrumb->def] = sig.signature;
     }
     else if (AstExprGlobal* globalName = function->name->as<AstExprGlobal>())
     {

@@ -769,6 +864,9 @@ ControlFlow ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatFunction* function)
             generalizedType = *existingFunctionTy;

         sig.bodyScope->bindings[globalName->name] = Binding{sig.signature, globalName->location};
+
+        if (functionBreadcrumb)
+            sig.bodyScope->dcrRefinements[functionBreadcrumb->def] = sig.signature;
     }
     else if (AstExprIndexName* indexName = function->name->as<AstExprIndexName>())
     {

@@ -795,8 +893,8 @@ ControlFlow ConstraintGraphBuilder::visit(const ScopePtr& scope, AstStatFunction* function)
     if (generalizedType == nullptr)
         ice->ice("generalizedType == nullptr", function->location);

-    if (NullableBreadcrumbId bc = dfg->getBreadcrumb(function->name))
-        scope->dcrRefinements[bc->def] = generalizedType;
+    if (functionBreadcrumb)
+        scope->dcrRefinements[functionBreadcrumb->def] = generalizedType;

     checkFunctionBody(sig.bodyScope, function->func);
     Checkpoint end = checkpoint(this);
@@ -1469,21 +1567,7 @@ Inference ConstraintGraphBuilder::check(
     else if (auto call = expr->as<AstExprCall>())
         result = flattenPack(scope, expr->location, checkPack(scope, call)); // TODO: needs predicates too
     else if (auto a = expr->as<AstExprFunction>())
-    {
-        Checkpoint startCheckpoint = checkpoint(this);
-        FunctionSignature sig = checkFunctionSignature(scope, a, expectedType);
-        checkFunctionBody(sig.bodyScope, a);
-        Checkpoint endCheckpoint = checkpoint(this);
-
-        TypeId generalizedTy = arena->addType(BlockedType{});
-        NotNull<Constraint> gc = addConstraint(sig.signatureScope, expr->location, GeneralizationConstraint{generalizedTy, sig.signature});
-
-        forEachConstraint(startCheckpoint, endCheckpoint, this, [gc](const ConstraintPtr& constraint) {
-            gc->dependencies.emplace_back(constraint.get());
-        });
-
-        result = Inference{generalizedTy};
-    }
+        result = check(scope, a, expectedType);
     else if (auto indexName = expr->as<AstExprIndexName>())
         result = check(scope, indexName);
     else if (auto indexExpr = expr->as<AstExprIndexExpr>())

@@ -1651,6 +1735,23 @@ Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprIndexExpr* indexExpr)
     return Inference{result};
 }

+Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprFunction* func, std::optional<TypeId> expectedType)
+{
+    Checkpoint startCheckpoint = checkpoint(this);
+    FunctionSignature sig = checkFunctionSignature(scope, func, expectedType);
+    checkFunctionBody(sig.bodyScope, func);
+    Checkpoint endCheckpoint = checkpoint(this);
+
+    TypeId generalizedTy = arena->addType(BlockedType{});
+    NotNull<Constraint> gc = addConstraint(sig.signatureScope, func->location, GeneralizationConstraint{generalizedTy, sig.signature});
+
+    forEachConstraint(startCheckpoint, endCheckpoint, this, [gc](const ConstraintPtr& constraint) {
+        gc->dependencies.emplace_back(constraint.get());
+    });
+
+    return Inference{generalizedTy};
+}
+
 Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprUnary* unary)
 {
     auto [operandType, refinement] = check(scope, unary->expr);
@@ -1667,6 +1768,17 @@ Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprBinary* binary, std::optional<TypeId> expectedType)
 {
     auto [leftType, rightType, refinement] = checkBinary(scope, binary, expectedType);

+    if (binary->op == AstExprBinary::Op::Add)
+    {
+        TypeId resultType = arena->addType(TypeFamilyInstanceType{
+            NotNull{&kBuiltinTypeFamilies.addFamily},
+            {leftType, rightType},
+            {},
+        });
+        addConstraint(scope, binary->location, ReduceConstraint{resultType});
+        return Inference{resultType, std::move(refinement)};
+    }
+
     TypeId resultType = arena->addType(BlockedType{});
     addConstraint(scope, binary->location,
         BinaryConstraint{binary->op, leftType, rightType, resultType, binary, &module->astOriginalCallTypes, &module->astOverloadResolvedTypes});

@@ -1686,7 +1798,7 @@ Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprIfElse* ifElse, std::optional<TypeId> expectedType)
     applyRefinements(elseScope, ifElse->falseExpr->location, refinementArena.negation(refinement));
     TypeId elseType = check(elseScope, ifElse->falseExpr, ValueContext::RValue, expectedType).ty;

-    return Inference{expectedType ? *expectedType : arena->addType(UnionType{{thenType, elseType}})};
+    return Inference{expectedType ? *expectedType : simplifyUnion(builtinTypes, arena, thenType, elseType).result};
 }

 Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprTypeAssertion* typeAssert)
@@ -1902,6 +2014,8 @@ TypeId ConstraintGraphBuilder::checkLValue(const ScopePtr& scope, AstExpr* expr)
     }
     else if (auto indexExpr = e->as<AstExprIndexExpr>())
     {
+        // We need to populate the type for the index value
+        check(scope, indexExpr->index, ValueContext::RValue);
         if (auto strIndex = indexExpr->index->as<AstExprConstantString>())
         {
             segments.push_back(std::string(strIndex->value.data, strIndex->value.size));

@@ -2018,12 +2132,12 @@ Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprTable* expr, std::optional<TypeId> expectedType)
             else
             {
                 expectedValueType = arena->addType(BlockedType{});
-                addConstraint(scope, item.value->location, HasPropConstraint{*expectedValueType, *expectedType, stringKey->value.data});
+                addConstraint(scope, item.value->location,
+                    HasPropConstraint{*expectedValueType, *expectedType, stringKey->value.data, /*suppressSimplification*/ true});
             }
         }
     }

     // We'll resolve the expected index result type here with the following priority:
     // 1. Record table types - in which key, value pairs must be handled on a k,v pair basis.
     //    In this case, the above if-statement will populate expectedValueType

@@ -2079,7 +2193,7 @@ Inference ConstraintGraphBuilder::check(const ScopePtr& scope, AstExprTable* expr, std::optional<TypeId> expectedType)
 }

 ConstraintGraphBuilder::FunctionSignature ConstraintGraphBuilder::checkFunctionSignature(
-    const ScopePtr& parent, AstExprFunction* fn, std::optional<TypeId> expectedType)
+    const ScopePtr& parent, AstExprFunction* fn, std::optional<TypeId> expectedType, std::optional<Location> originalName)
 {
     ScopePtr signatureScope = nullptr;
     ScopePtr bodyScope = nullptr;
@@ -2235,12 +2349,18 @@ ConstraintGraphBuilder::FunctionSignature ConstraintGraphBuilder::checkFunctionSignature(
     // TODO: Preserve argument names in the function's type.

     FunctionType actualFunction{TypeLevel{}, parent.get(), arena->addTypePack(argTypes, varargPack), returnType};
-    actualFunction.hasNoGenerics = !hasGenerics;
     actualFunction.generics = std::move(genericTypes);
     actualFunction.genericPacks = std::move(genericTypePacks);
     actualFunction.argNames = std::move(argNames);
     actualFunction.hasSelf = fn->self != nullptr;

+    FunctionDefinition defn;
+    defn.definitionModuleName = module->name;
+    defn.definitionLocation = fn->location;
+    defn.varargLocation = fn->vararg ? std::make_optional(fn->varargLocation) : std::nullopt;
+    defn.originalNameLocation = originalName.value_or(Location(fn->location.begin, 0));
+    actualFunction.definition = defn;
+
     TypeId actualFunctionType = arena->addType(std::move(actualFunction));
     LUAU_ASSERT(actualFunctionType);
     module->astTypes[fn] = actualFunctionType;

@@ -2283,6 +2403,7 @@ TypeId ConstraintGraphBuilder::resolveType(const ScopePtr& scope, AstType* ty, bool inTypeArguments)
             if (ref->parameters.size != 1 || !ref->parameters.data[0].type)
             {
                 reportError(ty->location, GenericError{"_luau_print requires one generic parameter"});
+                module->astResolvedTypes[ty] = builtinTypes->errorRecoveryType();
                 return builtinTypes->errorRecoveryType();
             }
             else

@@ -2420,7 +2541,6 @@ TypeId ConstraintGraphBuilder::resolveType(const ScopePtr& scope, AstType* ty, bool inTypeArguments)

         // This replicates the behavior of the appropriate FunctionType
         // constructors.
-        ftv.hasNoGenerics = !hasGenerics;
         ftv.generics = std::move(genericTypes);
         ftv.genericPacks = std::move(genericTypePacks);
@@ -11,12 +11,13 @@
 #include "Luau/Metamethods.h"
 #include "Luau/ModuleResolver.h"
 #include "Luau/Quantify.h"
+#include "Luau/Simplify.h"
 #include "Luau/ToString.h"
-#include "Luau/TypeUtils.h"
 #include "Luau/Type.h"
+#include "Luau/TypeFamily.h"
+#include "Luau/TypeUtils.h"
 #include "Luau/Unifier.h"
 #include "Luau/VisitType.h"
-#include "Luau/TypeFamily.h"

 LUAU_FASTFLAGVARIABLE(DebugLuauLogSolver, false);
 LUAU_FASTFLAG(LuauRequirePathTrueModuleName)
@@ -73,7 +74,7 @@ static std::pair<std::vector<TypeId>, std::vector<TypePackId>> saturateArguments(
     // mutually exclusive with the type pack -> type conversion we do below:
     // extraTypes will only have elements in it if we have more types than we
     // have parameter slots for them to go into.
-    if (!extraTypes.empty())
+    if (!extraTypes.empty() && !fn.typePackParams.empty())
     {
         saturatedPackArguments.push_back(arena->addTypePack(extraTypes));
     }

@@ -89,7 +90,7 @@ static std::pair<std::vector<TypeId>, std::vector<TypePackId>> saturateArguments(
         {
             saturatedTypeArguments.push_back(*first(tp));
         }
-        else
+        else if (saturatedPackArguments.size() < fn.typePackParams.size())
         {
             saturatedPackArguments.push_back(tp);
         }

@@ -426,7 +427,9 @@ void ConstraintSolver::finalizeModule()
         rootScope->returnType = builtinTypes->errorTypePack;
     }
     else
-        rootScope->returnType = *returnType;
+    {
+        rootScope->returnType = anyifyModuleReturnTypePackGenerics(*returnType);
+    }
 }

 bool ConstraintSolver::tryDispatch(NotNull<const Constraint> constraint, bool force)
@@ -468,6 +471,8 @@ bool ConstraintSolver::tryDispatch(NotNull<const Constraint> constraint, bool force)
         success = tryDispatch(*sottc, constraint);
     else if (auto uc = get<UnpackConstraint>(*constraint))
         success = tryDispatch(*uc, constraint);
+    else if (auto rc = get<RefineConstraint>(*constraint))
+        success = tryDispatch(*rc, constraint, force);
     else if (auto rc = get<ReduceConstraint>(*constraint))
         success = tryDispatch(*rc, constraint, force);
     else if (auto rpc = get<ReducePackConstraint>(*constraint))

@@ -541,15 +546,25 @@ bool ConstraintSolver::tryDispatch(const InstantiationConstraint& c, NotNull<const Constraint> constraint, bool force)
     if (isBlocked(c.superType))
         return block(c.superType, constraint);

-    if (!recursiveBlock(c.superType, constraint))
+    if (!blockOnPendingTypes(c.superType, constraint))
         return false;

     Instantiation inst(TxnLog::empty(), arena, TypeLevel{}, constraint->scope);

     std::optional<TypeId> instantiated = inst.substitute(c.superType);
-    LUAU_ASSERT(instantiated); // TODO FIXME HANDLE THIS

     LUAU_ASSERT(get<BlockedType>(c.subType));

+    if (!instantiated.has_value())
+    {
+        reportError(UnificationTooComplex{}, constraint->location);
+
+        asMutable(c.subType)->ty.emplace<BoundType>(errorRecoveryType());
+        unblock(c.subType);
+
+        return true;
+    }
+
     asMutable(c.subType)->ty.emplace<BoundType>(*instantiated);

     InstantiationQueuer queuer{constraint->scope, constraint->location, this};
@@ -759,9 +774,11 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Constraint> constraint, bool force)
     case AstExprBinary::Op::Div:
     case AstExprBinary::Op::Pow:
     case AstExprBinary::Op::Mod:
     {
+        const NormalizedType* normLeftTy = normalizer->normalize(leftType);
         if (hasTypeInIntersection<FreeType>(leftType) && force)
             asMutable(leftType)->ty.emplace<BoundType>(anyPresent ? builtinTypes->anyType : builtinTypes->numberType);
-        if (isNumber(leftType))
+        if (normLeftTy && normLeftTy->isNumber())
         {
             unify(leftType, rightType, constraint->scope);
             asMutable(resultType)->ty.emplace<BoundType>(anyPresent ? builtinTypes->anyType : leftType);

@@ -770,6 +787,7 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Constraint> constraint, bool force)
         }

+        break;
     }
     // For concatenation, if the LHS is a string, the RHS must be a string as
     // well. The result will also be a string.
     case AstExprBinary::Op::Concat:
@@ -809,9 +827,9 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Constraint> constraint, bool force)
     // truthy.
     case AstExprBinary::Op::And:
     {
-        TypeId leftFilteredTy = arena->addType(IntersectionType{{builtinTypes->falsyType, leftType}});
+        TypeId leftFilteredTy = simplifyIntersection(builtinTypes, arena, leftType, builtinTypes->falsyType).result;

-        asMutable(resultType)->ty.emplace<BoundType>(arena->addType(UnionType{{leftFilteredTy, rightType}}));
+        asMutable(resultType)->ty.emplace<BoundType>(simplifyUnion(builtinTypes, arena, rightType, leftFilteredTy).result);
         unblock(resultType);
         return true;
     }
@@ -819,9 +837,9 @@ bool ConstraintSolver::tryDispatch(const BinaryConstraint& c, NotNull<const Constraint> constraint, bool force)
     // LHS is falsey.
     case AstExprBinary::Op::Or:
     {
-        TypeId leftFilteredTy = arena->addType(IntersectionType{{builtinTypes->truthyType, leftType}});
+        TypeId leftFilteredTy = simplifyIntersection(builtinTypes, arena, leftType, builtinTypes->truthyType).result;

-        asMutable(resultType)->ty.emplace<BoundType>(arena->addType(UnionType{{leftFilteredTy, rightType}}));
+        asMutable(resultType)->ty.emplace<BoundType>(simplifyUnion(builtinTypes, arena, rightType, leftFilteredTy).result);
         unblock(resultType);
         return true;
     }
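A worked example of what the new And case computes, assuming the simplifier collapses the filtered intersection as the refinement machinery elsewhere in this diff expects (illustrative; the exact output depends on the simplifier):

```cpp
// local z = x and y    -- suppose x : string?
// leftFilteredTy = simplifyIntersection(leftType, falsyType).result
//                ==> nil                 (the only falsy inhabitant of string?)
// resultType     = simplifyUnion(rightType, leftFilteredTy).result
//                ==> typeof(y) | nil
// Previously both steps allocated raw Intersection/Union nodes such as
// (string? & falsy) | typeof(y), which never shrank.
```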
@@ -1266,7 +1284,12 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<const Constraint> constraint)
         overload = follow(overload);

         std::optional<TypeId> instantiated = inst.substitute(overload);
-        LUAU_ASSERT(instantiated); // TODO FIXME HANDLE THIS
+
+        if (!instantiated.has_value())
+        {
+            reportError(UnificationTooComplex{}, constraint->location);
+            return true;
+        }

         Unifier u{normalizer, Mode::Strict, constraint->scope, Location{}, Covariant};
         u.enableScopeTests();

@@ -1374,7 +1397,7 @@ bool ConstraintSolver::tryDispatch(const HasPropConstraint& c, NotNull<const Constraint> constraint)
         return true;
     }

-    auto [blocked, result] = lookupTableProp(subjectType, c.prop);
+    auto [blocked, result] = lookupTableProp(subjectType, c.prop, c.suppressSimplification);
     if (!blocked.empty())
     {
         for (TypeId blocked : blocked)

@@ -1632,7 +1655,9 @@ bool ConstraintSolver::tryDispatch(const SingletonOrTopTypeConstraint& c, NotNull<const Constraint> constraint)
     else if (!c.negated && get<SingletonType>(followed))
         *asMutable(c.resultType) = BoundType{c.discriminantType};
     else
-        *asMutable(c.resultType) = BoundType{builtinTypes->unknownType};
+        *asMutable(c.resultType) = BoundType{builtinTypes->anyType};
+
+    unblock(c.resultType);

     return true;
 }
@@ -1700,10 +1725,131 @@ bool ConstraintSolver::tryDispatch(const UnpackConstraint& c, NotNull<const Constraint> constraint)
     return true;
 }

+namespace
+{
+
+/*
+ * Search for types that prevent us from being ready to dispatch a particular
+ * RefineConstraint.
+ */
+struct FindRefineConstraintBlockers : TypeOnceVisitor
+{
+    std::unordered_set<TypeId> found;
+    bool visit(TypeId ty, const BlockedType&) override
+    {
+        found.insert(ty);
+        return false;
+    }
+
+    bool visit(TypeId ty, const PendingExpansionType&) override
+    {
+        found.insert(ty);
+        return false;
+    }
+};
+
+}
+
+static bool isNegatedAny(TypeId ty)
+{
+    ty = follow(ty);
+    const NegationType* nt = get<NegationType>(ty);
+    if (!nt)
+        return false;
+    TypeId negatedTy = follow(nt->ty);
+    return bool(get<AnyType>(negatedTy));
+}
+
+bool ConstraintSolver::tryDispatch(const RefineConstraint& c, NotNull<const Constraint> constraint, bool force)
+{
+    if (isBlocked(c.discriminant))
+        return block(c.discriminant, constraint);
+
+    FindRefineConstraintBlockers fbt;
+    fbt.traverse(c.discriminant);
+
+    if (!fbt.found.empty())
+    {
+        bool foundOne = false;
+
+        for (TypeId blocked : fbt.found)
+        {
+            if (blocked == c.type)
+                continue;
+
+            block(blocked, constraint);
+            foundOne = true;
+        }
+
+        if (foundOne)
+            return false;
+    }
+
+    /* HACK: Refinements sometimes produce a type T & ~any under the assumption
+     * that ~any is the same as any. This is so so weird, but refinements needs
+     * some way to say "I may refine this, but I'm not sure."
+     *
+     * It does this by refining on a blocked type and deferring the decision
+     * until it is unblocked.
+     *
+     * Refinements also get negated, so we wind up with types like T & ~*blocked*
+     *
+     * We need to treat T & ~any as T in this case.
+     */
+
+    if (c.mode == RefineConstraint::Intersection && isNegatedAny(c.discriminant))
+    {
+        asMutable(c.resultType)->ty.emplace<BoundType>(c.type);
+        unblock(c.resultType);
+        return true;
+    }
+
+    const TypeId type = follow(c.type);
+
+    LUAU_ASSERT(get<BlockedType>(c.resultType));
+
+    if (type == c.resultType)
+    {
+        /*
+         * Sometimes, we get a constraint of the form
+         *
+         * *blocked-N* ~ refine *blocked-N* & U
+         *
+         * The constraint essentially states that a particular type is a
+         * refinement of itself. This is weird and I think vacuous.
+         *
+         * I *believe* it is safe to replace the result with a fresh type that
+         * is constrained by U. We effect this by minting a fresh type for the
+         * result when U = any, else we bind the result to whatever discriminant
+         * was offered.
+         */
+        if (get<AnyType>(follow(c.discriminant)))
+            asMutable(c.resultType)->ty.emplace<FreeType>(constraint->scope);
+        else
+            asMutable(c.resultType)->ty.emplace<BoundType>(c.discriminant);
+
+        unblock(c.resultType);
+        return true;
+    }
+
+    auto [result, blockedTypes] = c.mode == RefineConstraint::Intersection ? simplifyIntersection(builtinTypes, NotNull{arena}, type, c.discriminant)
+                                                                           : simplifyUnion(builtinTypes, NotNull{arena}, type, c.discriminant);
+
+    if (!force && !blockedTypes.empty())
+        return block(blockedTypes, constraint);
+
+    asMutable(c.resultType)->ty.emplace<BoundType>(result);
+
+    unblock(c.resultType);
+
+    return true;
+}
+
 bool ConstraintSolver::tryDispatch(const ReduceConstraint& c, NotNull<const Constraint> constraint, bool force)
 {
     TypeId ty = follow(c.ty);
-    FamilyGraphReductionResult result = reduceFamilies(ty, constraint->location, NotNull{arena}, builtinTypes, nullptr, force);
+    FamilyGraphReductionResult result =
+        reduceFamilies(ty, constraint->location, NotNull{arena}, builtinTypes, constraint->scope, normalizer, nullptr, force);

     for (TypeId r : result.reducedTypes)
         unblock(r);
@@ -1726,7 +1872,8 @@ bool ConstraintSolver::tryDispatch(const ReduceConstraint& c, NotNull<const Constraint> constraint, bool force)
 bool ConstraintSolver::tryDispatch(const ReducePackConstraint& c, NotNull<const Constraint> constraint, bool force)
 {
     TypePackId tp = follow(c.tp);
-    FamilyGraphReductionResult result = reduceFamilies(tp, constraint->location, NotNull{arena}, builtinTypes, nullptr, force);
+    FamilyGraphReductionResult result =
+        reduceFamilies(tp, constraint->location, NotNull{arena}, builtinTypes, constraint->scope, normalizer, nullptr, force);

     for (TypeId r : result.reducedTypes)
         unblock(r);

@@ -1951,13 +2098,15 @@ bool ConstraintSolver::tryDispatchIterableFunction(
     return true;
 }

-std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTableProp(TypeId subjectType, const std::string& propName)
+std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTableProp(
+    TypeId subjectType, const std::string& propName, bool suppressSimplification)
 {
     std::unordered_set<TypeId> seen;
-    return lookupTableProp(subjectType, propName, seen);
+    return lookupTableProp(subjectType, propName, suppressSimplification, seen);
 }

-std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTableProp(TypeId subjectType, const std::string& propName, std::unordered_set<TypeId>& seen)
+std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTableProp(
+    TypeId subjectType, const std::string& propName, bool suppressSimplification, std::unordered_set<TypeId>& seen)
 {
     if (!seen.insert(subjectType).second)
         return {};

@@ -1985,7 +2134,7 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTableProp(
     }
     else if (auto mt = get<MetatableType>(subjectType))
     {
-        auto [blocked, result] = lookupTableProp(mt->table, propName, seen);
+        auto [blocked, result] = lookupTableProp(mt->table, propName, suppressSimplification, seen);
         if (!blocked.empty() || result)
             return {blocked, result};
@@ -2016,13 +2165,17 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTableProp(
             }
         }
         else
-            return lookupTableProp(indexType, propName, seen);
+            return lookupTableProp(indexType, propName, suppressSimplification, seen);
         }
     }
     else if (auto ct = get<ClassType>(subjectType))
     {
         if (auto p = lookupClassProp(ct, propName))
             return {{}, p->type()};
+        if (ct->indexer)
+        {
+            return {{}, ct->indexer->indexResultType};
+        }
     }
     else if (auto pt = get<PrimitiveType>(subjectType); pt && pt->metatable)
     {
@ -2033,7 +2186,7 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
|
||||
if (indexProp == metatable->props.end())
|
||||
return {{}, std::nullopt};
|
||||
|
||||
return lookupTableProp(indexProp->second.type(), propName, seen);
|
||||
return lookupTableProp(indexProp->second.type(), propName, suppressSimplification, seen);
|
||||
}
|
||||
else if (auto ft = get<FreeType>(subjectType))
|
||||
{
|
||||
@ -2054,7 +2207,7 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
|
||||
|
||||
for (TypeId ty : utv)
|
||||
{
|
||||
auto [innerBlocked, innerResult] = lookupTableProp(ty, propName, seen);
|
||||
auto [innerBlocked, innerResult] = lookupTableProp(ty, propName, suppressSimplification, seen);
|
||||
blocked.insert(blocked.end(), innerBlocked.begin(), innerBlocked.end());
|
||||
if (innerResult)
|
||||
options.insert(*innerResult);
|
||||
@ -2067,6 +2220,12 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
|
||||
return {{}, std::nullopt};
|
||||
else if (options.size() == 1)
|
||||
return {{}, *begin(options)};
|
||||
else if (options.size() == 2 && !suppressSimplification)
|
||||
{
|
||||
TypeId one = *begin(options);
|
||||
TypeId two = *(++begin(options));
|
||||
return {{}, simplifyUnion(builtinTypes, arena, one, two).result};
|
||||
}
|
||||
else
|
||||
return {{}, arena->addType(UnionType{std::vector<TypeId>(begin(options), end(options))})};
|
||||
}
|
||||
@ -2077,7 +2236,7 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
|
||||
|
||||
for (TypeId ty : itv)
|
||||
{
|
||||
auto [innerBlocked, innerResult] = lookupTableProp(ty, propName, seen);
|
||||
auto [innerBlocked, innerResult] = lookupTableProp(ty, propName, suppressSimplification, seen);
|
||||
blocked.insert(blocked.end(), innerBlocked.begin(), innerBlocked.end());
|
||||
if (innerResult)
|
||||
options.insert(*innerResult);
|
||||
@ -2090,6 +2249,12 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
|
||||
return {{}, std::nullopt};
|
||||
else if (options.size() == 1)
|
||||
return {{}, *begin(options)};
|
||||
else if (options.size() == 2 && !suppressSimplification)
|
||||
{
|
||||
TypeId one = *begin(options);
|
||||
TypeId two = *(++begin(options));
|
||||
return {{}, simplifyIntersection(builtinTypes, arena, one, two).result};
|
||||
}
|
||||
else
|
||||
return {{}, arena->addType(IntersectionType{std::vector<TypeId>(begin(options), end(options))})};
|
||||
}
|
||||
@ -2214,13 +2379,6 @@ struct Blocker : TypeOnceVisitor
|
||||
{
|
||||
}
|
||||
|
||||
bool visit(TypeId ty, const BlockedType&)
|
||||
{
|
||||
blocked = true;
|
||||
solver->block(ty, constraint);
|
||||
return false;
|
||||
}
|
||||
|
||||
bool visit(TypeId ty, const PendingExpansionType&)
|
||||
{
|
||||
blocked = true;
|
||||
@ -2229,14 +2387,14 @@ struct Blocker : TypeOnceVisitor
|
||||
}
|
||||
};
|
||||
|
||||
bool ConstraintSolver::recursiveBlock(TypeId target, NotNull<const Constraint> constraint)
|
||||
bool ConstraintSolver::blockOnPendingTypes(TypeId target, NotNull<const Constraint> constraint)
|
||||
{
|
||||
Blocker blocker{NotNull{this}, constraint};
|
||||
blocker.traverse(target);
|
||||
return !blocker.blocked;
|
||||
}
|
||||
|
||||
bool ConstraintSolver::recursiveBlock(TypePackId pack, NotNull<const Constraint> constraint)
|
||||
bool ConstraintSolver::blockOnPendingTypes(TypePackId pack, NotNull<const Constraint> constraint)
|
||||
{
|
||||
Blocker blocker{NotNull{this}, constraint};
|
||||
blocker.traverse(pack);
|
||||
@ -2482,4 +2640,34 @@ TypeId ConstraintSolver::unionOfTypes(TypeId a, TypeId b, NotNull<Scope> scope,
|
||||
return arena->addType(UnionType{types});
|
||||
}
|
||||
|
||||
TypePackId ConstraintSolver::anyifyModuleReturnTypePackGenerics(TypePackId tp)
|
||||
{
|
||||
tp = follow(tp);
|
||||
|
||||
if (const VariadicTypePack* vtp = get<VariadicTypePack>(tp))
|
||||
{
|
||||
TypeId ty = follow(vtp->ty);
|
||||
return get<GenericType>(ty) ? builtinTypes->anyTypePack : tp;
|
||||
}
|
||||
|
||||
if (!get<TypePack>(follow(tp)))
|
||||
return tp;
|
||||
|
||||
std::vector<TypeId> resultTypes;
|
||||
std::optional<TypePackId> resultTail;
|
||||
|
||||
TypePackIterator it = begin(tp);
|
||||
|
||||
for (TypePackIterator e = end(tp); it != e; ++it)
|
||||
{
|
||||
TypeId ty = follow(*it);
|
||||
resultTypes.push_back(get<GenericType>(ty) ? builtinTypes->anyType : ty);
|
||||
}
|
||||
|
||||
if (std::optional<TypePackId> tail = it.tail())
|
||||
resultTail = anyifyModuleReturnTypePackGenerics(*tail);
|
||||
|
||||
return arena->addTypePack(resultTypes, resultTail);
|
||||
}
|
||||
|
||||
} // namespace Luau
|
||||
|
@ -16,7 +16,6 @@
#include "Luau/TimeTrace.h"
#include "Luau/TypeChecker2.h"
#include "Luau/TypeInfer.h"
#include "Luau/TypeReduction.h"
#include "Luau/Variant.h"

#include <algorithm>
@ -622,7 +621,6 @@ CheckResult Frontend::check_DEPRECATED(const ModuleName& name, std::optional<Fro
module->astOriginalCallTypes.clear();
module->astOverloadResolvedTypes.clear();
module->astResolvedTypes.clear();
module->astOriginalResolvedTypes.clear();
module->astResolvedTypePacks.clear();
module->astScopes.clear();

@ -1138,7 +1136,6 @@ void Frontend::checkBuildQueueItem(BuildQueueItem& item)
module->astOriginalCallTypes.clear();
module->astOverloadResolvedTypes.clear();
module->astResolvedTypes.clear();
module->astOriginalResolvedTypes.clear();
module->astResolvedTypePacks.clear();
module->astScopes.clear();

@ -1311,7 +1308,6 @@ ModulePtr check(const SourceModule& sourceModule, const std::vector<RequireCycle
ModulePtr result = std::make_shared<Module>();
result->name = sourceModule.name;
result->humanReadableName = sourceModule.humanReadableName;
result->reduction = std::make_unique<TypeReduction>(NotNull{&result->internalTypes}, builtinTypes, iceHandler);

std::unique_ptr<DcrLogger> logger;
if (recordJsonLog)
@ -1365,11 +1361,17 @@ ModulePtr check(const SourceModule& sourceModule, const std::vector<RequireCycle

Luau::check(builtinTypes, NotNull{&unifierState}, logger.get(), sourceModule, result.get());

// Ideally we freeze the arenas before the call into Luau::check, but TypeReduction
// needs to allocate new types while Luau::check is in progress, so here we are.
// It would be nice if we could freeze the arenas before doing type
// checking, but we'll have to do some work to get there.
//
// It does mean that mutations to the type graph can happen after the constraints
// have been solved, which will cause hard-to-debug problems. We should revisit this.
// TypeChecker2 sometimes needs to allocate TypePacks via extendTypePack()
// in order to do its thing. We can rework that code to instead allocate
// into a temporary arena as long as we can prove that the allocated types
// and packs can never find their way into an error.
//
// Notably, we would first need to get to a place where TypeChecker2 is
// never in the position of dealing with a FreeType. They should all be
// bound to something by the time constraints are solved.
freeze(result->internalTypes);
freeze(result->interfaceTypes);

@ -13,7 +13,7 @@ bool Instantiation::isDirty(TypeId ty)
{
if (const FunctionType* ftv = log->getMutable<FunctionType>(ty))
{
if (ftv->hasNoGenerics)
if (ftv->hasNoFreeOrGenericTypes)
return false;

return true;
@ -74,7 +74,7 @@ bool ReplaceGenerics::ignoreChildren(TypeId ty)
{
if (const FunctionType* ftv = log->getMutable<FunctionType>(ty))
{
if (ftv->hasNoGenerics)
if (ftv->hasNoFreeOrGenericTypes)
return true;

// We aren't recursing in the case of a generic function which
@ -10,7 +10,6 @@
#include "Luau/Type.h"
#include "Luau/TypeInfer.h"
#include "Luau/TypePack.h"
#include "Luau/TypeReduction.h"
#include "Luau/VisitType.h"

#include <algorithm>
@ -20,7 +19,6 @@ LUAU_FASTFLAGVARIABLE(LuauClonePublicInterfaceLess2, false);
LUAU_FASTFLAG(LuauSubstitutionReentrant);
LUAU_FASTFLAG(LuauClassTypeVarsInSubstitution);
LUAU_FASTFLAG(LuauSubstitutionFixMissingFields);
LUAU_FASTFLAGVARIABLE(LuauCopyExportedTypes, false);

namespace Luau
{
@ -238,10 +236,7 @@ void Module::clonePublicInterface(NotNull<BuiltinTypes> builtinTypes, InternalEr

// Copy external stuff over to Module itself
this->returnType = moduleScope->returnType;
if (FFlag::DebugLuauDeferredConstraintResolution || FFlag::LuauCopyExportedTypes)
this->exportedTypeBindings = moduleScope->exportedTypeBindings;
else
this->exportedTypeBindings = std::move(moduleScope->exportedTypeBindings);
this->exportedTypeBindings = moduleScope->exportedTypeBindings;
}

bool Module::hasModuleScope() const
@ -18,7 +18,6 @@ LUAU_FASTFLAGVARIABLE(DebugLuauCheckNormalizeInvariant, false)
LUAU_FASTINTVARIABLE(LuauNormalizeIterationLimit, 1200);
LUAU_FASTINTVARIABLE(LuauNormalizeCacheLimit, 100000);
LUAU_FASTFLAGVARIABLE(LuauNormalizeBlockedTypes, false);
LUAU_FASTFLAGVARIABLE(LuauNormalizeMetatableFixes, false);
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_FASTFLAG(LuauUninhabitedSubAnything2)
LUAU_FASTFLAG(LuauTransitiveSubtyping)
@ -228,6 +227,16 @@ NormalizedType::NormalizedType(NotNull<BuiltinTypes> builtinTypes)
{
}

bool NormalizedType::isFunction() const
{
return !get<NeverType>(tops) || !functions.parts.empty();
}

bool NormalizedType::isNumber() const
{
return !get<NeverType>(tops) || !get<NeverType>(numbers);
}

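A note on the two predicates just added: tops is only non-never when the normalized type contains a top type, so as written isFunction() and isNumber() also answer true for any/unknown, not only for types with explicit function parts or a number component (our reading of the returns above).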
static bool isShallowInhabited(const NormalizedType& norm)
{
// This test is just a shallow check, for example it returns `true` for `{ p : never }`
@ -516,7 +525,8 @@ static bool areNormalizedClasses(const NormalizedClassType& tys)

static bool isPlainTyvar(TypeId ty)
{
return (get<FreeType>(ty) || get<GenericType>(ty) || (FFlag::LuauNormalizeBlockedTypes && get<BlockedType>(ty)) || get<PendingExpansionType>(ty));
return (get<FreeType>(ty) || get<GenericType>(ty) || (FFlag::LuauNormalizeBlockedTypes && get<BlockedType>(ty)) ||
get<PendingExpansionType>(ty) || get<TypeFamilyInstanceType>(ty));
}

static bool isNormalizedTyvar(const NormalizedTyvars& tyvars)
@ -1366,7 +1376,7 @@ bool Normalizer::unionNormalWithTy(NormalizedType& here, TypeId there, int ignor
else if (FFlag::LuauTransitiveSubtyping && get<UnknownType>(here.tops))
return true;
else if (get<GenericType>(there) || get<FreeType>(there) || (FFlag::LuauNormalizeBlockedTypes && get<BlockedType>(there)) ||
get<PendingExpansionType>(there))
get<PendingExpansionType>(there) || get<TypeFamilyInstanceType>(there))
{
if (tyvarIndex(there) <= ignoreSmallerTyvars)
return true;
@ -1436,7 +1446,7 @@ bool Normalizer::unionNormalWithTy(NormalizedType& here, TypeId there, int ignor
}
else if (!FFlag::LuauNormalizeBlockedTypes && get<BlockedType>(there))
LUAU_ASSERT(!"Internal error: Trying to normalize a BlockedType");
else if (get<PendingExpansionType>(there))
else if (get<PendingExpansionType>(there) || get<TypeFamilyInstanceType>(there))
{
// nothing
}
@ -1981,17 +1991,14 @@ std::optional<TypeId> Normalizer::intersectionOfTables(TypeId here, TypeId there
else if (isPrim(there, PrimitiveType::Table))
return here;

if (FFlag::LuauNormalizeMetatableFixes)
{
if (get<NeverType>(here))
return there;
else if (get<NeverType>(there))
return here;
else if (get<AnyType>(here))
return there;
else if (get<AnyType>(there))
return here;
}
if (get<NeverType>(here))
return there;
else if (get<NeverType>(there))
return here;
else if (get<AnyType>(here))
return there;
else if (get<AnyType>(there))
return here;

TypeId htable = here;
TypeId hmtable = nullptr;
@ -2009,22 +2016,12 @@ std::optional<TypeId> Normalizer::intersectionOfTables(TypeId here, TypeId there
}

const TableType* httv = get<TableType>(htable);
if (FFlag::LuauNormalizeMetatableFixes)
{
if (!httv)
return std::nullopt;
}
else
LUAU_ASSERT(httv);
if (!httv)
return std::nullopt;

const TableType* tttv = get<TableType>(ttable);
if (FFlag::LuauNormalizeMetatableFixes)
{
if (!tttv)
return std::nullopt;
}
else
LUAU_ASSERT(tttv);
if (!tttv)
return std::nullopt;


if (httv->state == TableState::Free || tttv->state == TableState::Free)
@ -2471,7 +2468,7 @@ bool Normalizer::intersectNormalWithTy(NormalizedType& here, TypeId there)
return true;
}
else if (get<GenericType>(there) || get<FreeType>(there) || (FFlag::LuauNormalizeBlockedTypes && get<BlockedType>(there)) ||
get<PendingExpansionType>(there))
get<PendingExpansionType>(there) || get<TypeFamilyInstanceType>(there))
{
NormalizedType thereNorm{builtinTypes};
NormalizedType topNorm{builtinTypes};
@ -136,7 +136,7 @@ void quantify(TypeId ty, TypeLevel level)
ftv->genericPacks.insert(ftv->genericPacks.end(), q.genericPacks.begin(), q.genericPacks.end());

if (ftv->generics.empty() && ftv->genericPacks.empty() && !q.seenMutableType && !q.seenGenericType)
ftv->hasNoGenerics = true;
ftv->hasNoFreeOrGenericTypes = true;
}
}
else
@ -276,7 +276,7 @@ std::optional<QuantifierResult> quantify(TypeArena* arena, TypeId ty, Scope* sco
for (auto k : quantifier.insertedGenericPacks.keys)
ftv->genericPacks.push_back(quantifier.insertedGenericPacks.pairings[k]);

ftv->hasNoGenerics = ftv->generics.empty() && ftv->genericPacks.empty() && !quantifier.seenGenericType && !quantifier.seenMutableType;
ftv->hasNoFreeOrGenericTypes = ftv->generics.empty() && ftv->genericPacks.empty() && !quantifier.seenGenericType && !quantifier.seenMutableType;

return std::optional<QuantifierResult>({*result, std::move(quantifier.insertedGenerics), std::move(quantifier.insertedGenericPacks)});
}
Analysis/src/Simplify.cpp (new file, 1270 lines): diff suppressed because it is too large.
@ -1639,6 +1639,11 @@ std::string toString(const Constraint& constraint, ToStringOptions& opts)
}
else if constexpr (std::is_same_v<T, UnpackConstraint>)
return tos(c.resultPack) + " ~ unpack " + tos(c.sourcePack);
else if constexpr (std::is_same_v<T, RefineConstraint>)
{
const char* op = c.mode == RefineConstraint::Union ? "union" : "intersect";
return tos(c.resultType) + " ~ refine " + tos(c.type) + " " + op + " " + tos(c.discriminant);
}
else if constexpr (std::is_same_v<T, ReduceConstraint>)
return "reduce " + tos(c.ty);
else if constexpr (std::is_same_v<T, ReducePackConstraint>)
@ -1652,6 +1657,11 @@ std::string toString(const Constraint& constraint, ToStringOptions& opts)
return visit(go, constraint.c);
}

std::string toString(const Constraint& constraint)
{
return toString(constraint, ToStringOptions{});
}

std::string dump(const Constraint& c)
{
ToStringOptions opts;
@ -82,6 +82,8 @@ void TxnLog::concat(TxnLog rhs)

for (auto& [tp, rep] : rhs.typePackChanges)
typePackChanges[tp] = std::move(rep);

radioactive |= rhs.radioactive;
}

void TxnLog::concatAsIntersections(TxnLog rhs, NotNull<TypeArena> arena)
@ -103,6 +105,8 @@ void TxnLog::concatAsIntersections(TxnLog rhs, NotNull<TypeArena> arena)

for (auto& [tp, rep] : rhs.typePackChanges)
typePackChanges[tp] = std::move(rep);

radioactive |= rhs.radioactive;
}

void TxnLog::concatAsUnion(TxnLog rhs, NotNull<TypeArena> arena)
@ -199,10 +203,14 @@ void TxnLog::concatAsUnion(TxnLog rhs, NotNull<TypeArena> arena)

for (auto& [tp, rep] : rhs.typePackChanges)
typePackChanges[tp] = std::move(rep);

radioactive |= rhs.radioactive;
}

void TxnLog::commit()
{
LUAU_ASSERT(!radioactive);

for (auto& [ty, rep] : typeVarChanges)
{
if (!rep->dead)
@ -234,6 +242,8 @@ TxnLog TxnLog::inverse()
for (auto& [tp, _rep] : typePackChanges)
inversed.typePackChanges[tp] = std::make_unique<PendingTypePack>(*tp);

inversed.radioactive = radioactive;

return inversed;
}

@ -293,7 +303,8 @@ void TxnLog::popSeen(TypeOrPackId lhs, TypeOrPackId rhs)

PendingType* TxnLog::queue(TypeId ty)
{
LUAU_ASSERT(!ty->persistent);
if (ty->persistent)
radioactive = true;

// Explicitly don't look in ancestors. If we have discovered something new
// about this type, we don't want to mutate the parent's state.
@ -309,7 +320,8 @@ PendingType* TxnLog::queue(TypeId ty)

PendingTypePack* TxnLog::queue(TypePackId tp)
{
LUAU_ASSERT(!tp->persistent);
if (tp->persistent)
radioactive = true;

// Explicitly don't look in ancestors. If we have discovered something new
// about this type, we don't want to mutate the parent's state.
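Reading the TxnLog changes above together: queueing a change to a persistent type or pack now poisons the log instead of asserting, the concat variants and inverse() propagate the radioactive bit, and commit() still asserts that it is clear. In other words (our reading of this diff, not stated in it), a poisoned log may be built and inspected but must never be committed.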
@ -13,7 +13,6 @@
#include "Luau/ToString.h"
#include "Luau/TxnLog.h"
#include "Luau/Type.h"
#include "Luau/TypeReduction.h"
#include "Luau/TypeUtils.h"
#include "Luau/Unifier.h"
#include "Luau/TypeFamily.h"
@ -21,7 +20,6 @@
#include <algorithm>

LUAU_FASTFLAG(DebugLuauMagicTypes)
LUAU_FASTFLAG(DebugLuauDontReduceTypes)

namespace Luau
{
@ -117,7 +115,7 @@ struct TypeChecker2
TypeId checkForFamilyInhabitance(TypeId instance, Location location)
{
TxnLog fake{};
reportErrors(reduceFamilies(instance, location, NotNull{&testArena}, builtinTypes, &fake, true).errors);
reportErrors(reduceFamilies(instance, location, NotNull{&testArena}, builtinTypes, stack.back(), NotNull{&normalizer}, &fake, true).errors);
return instance;
}

@ -1002,7 +1000,9 @@ struct TypeChecker2

LUAU_ASSERT(ftv);
reportErrors(tryUnify(stack.back(), call->location, ftv->retTypes, expectedRetType, CountMismatch::Context::Return, /* genericsOkay */ true));
reportErrors(reduceFamilies(ftv->retTypes, call->location, NotNull{&testArena}, builtinTypes, &fake, true).errors);
reportErrors(
reduceFamilies(ftv->retTypes, call->location, NotNull{&testArena}, builtinTypes, stack.back(), NotNull{&normalizer}, &fake, true)
.errors);

auto it = begin(expectedArgTypes);
size_t i = 0;
@ -1020,7 +1020,7 @@ struct TypeChecker2
Location argLoc = argLocs.at(i >= argLocs.size() ? argLocs.size() - 1 : i);

reportErrors(tryUnify(stack.back(), argLoc, expectedArg, arg, CountMismatch::Context::Arg, /* genericsOkay */ true));
reportErrors(reduceFamilies(arg, argLoc, NotNull{&testArena}, builtinTypes, &fake, true).errors);
reportErrors(reduceFamilies(arg, argLoc, NotNull{&testArena}, builtinTypes, stack.back(), NotNull{&normalizer}, &fake, true).errors);

++it;
++i;
@ -1032,12 +1032,11 @@ struct TypeChecker2
{
TypePackId remainingArgs = testArena.addTypePack(TypePack{std::move(slice), std::nullopt});
reportErrors(tryUnify(stack.back(), argLocs.back(), *tail, remainingArgs, CountMismatch::Context::Arg, /* genericsOkay */ true));
reportErrors(reduceFamilies(remainingArgs, argLocs.back(), NotNull{&testArena}, builtinTypes, &fake, true).errors);
reportErrors(reduceFamilies(
remainingArgs, argLocs.back(), NotNull{&testArena}, builtinTypes, stack.back(), NotNull{&normalizer}, &fake, true)
.errors);
}
}

// We do not need to do an arity test because this overload was
// selected based on its arity already matching.
}
else
{
@ -1160,25 +1159,26 @@ struct TypeChecker2
return ty;
}

void visitExprName(AstExpr* expr, Location location, const std::string& propName, ValueContext context)
void visitExprName(AstExpr* expr, Location location, const std::string& propName, ValueContext context, TypeId astIndexExprTy)
{
visit(expr, ValueContext::RValue);

TypeId leftType = stripFromNilAndReport(lookupType(expr), location);
checkIndexTypeFromType(leftType, propName, location, context);
checkIndexTypeFromType(leftType, propName, location, context, astIndexExprTy);
}

void visit(AstExprIndexName* indexName, ValueContext context)
{
visitExprName(indexName->expr, indexName->location, indexName->index.value, context);
// If we're indexing like _.foo - foo could either be a prop or a string.
visitExprName(indexName->expr, indexName->location, indexName->index.value, context, builtinTypes->stringType);
}

void visit(AstExprIndexExpr* indexExpr, ValueContext context)
{
if (auto str = indexExpr->index->as<AstExprConstantString>())
{
TypeId astIndexExprType = lookupType(indexExpr->index);
const std::string stringValue(str->value.data, str->value.size);
visitExprName(indexExpr->expr, indexExpr->location, stringValue, context);
visitExprName(indexExpr->expr, indexExpr->location, stringValue, context, astIndexExprType);
return;
}

@ -1198,6 +1198,8 @@ struct TypeChecker2
else
reportError(CannotExtendTable{exprType, CannotExtendTable::Indexer, "indexer??"}, indexExpr->location);
}
else if (auto cls = get<ClassType>(exprType); cls && cls->indexer)
reportErrors(tryUnify(scope, indexExpr->index->location, indexType, cls->indexer->indexType));
else if (get<UnionType>(exprType) && isOptional(exprType))
reportError(OptionalValueAccess{exprType}, indexExpr->location);
}
@ -1209,32 +1211,52 @@ struct TypeChecker2
visitGenerics(fn->generics, fn->genericPacks);

TypeId inferredFnTy = lookupType(fn);
const FunctionType* inferredFtv = get<FunctionType>(inferredFnTy);
LUAU_ASSERT(inferredFtv);

// There is no way to write an annotation for the self argument, so we
// cannot do anything to check it.
auto argIt = begin(inferredFtv->argTypes);
if (fn->self)
++argIt;

for (const auto& arg : fn->args)
const NormalizedType* normalizedFnTy = normalizer.normalize(inferredFnTy);
if (!normalizedFnTy)
{
if (argIt == end(inferredFtv->argTypes))
break;
reportError(CodeTooComplex{}, fn->location);
}
else if (get<ErrorType>(normalizedFnTy->errors))
{
// Nothing
}
else if (!normalizedFnTy->isFunction())
{
ice->ice("Internal error: Lambda has non-function type " + toString(inferredFnTy), fn->location);
}
else
{
if (1 != normalizedFnTy->functions.parts.size())
ice->ice("Unexpected: Lambda has unexpected type " + toString(inferredFnTy), fn->location);

if (arg->annotation)
const FunctionType* inferredFtv = get<FunctionType>(normalizedFnTy->functions.parts.front());
LUAU_ASSERT(inferredFtv);

// There is no way to write an annotation for the self argument, so we
// cannot do anything to check it.
auto argIt = begin(inferredFtv->argTypes);
if (fn->self)
++argIt;

for (const auto& arg : fn->args)
{
TypeId inferredArgTy = *argIt;
TypeId annotatedArgTy = lookupAnnotation(arg->annotation);
if (argIt == end(inferredFtv->argTypes))
break;

if (!isSubtype(inferredArgTy, annotatedArgTy, stack.back()))
if (arg->annotation)
{
reportError(TypeMismatch{inferredArgTy, annotatedArgTy}, arg->location);
}
}
TypeId inferredArgTy = *argIt;
TypeId annotatedArgTy = lookupAnnotation(arg->annotation);

++argIt;
if (!isSubtype(inferredArgTy, annotatedArgTy, stack.back()))
{
reportError(TypeMismatch{inferredArgTy, annotatedArgTy}, arg->location);
}
}

++argIt;
}
}

visit(fn->body);
@ -1345,6 +1367,10 @@ struct TypeChecker2

TypeId leftType = lookupType(expr->left);
TypeId rightType = lookupType(expr->right);
TypeId expectedResult = lookupType(expr);

if (get<TypeFamilyInstanceType>(expectedResult))
return expectedResult;

if (expr->op == AstExprBinary::Op::Or)
{
@ -1432,7 +1458,11 @@ struct TypeChecker2

TypeId instantiatedMm = module->astOverloadResolvedTypes[key];
if (!instantiatedMm)
reportError(CodeTooComplex{}, expr->location);
{
// reportError(CodeTooComplex{}, expr->location);
// was handled by a type family
return expectedResult;
}

else if (const FunctionType* ftv = get<FunctionType>(follow(instantiatedMm)))
{
@ -1715,7 +1745,7 @@ struct TypeChecker2
{
// No further validation is necessary in this case. The main logic for
// _luau_print is contained in lookupAnnotation.
if (FFlag::DebugLuauMagicTypes && ty->name == "_luau_print" && ty->parameters.size > 0)
if (FFlag::DebugLuauMagicTypes && ty->name == "_luau_print")
return;

for (const AstTypeOrPack& param : ty->parameters)
@ -1764,6 +1794,7 @@ struct TypeChecker2
if (packsProvided != 0)
{
reportError(GenericError{"Type parameters must come before type pack parameters"}, ty->location);
continue;
}

if (typesProvided < typesRequired)
@ -1792,7 +1823,11 @@ struct TypeChecker2

if (extraTypes != 0 && packsProvided == 0)
{
packsProvided += 1;
// Extra types are only collected into a pack if a pack is expected
if (packsRequired != 0)
packsProvided += 1;
else
typesProvided += extraTypes;
}

for (size_t i = typesProvided; i < typesRequired; ++i)
@ -1943,69 +1978,6 @@ struct TypeChecker2
}
}

void reduceTypes()
{
if (FFlag::DebugLuauDontReduceTypes)
return;

for (auto [_, scope] : module->scopes)
{
for (auto& [_, b] : scope->bindings)
{
if (auto reduced = module->reduction->reduce(b.typeId))
b.typeId = *reduced;
}

if (auto reduced = module->reduction->reduce(scope->returnType))
scope->returnType = *reduced;

if (scope->varargPack)
{
if (auto reduced = module->reduction->reduce(*scope->varargPack))
scope->varargPack = *reduced;
}

auto reduceMap = [this](auto& map) {
for (auto& [_, tf] : map)
{
if (auto reduced = module->reduction->reduce(tf))
tf = *reduced;
}
};

reduceMap(scope->exportedTypeBindings);
reduceMap(scope->privateTypeBindings);
reduceMap(scope->privateTypePackBindings);
for (auto& [_, space] : scope->importedTypeBindings)
reduceMap(space);
}

auto reduceOrError = [this](auto& map) {
for (auto [ast, t] : map)
{
if (!t)
continue; // Reminder: this implies that the recursion limit was exceeded.
else if (auto reduced = module->reduction->reduce(t))
map[ast] = *reduced;
else
reportError(NormalizationTooComplex{}, ast->location);
}
};

module->astOriginalResolvedTypes = module->astResolvedTypes;

// Both [`Module::returnType`] and [`Module::exportedTypeBindings`] are empty here, and
// is populated by [`Module::clonePublicInterface`] in the future, so by that point these
// two aforementioned fields will only contain types that are irreducible.
reduceOrError(module->astTypes);
reduceOrError(module->astTypePacks);
reduceOrError(module->astExpectedTypes);
reduceOrError(module->astOriginalCallTypes);
reduceOrError(module->astOverloadResolvedTypes);
reduceOrError(module->astResolvedTypes);
reduceOrError(module->astResolvedTypePacks);
}

template<typename TID>
bool isSubtype(TID subTy, TID superTy, NotNull<Scope> scope, bool genericsOkay = false)
{
@ -2034,6 +2006,9 @@ struct TypeChecker2

void reportError(TypeErrorData data, const Location& location)
{
if (auto utk = get_if<UnknownProperty>(&data))
diagnoseMissingTableKey(utk, data);

module->errors.emplace_back(location, module->name, std::move(data));

if (logger)
@ -2052,7 +2027,7 @@ struct TypeChecker2
}

// If the provided type does not have the named property, report an error.
void checkIndexTypeFromType(TypeId tableTy, const std::string& prop, const Location& location, ValueContext context)
void checkIndexTypeFromType(TypeId tableTy, const std::string& prop, const Location& location, ValueContext context, TypeId astIndexExprType)
{
const NormalizedType* norm = normalizer.normalize(tableTy);
if (!norm)
@ -2069,7 +2044,7 @@ struct TypeChecker2
return;

std::unordered_set<TypeId> seen;
bool found = hasIndexTypeFromType(ty, prop, location, seen);
bool found = hasIndexTypeFromType(ty, prop, location, seen, astIndexExprType);
foundOneProp |= found;
if (!found)
typesMissingTheProp.push_back(ty);
@ -2129,7 +2104,7 @@ struct TypeChecker2
}
}

bool hasIndexTypeFromType(TypeId ty, const std::string& prop, const Location& location, std::unordered_set<TypeId>& seen)
bool hasIndexTypeFromType(TypeId ty, const std::string& prop, const Location& location, std::unordered_set<TypeId>& seen, TypeId astIndexExprType)
{
// If we have already encountered this type, we must assume that some
// other codepath will do the right thing and signal false if the
@ -2153,31 +2128,83 @@ struct TypeChecker2
if (findTablePropertyRespectingMeta(builtinTypes, module->errors, ty, prop, location))
return true;

else if (tt->indexer && isPrim(tt->indexer->indexType, PrimitiveType::String))
return true;
if (tt->indexer)
{
TypeId indexType = follow(tt->indexer->indexType);
if (isPrim(indexType, PrimitiveType::String))
return true;
// If the indexer looks like { [any] : _} - the prop lookup should be allowed!
else if (get<AnyType>(indexType) || get<UnknownType>(indexType))
return true;
}

else
return false;
return false;
}
else if (const ClassType* cls = get<ClassType>(ty))
return bool(lookupClassProp(cls, prop));
{
// If the property doesn't exist on the class, we consult the indexer
// We need to check if the type of the index expression foo (x[foo])
// is compatible with the indexer's indexType
// Construct the intersection and test inhabitedness!
if (auto property = lookupClassProp(cls, prop))
return true;
if (cls->indexer)
{
TypeId inhabitatedTestType = testArena.addType(IntersectionType{{cls->indexer->indexType, astIndexExprType}});
return normalizer.isInhabited(inhabitatedTestType);
}
return false;
}
else if (const UnionType* utv = get<UnionType>(ty))
return std::all_of(begin(utv), end(utv), [&](TypeId part) {
return hasIndexTypeFromType(part, prop, location, seen);
return hasIndexTypeFromType(part, prop, location, seen, astIndexExprType);
});
else if (const IntersectionType* itv = get<IntersectionType>(ty))
return std::any_of(begin(itv), end(itv), [&](TypeId part) {
return hasIndexTypeFromType(part, prop, location, seen);
return hasIndexTypeFromType(part, prop, location, seen, astIndexExprType);
});
else
return false;
}

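A small Luau illustration of the indexer cases handled above (a sketch; assumes the new-solver behavior this diff wires up, and the variable names are invented):

local anyKeyed: {[any]: number} = {}
local a = anyKeyed.foo -- admitted: an [any] (or [unknown]) indexer covers any prop

local strKeyed: {[string]: number} = {}
local b = strKeyed.bar -- admitted via the string indexer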
void diagnoseMissingTableKey(UnknownProperty* utk, TypeErrorData& data) const
{
std::string_view sv(utk->key);
std::set<Name> candidates;

auto accumulate = [&](const TableType::Props& props) {
for (const auto& [name, ty] : props)
{
if (sv != name && equalsLower(sv, name))
candidates.insert(name);
}
};

if (auto ttv = getTableType(utk->table))
accumulate(ttv->props);
else if (auto ctv = get<ClassType>(follow(utk->table)))
{
while (ctv)
{
accumulate(ctv->props);

if (!ctv->parent)
break;

ctv = get<ClassType>(*ctv->parent);
LUAU_ASSERT(ctv);
}
}

if (!candidates.empty())
data = TypeErrorData(UnknownPropButFoundLikeProp{utk->table, utk->key, candidates});
}
};

void check(NotNull<BuiltinTypes> builtinTypes, NotNull<UnifierSharedState> unifierState, DcrLogger* logger, const SourceModule& sourceModule, Module* module)
{
TypeChecker2 typeChecker{builtinTypes, unifierState, logger, &sourceModule, module};
typeChecker.reduceTypes();

typeChecker.visit(sourceModule.root);

unfreeze(module->interfaceTypes);
@ -7,6 +7,10 @@
#include "Luau/TxnLog.h"
#include "Luau/Substitution.h"
#include "Luau/ToString.h"
#include "Luau/TypeUtils.h"
#include "Luau/Unifier.h"
#include "Luau/Instantiation.h"
#include "Luau/Normalize.h"

LUAU_DYNAMIC_FASTINTVARIABLE(LuauTypeFamilyGraphReductionMaximumSteps, 1'000'000);

@ -30,6 +34,11 @@ struct InstanceCollector : TypeOnceVisitor
return true;
}

bool visit(TypeId ty, const ClassType&) override
{
return false;
}

bool visit(TypePackId tp, const TypeFamilyInstanceTypePack&) override
{
// TypeOnceVisitor performs a depth-first traversal in the absence of
@ -52,20 +61,24 @@ struct FamilyReducer
Location location;
NotNull<TypeArena> arena;
NotNull<BuiltinTypes> builtins;
TxnLog* log = nullptr;
NotNull<const TxnLog> reducerLog;
TxnLog* parentLog = nullptr;
TxnLog log;
bool force = false;
NotNull<Scope> scope;
NotNull<Normalizer> normalizer;

FamilyReducer(std::deque<TypeId> queuedTys, std::deque<TypePackId> queuedTps, Location location, NotNull<TypeArena> arena,
NotNull<BuiltinTypes> builtins, TxnLog* log = nullptr, bool force = false)
NotNull<BuiltinTypes> builtins, NotNull<Scope> scope, NotNull<Normalizer> normalizer, TxnLog* parentLog = nullptr, bool force = false)
: queuedTys(std::move(queuedTys))
, queuedTps(std::move(queuedTps))
, location(location)
, arena(arena)
, builtins(builtins)
, log(log)
, reducerLog(NotNull{log ? log : TxnLog::empty()})
, parentLog(parentLog)
, log(parentLog)
, force(force)
, scope(scope)
, normalizer(normalizer)
{
}

@ -78,16 +91,16 @@ struct FamilyReducer

SkipTestResult testForSkippability(TypeId ty)
{
ty = reducerLog->follow(ty);
ty = log.follow(ty);

if (reducerLog->is<TypeFamilyInstanceType>(ty))
if (log.is<TypeFamilyInstanceType>(ty))
{
if (!irreducible.contains(ty))
return SkipTestResult::Defer;
else
return SkipTestResult::Irreducible;
}
else if (reducerLog->is<GenericType>(ty))
else if (log.is<GenericType>(ty))
{
return SkipTestResult::Irreducible;
}
@ -97,16 +110,16 @@ struct FamilyReducer

SkipTestResult testForSkippability(TypePackId ty)
{
ty = reducerLog->follow(ty);
ty = log.follow(ty);

if (reducerLog->is<TypeFamilyInstanceTypePack>(ty))
if (log.is<TypeFamilyInstanceTypePack>(ty))
{
if (!irreducible.contains(ty))
return SkipTestResult::Defer;
else
return SkipTestResult::Irreducible;
}
else if (reducerLog->is<GenericTypePack>(ty))
else if (log.is<GenericTypePack>(ty))
{
return SkipTestResult::Irreducible;
}
@ -117,8 +130,8 @@ struct FamilyReducer
template<typename T>
void replace(T subject, T replacement)
{
if (log)
log->replace(subject, Unifiable::Bound{replacement});
if (parentLog)
parentLog->replace(subject, Unifiable::Bound{replacement});
else
asMutable(subject)->ty.template emplace<Unifiable::Bound<T>>(replacement);

@ -208,37 +221,38 @@ struct FamilyReducer

void stepType()
{
TypeId subject = reducerLog->follow(queuedTys.front());
TypeId subject = log.follow(queuedTys.front());
queuedTys.pop_front();

if (irreducible.contains(subject))
return;

if (const TypeFamilyInstanceType* tfit = reducerLog->get<TypeFamilyInstanceType>(subject))
if (const TypeFamilyInstanceType* tfit = log.get<TypeFamilyInstanceType>(subject))
{
if (!testParameters(subject, tfit))
return;

TypeFamilyReductionResult<TypeId> result = tfit->family->reducer(tfit->typeArguments, tfit->packArguments, arena, builtins, reducerLog);
TypeFamilyReductionResult<TypeId> result =
tfit->family->reducer(tfit->typeArguments, tfit->packArguments, arena, builtins, NotNull{&log}, scope, normalizer);
handleFamilyReduction(subject, result);
}
}

void stepPack()
{
TypePackId subject = reducerLog->follow(queuedTps.front());
TypePackId subject = log.follow(queuedTps.front());
queuedTps.pop_front();

if (irreducible.contains(subject))
return;

if (const TypeFamilyInstanceTypePack* tfit = reducerLog->get<TypeFamilyInstanceTypePack>(subject))
if (const TypeFamilyInstanceTypePack* tfit = log.get<TypeFamilyInstanceTypePack>(subject))
{
if (!testParameters(subject, tfit))
return;

TypeFamilyReductionResult<TypePackId> result =
tfit->family->reducer(tfit->typeArguments, tfit->packArguments, arena, builtins, reducerLog);
tfit->family->reducer(tfit->typeArguments, tfit->packArguments, arena, builtins, NotNull{&log}, scope, normalizer);
handleFamilyReduction(subject, result);
}
}
@ -253,9 +267,9 @@ struct FamilyReducer
};

static FamilyGraphReductionResult reduceFamiliesInternal(std::deque<TypeId> queuedTys, std::deque<TypePackId> queuedTps, Location location,
NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins, TxnLog* log, bool force)
NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins, NotNull<Scope> scope, NotNull<Normalizer> normalizer, TxnLog* log, bool force)
{
FamilyReducer reducer{std::move(queuedTys), std::move(queuedTps), location, arena, builtins, log, force};
FamilyReducer reducer{std::move(queuedTys), std::move(queuedTps), location, arena, builtins, scope, normalizer, log, force};
int iterationCount = 0;

while (!reducer.done())
@ -273,8 +287,8 @@ static FamilyGraphReductionResult reduceFamiliesInternal(std::deque<TypeId> queu
return std::move(reducer.result);
}

FamilyGraphReductionResult reduceFamilies(
TypeId entrypoint, Location location, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins, TxnLog* log, bool force)
FamilyGraphReductionResult reduceFamilies(TypeId entrypoint, Location location, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins,
NotNull<Scope> scope, NotNull<Normalizer> normalizer, TxnLog* log, bool force)
{
InstanceCollector collector;

@ -287,11 +301,11 @@ FamilyGraphReductionResult reduceFamilies(
return FamilyGraphReductionResult{};
}

return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), location, arena, builtins, log, force);
return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), location, arena, builtins, scope, normalizer, log, force);
}

FamilyGraphReductionResult reduceFamilies(
TypePackId entrypoint, Location location, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins, TxnLog* log, bool force)
FamilyGraphReductionResult reduceFamilies(TypePackId entrypoint, Location location, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins,
NotNull<Scope> scope, NotNull<Normalizer> normalizer, TxnLog* log, bool force)
{
InstanceCollector collector;

@ -304,7 +318,113 @@ FamilyGraphReductionResult reduceFamilies(
return FamilyGraphReductionResult{};
}

return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), location, arena, builtins, log, force);
return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), location, arena, builtins, scope, normalizer, log, force);
}

bool isPending(TypeId ty, NotNull<TxnLog> log)
{
return log->is<FreeType>(ty) || log->is<BlockedType>(ty) || log->is<PendingExpansionType>(ty) || log->is<TypeFamilyInstanceType>(ty);
}

TypeFamilyReductionResult<TypeId> addFamilyFn(std::vector<TypeId> typeParams, std::vector<TypePackId> packParams, NotNull<TypeArena> arena,
NotNull<BuiltinTypes> builtins, NotNull<TxnLog> log, NotNull<Scope> scope, NotNull<Normalizer> normalizer)
{
if (typeParams.size() != 2 || !packParams.empty())
{
// TODO: ICE?
LUAU_ASSERT(false);
return {std::nullopt, true, {}, {}};
}

TypeId lhsTy = log->follow(typeParams.at(0));
TypeId rhsTy = log->follow(typeParams.at(1));

if (isNumber(lhsTy) && isNumber(rhsTy))
{
return {builtins->numberType, false, {}, {}};
}
else if (log->is<AnyType>(lhsTy) || log->is<AnyType>(rhsTy))
{
return {builtins->anyType, false, {}, {}};
}
else if (log->is<ErrorType>(lhsTy) || log->is<ErrorType>(rhsTy))
{
return {builtins->errorRecoveryType(), false, {}, {}};
}
else if (log->is<NeverType>(lhsTy) || log->is<NeverType>(rhsTy))
{
return {builtins->neverType, false, {}, {}};
}
else if (isPending(lhsTy, log))
{
return {std::nullopt, false, {lhsTy}, {}};
}
else if (isPending(rhsTy, log))
{
return {std::nullopt, false, {rhsTy}, {}};
}

// findMetatableEntry demands the ability to emit errors, so we must give it
// the necessary state to do that, even if we intend to just eat the errors.
ErrorVec dummy;

std::optional<TypeId> addMm = findMetatableEntry(builtins, dummy, lhsTy, "__add", Location{});
bool reversed = false;
if (!addMm)
{
addMm = findMetatableEntry(builtins, dummy, rhsTy, "__add", Location{});
reversed = true;
}

if (!addMm)
return {std::nullopt, true, {}, {}};

if (isPending(log->follow(*addMm), log))
return {std::nullopt, false, {log->follow(*addMm)}, {}};

const FunctionType* mmFtv = log->get<FunctionType>(log->follow(*addMm));
if (!mmFtv)
return {std::nullopt, true, {}, {}};

Instantiation instantiation{log.get(), arena.get(), TypeLevel{}, scope.get()};
if (std::optional<TypeId> instantiatedAddMm = instantiation.substitute(log->follow(*addMm)))
{
if (const FunctionType* instantiatedMmFtv = get<FunctionType>(*instantiatedAddMm))
{
std::vector<TypeId> inferredArgs;
if (!reversed)
inferredArgs = {lhsTy, rhsTy};
else
inferredArgs = {rhsTy, lhsTy};

TypePackId inferredArgPack = arena->addTypePack(std::move(inferredArgs));
Unifier u{normalizer, Mode::Strict, scope, Location{}, Variance::Covariant, log.get()};
u.tryUnify(inferredArgPack, instantiatedMmFtv->argTypes);

if (std::optional<TypeId> ret = first(instantiatedMmFtv->retTypes); ret && u.errors.empty())
{
return {u.log.follow(*ret), false, {}, {}};
}
else
{
return {std::nullopt, true, {}, {}};
}
}
else
{
return {builtins->errorRecoveryType(), false, {}, {}};
}
}
else
{
// TODO: Not the nicest logic here.
return {std::nullopt, true, {}, {}};
}
}

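For illustration, a small Luau sketch of what addFamilyFn above accepts (a hedged sketch; the table shape and names are invented):

local n = 1 + 2 -- number + number reduces directly to number

-- otherwise the family looks for an __add metamethod on either operand
-- and unifies the operand types against its arguments
local mt = {}
mt.__add = function(a, b)
    return a.value + b.value
end
local v = setmetatable({value = 1}, mt)
local sum = v + v -- typed from __add's inferred return type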
BuiltinTypeFamilies::BuiltinTypeFamilies()
: addFamily{"Add", addFamilyFn}
{
}

} // namespace Luau
@ -18,7 +18,6 @@
#include "Luau/ToString.h"
#include "Luau/Type.h"
#include "Luau/TypePack.h"
#include "Luau/TypeReduction.h"
#include "Luau/TypeUtils.h"
#include "Luau/VisitType.h"

@ -269,7 +268,6 @@ ModulePtr TypeChecker::checkWithoutRecursionCheck(const SourceModule& module, Mo
currentModule.reset(new Module);
currentModule->name = module.name;
currentModule->humanReadableName = module.humanReadableName;
currentModule->reduction = std::make_unique<TypeReduction>(NotNull{&currentModule->internalTypes}, builtinTypes, NotNull{iceHandler});
currentModule->type = module.type;
currentModule->allocator = module.allocator;
currentModule->names = module.names;
@ -4842,7 +4840,7 @@ TypeId TypeChecker::instantiate(const ScopePtr& scope, TypeId ty, Location locat
ty = follow(ty);

const FunctionType* ftv = get<FunctionType>(ty);
if (ftv && ftv->hasNoGenerics)
if (ftv && ftv->hasNoFreeOrGenericTypes)
return ty;

Instantiation instantiation{log, &currentModule->internalTypes, scope->level, /*scope*/ nullptr};

[File diff suppressed because it is too large]
@ -447,13 +447,13 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
// "double-report" errors in some cases, like when trying to unify
// identical type family instantiations like Add<false, false> with
// Add<false, false>.
reduceFamilies(superTy, location, NotNull(types), builtinTypes, &log);
reduceFamilies(superTy, location, NotNull(types), builtinTypes, scope, normalizer, &log);
superTy = log.follow(superTy);
}

if (log.get<TypeFamilyInstanceType>(subTy))
{
reduceFamilies(subTy, location, NotNull(types), builtinTypes, &log);
reduceFamilies(subTy, location, NotNull(types), builtinTypes, scope, normalizer, &log);
subTy = log.follow(subTy);
}
@ -56,10 +56,9 @@ struct Reducer
ParseResult parseResult;
AstStatBlock* root;

std::string tempScriptName;
std::string scriptName;

std::string appName;
std::vector<std::string> appArgs;
std::string command;
std::string_view searchText;

Reducer()
@ -99,10 +98,10 @@ struct Reducer
} while (true);
}

FILE* f = fopen(tempScriptName.c_str(), "w");
FILE* f = fopen(scriptName.c_str(), "w");
if (!f)
{
printf("Unable to open temp script to %s\n", tempScriptName.c_str());
printf("Unable to open temp script to %s\n", scriptName.c_str());
exit(2);
}

@ -113,7 +112,7 @@ struct Reducer
if (written != source.size())
{
printf("??? %zu %zu\n", written, source.size());
printf("Unable to write to temp script %s\n", tempScriptName.c_str());
printf("Unable to write to temp script %s\n", scriptName.c_str());
exit(3);
}

@ -142,9 +141,15 @@ struct Reducer
{
writeTempScript();

std::string command = appName + " " + escape(tempScriptName);
for (const auto& arg : appArgs)
command += " " + escape(arg);
std::string cmd = command;
while (true)
{
auto pos = cmd.find("{}");
if (std::string::npos == pos)
break;

cmd = cmd.substr(0, pos) + escape(scriptName) + cmd.substr(pos + 2);
}

#if VERBOSE >= 1
printf("running %s\n", command.c_str());
@ -424,30 +429,20 @@ struct Reducer
}
}

void run(const std::string scriptName, const std::string appName, const std::vector<std::string>& appArgs, std::string_view source,
void run(const std::string scriptName, const std::string command, std::string_view source,
std::string_view searchText)
{
tempScriptName = scriptName;
if (tempScriptName.substr(tempScriptName.size() - 4) == ".lua")
{
tempScriptName.erase(tempScriptName.size() - 4);
tempScriptName += "-reduced.lua";
}
else
{
this->tempScriptName = scriptName + "-reduced";
}
this->scriptName = scriptName;

#if 0
// Handy debugging trick: VS Code will update its view of the file in realtime as it is edited.
std::string wheee = "code " + tempScriptName;
std::string wheee = "code " + scriptName;
system(wheee.c_str());
#endif

printf("Temp script: %s\n", tempScriptName.c_str());
printf("Script: %s\n", scriptName.c_str());

this->appName = appName;
this->appArgs = appArgs;
this->command = command;
this->searchText = searchText;

parseResult = Parser::parse(source.data(), source.size(), nameTable, allocator, parseOptions);
@ -470,13 +465,14 @@ struct Reducer

writeTempScript(/* minify */ true);

printf("Done! Check %s\n", tempScriptName.c_str());
printf("Done! Check %s\n", scriptName.c_str());
}
};

[[noreturn]] void help(const std::vector<std::string_view>& args)
{
printf("Syntax: %s script application \"search text\" [arguments]\n", args[0].data());
printf("Syntax: %s script command \"search text\"\n", args[0].data());
printf(" Within command, use {} as a stand-in for the script being reduced\n");
exit(1);
}

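For example, a hypothetical invocation of the reworked tool (the script path, command, and error text here are invented for illustration):

    luau-reduce crash.lua "luau-analyze {}" "Assertion failed"

Each iteration rewrites crash.lua and reruns the command with {} replaced by the escaped script path, keeping a candidate reduction whenever the command's output still contains the search text (as we read the substitution loop above).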
@ -484,7 +480,7 @@ int main(int argc, char** argv)
{
const std::vector<std::string_view> args(argv, argv + argc);

if (args.size() < 4)
if (args.size() != 4)
help(args);

for (size_t i = 1; i < args.size(); ++i)
@ -496,7 +492,6 @@ int main(int argc, char** argv)
const std::string scriptName = argv[1];
const std::string appName = argv[2];
const std::string searchText = argv[3];
const std::vector<std::string> appArgs(begin(args) + 4, end(args));

std::optional<std::string> source = readFile(scriptName);

@ -507,5 +502,5 @@ int main(int argc, char** argv)
}

Reducer reducer;
reducer.run(scriptName, appName, appArgs, *source, searchText);
reducer.run(scriptName, appName, *source, searchText);
}
@ -80,6 +80,12 @@ public:
void asr(RegisterA64 dst, RegisterA64 src1, uint8_t src2);
void ror(RegisterA64 dst, RegisterA64 src1, uint8_t src2);

// Bitfields
void ubfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w);
void ubfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w);
void sbfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w);
void sbfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w);

// Load
// Note: paired loads are currently omitted for simplicity
void ldr(RegisterA64 dst, AddressA64 src);
@ -212,7 +218,7 @@ private:
void placeFCMP(const char* name, RegisterA64 src1, RegisterA64 src2, uint8_t op, uint8_t opc);
void placeFMOV(const char* name, RegisterA64 dst, double src, uint32_t op);
void placeBM(const char* name, RegisterA64 dst, RegisterA64 src1, uint32_t src2, uint8_t op);
void placeBFM(const char* name, RegisterA64 dst, RegisterA64 src1, uint8_t src2, uint8_t op, int immr, int imms);
void placeBFM(const char* name, RegisterA64 dst, RegisterA64 src1, int src2, uint8_t op, int immr, int imms);

void place(uint32_t word);
@ -9,7 +9,7 @@
struct lua_State;

// returns 1 if Luau code generator is supported, 0 otherwise
LUACODEGEN_API int luau_codegen_supported();
LUACODEGEN_API int luau_codegen_supported(void);

// create an instance of Luau code generator. you must check that this feature is supported using luau_codegen_supported().
LUACODEGEN_API void luau_codegen_create(lua_State* L);
@ -280,6 +280,42 @@ void AssemblyBuilderA64::ror(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
placeBFM("ror", dst, src1, src2, 0b00'100111, src1.index, src2);
}

void AssemblyBuilderA64::ubfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w)
{
int size = dst.kind == KindA64::x ? 64 : 32;
LUAU_ASSERT(w > 0 && f + w <= size);

// f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability
placeBFM("ubfiz", dst, src, f * 100 + w, 0b10'100110, (-f) & (size - 1), w - 1);
}

void AssemblyBuilderA64::ubfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w)
{
int size = dst.kind == KindA64::x ? 64 : 32;
LUAU_ASSERT(w > 0 && f + w <= size);

// f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability
placeBFM("ubfx", dst, src, f * 100 + w, 0b10'100110, f, f + w - 1);
}

void AssemblyBuilderA64::sbfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w)
{
int size = dst.kind == KindA64::x ? 64 : 32;
LUAU_ASSERT(w > 0 && f + w <= size);

// f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability
placeBFM("sbfiz", dst, src, f * 100 + w, 0b00'100110, (-f) & (size - 1), w - 1);
}

void AssemblyBuilderA64::sbfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w)
{
int size = dst.kind == KindA64::x ? 64 : 32;
LUAU_ASSERT(w > 0 && f + w <= size);

// f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability
placeBFM("sbfx", dst, src, f * 100 + w, 0b00'100110, f, f + w - 1);
}

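A worked example for the four encoders above (derived from the code, with invented operands): ubfiz w0, w1, 8, 16 uses size = 32, so it encodes immr = (-8) & 31 = 24 and imms = 16 - 1 = 15, and the disassembly printout logs src2 as 8 * 100 + 16 = 816.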
void AssemblyBuilderA64::ldr(RegisterA64 dst, AddressA64 src)
|
||||
{
|
||||
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w || dst.kind == KindA64::s || dst.kind == KindA64::d || dst.kind == KindA64::q);
|
||||
@ -1010,7 +1046,7 @@ void AssemblyBuilderA64::placeBM(const char* name, RegisterA64 dst, RegisterA64
    commit();
}

void AssemblyBuilderA64::placeBFM(const char* name, RegisterA64 dst, RegisterA64 src1, uint8_t src2, uint8_t op, int immr, int imms)
void AssemblyBuilderA64::placeBFM(const char* name, RegisterA64 dst, RegisterA64 src1, int src2, uint8_t op, int immr, int imms)
{
    if (logText)
        log(name, dst, src1, src2);

@ -5,6 +5,7 @@
#include "Luau/UnwindBuilder.h"

#include <string.h>
#include <stdlib.h>

#if defined(_WIN32) && defined(_M_X64)

@ -34,6 +34,7 @@

#include <algorithm>
#include <memory>
#include <optional>

#if defined(__x86_64__) || defined(_M_X64)
#ifdef _MSC_VER
@ -61,33 +62,34 @@ namespace CodeGen
static void* gPerfLogContext = nullptr;
static PerfLogFn gPerfLogFn = nullptr;

static NativeProto* createNativeProto(Proto* proto, const IrBuilder& ir)
struct NativeProto
{
    Proto* p;
    void* execdata;
    uintptr_t exectarget;
};

static NativeProto createNativeProto(Proto* proto, const IrBuilder& ir)
{
    int sizecode = proto->sizecode;
    int sizecodeAlloc = (sizecode + 1) & ~1; // align uint32_t array to 8 bytes so that NativeProto is aligned to 8 bytes

    void* memory = ::operator new(sizeof(NativeProto) + sizecodeAlloc * sizeof(uint32_t));
    NativeProto* result = new (static_cast<char*>(memory) + sizecodeAlloc * sizeof(uint32_t)) NativeProto;
    result->proto = proto;

    uint32_t* instOffsets = result->instOffsets;
    uint32_t* instOffsets = new uint32_t[sizecode];
    uint32_t instTarget = ir.function.bcMapping[0].asmLocation;

    for (int i = 0; i < sizecode; i++)
    {
        // instOffsets uses negative indexing for optimal codegen for RETURN opcode
        instOffsets[-i] = ir.function.bcMapping[i].asmLocation;
        LUAU_ASSERT(ir.function.bcMapping[i].asmLocation >= instTarget);

        instOffsets[i] = ir.function.bcMapping[i].asmLocation - instTarget;
    }

    return result;
    // entry target will be relocated when assembly is finalized
    return {proto, instOffsets, instTarget};
}
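
// In the new scheme (sketch, matching onEnter below): execdata is a plain
// uint32_t[sizecode] of code offsets relative to the function entry, and
// exectarget becomes the absolute entry address once the code block is
// relocated, so a native pc is recovered as
//   uintptr_t target = proto->exectarget + static_cast<uint32_t*>(proto->execdata)[pcIndex];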

static void destroyNativeProto(NativeProto* nativeProto)
static void destroyExecData(void* execdata)
{
    int sizecode = nativeProto->proto->sizecode;
    int sizecodeAlloc = (sizecode + 1) & ~1; // align uint32_t array to 8 bytes so that NativeProto is aligned to 8 bytes
    void* memory = reinterpret_cast<char*>(nativeProto) - sizecodeAlloc * sizeof(uint32_t);

    ::operator delete(memory);
    delete[] static_cast<uint32_t*>(execdata);
}

static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size)
@ -271,7 +273,7 @@ static bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
}

template<typename AssemblyBuilder>
static NativeProto* assembleFunction(AssemblyBuilder& build, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
static std::optional<NativeProto> assembleFunction(AssemblyBuilder& build, NativeState& data, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options)
{
    if (options.includeAssembly || options.includeIr)
    {
@ -321,7 +323,7 @@ static NativeProto* assembleFunction(AssemblyBuilder& build, NativeState& data,
        if (build.logText)
            build.logAppend("; skipping (can't lower)\n\n");

        return nullptr;
        return std::nullopt;
    }

    if (build.logText)
@ -337,23 +339,19 @@ static void onCloseState(lua_State* L)

static void onDestroyFunction(lua_State* L, Proto* proto)
{
    NativeProto* nativeProto = getProtoExecData(proto);
    LUAU_ASSERT(nativeProto->proto == proto);

    setProtoExecData(proto, nullptr);
    destroyNativeProto(nativeProto);
    destroyExecData(proto->execdata);
    proto->execdata = nullptr;
    proto->exectarget = 0;
}

static int onEnter(lua_State* L, Proto* proto)
{
    NativeState* data = getNativeState(L);
    NativeProto* nativeProto = getProtoExecData(proto);

    LUAU_ASSERT(nativeProto);
    LUAU_ASSERT(L->ci->savedpc);
    LUAU_ASSERT(proto->execdata);
    LUAU_ASSERT(L->ci->savedpc >= proto->code && L->ci->savedpc < proto->code + proto->sizecode);

    // instOffsets uses negative indexing for optimal codegen for RETURN opcode
    uintptr_t target = nativeProto->instBase + nativeProto->instOffsets[proto->code - L->ci->savedpc];
    uintptr_t target = proto->exectarget + static_cast<uint32_t*>(proto->execdata)[L->ci->savedpc - proto->code];

    // Returns 1 to finish the function in the VM
    return GateFn(data->context.gateEntry)(L, proto, target, &data->context);
@ -361,7 +359,7 @@ static int onEnter(lua_State* L, Proto* proto)

static void onSetBreakpoint(lua_State* L, Proto* proto, int instruction)
{
    if (!getProtoExecData(proto))
    if (!proto->execdata)
        return;

    LUAU_ASSERT(!"native breakpoints are not implemented");
@ -444,8 +442,7 @@ void create(lua_State* L)
    data.codeAllocator.createBlockUnwindInfo = createBlockUnwindInfo;
    data.codeAllocator.destroyBlockUnwindInfo = destroyBlockUnwindInfo;

    initFallbackTable(data);
    initHelperFunctions(data);
    initFunctions(data);

#if defined(__x86_64__) || defined(_M_X64)
    if (!X64::initHeaderFunctions(data))
@ -514,20 +511,20 @@ void compile(lua_State* L, int idx)
    X64::assembleHelpers(build, helpers);
#endif

    std::vector<NativeProto*> results;
    std::vector<NativeProto> results;
    results.reserve(protos.size());

    // Skip protos that have been compiled during previous invocations of CodeGen::compile
    for (Proto* p : protos)
        if (p && getProtoExecData(p) == nullptr)
            if (NativeProto* np = assembleFunction(build, *data, helpers, p, {}))
                results.push_back(np);
        if (p && p->execdata == nullptr)
            if (std::optional<NativeProto> np = assembleFunction(build, *data, helpers, p, {}))
                results.push_back(*np);

    // Very large modules might result in overflowing a jump offset; in this case we currently abandon the entire module
    if (!build.finalize())
    {
        for (NativeProto* result : results)
            destroyNativeProto(result);
        for (NativeProto result : results)
            destroyExecData(result.execdata);

        return;
    }
@ -542,36 +539,32 @@ void compile(lua_State* L, int idx)
    if (!data->codeAllocator.allocate(build.data.data(), int(build.data.size()), reinterpret_cast<const uint8_t*>(build.code.data()),
            int(build.code.size() * sizeof(build.code[0])), nativeData, sizeNativeData, codeStart))
    {
        for (NativeProto* result : results)
            destroyNativeProto(result);
        for (NativeProto result : results)
            destroyExecData(result.execdata);

        return;
    }

    if (gPerfLogFn && results.size() > 0)
    {
        gPerfLogFn(gPerfLogContext, uintptr_t(codeStart), results[0]->instOffsets[0], "<luau helpers>");
        gPerfLogFn(gPerfLogContext, uintptr_t(codeStart), uint32_t(results[0].exectarget), "<luau helpers>");

        for (size_t i = 0; i < results.size(); ++i)
        {
            uint32_t begin = results[i]->instOffsets[0];
            uint32_t end = i + 1 < results.size() ? results[i + 1]->instOffsets[0] : uint32_t(build.code.size() * sizeof(build.code[0]));
            uint32_t begin = uint32_t(results[i].exectarget);
            uint32_t end = i + 1 < results.size() ? uint32_t(results[i + 1].exectarget) : uint32_t(build.code.size() * sizeof(build.code[0]));
            LUAU_ASSERT(begin < end);

            logPerfFunction(results[i]->proto, uintptr_t(codeStart) + begin, end - begin);
            logPerfFunction(results[i].p, uintptr_t(codeStart) + begin, end - begin);
        }
    }

    // Record instruction base address; at runtime, instOffsets[] will be used as offsets from instBase
    for (NativeProto* result : results)
    for (NativeProto result : results)
    {
        result->instBase = uintptr_t(codeStart);
        result->entryTarget = uintptr_t(codeStart) + result->instOffsets[0];
        // the memory is now managed by VM and will be freed via onDestroyFunction
        result.p->execdata = result.execdata;
        result.p->exectarget = uintptr_t(codeStart) + result.exectarget;
    }

    // Link native proto objects to Proto; the memory is now managed by VM and will be freed via onDestroyFunction
    for (NativeProto* result : results)
        setProtoExecData(result->proto, result);
}

std::string getAssembly(lua_State* L, int idx, AssemblyOptions options)
@ -586,7 +579,7 @@ std::string getAssembly(lua_State* L, int idx, AssemblyOptions options)
#endif

    NativeState data;
    initFallbackTable(data);
    initFunctions(data);

    std::vector<Proto*> protos;
    gatherFunctions(protos, clvalue(func)->l.p);
@ -600,8 +593,8 @@ std::string getAssembly(lua_State* L, int idx, AssemblyOptions options)

    for (Proto* p : protos)
        if (p)
            if (NativeProto* np = assembleFunction(build, data, helpers, p, options))
                destroyNativeProto(np);
            if (std::optional<NativeProto> np = assembleFunction(build, data, helpers, p, options))
                destroyExecData(np->execdata);

    if (!build.finalize())
        return std::string();

@ -4,6 +4,7 @@
#include "Luau/AssemblyBuilderA64.h"
#include "Luau/UnwindBuilder.h"

#include "BitUtils.h"
#include "CustomExecUtils.h"
#include "NativeState.h"
#include "EmitCommonA64.h"
@ -91,6 +92,13 @@ static void emitReentry(AssemblyBuilderA64& build, ModuleHelpers& helpers)
    // Need to update state of the current function before we jump away
    build.ldr(x1, mem(x0, offsetof(Closure, l.p))); // cl->l.p aka proto

    build.ldr(x2, mem(rState, offsetof(lua_State, ci))); // L->ci

    // We need to check if the new frame can be executed natively
    // TODO: .flags and .savedpc load below can be fused with ldp
    build.ldr(w3, mem(x2, offsetof(CallInfo, flags)));
    build.tbz(x3, countrz(LUA_CALLINFO_CUSTOM), helpers.exitContinueVm);

    build.mov(rClosure, x0);
    build.ldr(rConstants, mem(x1, offsetof(Proto, k))); // proto->k
    build.ldr(rCode, mem(x1, offsetof(Proto, code))); // proto->code
@ -98,22 +106,15 @@ static void emitReentry(AssemblyBuilderA64& build, ModuleHelpers& helpers)
    // Get instruction index from instruction pointer
    // To get instruction index from instruction pointer, we need to divide byte offset by 4
    // But we will actually need to scale instruction index by 4 back to byte offset later so it cancels out
    // Note that we're computing negative offset here (code-savedpc) so that we can add it to NativeProto address, as we use reverse indexing
    build.ldr(x2, mem(rState, offsetof(lua_State, ci))); // L->ci
    build.ldr(x2, mem(x2, offsetof(CallInfo, savedpc))); // L->ci->savedpc
    build.sub(x2, rCode, x2);

    // We need to check if the new function can be executed natively
    // TODO: This can be done earlier in the function flow, to reduce the JIT->VM transition penalty
    build.ldr(x1, mem(x1, offsetofProtoExecData));
    build.cbz(x1, helpers.exitContinueVm);
    build.sub(x2, x2, rCode);

    // Get new instruction location and jump to it
    LUAU_ASSERT(offsetof(NativeProto, instOffsets) == 0);
    build.ldr(w2, mem(x1, x2));
    build.ldr(x1, mem(x1, offsetof(NativeProto, instBase)));
    build.add(x1, x1, x2);
    build.br(x1);
    LUAU_ASSERT(offsetof(Proto, exectarget) == offsetof(Proto, execdata) + 8);
    build.ldp(x3, x4, mem(x1, offsetof(Proto, execdata)));
    build.ldr(w2, mem(x3, x2));
    build.add(x4, x4, x2);
    build.br(x4);
}
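
// Note: the ldp above relies on exectarget sitting immediately after execdata
// in Proto (checked by the assert), so both fields load in one paired load;
// the ldr into w2 then overwrites the low half of x2 with the per-instruction
// offset, which add x4, x4, x2 combines with the entry address before br.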

static EntryLocations buildEntryFunction(AssemblyBuilderA64& build, UnwindBuilder& unwind)

@ -1,13 +1,64 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "CodeGenUtils.h"

#include "CustomExecUtils.h"

#include "lvm.h"

#include "lbuiltins.h"
#include "lbytecode.h"
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lgc.h"
#include "lmem.h"
#include "lnumutils.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"

#include "FallbacksProlog.h"

#include <string.h>

LUAU_FASTFLAG(LuauUniformTopHandling)

// All external function calls that can cause stack realloc or Lua calls have to be wrapped in VM_PROTECT
// This makes sure that we save the pc (in case the Lua call needs to generate a backtrace) before the call,
// and restores the stack pointer after in case stack gets reallocated
// Should only be used on the slow paths.
#define VM_PROTECT(x) \
    { \
        L->ci->savedpc = pc; \
        { \
            x; \
        }; \
        base = L->base; \
    }
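
// Typical usage, as seen throughout this file: wrap any call that may run Lua
// code or reallocate the stack, e.g.
//   VM_PROTECT(luaV_gettable(L, rb, kv, ra));
// which saves pc before the call and re-reads base afterwards.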

// Some external functions can cause an error, but never reallocate the stack; for these, VM_PROTECT_PC() is
// a cheaper version of VM_PROTECT that can be called before the external call.
#define VM_PROTECT_PC() L->ci->savedpc = pc

#define VM_REG(i) (LUAU_ASSERT(unsigned(i) < unsigned(L->top - base)), &base[i])
#define VM_KV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->l.p->sizek)), &k[i])
#define VM_UV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->nupvalues)), &cl->l.uprefs[i])

#define VM_PATCH_C(pc, slot) *const_cast<Instruction*>(pc) = ((uint8_t(slot) << 24) | (0x00ffffffu & *(pc)))
#define VM_PATCH_E(pc, slot) *const_cast<Instruction*>(pc) = ((uint32_t(slot) << 8) | (0x000000ffu & *(pc)))

#define VM_INTERRUPT() \
    { \
        void (*interrupt)(lua_State*, int) = L->global->cb.interrupt; \
        if (LUAU_UNLIKELY(!!interrupt)) \
        { /* the interrupt hook is called right before we advance pc */ \
            VM_PROTECT(L->ci->savedpc++; interrupt(L, -1)); \
            if (L->status != 0) \
            { \
                L->ci->savedpc--; \
                return NULL; \
            } \
        } \
    }

namespace Luau
{
namespace CodeGen
@ -215,6 +266,10 @@ Closure* callFallback(lua_State* L, StkId ra, StkId argtop, int nresults)

        // keep executing new function
        ci->savedpc = p->code;

        if (LUAU_LIKELY(p->execdata != NULL))
            ci->flags = LUA_CALLINFO_CUSTOM;

        return ccl;
    }
    else
@ -281,7 +336,8 @@ Closure* returnFallback(lua_State* L, StkId ra, StkId valend)
    // we're done!
    if (LUAU_UNLIKELY(ci->flags & LUA_CALLINFO_RETURN))
    {
        L->top = res;
        if (!FFlag::LuauUniformTopHandling)
            L->top = res;
        return NULL;
    }

@ -290,5 +346,614 @@ Closure* returnFallback(lua_State* L, StkId ra, StkId valend)
    return clvalue(cip->func);
}

const Instruction* executeGETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path should already have been checked, so we skip checking for it here
    Table* h = cl->env;
    int slot = LUAU_INSN_C(insn) & h->nodemask8;

    // slow-path, may invoke Lua calls via __index metamethod
    TValue g;
    sethvalue(L, &g, h);
    L->cachedslot = slot;
    VM_PROTECT(luaV_gettable(L, &g, kv, ra));
    // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
    VM_PATCH_C(pc - 2, L->cachedslot);
    return pc;
}
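
// Sketch of the convention shared by the execute* helpers in this file: each
// receives pc pointing at its own instruction, decodes it (including any aux
// word) with pc++, performs the slow path, and returns the pc of the next
// instruction to resume from.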

const Instruction* executeSETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path should already have been checked, so we skip checking for it here
    Table* h = cl->env;
    int slot = LUAU_INSN_C(insn) & h->nodemask8;

    // slow-path, may invoke Lua calls via __newindex metamethod
    TValue g;
    sethvalue(L, &g, h);
    L->cachedslot = slot;
    VM_PROTECT(luaV_settable(L, &g, kv, ra));
    // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
    VM_PATCH_C(pc - 2, L->cachedslot);
    return pc;
}

const Instruction* executeGETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path: built-in table
    if (ttistable(rb))
    {
        Table* h = hvalue(rb);

        int slot = LUAU_INSN_C(insn) & h->nodemask8;
        LuaNode* n = &h->node[slot];

        // fast-path: value is in expected slot
        if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n))))
        {
            setobj2s(L, ra, gval(n));
            return pc;
        }
        else if (!h->metatable)
        {
            // fast-path: value is not in expected slot, but the table lookup doesn't involve metatable
            const TValue* res = luaH_getstr(h, tsvalue(kv));

            if (res != luaO_nilobject)
            {
                int cachedslot = gval2slot(h, res);
                // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
                VM_PATCH_C(pc - 2, cachedslot);
            }

            setobj2s(L, ra, res);
            return pc;
        }
        else
        {
            // slow-path, may invoke Lua calls via __index metamethod
            L->cachedslot = slot;
            VM_PROTECT(luaV_gettable(L, rb, kv, ra));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
    }
    else
    {
        // fast-path: user data with C __index TM
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_INDEX)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, kv);
            L->top = top + 3;

            L->cachedslot = LUAU_INSN_C(insn);
            VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
        else if (ttisvector(rb))
        {
            // fast-path: quick case-insensitive comparison with "X"/"Y"/"Z"
            const char* name = getstr(tsvalue(kv));
            int ic = (name[0] | ' ') - 'x';

#if LUA_VECTOR_SIZE == 4
            // 'w' is before 'x' in ascii, so ic is -1 when indexing with 'w'
            if (ic == -1)
                ic = 3;
#endif

            if (unsigned(ic) < LUA_VECTOR_SIZE && name[1] == '\0')
            {
                const float* v = rb->value.v; // silences ubsan when indexing v[]
                setnvalue(ra, v[ic]);
                return pc;
            }

            fn = fasttm(L, L->global->mt[LUA_TVECTOR], TM_INDEX);

            if (fn && ttisfunction(fn) && clvalue(fn)->isC)
            {
                // note: it's safe to push arguments past top for complicated reasons (see top of the file)
                LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
                StkId top = L->top;
                setobj2s(L, top + 0, fn);
                setobj2s(L, top + 1, rb);
                setobj2s(L, top + 2, kv);
                L->top = top + 3;

                L->cachedslot = LUAU_INSN_C(insn);
                VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
                // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
                VM_PATCH_C(pc - 2, L->cachedslot);
                return pc;
            }

            // fall through to slow path
        }

        // fall through to slow path
    }

    // slow-path, may invoke Lua calls via __index metamethod
    VM_PROTECT(luaV_gettable(L, rb, kv, ra));
    return pc;
}

const Instruction* executeSETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path: built-in table
    if (ttistable(rb))
    {
        Table* h = hvalue(rb);

        int slot = LUAU_INSN_C(insn) & h->nodemask8;
        LuaNode* n = &h->node[slot];

        // fast-path: value is in expected slot
        if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)) && !h->readonly))
        {
            setobj2t(L, gval(n), ra);
            luaC_barriert(L, h, ra);
            return pc;
        }
        else if (fastnotm(h->metatable, TM_NEWINDEX) && !h->readonly)
        {
            VM_PROTECT_PC(); // set may fail

            TValue* res = luaH_setstr(L, h, tsvalue(kv));
            int cachedslot = gval2slot(h, res);
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, cachedslot);
            setobj2t(L, res, ra);
            luaC_barriert(L, h, ra);
            return pc;
        }
        else
        {
            // slow-path, may invoke Lua calls via __newindex metamethod
            L->cachedslot = slot;
            VM_PROTECT(luaV_settable(L, rb, kv, ra));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
    }
    else
    {
        // fast-path: user data with C __newindex TM
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_NEWINDEX)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 4 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, kv);
            setobj2s(L, top + 3, ra);
            L->top = top + 4;

            L->cachedslot = LUAU_INSN_C(insn);
            VM_PROTECT(luaV_callTM(L, 3, -1));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
        else
        {
            // slow-path, may invoke Lua calls via __newindex metamethod
            VM_PROTECT(luaV_settable(L, rb, kv, ra));
            return pc;
        }
    }
}

const Instruction* executeNEWCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));

    Proto* pv = cl->l.p->p[LUAU_INSN_D(insn)];
    LUAU_ASSERT(unsigned(LUAU_INSN_D(insn)) < unsigned(cl->l.p->sizep));

    VM_PROTECT_PC(); // luaF_newLclosure may fail due to OOM

    // note: we save closure to stack early in case the code below wants to capture it by value
    Closure* ncl = luaF_newLclosure(L, pv->nups, cl->env, pv);
    setclvalue(L, ra, ncl);

    for (int ui = 0; ui < pv->nups; ++ui)
    {
        Instruction uinsn = *pc++;
        LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);

        switch (LUAU_INSN_A(uinsn))
        {
        case LCT_VAL:
            setobj(L, &ncl->l.uprefs[ui], VM_REG(LUAU_INSN_B(uinsn)));
            break;

        case LCT_REF:
            setupvalue(L, &ncl->l.uprefs[ui], luaF_findupval(L, VM_REG(LUAU_INSN_B(uinsn))));
            break;

        case LCT_UPVAL:
            setobj(L, &ncl->l.uprefs[ui], VM_UV(LUAU_INSN_B(uinsn)));
            break;

        default:
            LUAU_ASSERT(!"Unknown upvalue capture type");
            LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks
        }
    }

    VM_PROTECT(luaC_checkGC(L));
    return pc;
}

const Instruction* executeNAMECALL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    if (ttistable(rb))
    {
        Table* h = hvalue(rb);
        // note: we can't use nodemask8 here because we need to query the main position of the table, and 8-bit nodemask8 only works
        // for predictive lookups
        LuaNode* n = &h->node[tsvalue(kv)->hash & (sizenode(h) - 1)];

        const TValue* mt = 0;
        const LuaNode* mtn = 0;

        // fast-path: key is in the table in expected slot
        if (ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)))
        {
            // note: order of copies allows rb to alias ra+1 or ra
            setobj2s(L, ra + 1, rb);
            setobj2s(L, ra, gval(n));
        }
        // fast-path: key is absent from the base, table has an __index table, and it has the result in the expected slot
        else if (gnext(n) == 0 && (mt = fasttm(L, hvalue(rb)->metatable, TM_INDEX)) && ttistable(mt) &&
                 (mtn = &hvalue(mt)->node[LUAU_INSN_C(insn) & hvalue(mt)->nodemask8]) && ttisstring(gkey(mtn)) && tsvalue(gkey(mtn)) == tsvalue(kv) &&
                 !ttisnil(gval(mtn)))
        {
            // note: order of copies allows rb to alias ra+1 or ra
            setobj2s(L, ra + 1, rb);
            setobj2s(L, ra, gval(mtn));
        }
        else
        {
            // slow-path: handles full table lookup
            setobj2s(L, ra + 1, rb);
            L->cachedslot = LUAU_INSN_C(insn);
            VM_PROTECT(luaV_gettable(L, rb, kv, ra));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            // recompute ra since stack might have been reallocated
            ra = VM_REG(LUAU_INSN_A(insn));
            if (ttisnil(ra))
                luaG_methoderror(L, ra + 1, tsvalue(kv));
        }
    }
    else
    {
        Table* mt = ttisuserdata(rb) ? uvalue(rb)->metatable : L->global->mt[ttype(rb)];
        const TValue* tmi = 0;

        // fast-path: metatable with __namecall
        if (const TValue* fn = fasttm(L, mt, TM_NAMECALL))
        {
            // note: order of copies allows rb to alias ra+1 or ra
            setobj2s(L, ra + 1, rb);
            setobj2s(L, ra, fn);

            L->namecall = tsvalue(kv);
        }
        else if ((tmi = fasttm(L, mt, TM_INDEX)) && ttistable(tmi))
        {
            Table* h = hvalue(tmi);
            int slot = LUAU_INSN_C(insn) & h->nodemask8;
            LuaNode* n = &h->node[slot];

            // fast-path: metatable with __index that has method in expected slot
            if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n))))
            {
                // note: order of copies allows rb to alias ra+1 or ra
                setobj2s(L, ra + 1, rb);
                setobj2s(L, ra, gval(n));
            }
            else
            {
                // slow-path: handles slot mismatch
                setobj2s(L, ra + 1, rb);
                L->cachedslot = slot;
                VM_PROTECT(luaV_gettable(L, rb, kv, ra));
                // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
                VM_PATCH_C(pc - 2, L->cachedslot);
                // recompute ra since stack might have been reallocated
                ra = VM_REG(LUAU_INSN_A(insn));
                if (ttisnil(ra))
                    luaG_methoderror(L, ra + 1, tsvalue(kv));
            }
        }
        else
        {
            // slow-path: handles non-table __index
            setobj2s(L, ra + 1, rb);
            VM_PROTECT(luaV_gettable(L, rb, kv, ra));
            // recompute ra since stack might have been reallocated
            ra = VM_REG(LUAU_INSN_A(insn));
            if (ttisnil(ra))
                luaG_methoderror(L, ra + 1, tsvalue(kv));
        }
    }

    // intentional fallthrough to CALL
    LUAU_ASSERT(LUAU_INSN_OP(*pc) == LOP_CALL);
    return pc;
}

const Instruction* executeSETLIST(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = &base[LUAU_INSN_B(insn)]; // note: this can point to L->top if c == LUA_MULTRET making VM_REG unsafe to use
    int c = LUAU_INSN_C(insn) - 1;
    uint32_t index = *pc++;

    if (c == LUA_MULTRET)
    {
        c = int(L->top - rb);
        L->top = L->ci->top;
    }

    Table* h = hvalue(ra);

    // TODO: we really don't need this anymore
    if (!ttistable(ra))
        return NULL; // temporary workaround to weaken a rather powerful exploitation primitive in case of a MITM attack on bytecode

    int last = index + c - 1;
    if (last > h->sizearray)
    {
        VM_PROTECT_PC(); // luaH_resizearray may fail due to OOM

        luaH_resizearray(L, h, last);
    }

    TValue* array = h->array;

    for (int i = 0; i < c; ++i)
        setobj2t(L, &array[index + i - 1], rb + i);

    luaC_barrierfast(L, h);
    return pc;
}
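
// Illustration (hypothetical bytecode values): a constructor like {x, y, z}
// would reach this helper with index = 1 and c = 3, so the loop above copies
// rb[0..2] into array slots [0..2]; with c == LUA_MULTRET the count is
// recomputed from L->top instead.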

const Instruction* executeFORGPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));

    if (ttisfunction(ra))
    {
        // will be called during FORGLOOP
    }
    else
    {
        Table* mt = ttistable(ra) ? hvalue(ra)->metatable : ttisuserdata(ra) ? uvalue(ra)->metatable : cast_to(Table*, NULL);

        if (const TValue* fn = fasttm(L, mt, TM_ITER))
        {
            setobj2s(L, ra + 1, ra);
            setobj2s(L, ra, fn);

            L->top = ra + 2; // func + self arg
            LUAU_ASSERT(L->top <= L->stack_last);

            VM_PROTECT(luaD_call(L, ra, 3));
            L->top = L->ci->top;

            // recompute ra since stack might have been reallocated
            ra = VM_REG(LUAU_INSN_A(insn));

            // protect against __iter returning nil, since nil is used as a marker for builtin iteration in FORGLOOP
            if (ttisnil(ra))
            {
                VM_PROTECT_PC(); // next call always errors
                luaG_typeerror(L, ra, "call");
            }
        }
        else if (fasttm(L, mt, TM_CALL))
        {
            // table or userdata with __call, will be called during FORGLOOP
            // TODO: we might be able to stop supporting this depending on whether it's used in practice
        }
        else if (ttistable(ra))
        {
            // set up registers for builtin iteration
            setobj2s(L, ra + 1, ra);
            setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)));
            setnilvalue(ra);
        }
        else
        {
            VM_PROTECT_PC(); // next call always errors
            luaG_typeerror(L, ra, "iterate over");
        }
    }

    pc += LUAU_INSN_D(insn);
    LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
    return pc;
}
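
// Register layout set up above for builtin iteration: ra holds nil as the
// marker, ra + 1 the table, and ra + 2 a zero index encoded as a pointer;
// FORGLOOP recognizes the nil marker and iterates the table in-place.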

const Instruction* executeGETVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    int b = LUAU_INSN_B(insn) - 1;
    int n = cast_int(base - L->ci->func) - cl->l.p->numparams - 1;

    if (b == LUA_MULTRET)
    {
        VM_PROTECT(luaD_checkstack(L, n));
        StkId ra = VM_REG(LUAU_INSN_A(insn)); // previous call may change the stack

        for (int j = 0; j < n; j++)
            setobj2s(L, ra + j, base - n + j);

        L->top = ra + n;
        return pc;
    }
    else
    {
        StkId ra = VM_REG(LUAU_INSN_A(insn));

        for (int j = 0; j < b && j < n; j++)
            setobj2s(L, ra + j, base - n + j);
        for (int j = n; j < b; j++)
            setnilvalue(ra + j);
        return pc;
    }
}

const Instruction* executeDUPCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    TValue* kv = VM_KV(LUAU_INSN_D(insn));

    Closure* kcl = clvalue(kv);

    VM_PROTECT_PC(); // luaF_newLclosure may fail due to OOM

    // clone closure if the environment is not shared
    // note: we save closure to stack early in case the code below wants to capture it by value
    Closure* ncl = (kcl->env == cl->env) ? kcl : luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p);
    setclvalue(L, ra, ncl);

    // this loop does three things:
    // - if the closure was created anew, it just fills it with upvalues
    // - if the closure from the constant table is used, it fills it with upvalues so that it can be shared in the future
    // - if the closure is reused, it checks if the reuse is safe via rawequal, and falls back to duplicating the closure
    // normally this would use two separate loops, for reuse check and upvalue setup, but MSVC codegen goes crazy if you do that
    for (int ui = 0; ui < kcl->nupvalues; ++ui)
    {
        Instruction uinsn = pc[ui];
        LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);
        LUAU_ASSERT(LUAU_INSN_A(uinsn) == LCT_VAL || LUAU_INSN_A(uinsn) == LCT_UPVAL);

        TValue* uv = (LUAU_INSN_A(uinsn) == LCT_VAL) ? VM_REG(LUAU_INSN_B(uinsn)) : VM_UV(LUAU_INSN_B(uinsn));

        // check if the existing closure is safe to reuse
        if (ncl == kcl && luaO_rawequalObj(&ncl->l.uprefs[ui], uv))
            continue;

        // lazily clone the closure and update the upvalues
        if (ncl == kcl && kcl->preload == 0)
        {
            ncl = luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p);
            setclvalue(L, ra, ncl);

            ui = -1; // restart the loop to fill all upvalues
            continue;
        }

        // this updates a newly created closure, or an existing closure created during preload, in which case we need a barrier
        setobj(L, &ncl->l.uprefs[ui], uv);
        luaC_barrier(L, ncl, uv);
    }

    // this is a noop if ncl is newly created or shared successfully, but it has to run after the closure is preloaded for the first time
    ncl->preload = 0;

    if (kcl != ncl)
        VM_PROTECT(luaC_checkGC(L));

    pc += kcl->nupvalues;
    return pc;
}

const Instruction* executePREPVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    int numparams = LUAU_INSN_A(insn);

    // all fixed parameters are copied after the top so we need more stack space
    VM_PROTECT(luaD_checkstack(L, cl->stacksize + numparams));

    // the caller must have filled extra fixed arguments with nil
    LUAU_ASSERT(cast_int(L->top - base) >= numparams);

    // move fixed parameters to final position
    StkId fixed = base; // first fixed argument
    base = L->top;      // final position of first argument

    for (int i = 0; i < numparams; ++i)
    {
        setobj2s(L, base + i, fixed + i);
        setnilvalue(fixed + i);
    }

    // rewire our stack frame to point to the new base
    L->ci->base = base;
    L->ci->top = base + cl->stacksize;

    L->base = base;
    L->top = L->ci->top;
    return pc;
}
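
// Resulting stack shape (sketch): the fixed parameters now live at the new
// base above the incoming arguments, the vacated slots are nilled, and the
// varargs remain between L->ci->func and base, which is how executeGETVARARGS
// above computes their count as base - func - numparams - 1.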

} // namespace CodeGen
} // namespace Luau

@ -20,5 +20,17 @@ void callEpilogC(lua_State* L, int nresults, int n);
Closure* callFallback(lua_State* L, StkId ra, StkId argtop, int nresults);
Closure* returnFallback(lua_State* L, StkId ra, StkId valend);

const Instruction* executeGETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeSETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeGETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeSETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeNEWCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeNAMECALL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeSETLIST(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeFORGPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeGETVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executeDUPCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* executePREPVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k);

} // namespace CodeGen
} // namespace Luau

@ -46,21 +46,6 @@ inline void destroyNativeState(lua_State* L)
    delete state;
}

inline NativeProto* getProtoExecData(Proto* proto)
{
    return (NativeProto*)proto->execdata;
}

inline void setProtoExecData(Proto* proto, NativeProto* nativeProto)
{
    if (nativeProto)
        LUAU_ASSERT(proto->execdata == nullptr);

    proto->execdata = nativeProto;
}

#define offsetofProtoExecData offsetof(Proto, execdata)

#else

inline lua_ExecutionCallbacks* getExecutionCallbacks(lua_State* L)
@ -82,15 +67,6 @@ inline NativeState* createNativeState(lua_State* L)

inline void destroyNativeState(lua_State* L) {}

inline NativeProto* getProtoExecData(Proto* proto)
{
    return nullptr;
}

inline void setProtoExecData(Proto* proto, NativeProto* nativeProto) {}

#define offsetofProtoExecData 0

#endif

inline int getOpLength(LuauOpcode op)

@ -10,11 +10,12 @@ namespace CodeGen

constexpr unsigned kTValueSizeLog2 = 4;
constexpr unsigned kLuaNodeSizeLog2 = 5;
constexpr unsigned kLuaNodeTagMask = 0xf;
constexpr unsigned kNextBitOffset = 4;

constexpr unsigned kOffsetOfTKeyTag = 12; // offsetof cannot be used on a bit field
constexpr unsigned kOffsetOfTKeyNext = 12; // offsetof cannot be used on a bit field
// TKey.tt and TKey.next are packed together in a bitfield
constexpr unsigned kOffsetOfTKeyTagNext = 12; // offsetof cannot be used on a bit field
constexpr unsigned kTKeyTagBits = 4;
constexpr unsigned kTKeyTagMask = (1 << kTKeyTagBits) - 1;
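
// i.e. the 32-bit word at byte offset 12 of TKey packs the type tag in its
// low kTKeyTagBits bits with the 'next' offset in the bits above, which is
// what allows the tag and next fields to be fetched with a single load.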

constexpr unsigned kOffsetOfInstructionC = 3;

// Leaf functions that are placed in every module to perform common instruction sequences

@ -325,10 +325,8 @@ void emitInterrupt(IrRegAllocX64& regs, AssemblyBuilderX64& build, int pcpos)
    build.setLabel(skip);
}

void emitFallback(IrRegAllocX64& regs, AssemblyBuilderX64& build, NativeState& data, int op, int pcpos)
void emitFallback(IrRegAllocX64& regs, AssemblyBuilderX64& build, int offset, int pcpos)
{
    LUAU_ASSERT(data.context.fallback[op]);

    // fallback(L, instruction, base, k)
    IrCallWrapperX64 callWrap(regs, build);
    callWrap.addArgument(SizeX64::qword, rState);
@ -339,7 +337,7 @@ void emitFallback(IrRegAllocX64& regs, AssemblyBuilderX64& build, NativeState& d

    callWrap.addArgument(SizeX64::qword, rBase);
    callWrap.addArgument(SizeX64::qword, rConstants);
    callWrap.call(qword[rNativeContext + offsetof(NativeContext, fallback) + op * sizeof(FallbackFn)]);
    callWrap.call(qword[rNativeContext + offset]);

    emitUpdateBase(build);
}
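
// With the offset-based form a call site selects the NativeContext entry
// itself; for example (hypothetical call site, for illustration only):
//   emitFallback(regs, build, offsetof(NativeContext, fallback) + LOP_GETGLOBAL * sizeof(FallbackFn), pcpos);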

@ -136,7 +136,7 @@ inline OperandX64 luauNodeKeyValue(RegisterX64 node)
// Note: tag has dirty upper bits
inline OperandX64 luauNodeKeyTag(RegisterX64 node)
{
    return dword[node + offsetof(LuaNode, key) + kOffsetOfTKeyTag];
    return dword[node + offsetof(LuaNode, key) + kOffsetOfTKeyTagNext];
}

inline OperandX64 luauNodeValue(RegisterX64 node)
@ -189,7 +189,7 @@ inline void jumpIfNodeKeyTagIsNot(AssemblyBuilderX64& build, RegisterX64 tmp, Re
    tmp.size = SizeX64::dword;

    build.mov(tmp, luauNodeKeyTag(node));
    build.and_(tmp, kLuaNodeTagMask);
    build.and_(tmp, kTKeyTagMask);
    build.cmp(tmp, tag);
    build.jcc(ConditionX64::NotEqual, label);
}
@ -230,7 +230,7 @@ void callStepGc(IrRegAllocX64& regs, AssemblyBuilderX64& build);
void emitExit(AssemblyBuilderX64& build, bool continueInVm);
void emitUpdateBase(AssemblyBuilderX64& build);
void emitInterrupt(IrRegAllocX64& regs, AssemblyBuilderX64& build, int pcpos);
void emitFallback(IrRegAllocX64& regs, AssemblyBuilderX64& build, NativeState& data, int op, int pcpos);
void emitFallback(IrRegAllocX64& regs, AssemblyBuilderX64& build, int offset, int pcpos);

void emitContinueCallInVm(AssemblyBuilderX64& build);

@ -73,8 +73,6 @@ void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int
    build.mov(rax, qword[ci + offsetof(CallInfo, top)]);
    build.mov(qword[rState + offsetof(lua_State, top)], rax);

    build.mov(rax, qword[proto + offsetofProtoExecData]); // We'll need this value later

    // But if it is vararg, update it to 'argi'
    Label skipVararg;

@ -84,10 +82,14 @@ void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int
    build.mov(qword[rState + offsetof(lua_State, top)], argi);
    build.setLabel(skipVararg);

    // Check native function data
    // Get native function entry
    build.mov(rax, qword[proto + offsetof(Proto, exectarget)]);
    build.test(rax, rax);
    build.jcc(ConditionX64::Zero, helpers.continueCallInVm);

    // Mark call frame as custom
    build.mov(dword[ci + offsetof(CallInfo, flags)], LUA_CALLINFO_CUSTOM);

    // Switch current constants
    build.mov(rConstants, qword[proto + offsetof(Proto, k)]);

@ -95,7 +97,7 @@ void emitInstCall(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, int
    build.mov(rdx, qword[proto + offsetof(Proto, code)]);
    build.mov(sCode, rdx);

    build.jmp(qword[rax + offsetof(NativeProto, entryTarget)]);
    build.jmp(rax);
    }

    build.setLabel(cFuncCall);
@ -294,8 +296,9 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, i

    build.mov(proto, qword[rax + offsetof(Closure, l.p)]);

    build.mov(execdata, qword[proto + offsetofProtoExecData]);
    build.test(execdata, execdata);
    build.mov(execdata, qword[proto + offsetof(Proto, execdata)]);

    build.test(byte[cip + offsetof(CallInfo, flags)], LUA_CALLINFO_CUSTOM);
    build.jcc(ConditionX64::Zero, helpers.exitContinueVm); // Continue in interpreter if function has no native data

    // Change constants
@ -309,13 +312,11 @@ void emitInstReturn(AssemblyBuilderX64& build, ModuleHelpers& helpers, int ra, i

    // To get instruction index from instruction pointer, we need to divide byte offset by 4
    // But we will actually need to scale instruction index by 4 back to byte offset later so it cancels out
    // Note that we're computing negative offset here (code-savedpc) so that we can add it to NativeProto address, as we use reverse indexing
    build.sub(rdx, rax);
    build.sub(rax, rdx);

    // Get new instruction location and jump to it
    LUAU_ASSERT(offsetof(NativeProto, instOffsets) == 0);
    build.mov(edx, dword[execdata + rdx]);
    build.add(rdx, qword[execdata + offsetof(NativeProto, instBase)]);
    build.mov(edx, dword[execdata + rax]);
    build.add(rdx, qword[proto + offsetof(Proto, exectarget)]);
    build.jmp(rdx);
}
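
// After the rewritten subtraction, rax holds the positive byte offset
// savedpc - code; since instructions and execdata entries are both 4 bytes
// wide it doubles as a scaled index, and the loaded offset plus
// proto->exectarget yields the native address to jump to.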

@ -1,639 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
// This file was generated by 'tools/lvmexecute_split.py' script, do not modify it by hand
#include "Fallbacks.h"
#include "FallbacksProlog.h"

const Instruction* execute_LOP_GETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path: value is in expected slot
    Table* h = cl->env;
    int slot = LUAU_INSN_C(insn) & h->nodemask8;
    LuaNode* n = &h->node[slot];

    if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv)) && !ttisnil(gval(n)))
    {
        setobj2s(L, ra, gval(n));
        return pc;
    }
    else
    {
        // slow-path, may invoke Lua calls via __index metamethod
        TValue g;
        sethvalue(L, &g, h);
        L->cachedslot = slot;
        VM_PROTECT(luaV_gettable(L, &g, kv, ra));
        // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
        VM_PATCH_C(pc - 2, L->cachedslot);
        return pc;
    }
}

const Instruction* execute_LOP_SETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path: value is in expected slot
    Table* h = cl->env;
    int slot = LUAU_INSN_C(insn) & h->nodemask8;
    LuaNode* n = &h->node[slot];

    if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)) && !h->readonly))
    {
        setobj2t(L, gval(n), ra);
        luaC_barriert(L, h, ra);
        return pc;
    }
    else
    {
        // slow-path, may invoke Lua calls via __newindex metamethod
        TValue g;
        sethvalue(L, &g, h);
        L->cachedslot = slot;
        VM_PROTECT(luaV_settable(L, &g, kv, ra));
        // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
        VM_PATCH_C(pc - 2, L->cachedslot);
        return pc;
    }
}

const Instruction* execute_LOP_GETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path: built-in table
    if (ttistable(rb))
    {
        Table* h = hvalue(rb);

        int slot = LUAU_INSN_C(insn) & h->nodemask8;
        LuaNode* n = &h->node[slot];

        // fast-path: value is in expected slot
        if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n))))
        {
            setobj2s(L, ra, gval(n));
            return pc;
        }
        else if (!h->metatable)
        {
            // fast-path: value is not in expected slot, but the table lookup doesn't involve metatable
            const TValue* res = luaH_getstr(h, tsvalue(kv));

            if (res != luaO_nilobject)
            {
                int cachedslot = gval2slot(h, res);
                // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
                VM_PATCH_C(pc - 2, cachedslot);
            }

            setobj2s(L, ra, res);
            return pc;
        }
        else
        {
            // slow-path, may invoke Lua calls via __index metamethod
            L->cachedslot = slot;
            VM_PROTECT(luaV_gettable(L, rb, kv, ra));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
    }
    else
    {
        // fast-path: user data with C __index TM
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_INDEX)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, kv);
            L->top = top + 3;

            L->cachedslot = LUAU_INSN_C(insn);
            VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
        else if (ttisvector(rb))
        {
            // fast-path: quick case-insensitive comparison with "X"/"Y"/"Z"
            const char* name = getstr(tsvalue(kv));
            int ic = (name[0] | ' ') - 'x';

#if LUA_VECTOR_SIZE == 4
            // 'w' is before 'x' in ascii, so ic is -1 when indexing with 'w'
            if (ic == -1)
                ic = 3;
#endif

            if (unsigned(ic) < LUA_VECTOR_SIZE && name[1] == '\0')
            {
                const float* v = rb->value.v; // silences ubsan when indexing v[]
                setnvalue(ra, v[ic]);
                return pc;
            }

            fn = fasttm(L, L->global->mt[LUA_TVECTOR], TM_INDEX);

            if (fn && ttisfunction(fn) && clvalue(fn)->isC)
            {
                // note: it's safe to push arguments past top for complicated reasons (see top of the file)
                LUAU_ASSERT(L->top + 3 < L->stack + L->stacksize);
                StkId top = L->top;
                setobj2s(L, top + 0, fn);
                setobj2s(L, top + 1, rb);
                setobj2s(L, top + 2, kv);
                L->top = top + 3;

                L->cachedslot = LUAU_INSN_C(insn);
                VM_PROTECT(luaV_callTM(L, 2, LUAU_INSN_A(insn)));
                // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
                VM_PATCH_C(pc - 2, L->cachedslot);
                return pc;
            }

            // fall through to slow path
        }

        // fall through to slow path
    }

    // slow-path, may invoke Lua calls via __index metamethod
    VM_PROTECT(luaV_gettable(L, rb, kv, ra));
    return pc;
}

const Instruction* execute_LOP_SETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    // fast-path: built-in table
    if (ttistable(rb))
    {
        Table* h = hvalue(rb);

        int slot = LUAU_INSN_C(insn) & h->nodemask8;
        LuaNode* n = &h->node[slot];

        // fast-path: value is in expected slot
        if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)) && !h->readonly))
        {
            setobj2t(L, gval(n), ra);
            luaC_barriert(L, h, ra);
            return pc;
        }
        else if (fastnotm(h->metatable, TM_NEWINDEX) && !h->readonly)
        {
            VM_PROTECT_PC(); // set may fail

            TValue* res = luaH_setstr(L, h, tsvalue(kv));
            int cachedslot = gval2slot(h, res);
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, cachedslot);
            setobj2t(L, res, ra);
            luaC_barriert(L, h, ra);
            return pc;
        }
        else
        {
            // slow-path, may invoke Lua calls via __newindex metamethod
            L->cachedslot = slot;
            VM_PROTECT(luaV_settable(L, rb, kv, ra));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
    }
    else
    {
        // fast-path: user data with C __newindex TM
        const TValue* fn = 0;
        if (ttisuserdata(rb) && (fn = fasttm(L, uvalue(rb)->metatable, TM_NEWINDEX)) && ttisfunction(fn) && clvalue(fn)->isC)
        {
            // note: it's safe to push arguments past top for complicated reasons (see top of the file)
            LUAU_ASSERT(L->top + 4 < L->stack + L->stacksize);
            StkId top = L->top;
            setobj2s(L, top + 0, fn);
            setobj2s(L, top + 1, rb);
            setobj2s(L, top + 2, kv);
            setobj2s(L, top + 3, ra);
            L->top = top + 4;

            L->cachedslot = LUAU_INSN_C(insn);
            VM_PROTECT(luaV_callTM(L, 3, -1));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            return pc;
        }
        else
        {
            // slow-path, may invoke Lua calls via __newindex metamethod
            VM_PROTECT(luaV_settable(L, rb, kv, ra));
            return pc;
        }
    }
}

const Instruction* execute_LOP_NEWCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
|
||||
{
|
||||
[[maybe_unused]] Closure* cl = clvalue(L->ci->func);
|
||||
Instruction insn = *pc++;
|
||||
StkId ra = VM_REG(LUAU_INSN_A(insn));
|
||||
|
||||
Proto* pv = cl->l.p->p[LUAU_INSN_D(insn)];
|
||||
LUAU_ASSERT(unsigned(LUAU_INSN_D(insn)) < unsigned(cl->l.p->sizep));
|
||||
|
||||
VM_PROTECT_PC(); // luaF_newLclosure may fail due to OOM
|
||||
|
||||
// note: we save closure to stack early in case the code below wants to capture it by value
|
||||
Closure* ncl = luaF_newLclosure(L, pv->nups, cl->env, pv);
|
||||
setclvalue(L, ra, ncl);
|
||||
|
||||
for (int ui = 0; ui < pv->nups; ++ui)
|
||||
{
|
||||
Instruction uinsn = *pc++;
|
||||
LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);
|
||||
|
||||
switch (LUAU_INSN_A(uinsn))
|
||||
{
|
||||
case LCT_VAL:
|
||||
setobj(L, &ncl->l.uprefs[ui], VM_REG(LUAU_INSN_B(uinsn)));
|
||||
break;
|
||||
|
||||
case LCT_REF:
|
||||
setupvalue(L, &ncl->l.uprefs[ui], luaF_findupval(L, VM_REG(LUAU_INSN_B(uinsn))));
|
||||
break;
|
||||
|
||||
case LCT_UPVAL:
|
||||
setobj(L, &ncl->l.uprefs[ui], VM_UV(LUAU_INSN_B(uinsn)));
|
||||
break;
|
||||
|
||||
default:
|
||||
LUAU_ASSERT(!"Unknown upvalue capture type");
|
||||
LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks
|
||||
}
|
||||
}
|
||||
|
||||
VM_PROTECT(luaC_checkGC(L));
|
||||
return pc;
|
||||
}
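
// Illustrative sketch (hypothetical Luau source, not taken from this commit)
// of how the LOP_CAPTURE kinds consumed above arise:
//
//   local a = 1          -- immutable local: captured with LCT_VAL (by value)
//   local b = 2
//   local function f()
//       b += 1           -- mutated local: captured with LCT_REF (open upvalue)
//       local function g()
//           return a + b -- from g, both arrive through f's upvalues: LCT_UPVAL
//       end
//   end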

const Instruction* execute_LOP_NAMECALL(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = VM_REG(LUAU_INSN_B(insn));
    uint32_t aux = *pc++;
    TValue* kv = VM_KV(aux);
    LUAU_ASSERT(ttisstring(kv));

    if (ttistable(rb))
    {
        Table* h = hvalue(rb);
        // note: we can't use nodemask8 here because we need to query the main position of the table, and 8-bit nodemask8 only works
        // for predictive lookups
        LuaNode* n = &h->node[tsvalue(kv)->hash & (sizenode(h) - 1)];

        const TValue* mt = 0;
        const LuaNode* mtn = 0;

        // fast-path: key is in the table in expected slot
        if (ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n)))
        {
            // note: order of copies allows rb to alias ra+1 or ra
            setobj2s(L, ra + 1, rb);
            setobj2s(L, ra, gval(n));
        }
        // fast-path: key is absent from the base, table has an __index table, and it has the result in the expected slot
        else if (gnext(n) == 0 && (mt = fasttm(L, hvalue(rb)->metatable, TM_INDEX)) && ttistable(mt) &&
                 (mtn = &hvalue(mt)->node[LUAU_INSN_C(insn) & hvalue(mt)->nodemask8]) && ttisstring(gkey(mtn)) && tsvalue(gkey(mtn)) == tsvalue(kv) &&
                 !ttisnil(gval(mtn)))
        {
            // note: order of copies allows rb to alias ra+1 or ra
            setobj2s(L, ra + 1, rb);
            setobj2s(L, ra, gval(mtn));
        }
        else
        {
            // slow-path: handles full table lookup
            setobj2s(L, ra + 1, rb);
            L->cachedslot = LUAU_INSN_C(insn);
            VM_PROTECT(luaV_gettable(L, rb, kv, ra));
            // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
            VM_PATCH_C(pc - 2, L->cachedslot);
            // recompute ra since stack might have been reallocated
            ra = VM_REG(LUAU_INSN_A(insn));
            if (ttisnil(ra))
                luaG_methoderror(L, ra + 1, tsvalue(kv));
        }
    }
    else
    {
        Table* mt = ttisuserdata(rb) ? uvalue(rb)->metatable : L->global->mt[ttype(rb)];
        const TValue* tmi = 0;

        // fast-path: metatable with __namecall
        if (const TValue* fn = fasttm(L, mt, TM_NAMECALL))
        {
            // note: order of copies allows rb to alias ra+1 or ra
            setobj2s(L, ra + 1, rb);
            setobj2s(L, ra, fn);

            L->namecall = tsvalue(kv);
        }
        else if ((tmi = fasttm(L, mt, TM_INDEX)) && ttistable(tmi))
        {
            Table* h = hvalue(tmi);
            int slot = LUAU_INSN_C(insn) & h->nodemask8;
            LuaNode* n = &h->node[slot];

            // fast-path: metatable with __index that has method in expected slot
            if (LUAU_LIKELY(ttisstring(gkey(n)) && tsvalue(gkey(n)) == tsvalue(kv) && !ttisnil(gval(n))))
            {
                // note: order of copies allows rb to alias ra+1 or ra
                setobj2s(L, ra + 1, rb);
                setobj2s(L, ra, gval(n));
            }
            else
            {
                // slow-path: handles slot mismatch
                setobj2s(L, ra + 1, rb);
                L->cachedslot = slot;
                VM_PROTECT(luaV_gettable(L, rb, kv, ra));
                // save cachedslot to accelerate future lookups; patches currently executing instruction since pc-2 rolls back two pc++
                VM_PATCH_C(pc - 2, L->cachedslot);
                // recompute ra since stack might have been reallocated
                ra = VM_REG(LUAU_INSN_A(insn));
                if (ttisnil(ra))
                    luaG_methoderror(L, ra + 1, tsvalue(kv));
            }
        }
        else
        {
            // slow-path: handles non-table __index
            setobj2s(L, ra + 1, rb);
            VM_PROTECT(luaV_gettable(L, rb, kv, ra));
            // recompute ra since stack might have been reallocated
            ra = VM_REG(LUAU_INSN_A(insn));
            if (ttisnil(ra))
                luaG_methoderror(L, ra + 1, tsvalue(kv));
        }
    }

    // intentional fallthrough to CALL
    LUAU_ASSERT(LUAU_INSN_OP(*pc) == LOP_CALL);
    return pc;
}
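
// Illustrative note (hypothetical example): for 'local n = s:byte(1)' the
// compiler emits NAMECALL "byte" followed immediately by CALL; the code above
// leaves the resolved callee in ra and the receiver in ra+1, so CALL sees an
// ordinary call with 'self' already in argument position. Objects whose
// metatable defines __namecall skip method resolution entirely: the handler
// itself is placed in ra and the method name is stashed in L->namecall.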

const Instruction* execute_LOP_SETLIST(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    StkId rb = &base[LUAU_INSN_B(insn)]; // note: this can point to L->top if c == LUA_MULTRET making VM_REG unsafe to use
    int c = LUAU_INSN_C(insn) - 1;
    uint32_t index = *pc++;

    if (c == LUA_MULTRET)
    {
        c = int(L->top - rb);
        L->top = L->ci->top;
    }

    Table* h = hvalue(ra);

    // TODO: we really don't need this anymore
    if (!ttistable(ra))
        return NULL; // temporary workaround to weaken a rather powerful exploitation primitive in case of a MITM attack on bytecode

    int last = index + c - 1;
    if (last > h->sizearray)
    {
        VM_PROTECT_PC(); // luaH_resizearray may fail due to OOM

        luaH_resizearray(L, h, last);
    }

    TValue* array = h->array;

    for (int i = 0; i < c; ++i)
        setobj2t(L, &array[index + i - 1], rb + i);

    luaC_barrierfast(L, h);
    return pc;
}
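
// Worked example (illustrative): for 'local t = {1, 2, 3}' SETLIST runs with
// index == 1 and c == 3, so last == 3 and the array part is grown once up
// front; the loop then stores rb[0..2] into array[0..2] (Lua indices 1..3),
// instead of growing the table per element.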

const Instruction* execute_LOP_FORGPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));

    if (ttisfunction(ra))
    {
        // will be called during FORGLOOP
    }
    else
    {
        Table* mt = ttistable(ra) ? hvalue(ra)->metatable : ttisuserdata(ra) ? uvalue(ra)->metatable : cast_to(Table*, NULL);

        if (const TValue* fn = fasttm(L, mt, TM_ITER))
        {
            setobj2s(L, ra + 1, ra);
            setobj2s(L, ra, fn);

            L->top = ra + 2; // func + self arg
            LUAU_ASSERT(L->top <= L->stack_last);

            VM_PROTECT(luaD_call(L, ra, 3));
            L->top = L->ci->top;

            // recompute ra since stack might have been reallocated
            ra = VM_REG(LUAU_INSN_A(insn));

            // protect against __iter returning nil, since nil is used as a marker for builtin iteration in FORGLOOP
            if (ttisnil(ra))
            {
                VM_PROTECT_PC(); // next call always errors
                luaG_typeerror(L, ra, "call");
            }
        }
        else if (fasttm(L, mt, TM_CALL))
        {
            // table or userdata with __call, will be called during FORGLOOP
            // TODO: we might be able to stop supporting this depending on whether it's used in practice
        }
        else if (ttistable(ra))
        {
            // set up registers for builtin iteration
            setobj2s(L, ra + 1, ra);
            setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)));
            setnilvalue(ra);
        }
        else
        {
            VM_PROTECT_PC(); // next call always errors
            luaG_typeerror(L, ra, "iterate over");
        }
    }

    pc += LUAU_INSN_D(insn);
    LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
    return pc;
}
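
// For reference (illustrative): FORGPREP normalizes every 'for ... in' form
// into the classic (f, s, var) triple in ra..ra+2:
//
//   for k, v in t do ... end       -- builtin: ra = nil, ra+1 = t, ra+2 = 0
//   for k, v in obj do ... end     -- __iter: ra.. = results of __iter(obj)
//   for k, v in next, t do ... end -- explicit triple: left untouched here
//
// FORGLOOP then dispatches on ra being the nil marker (builtin table
// iteration) versus something callable.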

const Instruction* execute_LOP_GETVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    int b = LUAU_INSN_B(insn) - 1;
    int n = cast_int(base - L->ci->func) - cl->l.p->numparams - 1;

    if (b == LUA_MULTRET)
    {
        VM_PROTECT(luaD_checkstack(L, n));
        StkId ra = VM_REG(LUAU_INSN_A(insn)); // previous call may change the stack

        for (int j = 0; j < n; j++)
            setobj2s(L, ra + j, base - n + j);

        L->top = ra + n;
        return pc;
    }
    else
    {
        StkId ra = VM_REG(LUAU_INSN_A(insn));

        for (int j = 0; j < b && j < n; j++)
            setobj2s(L, ra + j, base - n + j);
        for (int j = n; j < b; j++)
            setnilvalue(ra + j);
        return pc;
    }
}

const Instruction* execute_LOP_DUPCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    StkId ra = VM_REG(LUAU_INSN_A(insn));
    TValue* kv = VM_KV(LUAU_INSN_D(insn));

    Closure* kcl = clvalue(kv);

    VM_PROTECT_PC(); // luaF_newLclosure may fail due to OOM

    // clone closure if the environment is not shared
    // note: we save closure to stack early in case the code below wants to capture it by value
    Closure* ncl = (kcl->env == cl->env) ? kcl : luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p);
    setclvalue(L, ra, ncl);

    // this loop does three things:
    // - if the closure was created anew, it just fills it with upvalues
    // - if the closure from the constant table is used, it fills it with upvalues so that it can be shared in the future
    // - if the closure is reused, it checks if the reuse is safe via rawequal, and falls back to duplicating the closure
    // normally this would use two separate loops, for reuse check and upvalue setup, but MSVC codegen goes crazy if you do that
    for (int ui = 0; ui < kcl->nupvalues; ++ui)
    {
        Instruction uinsn = pc[ui];
        LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);
        LUAU_ASSERT(LUAU_INSN_A(uinsn) == LCT_VAL || LUAU_INSN_A(uinsn) == LCT_UPVAL);

        TValue* uv = (LUAU_INSN_A(uinsn) == LCT_VAL) ? VM_REG(LUAU_INSN_B(uinsn)) : VM_UV(LUAU_INSN_B(uinsn));

        // check if the existing closure is safe to reuse
        if (ncl == kcl && luaO_rawequalObj(&ncl->l.uprefs[ui], uv))
            continue;

        // lazily clone the closure and update the upvalues
        if (ncl == kcl && kcl->preload == 0)
        {
            ncl = luaF_newLclosure(L, kcl->nupvalues, cl->env, kcl->l.p);
            setclvalue(L, ra, ncl);

            ui = -1; // restart the loop to fill all upvalues
            continue;
        }

        // this updates a newly created closure, or an existing closure created during preload, in which case we need a barrier
        setobj(L, &ncl->l.uprefs[ui], uv);
        luaC_barrier(L, ncl, uv);
    }

    // this is a noop if ncl is newly created or shared successfully, but it has to run after the closure is preloaded for the first time
    ncl->preload = 0;

    if (kcl != ncl)
        VM_PROTECT(luaC_checkGC(L));

    pc += kcl->nupvalues;
    return pc;
}

const Instruction* execute_LOP_PREPVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);
    Instruction insn = *pc++;
    int numparams = LUAU_INSN_A(insn);

    // all fixed parameters are copied after the top so we need more stack space
    VM_PROTECT(luaD_checkstack(L, cl->stacksize + numparams));

    // the caller must have filled extra fixed arguments with nil
    LUAU_ASSERT(cast_int(L->top - base) >= numparams);

    // move fixed parameters to final position
    StkId fixed = base; // first fixed argument
    base = L->top;      // final position of first argument

    for (int i = 0; i < numparams; ++i)
    {
        setobj2s(L, base + i, fixed + i);
        setnilvalue(fixed + i);
    }

    // rewire our stack frame to point to the new base
    L->ci->base = base;
    L->ci->top = base + cl->stacksize;

    L->base = base;
    L->top = L->ci->top;
    return pc;
}
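
// Worked example (illustrative): calling 'local function f(a, b, ...) end' as
// f(1, 2, 3, 4) with numparams == 2 rewrites the frame as follows:
//
//   before:  func | 1 | 2 | 3 | 4             base -> '1', L->top past '4'
//   after:   func | nil | nil | 3 | 4 | 1 | 2
//                                ^varargs ^new base
//
// The old parameter slots are nil'ed so the GC never sees stale copies, and
// GETVARARGS later recovers n == 2 varargs from the slots just below the new
// base (n = base - func - numparams - 1 = 5 - 0 - 2 - 1 = 2).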

const Instruction* execute_LOP_BREAK(lua_State* L, const Instruction* pc, StkId base, TValue* k)
{
    LUAU_ASSERT(!"Unsupported deprecated opcode");
    LUAU_UNREACHABLE();
}

@ -1,24 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This file was generated by 'tools/lvmexecute_split.py' script, do not modify it by hand
#pragma once

#include <stdint.h>

struct lua_State;
struct Closure;
typedef uint32_t Instruction;
typedef struct lua_TValue TValue;
typedef TValue* StkId;

const Instruction* execute_LOP_GETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETTABLEKS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_NEWCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_NAMECALL(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_SETLIST(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_FORGPREP(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_GETVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_DUPCLOSURE(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_PREPVARARGS(lua_State* L, const Instruction* pc, StkId base, TValue* k);
const Instruction* execute_LOP_BREAK(lua_State* L, const Instruction* pc, StkId base, TValue* k);

@ -1,56 +0,0 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once

#include "lvm.h"

#include "lbuiltins.h"
#include "lbytecode.h"
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lgc.h"
#include "lmem.h"
#include "lnumutils.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"

#include <string.h>

// All external function calls that can cause stack realloc or Lua calls have to be wrapped in VM_PROTECT
// This makes sure that we save the pc (in case the Lua call needs to generate a backtrace) before the call,
// and restores the stack pointer after in case stack gets reallocated
// Should only be used on the slow paths.
#define VM_PROTECT(x) \
    { \
        L->ci->savedpc = pc; \
        { \
            x; \
        }; \
        base = L->base; \
    }
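
// Illustrative use: a slow path that can enter Lua (e.g. a metamethod) is
// written as
//
//   VM_PROTECT(luaV_gettable(L, rb, kv, ra));
//   ra = VM_REG(LUAU_INSN_A(insn)); // cached StkIds must be recomputed
//
// since the call may reallocate the stack; 'base' itself is refreshed by the
// macro, but any other pointer into the stack is stale afterwards.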

// Some external functions can cause an error, but never reallocate the stack; for these, VM_PROTECT_PC() is
// a cheaper version of VM_PROTECT that can be called before the external call.
#define VM_PROTECT_PC() L->ci->savedpc = pc

#define VM_REG(i) (LUAU_ASSERT(unsigned(i) < unsigned(L->top - base)), &base[i])
#define VM_KV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->l.p->sizek)), &k[i])
#define VM_UV(i) (LUAU_ASSERT(unsigned(i) < unsigned(cl->nupvalues)), &cl->l.uprefs[i])

#define VM_PATCH_C(pc, slot) *const_cast<Instruction*>(pc) = ((uint8_t(slot) << 24) | (0x00ffffffu & *(pc)))
#define VM_PATCH_E(pc, slot) *const_cast<Instruction*>(pc) = ((uint32_t(slot) << 8) | (0x000000ffu & *(pc)))
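
// Illustrative arithmetic: instructions pack the opcode in the low byte and
// A/B/C in successively higher bytes, so VM_PATCH_C overwrites only the top
// byte:
//
//   Instruction insn = 0x12345678; // C=0x12, B=0x34, A=0x56, op=0x78
//   VM_PATCH_C(&insn, 0xAB);       // insn == 0xAB345678, opcode untouched
//
// VM_PATCH_E similarly rewrites the 24-bit E field above the opcode byte.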

#define VM_INTERRUPT() \
    { \
        void (*interrupt)(lua_State*, int) = L->global->cb.interrupt; \
        if (LUAU_UNLIKELY(!!interrupt)) \
        { /* the interrupt hook is called right before we advance pc */ \
            VM_PROTECT(L->ci->savedpc++; interrupt(L, -1)); \
            if (L->status != 0) \
            { \
                L->ci->savedpc--; \
                return NULL; \
            } \
        } \
    }

@ -96,14 +96,14 @@ static void emitAddOffset(AssemblyBuilderA64& build, RegisterA64 dst, RegisterA6
    }
}

static void emitFallback(AssemblyBuilderA64& build, int op, int pcpos)
static void emitFallback(AssemblyBuilderA64& build, int offset, int pcpos)
{
    // fallback(L, instruction, base, k)
    build.mov(x0, rState);
    emitAddOffset(build, x1, rCode, pcpos * sizeof(Instruction));
    build.mov(x2, rBase);
    build.mov(x3, rConstants);
    build.ldr(x4, mem(rNativeContext, offsetof(NativeContext, fallback) + op * sizeof(FallbackFn)));
    build.ldr(x4, mem(rNativeContext, offset));
    build.blr(x4);

    emitUpdateBase(build);

@ -658,30 +658,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
        jumpOrFallthrough(blockOp(inst.e), next);
        break;
    }
    case IrCmd::JUMP_SLOT_MATCH:
    {
        // TODO: share code with CHECK_SLOT_MATCH
        RegisterA64 temp1 = regs.allocTemp(KindA64::x);
        RegisterA64 temp1w = castReg(KindA64::w, temp1);
        RegisterA64 temp2 = regs.allocTemp(KindA64::x);

        build.ldr(temp1w, mem(regOp(inst.a), offsetof(LuaNode, key) + kOffsetOfTKeyTag));
        build.and_(temp1w, temp1w, kLuaNodeTagMask);
        build.cmp(temp1w, LUA_TSTRING);
        build.b(ConditionA64::NotEqual, labelOp(inst.d));

        AddressA64 addr = tempAddr(inst.b, offsetof(TValue, value));
        build.ldr(temp1, mem(regOp(inst.a), offsetof(LuaNode, key.value)));
        build.ldr(temp2, addr);
        build.cmp(temp1, temp2);
        build.b(ConditionA64::NotEqual, labelOp(inst.d));

        build.ldr(temp1w, mem(regOp(inst.a), offsetof(LuaNode, val.tt)));
        LUAU_ASSERT(LUA_TNIL == 0);
        build.cbz(temp1w, labelOp(inst.d));
        jumpOrFallthrough(blockOp(inst.c), next);
        break;
    }
    // IrCmd::JUMP_SLOT_MATCH implemented below
    case IrCmd::TABLE_LEN:
    {
        RegisterA64 reg = regOp(inst.a); // note: we need to call regOp before spill so that we don't do redundant reloads

@ -1078,34 +1055,40 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
        build.b(ConditionA64::UnsignedLessEqual, labelOp(inst.c));
        break;
    }
    case IrCmd::JUMP_SLOT_MATCH:
    case IrCmd::CHECK_SLOT_MATCH:
    {
        Label& mismatch = inst.cmd == IrCmd::JUMP_SLOT_MATCH ? labelOp(inst.d) : labelOp(inst.c);

        RegisterA64 temp1 = regs.allocTemp(KindA64::x);
        RegisterA64 temp1w = castReg(KindA64::w, temp1);
        RegisterA64 temp2 = regs.allocTemp(KindA64::x);

        build.ldr(temp1w, mem(regOp(inst.a), offsetof(LuaNode, key) + kOffsetOfTKeyTag));
        build.and_(temp1w, temp1w, kLuaNodeTagMask);
        build.cmp(temp1w, LUA_TSTRING);
        build.b(ConditionA64::NotEqual, labelOp(inst.c));
        LUAU_ASSERT(offsetof(LuaNode, key.value) == offsetof(LuaNode, key) && kOffsetOfTKeyTagNext >= 8 && kOffsetOfTKeyTagNext < 16);
        build.ldp(temp1, temp2, mem(regOp(inst.a), offsetof(LuaNode, key))); // load key.value into temp1 and key.tt (alongside other bits) into temp2
        build.ubfx(temp2, temp2, (kOffsetOfTKeyTagNext - 8) * 8, kTKeyTagBits); // .tt is right before .next, and 8 bytes are skipped by ldp
        build.cmp(temp2, LUA_TSTRING);
        build.b(ConditionA64::NotEqual, mismatch);

        AddressA64 addr = tempAddr(inst.b, offsetof(TValue, value));
        build.ldr(temp1, mem(regOp(inst.a), offsetof(LuaNode, key.value)));
        build.ldr(temp2, addr);
        build.cmp(temp1, temp2);
        build.b(ConditionA64::NotEqual, labelOp(inst.c));
        build.b(ConditionA64::NotEqual, mismatch);

        build.ldr(temp1w, mem(regOp(inst.a), offsetof(LuaNode, val.tt)));
        LUAU_ASSERT(LUA_TNIL == 0);
        build.cbz(temp1w, labelOp(inst.c));
        build.cbz(temp1w, mismatch);

        if (inst.cmd == IrCmd::JUMP_SLOT_MATCH)
            jumpOrFallthrough(blockOp(inst.c), next);
        break;
    }
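    // Note on the fused load above (illustrative): ldp reads 16 bytes starting
    // at the key, so temp1 receives key.value (bytes 0..7) and temp2 receives
    // bytes 8..15; with kOffsetOfTKeyTagNext in [8, 16), the tag sits at bit
    // (kOffsetOfTKeyTagNext - 8) * 8 of temp2, which is exactly the field that
    // ubfx extracts before the LUA_TSTRING comparison.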
    case IrCmd::CHECK_NODE_NO_NEXT:
    {
        RegisterA64 temp = regs.allocTemp(KindA64::w);

        build.ldr(temp, mem(regOp(inst.a), offsetof(LuaNode, key) + kOffsetOfTKeyNext));
        build.lsr(temp, temp, kNextBitOffset);
        build.ldr(temp, mem(regOp(inst.a), offsetof(LuaNode, key) + kOffsetOfTKeyTagNext));
        build.lsr(temp, temp, kTKeyTagBits);
        build.cbnz(temp, labelOp(inst.b));
        break;
    }

@ -1139,6 +1122,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)

        Label skip;
        build.ldr(temp1, mem(rState, offsetof(lua_State, global)));
        // TODO: totalbytes and GCthreshold loads can be fused with ldp
        build.ldr(temp2, mem(temp1, offsetof(global_State, totalbytes)));
        build.ldr(temp1, mem(temp1, offsetof(global_State, GCthreshold)));
        build.cmp(temp1, temp2);

@ -1265,7 +1249,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
        break;
    case IrCmd::SETLIST:
        regs.spill(build, index);
        emitFallback(build, LOP_SETLIST, uintOp(inst.a));
        emitFallback(build, offsetof(NativeContext, executeSETLIST), uintOp(inst.a));
        break;
    case IrCmd::CALL:
        regs.spill(build, index);

@ -1368,14 +1352,14 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);

        regs.spill(build, index);
        emitFallback(build, LOP_GETGLOBAL, uintOp(inst.a));
        emitFallback(build, offsetof(NativeContext, executeGETGLOBAL), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_SETGLOBAL:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);

        regs.spill(build, index);
        emitFallback(build, LOP_SETGLOBAL, uintOp(inst.a));
        emitFallback(build, offsetof(NativeContext, executeSETGLOBAL), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_GETTABLEKS:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
@ -1383,7 +1367,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
        LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);

        regs.spill(build, index);
        emitFallback(build, LOP_GETTABLEKS, uintOp(inst.a));
        emitFallback(build, offsetof(NativeContext, executeGETTABLEKS), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_SETTABLEKS:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
@ -1391,7 +1375,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
        LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);

        regs.spill(build, index);
        emitFallback(build, LOP_SETTABLEKS, uintOp(inst.a));
        emitFallback(build, offsetof(NativeContext, executeSETTABLEKS), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_NAMECALL:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
@ -1399,38 +1383,38 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
        LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);

        regs.spill(build, index);
        emitFallback(build, LOP_NAMECALL, uintOp(inst.a));
        emitFallback(build, offsetof(NativeContext, executeNAMECALL), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_PREPVARARGS:
        LUAU_ASSERT(inst.b.kind == IrOpKind::Constant);

        regs.spill(build, index);
        emitFallback(build, LOP_PREPVARARGS, uintOp(inst.a));
        emitFallback(build, offsetof(NativeContext, executePREPVARARGS), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_GETVARARGS:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::Constant);

        regs.spill(build, index);
        emitFallback(build, LOP_GETVARARGS, uintOp(inst.a));
        emitFallback(build, offsetof(NativeContext, executeGETVARARGS), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_NEWCLOSURE:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::Constant);

        regs.spill(build, index);
        emitFallback(build, LOP_NEWCLOSURE, uintOp(inst.a));
        emitFallback(build, offsetof(NativeContext, executeNEWCLOSURE), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_DUPCLOSURE:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);

        regs.spill(build, index);
        emitFallback(build, LOP_DUPCLOSURE, uintOp(inst.a));
        emitFallback(build, offsetof(NativeContext, executeDUPCLOSURE), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_FORGPREP:
        regs.spill(build, index);
        emitFallback(build, LOP_FORGPREP, uintOp(inst.a));
        emitFallback(build, offsetof(NativeContext, executeFORGPREP), uintOp(inst.a));
        jumpOrFallthrough(blockOp(inst.c), next);
        break;

@ -938,8 +938,8 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
    {
        ScopedRegX64 tmp{regs, SizeX64::dword};

        build.mov(tmp.reg, dword[regOp(inst.a) + offsetof(LuaNode, key) + kOffsetOfTKeyNext]);
        build.shr(tmp.reg, kNextBitOffset);
        build.mov(tmp.reg, dword[regOp(inst.a) + offsetof(LuaNode, key) + kOffsetOfTKeyTagNext]);
        build.shr(tmp.reg, kTKeyTagBits);
        build.jcc(ConditionX64::NotZero, labelOp(inst.b));
        break;
    }

@ -1098,60 +1098,60 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, IrBlock& next)
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);

        emitFallback(regs, build, data, LOP_GETGLOBAL, uintOp(inst.a));
        emitFallback(regs, build, offsetof(NativeContext, executeGETGLOBAL), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_SETGLOBAL:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);

        emitFallback(regs, build, data, LOP_SETGLOBAL, uintOp(inst.a));
        emitFallback(regs, build, offsetof(NativeContext, executeSETGLOBAL), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_GETTABLEKS:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);

        emitFallback(regs, build, data, LOP_GETTABLEKS, uintOp(inst.a));
        emitFallback(regs, build, offsetof(NativeContext, executeGETTABLEKS), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_SETTABLEKS:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);

        emitFallback(regs, build, data, LOP_SETTABLEKS, uintOp(inst.a));
        emitFallback(regs, build, offsetof(NativeContext, executeSETTABLEKS), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_NAMECALL:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);

        emitFallback(regs, build, data, LOP_NAMECALL, uintOp(inst.a));
        emitFallback(regs, build, offsetof(NativeContext, executeNAMECALL), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_PREPVARARGS:
        LUAU_ASSERT(inst.b.kind == IrOpKind::Constant);

        emitFallback(regs, build, data, LOP_PREPVARARGS, uintOp(inst.a));
        emitFallback(regs, build, offsetof(NativeContext, executePREPVARARGS), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_GETVARARGS:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::Constant);

        emitFallback(regs, build, data, LOP_GETVARARGS, uintOp(inst.a));
        emitFallback(regs, build, offsetof(NativeContext, executeGETVARARGS), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_NEWCLOSURE:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::Constant);

        emitFallback(regs, build, data, LOP_NEWCLOSURE, uintOp(inst.a));
        emitFallback(regs, build, offsetof(NativeContext, executeNEWCLOSURE), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_DUPCLOSURE:
        LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);

        emitFallback(regs, build, data, LOP_DUPCLOSURE, uintOp(inst.a));
        emitFallback(regs, build, offsetof(NativeContext, executeDUPCLOSURE), uintOp(inst.a));
        break;
    case IrCmd::FALLBACK_FORGPREP:
        emitFallback(regs, build, data, LOP_FORGPREP, uintOp(inst.a));
        emitFallback(regs, build, offsetof(NativeContext, executeFORGPREP), uintOp(inst.a));
        jumpOrFallthrough(blockOp(inst.c), next);
        break;
    case IrCmd::BITAND_UINT:

@ -5,7 +5,6 @@

#include "CodeGenUtils.h"
#include "CustomExecUtils.h"
#include "Fallbacks.h"

#include "lbuiltins.h"
#include "lgc.h"
@ -16,8 +15,6 @@
#include <math.h>
#include <string.h>

#define CODEGEN_SET_FALLBACK(op) data.context.fallback[op] = {execute_##op}

namespace Luau
{
namespace CodeGen
@ -33,27 +30,7 @@ NativeState::NativeState()

NativeState::~NativeState() = default;

void initFallbackTable(NativeState& data)
{
    // When fallback is completely removed, remove it from includeInsts list in lvmexecute_split.py
    CODEGEN_SET_FALLBACK(LOP_NEWCLOSURE);
    CODEGEN_SET_FALLBACK(LOP_NAMECALL);
    CODEGEN_SET_FALLBACK(LOP_FORGPREP);
    CODEGEN_SET_FALLBACK(LOP_GETVARARGS);
    CODEGEN_SET_FALLBACK(LOP_DUPCLOSURE);
    CODEGEN_SET_FALLBACK(LOP_PREPVARARGS);
    CODEGEN_SET_FALLBACK(LOP_BREAK);
    CODEGEN_SET_FALLBACK(LOP_SETLIST);

    // Fallbacks that are called from partial implementation of an instruction
    // TODO: these fallbacks should be replaced with special functions that exclude the (redundantly executed) fast path from the fallback
    CODEGEN_SET_FALLBACK(LOP_GETGLOBAL);
    CODEGEN_SET_FALLBACK(LOP_SETGLOBAL);
    CODEGEN_SET_FALLBACK(LOP_GETTABLEKS);
    CODEGEN_SET_FALLBACK(LOP_SETTABLEKS);
}

void initHelperFunctions(NativeState& data)
void initFunctions(NativeState& data)
{
    static_assert(sizeof(data.context.luauF_table) == sizeof(luauF_table), "fastcall tables are not of the same length");
    memcpy(data.context.luauF_table, luauF_table, sizeof(luauF_table));
@ -115,6 +92,19 @@ void initHelperFunctions(NativeState& data)

    data.context.callFallback = callFallback;
    data.context.returnFallback = returnFallback;

    data.context.executeGETGLOBAL = executeGETGLOBAL;
    data.context.executeSETGLOBAL = executeSETGLOBAL;
    data.context.executeGETTABLEKS = executeGETTABLEKS;
    data.context.executeSETTABLEKS = executeSETTABLEKS;

    data.context.executeNEWCLOSURE = executeNEWCLOSURE;
    data.context.executeNAMECALL = executeNAMECALL;
    data.context.executeFORGPREP = executeFORGPREP;
    data.context.executeGETVARARGS = executeGETVARARGS;
    data.context.executeDUPCLOSURE = executeDUPCLOSURE;
    data.context.executePREPVARARGS = executePREPVARARGS;
    data.context.executeSETLIST = executeSETLIST;
}

} // namespace CodeGen

@ -23,19 +23,6 @@ namespace CodeGen

class UnwindBuilder;

using FallbackFn = const Instruction* (*)(lua_State* L, const Instruction* pc, StkId base, TValue* k);

struct NativeProto
{
    // This array is stored before NativeProto in reverse order, so to get offset of instruction i you need to index instOffsets[-i]
    // This awkward layout is helpful for maximally efficient address computation on X64/A64
    uint32_t instOffsets[1];

    uintptr_t instBase = 0;
    uintptr_t entryTarget = 0; // = instOffsets[0] + instBase
    Proto* proto = nullptr;
};

struct NativeContext
{
    // Gateway (C => native transition) entry & exit, compiled at runtime
@ -102,7 +89,17 @@ struct NativeContext
    Closure* (*returnFallback)(lua_State* L, StkId ra, StkId valend) = nullptr;

    // Opcode fallbacks, implemented in C
    FallbackFn fallback[LOP__COUNT] = {};
    const Instruction* (*executeGETGLOBAL)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
    const Instruction* (*executeSETGLOBAL)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
    const Instruction* (*executeGETTABLEKS)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
    const Instruction* (*executeSETTABLEKS)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
    const Instruction* (*executeNEWCLOSURE)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
    const Instruction* (*executeNAMECALL)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
    const Instruction* (*executeSETLIST)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
    const Instruction* (*executeFORGPREP)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
    const Instruction* (*executeGETVARARGS)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
    const Instruction* (*executeDUPCLOSURE)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;
    const Instruction* (*executePREPVARARGS)(lua_State* L, const Instruction* pc, StkId base, TValue* k) = nullptr;

    // Fast call methods, implemented in C
    luau_FastFunction luauF_table[256] = {};
@ -124,8 +121,7 @@ struct NativeState
    NativeContext context;
};

void initFallbackTable(NativeState& data);
void initHelperFunctions(NativeState& data);
void initFunctions(NativeState& data);

} // namespace CodeGen
} // namespace Luau

@ -714,10 +714,23 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
    case IrCmd::DUP_TABLE:
    case IrCmd::TRY_NUM_TO_INDEX:
    case IrCmd::TRY_CALL_FASTGETTM:
        break;
    case IrCmd::INT_TO_NUM:
    case IrCmd::UINT_TO_NUM:
        state.substituteOrRecord(inst, index);
        break;
    case IrCmd::NUM_TO_INT:
        if (IrInst* src = function.asInstOp(inst.a); src && src->cmd == IrCmd::INT_TO_NUM)
            substitute(function, inst, src->a);
        else
            state.substituteOrRecord(inst, index);
        break;
    case IrCmd::NUM_TO_UINT:
        if (IrInst* src = function.asInstOp(inst.a); src && src->cmd == IrCmd::UINT_TO_NUM)
            substitute(function, inst, src->a);
        else
            state.substituteOrRecord(inst, index);
        break;
    case IrCmd::CHECK_ARRAY_SIZE:
    case IrCmd::CHECK_SLOT_MATCH:
    case IrCmd::CHECK_NODE_NO_NEXT:

Makefile
@ -136,6 +136,7 @@ $(FUZZ_OBJECTS): CXXFLAGS+=-std=c++17 -ICommon/include -IAst/include -ICompiler/
|
||||
|
||||
$(TESTS_TARGET): LDFLAGS+=-lpthread
|
||||
$(REPL_CLI_TARGET): LDFLAGS+=-lpthread
|
||||
$(ANALYZE_CLI_TARGET): LDFLAGS+=-lpthread
|
||||
fuzz-proto fuzz-prototest: LDFLAGS+=build/libprotobuf-mutator/src/libfuzzer/libprotobuf-mutator-libfuzzer.a build/libprotobuf-mutator/src/libprotobuf-mutator.a $(LPROTOBUF)
|
||||
|
||||
# pseudo targets
|
||||
|
@ -92,7 +92,6 @@ target_sources(Luau.CodeGen PRIVATE
|
||||
CodeGen/src/EmitBuiltinsX64.cpp
|
||||
CodeGen/src/EmitCommonX64.cpp
|
||||
CodeGen/src/EmitInstructionX64.cpp
|
||||
CodeGen/src/Fallbacks.cpp
|
||||
CodeGen/src/IrAnalysis.cpp
|
||||
CodeGen/src/IrBuilder.cpp
|
||||
CodeGen/src/IrCallWrapperX64.cpp
|
||||
@ -123,8 +122,6 @@ target_sources(Luau.CodeGen PRIVATE
|
||||
CodeGen/src/EmitCommonA64.h
|
||||
CodeGen/src/EmitCommonX64.h
|
||||
CodeGen/src/EmitInstructionX64.h
|
||||
CodeGen/src/Fallbacks.h
|
||||
CodeGen/src/FallbacksProlog.h
|
||||
CodeGen/src/IrLoweringA64.h
|
||||
CodeGen/src/IrLoweringX64.h
|
||||
CodeGen/src/IrRegAllocA64.h
|
||||
@ -171,6 +168,7 @@ target_sources(Luau.Analysis PRIVATE
|
||||
Analysis/include/Luau/RecursionCounter.h
|
||||
Analysis/include/Luau/RequireTracer.h
|
||||
Analysis/include/Luau/Scope.h
|
||||
Analysis/include/Luau/Simplify.h
|
||||
Analysis/include/Luau/Substitution.h
|
||||
Analysis/include/Luau/Symbol.h
|
||||
Analysis/include/Luau/ToDot.h
|
||||
@ -185,7 +183,6 @@ target_sources(Luau.Analysis PRIVATE
|
||||
Analysis/include/Luau/TypeFamily.h
|
||||
Analysis/include/Luau/TypeInfer.h
|
||||
Analysis/include/Luau/TypePack.h
|
||||
Analysis/include/Luau/TypeReduction.h
|
||||
Analysis/include/Luau/TypeUtils.h
|
||||
Analysis/include/Luau/Type.h
|
||||
Analysis/include/Luau/Unifiable.h
|
||||
@ -222,6 +219,7 @@ target_sources(Luau.Analysis PRIVATE
|
||||
Analysis/src/Quantify.cpp
|
||||
Analysis/src/RequireTracer.cpp
|
||||
Analysis/src/Scope.cpp
|
||||
Analysis/src/Simplify.cpp
|
||||
Analysis/src/Substitution.cpp
|
||||
Analysis/src/Symbol.cpp
|
||||
Analysis/src/ToDot.cpp
|
||||
@ -236,7 +234,6 @@ target_sources(Luau.Analysis PRIVATE
|
||||
Analysis/src/TypeFamily.cpp
|
||||
Analysis/src/TypeInfer.cpp
|
||||
Analysis/src/TypePack.cpp
|
||||
Analysis/src/TypeReduction.cpp
|
||||
Analysis/src/TypeUtils.cpp
|
||||
Analysis/src/Type.cpp
|
||||
Analysis/src/Unifiable.cpp
|
||||
@ -380,6 +377,7 @@ if(TARGET Luau.UnitTest)
|
||||
tests/Parser.test.cpp
|
||||
tests/RequireTracer.test.cpp
|
||||
tests/RuntimeLimits.test.cpp
|
||||
tests/Simplify.test.cpp
|
||||
tests/StringUtils.test.cpp
|
||||
tests/Symbol.test.cpp
|
||||
tests/ToDot.test.cpp
|
||||
@ -414,7 +412,6 @@ if(TARGET Luau.UnitTest)
|
||||
tests/TypeInfer.unionTypes.test.cpp
|
||||
tests/TypeInfer.unknownnever.test.cpp
|
||||
tests/TypePack.test.cpp
|
||||
tests/TypeReduction.test.cpp
|
||||
tests/TypeVar.test.cpp
|
||||
tests/Variant.test.cpp
|
||||
tests/VisitType.test.cpp
|
||||
|
@ -17,6 +17,8 @@
|
||||
|
||||
#include <string.h>
|
||||
|
||||
LUAU_FASTFLAGVARIABLE(LuauUniformTopHandling, false)
|
||||
|
||||
/*
|
||||
** {======================================================
|
||||
** Error-recovery functions
|
||||
@ -229,12 +231,14 @@ void luaD_checkCstack(lua_State* L)
|
||||
** When returns, all the results are on the stack, starting at the original
|
||||
** function position.
|
||||
*/
|
||||
void luaD_call(lua_State* L, StkId func, int nResults)
|
||||
void luaD_call(lua_State* L, StkId func, int nresults)
|
||||
{
|
||||
if (++L->nCcalls >= LUAI_MAXCCALLS)
|
||||
luaD_checkCstack(L);
|
||||
|
||||
if (luau_precall(L, func, nResults) == PCRLUA)
|
||||
ptrdiff_t old_func = savestack(L, func);
|
||||
|
||||
if (luau_precall(L, func, nresults) == PCRLUA)
|
||||
{ // is a Lua function?
|
||||
L->ci->flags |= LUA_CALLINFO_RETURN; // luau_execute will stop after returning from the stack frame
|
||||
|
||||
@ -248,6 +252,9 @@ void luaD_call(lua_State* L, StkId func, int nResults)
|
||||
L->isactive = false;
|
||||
}
|
||||
|
||||
if (FFlag::LuauUniformTopHandling && nresults != LUA_MULTRET)
|
||||
L->top = restorestack(L, old_func) + nresults;
|
||||
|
||||
L->nCcalls--;
|
||||
luaC_checkGC(L);
|
||||
}
|
||||
|
@ -44,7 +44,7 @@ typedef void (*Pfunc)(lua_State* L, void* ud);
|
||||
|
||||
LUAI_FUNC CallInfo* luaD_growCI(lua_State* L);
|
||||
|
||||
LUAI_FUNC void luaD_call(lua_State* L, StkId func, int nResults);
|
||||
LUAI_FUNC void luaD_call(lua_State* L, StkId func, int nresults);
|
||||
LUAI_FUNC int luaD_pcall(lua_State* L, Pfunc func, void* u, ptrdiff_t oldtop, ptrdiff_t ef);
|
||||
LUAI_FUNC void luaD_reallocCI(lua_State* L, int newsize);
|
||||
LUAI_FUNC void luaD_reallocstack(lua_State* L, int newsize);
|
||||
|
@ -32,9 +32,8 @@ Proto* luaF_newproto(lua_State* L)
|
||||
f->debugname = NULL;
|
||||
f->debuginsn = NULL;
|
||||
|
||||
#if LUA_CUSTOM_EXECUTION
|
||||
f->execdata = NULL;
|
||||
#endif
|
||||
f->exectarget = 0;
|
||||
|
||||
return f;
|
||||
}
|
||||
|
@ -275,9 +275,8 @@ typedef struct Proto
|
||||
TString* debugname;
|
||||
uint8_t* debuginsn; // a copy of code[] array with just opcodes
|
||||
|
||||
#if LUA_CUSTOM_EXECUTION
|
||||
void* execdata;
|
||||
#endif
|
||||
uintptr_t exectarget;
|
||||
|
||||
GCObject* gclist;
|
||||
|
||||
|
@ -69,6 +69,7 @@ typedef struct CallInfo
|
||||
|
||||
#define LUA_CALLINFO_RETURN (1 << 0) // should the interpreter return after returning from this callinfo? first frame must have this set
|
||||
#define LUA_CALLINFO_HANDLE (1 << 1) // should the error thrown during execution get handled by continuation from this callinfo? func must be C
|
||||
#define LUA_CALLINFO_CUSTOM (1 << 2) // should this function be executed using custom execution callback
|
||||
|
||||
#define curr_func(L) (clvalue(L->ci->func))
|
||||
#define ci_func(ci) (clvalue((ci)->func))
|
||||
|
@ -16,6 +16,8 @@
|
||||
|
||||
#include <string.h>
|
||||
|
||||
LUAU_FASTFLAG(LuauUniformTopHandling)
|
||||
|
||||
// Disable c99-designator to avoid the warning in CGOTO dispatch table
|
||||
#ifdef __clang__
|
||||
#if __has_warning("-Wc99-designator")
|
||||
@ -208,10 +210,11 @@ static void luau_execute(lua_State* L)
|
||||
LUAU_ASSERT(!isblack(obj2gco(L))); // we don't use luaC_threadbarrier because active threads never turn black
|
||||
|
||||
#if LUA_CUSTOM_EXECUTION
|
||||
Proto* p = clvalue(L->ci->func)->l.p;
|
||||
|
||||
if (p->execdata && !SingleStep)
|
||||
if ((L->ci->flags & LUA_CALLINFO_CUSTOM) && !SingleStep)
|
||||
{
|
||||
Proto* p = clvalue(L->ci->func)->l.p;
|
||||
LUAU_ASSERT(p->execdata);
|
||||
|
||||
if (L->global->ecb.enter(L, p) == 0)
|
||||
return;
|
||||
}
|
||||
@ -448,7 +451,7 @@ reentry:
|
||||
LUAU_ASSERT(ttisstring(kv));
|
||||
|
||||
// fast-path: built-in table
|
||||
if (ttistable(rb))
|
||||
if (LUAU_LIKELY(ttistable(rb)))
|
||||
{
|
||||
Table* h = hvalue(rb);
|
||||
|
||||
@ -565,7 +568,7 @@ reentry:
|
||||
LUAU_ASSERT(ttisstring(kv));
|
||||
|
||||
// fast-path: built-in table
|
||||
if (ttistable(rb))
|
||||
if (LUAU_LIKELY(ttistable(rb)))
|
||||
{
|
||||
Table* h = hvalue(rb);
|
||||
|
||||
@ -801,7 +804,7 @@ reentry:
|
||||
TValue* kv = VM_KV(aux);
|
||||
LUAU_ASSERT(ttisstring(kv));
|
||||
|
||||
if (ttistable(rb))
|
||||
if (LUAU_LIKELY(ttistable(rb)))
|
||||
{
|
||||
Table* h = hvalue(rb);
|
||||
// note: we can't use nodemask8 here because we need to query the main position of the table, and 8-bit nodemask8 only works
|
||||
@ -954,6 +957,7 @@ reentry:
|
||||
#if LUA_CUSTOM_EXECUTION
|
||||
if (LUAU_UNLIKELY(p->execdata && !SingleStep))
|
||||
{
|
||||
ci->flags = LUA_CALLINFO_CUSTOM;
|
||||
ci->savedpc = p->code;
|
||||
|
||||
if (L->global->ecb.enter(L, p) == 1)
|
||||
@ -1040,7 +1044,8 @@ reentry:
|
||||
// we're done!
|
||||
if (LUAU_UNLIKELY(ci->flags & LUA_CALLINFO_RETURN))
|
||||
{
|
||||
L->top = res;
|
||||
if (!FFlag::LuauUniformTopHandling)
|
||||
L->top = res;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
@ -1050,7 +1055,7 @@ reentry:
|
||||
Proto* nextproto = nextcl->l.p;
|
||||
|
||||
#if LUA_CUSTOM_EXECUTION
|
||||
if (LUAU_UNLIKELY(nextproto->execdata && !SingleStep))
|
||||
if (LUAU_UNLIKELY((cip->flags & LUA_CALLINFO_CUSTOM) && !SingleStep))
|
||||
{
|
||||
if (L->global->ecb.enter(L, nextproto) == 1)
|
||||
goto reentry;
|
||||
@ -1333,7 +1338,7 @@ reentry:
|
||||
|
||||
// fast-path: number
|
||||
// Note that all jumps below jump by 1 in the "false" case to skip over aux
|
||||
if (ttisnumber(ra) && ttisnumber(rb))
|
||||
if (LUAU_LIKELY(ttisnumber(ra) && ttisnumber(rb)))
|
||||
{
|
||||
pc += nvalue(ra) <= nvalue(rb) ? LUAU_INSN_D(insn) : 1;
|
||||
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
|
||||
@ -1366,7 +1371,7 @@ reentry:
|
||||
|
||||
// fast-path: number
|
||||
// Note that all jumps below jump by 1 in the "true" case to skip over aux
|
||||
if (ttisnumber(ra) && ttisnumber(rb))
|
||||
if (LUAU_LIKELY(ttisnumber(ra) && ttisnumber(rb)))
|
||||
{
|
||||
pc += !(nvalue(ra) <= nvalue(rb)) ? LUAU_INSN_D(insn) : 1;
|
||||
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
|
||||
@ -1399,7 +1404,7 @@ reentry:
|
||||
|
||||
// fast-path: number
|
||||
// Note that all jumps below jump by 1 in the "false" case to skip over aux
|
||||
if (ttisnumber(ra) && ttisnumber(rb))
|
||||
if (LUAU_LIKELY(ttisnumber(ra) && ttisnumber(rb)))
|
||||
{
|
||||
pc += nvalue(ra) < nvalue(rb) ? LUAU_INSN_D(insn) : 1;
|
||||
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
|
||||
@ -1432,7 +1437,7 @@ reentry:
|
||||
|
||||
// fast-path: number
|
||||
// Note that all jumps below jump by 1 in the "true" case to skip over aux
|
||||
if (ttisnumber(ra) && ttisnumber(rb))
|
||||
if (LUAU_LIKELY(ttisnumber(ra) && ttisnumber(rb)))
|
||||
{
|
||||
pc += !(nvalue(ra) < nvalue(rb)) ? LUAU_INSN_D(insn) : 1;
|
||||
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
|
||||
@ -1464,7 +1469,7 @@ reentry:
|
||||
StkId rc = VM_REG(LUAU_INSN_C(insn));
|
||||
|
||||
// fast-path
|
||||
if (ttisnumber(rb) && ttisnumber(rc))
|
||||
if (LUAU_LIKELY(ttisnumber(rb) && ttisnumber(rc)))
|
||||
{
|
||||
setnvalue(ra, nvalue(rb) + nvalue(rc));
|
||||
VM_NEXT();
|
||||
@ -1510,7 +1515,7 @@ reentry:
|
||||
StkId rc = VM_REG(LUAU_INSN_C(insn));
|
||||
|
||||
// fast-path
|
||||
if (ttisnumber(rb) && ttisnumber(rc))
|
||||
if (LUAU_LIKELY(ttisnumber(rb) && ttisnumber(rc)))
|
||||
{
|
||||
setnvalue(ra, nvalue(rb) - nvalue(rc));
|
||||
VM_NEXT();
|
||||
@ -1556,7 +1561,7 @@ reentry:
|
||||
StkId rc = VM_REG(LUAU_INSN_C(insn));
|
||||
|
||||
// fast-path
|
||||
if (ttisnumber(rb) && ttisnumber(rc))
|
||||
if (LUAU_LIKELY(ttisnumber(rb) && ttisnumber(rc)))
|
||||
{
|
||||
setnvalue(ra, nvalue(rb) * nvalue(rc));
|
||||
VM_NEXT();
|
||||
@ -1617,7 +1622,7 @@ reentry:
|
||||
StkId rc = VM_REG(LUAU_INSN_C(insn));
|
||||
|
||||
// fast-path
|
||||
if (ttisnumber(rb) && ttisnumber(rc))
|
||||
if (LUAU_LIKELY(ttisnumber(rb) && ttisnumber(rc)))
|
||||
{
|
||||
setnvalue(ra, nvalue(rb) / nvalue(rc));
|
||||
VM_NEXT();
|
||||
@ -1764,7 +1769,7 @@ reentry:
|
||||
TValue* kv = VM_KV(LUAU_INSN_C(insn));
|
||||
|
||||
// fast-path
|
||||
if (ttisnumber(rb))
|
||||
if (LUAU_LIKELY(ttisnumber(rb)))
|
||||
{
|
||||
setnvalue(ra, nvalue(rb) * nvalue(kv));
|
||||
VM_NEXT();
|
||||
@ -1810,7 +1815,7 @@ reentry:
|
||||
TValue* kv = VM_KV(LUAU_INSN_C(insn));
|
||||
|
||||
// fast-path
|
||||
if (ttisnumber(rb))
|
||||
if (LUAU_LIKELY(ttisnumber(rb)))
|
||||
{
|
||||
setnvalue(ra, nvalue(rb) / nvalue(kv));
|
||||
VM_NEXT();
|
||||
@ -1976,7 +1981,7 @@ reentry:
|
||||
StkId rb = VM_REG(LUAU_INSN_B(insn));
|
||||
|
||||
// fast-path
|
||||
if (ttisnumber(rb))
|
||||
if (LUAU_LIKELY(ttisnumber(rb)))
|
||||
{
|
||||
setnvalue(ra, -nvalue(rb));
|
||||
VM_NEXT();
|
||||
@ -2019,7 +2024,7 @@ reentry:
|
||||
StkId rb = VM_REG(LUAU_INSN_B(insn));
|
||||
|
||||
// fast-path #1: tables
|
||||
if (ttistable(rb))
|
||||
if (LUAU_LIKELY(ttistable(rb)))
|
||||
{
|
||||
Table* h = hvalue(rb);
|
||||
|
||||
@ -2878,14 +2883,21 @@ int luau_precall(lua_State* L, StkId func, int nresults)
|
||||
|
||||
if (!ccl->isC)
|
||||
{
|
||||
Proto* p = ccl->l.p;
|
||||
|
||||
// fill unused parameters with nil
|
||||
StkId argi = L->top;
|
||||
StkId argend = L->base + ccl->l.p->numparams;
|
||||
StkId argend = L->base + p->numparams;
|
||||
while (argi < argend)
|
||||
setnilvalue(argi++); // complete missing arguments
|
||||
L->top = ccl->l.p->is_vararg ? argi : ci->top;
|
||||
L->top = p->is_vararg ? argi : ci->top;
|
||||
|
||||
L->ci->savedpc = ccl->l.p->code;
|
||||
ci->savedpc = p->code;
|
||||
|
||||
#if LUA_CUSTOM_EXECUTION
|
||||
if (p->execdata)
|
||||
ci->flags = LUA_CALLINFO_CUSTOM;
|
||||
#endif
|
||||
|
||||
return PCRLUA;
|
||||
}
|
||||
|
@ -135,6 +135,19 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "BinaryImm")
|
||||
SINGLE_COMPARE(ror(x1, x2, 1), 0x93C20441);
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Bitfield")
|
||||
{
|
||||
SINGLE_COMPARE(ubfiz(x1, x2, 37, 5), 0xD35B1041);
|
||||
SINGLE_COMPARE(ubfx(x1, x2, 37, 5), 0xD365A441);
|
||||
SINGLE_COMPARE(sbfiz(x1, x2, 37, 5), 0x935B1041);
|
||||
SINGLE_COMPARE(sbfx(x1, x2, 37, 5), 0x9365A441);
|
||||
|
||||
SINGLE_COMPARE(ubfiz(w1, w2, 17, 5), 0x530F1041);
|
||||
SINGLE_COMPARE(ubfx(w1, w2, 17, 5), 0x53115441);
|
||||
SINGLE_COMPARE(sbfiz(w1, w2, 17, 5), 0x130F1041);
|
||||
SINGLE_COMPARE(sbfx(w1, w2, 17, 5), 0x13115441);
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "Loads")
|
||||
{
|
||||
// address forms
|
||||
@ -481,6 +494,8 @@ TEST_CASE("LogTest")
|
||||
|
||||
build.fcvt(s1, d2);
|
||||
|
||||
build.ubfx(x1, x2, 37, 5);
|
||||
|
||||
build.setLabel(l);
|
||||
build.ret();
|
||||
|
||||
@ -513,6 +528,7 @@ TEST_CASE("LogTest")
|
||||
fmov d0,#0.25
|
||||
tbz x0,#5,.L1
|
||||
fcvt s1,d2
|
||||
ubfx x1,x2,#3705
|
||||
.L1:
|
||||
ret
|
||||
)";
|
||||
|
@ -3388,38 +3388,6 @@ TEST_CASE_FIXTURE(ACFixture, "globals_are_order_independent")
|
||||
CHECK(ac.entryMap.count("abc1"));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(ACFixture, "type_reduction_is_hooked_up_to_autocomplete")
|
||||
{
|
||||
ScopedFastFlag sff{"DebugLuauDeferredConstraintResolution", true};
|
||||
|
||||
check(R"(
|
||||
type T = { x: (number & string)? }
|
||||
|
||||
function f(thingamabob: T)
|
||||
thingamabob.@1
|
||||
end
|
||||
|
||||
function g(thingamabob: T)
|
||||
thingama@2
|
||||
end
|
||||
)");
|
||||
|
||||
ToStringOptions opts;
|
||||
opts.exhaustive = true;
|
||||
|
||||
auto ac1 = autocomplete('1');
|
||||
REQUIRE(ac1.entryMap.count("x"));
|
||||
std::optional<TypeId> ty1 = ac1.entryMap.at("x").type;
|
||||
REQUIRE(ty1);
|
||||
CHECK("nil" == toString(*ty1, opts));
|
||||
|
||||
auto ac2 = autocomplete('2');
|
||||
REQUIRE(ac2.entryMap.count("thingamabob"));
|
||||
std::optional<TypeId> ty2 = ac2.entryMap.at("thingamabob").type;
|
||||
REQUIRE(ty2);
|
||||
CHECK("{| x: nil |}" == toString(*ty2, opts));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(ACFixture, "string_contents_is_available_to_callback")
|
||||
{
|
||||
loadDefinition(R"(
|
||||
@ -3490,8 +3458,6 @@ local c = b.@1
|
||||
|
||||
TEST_CASE_FIXTURE(ACFixture, "suggest_exported_types")
|
||||
{
|
||||
ScopedFastFlag luauCopyExportedTypes{"LuauCopyExportedTypes", true};
|
||||
|
||||
check(R"(
|
||||
export type Type = {a: number}
|
||||
local a: T@1
|
||||
|
@ -14,6 +14,7 @@ ClassFixture::ClassFixture()
|
||||
GlobalTypes& globals = frontend.globals;
|
||||
TypeArena& arena = globals.globalTypes;
|
||||
TypeId numberType = builtinTypes->numberType;
|
||||
TypeId stringType = builtinTypes->stringType;
|
||||
|
||||
unfreeze(arena);
|
||||
|
||||
@ -35,7 +36,7 @@ ClassFixture::ClassFixture()
|
||||
TypeId childClassInstanceType = arena.addType(ClassType{"ChildClass", {}, baseClassInstanceType, nullopt, {}, {}, "Test"});
|
||||
|
||||
getMutable<ClassType>(childClassInstanceType)->props = {
|
||||
{"Method", {makeFunction(arena, childClassInstanceType, {}, {builtinTypes->stringType})}},
|
||||
{"Method", {makeFunction(arena, childClassInstanceType, {}, {stringType})}},
|
||||
};
|
||||
|
||||
TypeId childClassType = arena.addType(ClassType{"ChildClass", {}, baseClassType, nullopt, {}, {}, "Test"});
|
||||
@ -48,7 +49,7 @@ ClassFixture::ClassFixture()
|
||||
TypeId grandChildInstanceType = arena.addType(ClassType{"GrandChild", {}, childClassInstanceType, nullopt, {}, {}, "Test"});
|
||||
|
||||
getMutable<ClassType>(grandChildInstanceType)->props = {
|
||||
{"Method", {makeFunction(arena, grandChildInstanceType, {}, {builtinTypes->stringType})}},
|
||||
{"Method", {makeFunction(arena, grandChildInstanceType, {}, {stringType})}},
|
||||
};
|
||||
|
||||
TypeId grandChildType = arena.addType(ClassType{"GrandChild", {}, baseClassType, nullopt, {}, {}, "Test"});
|
||||
@ -61,7 +62,7 @@ ClassFixture::ClassFixture()
|
||||
TypeId anotherChildInstanceType = arena.addType(ClassType{"AnotherChild", {}, baseClassInstanceType, nullopt, {}, {}, "Test"});
|
||||
|
||||
getMutable<ClassType>(anotherChildInstanceType)->props = {
|
||||
{"Method", {makeFunction(arena, anotherChildInstanceType, {}, {builtinTypes->stringType})}},
|
||||
{"Method", {makeFunction(arena, anotherChildInstanceType, {}, {stringType})}},
|
||||
};
|
||||
|
||||
TypeId anotherChildType = arena.addType(ClassType{"AnotherChild", {}, baseClassType, nullopt, {}, {}, "Test"});
|
||||
@ -101,7 +102,7 @@ ClassFixture::ClassFixture()
|
||||
TypeId callableClassMetaType = arena.addType(TableType{});
|
||||
TypeId callableClassType = arena.addType(ClassType{"CallableClass", {}, nullopt, callableClassMetaType, {}, {}, "Test"});
|
||||
getMutable<TableType>(callableClassMetaType)->props = {
|
||||
{"__call", {makeFunction(arena, nullopt, {callableClassType, builtinTypes->stringType}, {builtinTypes->numberType})}},
|
||||
{"__call", {makeFunction(arena, nullopt, {callableClassType, stringType}, {numberType})}},
|
||||
};
|
||||
globals.globalScope->exportedTypeBindings["CallableClass"] = TypeFun{{}, callableClassType};
|
||||
|
||||
@ -114,7 +115,7 @@ ClassFixture::ClassFixture()
|
||||
};
|
||||
|
||||
// IndexableClass has a table indexer with a key type of 'number | string' and a return type of 'number'
|
||||
addIndexableClass("IndexableClass", arena.addType(Luau::UnionType{{builtinTypes->stringType, numberType}}), numberType);
|
||||
addIndexableClass("IndexableClass", arena.addType(Luau::UnionType{{stringType, numberType}}), numberType);
|
||||
// IndexableNumericKeyClass has a table indexer with a key type of 'number' and a return type of 'number'
|
||||
addIndexableClass("IndexableNumericKeyClass", numberType, numberType);
|
||||
|
||||
|
@ -1,8 +1,6 @@
|
||||
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
|
||||
#include "ConstraintGraphBuilderFixture.h"
|
||||
|
||||
#include "Luau/TypeReduction.h"
|
||||
|
||||
namespace Luau
|
||||
{
|
||||
|
||||
@ -13,7 +11,6 @@ ConstraintGraphBuilderFixture::ConstraintGraphBuilderFixture()
|
||||
{
|
||||
mainModule->name = "MainModule";
|
||||
mainModule->humanReadableName = "MainModule";
|
||||
mainModule->reduction = std::make_unique<TypeReduction>(NotNull{&mainModule->internalTypes}, builtinTypes, NotNull{&ice});
|
||||
|
||||
BlockedType::DEPRECATED_nextIndex = 0;
|
||||
BlockedTypePack::nextIndex = 0;
|
||||
|
@ -1521,6 +1521,36 @@ bb_3:
|
||||
)");
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(IrBuilderFixture, "IntNumIntPeepholes")
|
||||
{
|
||||
IrOp block = build.block(IrBlockKind::Internal);
|
||||
|
||||
build.beginBlock(block);
|
||||
|
||||
IrOp i1 = build.inst(IrCmd::LOAD_INT, build.vmReg(0));
|
||||
IrOp u1 = build.inst(IrCmd::LOAD_INT, build.vmReg(1));
|
||||
IrOp ni1 = build.inst(IrCmd::INT_TO_NUM, i1);
|
||||
IrOp nu1 = build.inst(IrCmd::UINT_TO_NUM, u1);
|
||||
IrOp i2 = build.inst(IrCmd::NUM_TO_INT, ni1);
|
||||
IrOp u2 = build.inst(IrCmd::NUM_TO_UINT, nu1);
|
||||
build.inst(IrCmd::STORE_INT, build.vmReg(0), i2);
|
||||
build.inst(IrCmd::STORE_INT, build.vmReg(1), u2);
|
||||
build.inst(IrCmd::RETURN, build.constUint(2));
|
||||
|
||||
updateUseCounts(build.function);
|
||||
constPropInBlockChains(build, true);
|
||||
|
||||
CHECK("\n" + toString(build.function, /* includeUseInfo */ false) == R"(
|
||||
bb_0:
|
||||
%0 = LOAD_INT R0
|
||||
%1 = LOAD_INT R1
|
||||
STORE_INT R0, %0
|
||||
STORE_INT R1, %1
|
||||
RETURN 2u
|
||||
|
||||
)");
|
||||
}
|
||||
|
||||
TEST_SUITE_END();
|
||||
|
||||
TEST_SUITE_BEGIN("LinearExecutionFlowExtraction");
|
||||
|
@ -350,6 +350,35 @@ TEST_CASE_FIXTURE(Fixture, "clone_recursion_limit")
    CHECK_THROWS_AS(clone(table, dest, cloneState), RecursionLimitException);
}

// Unions should never be cyclic, but we should clone them correctly even if
// they are.
TEST_CASE_FIXTURE(Fixture, "clone_cyclic_union")
{
    ScopedFastFlag sff{"LuauCloneCyclicUnions", true};

    TypeArena src;

    TypeId u = src.addType(UnionType{{builtinTypes->numberType, builtinTypes->stringType}});
    UnionType* uu = getMutable<UnionType>(u);
    REQUIRE(uu);

    uu->options.push_back(u);

    TypeArena dest;
    CloneState cloneState;

    TypeId cloned = clone(u, dest, cloneState);
    REQUIRE(cloned);

    const UnionType* clonedUnion = get<UnionType>(cloned);
    REQUIRE(clonedUnion);
    REQUIRE(3 == clonedUnion->options.size());

    CHECK(builtinTypes->numberType == clonedUnion->options[0]);
    CHECK(builtinTypes->stringType == clonedUnion->options[1]);
    CHECK(cloned == clonedUnion->options[2]);
}

TEST_CASE_FIXTURE(Fixture, "any_persistance_does_not_leak")
{
    ScopedFastFlag flags[] = {
@ -494,7 +494,7 @@ struct NormalizeFixture : Fixture
        REQUIRE(node);
        AstStatTypeAlias* alias = node->as<AstStatTypeAlias>();
        REQUIRE(alias);
        TypeId* originalTy = getMainModule()->astOriginalResolvedTypes.find(alias->type);
        TypeId* originalTy = getMainModule()->astResolvedTypes.find(alias->type);
        REQUIRE(originalTy);
        return normalizer.normalize(*originalTy);
    }
@ -732,15 +732,11 @@ TEST_CASE_FIXTURE(NormalizeFixture, "narrow_union_of_classes_with_intersection")

TEST_CASE_FIXTURE(NormalizeFixture, "intersection_of_metatables_where_the_metatable_is_top_or_bottom")
{
    ScopedFastFlag sff{"LuauNormalizeMetatableFixes", true};

    CHECK("{ @metatable *error-type*, {| |} }" == toString(normal("Mt<{}, any> & Mt<{}, err>")));
}

TEST_CASE_FIXTURE(NormalizeFixture, "crazy_metatable")
{
    ScopedFastFlag sff{"LuauNormalizeMetatableFixes", true};

    CHECK("never" == toString(normal("Mt<{}, number> & Mt<{}, string>")));
}

tests/Simplify.test.cpp (new file, 508 lines)
@ -0,0 +1,508 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details

#include "Fixture.h"

#include "doctest.h"

#include "Luau/Simplify.h"

using namespace Luau;

namespace
{

struct SimplifyFixture : Fixture
{
    TypeArena _arena;
    const NotNull<TypeArena> arena{&_arena};

    ToStringOptions opts;

    Scope scope{builtinTypes->anyTypePack};

    const TypeId anyTy = builtinTypes->anyType;
    const TypeId unknownTy = builtinTypes->unknownType;
    const TypeId neverTy = builtinTypes->neverType;
    const TypeId errorTy = builtinTypes->errorType;

    const TypeId functionTy = builtinTypes->functionType;
    const TypeId tableTy = builtinTypes->tableType;

    const TypeId numberTy = builtinTypes->numberType;
    const TypeId stringTy = builtinTypes->stringType;
    const TypeId booleanTy = builtinTypes->booleanType;
    const TypeId nilTy = builtinTypes->nilType;
    const TypeId threadTy = builtinTypes->threadType;

    const TypeId classTy = builtinTypes->classType;

    const TypeId trueTy = builtinTypes->trueType;
    const TypeId falseTy = builtinTypes->falseType;

    const TypeId truthyTy = builtinTypes->truthyType;
    const TypeId falsyTy = builtinTypes->falsyType;

    const TypeId freeTy = arena->addType(FreeType{&scope});
    const TypeId genericTy = arena->addType(GenericType{});
    const TypeId blockedTy = arena->addType(BlockedType{});
    const TypeId pendingTy = arena->addType(PendingExpansionType{{}, {}, {}, {}});

    const TypeId helloTy = arena->addType(SingletonType{StringSingleton{"hello"}});
    const TypeId worldTy = arena->addType(SingletonType{StringSingleton{"world"}});

    const TypePackId emptyTypePack = arena->addTypePack({});

    const TypeId fn1Ty = arena->addType(FunctionType{emptyTypePack, emptyTypePack});
    const TypeId fn2Ty = arena->addType(FunctionType{builtinTypes->anyTypePack, emptyTypePack});

    TypeId parentClassTy = nullptr;
    TypeId childClassTy = nullptr;
    TypeId anotherChildClassTy = nullptr;
    TypeId unrelatedClassTy = nullptr;

    SimplifyFixture()
    {
        createSomeClasses(&frontend);

        parentClassTy = frontend.globals.globalScope->linearSearchForBinding("Parent")->typeId;
        childClassTy = frontend.globals.globalScope->linearSearchForBinding("Child")->typeId;
        anotherChildClassTy = frontend.globals.globalScope->linearSearchForBinding("AnotherChild")->typeId;
        unrelatedClassTy = frontend.globals.globalScope->linearSearchForBinding("Unrelated")->typeId;
    }

    TypeId intersect(TypeId a, TypeId b)
    {
        return simplifyIntersection(builtinTypes, arena, a, b).result;
    }

    std::string intersectStr(TypeId a, TypeId b)
    {
        return toString(intersect(a, b), opts);
    }

    bool isIntersection(TypeId a)
    {
        return bool(get<IntersectionType>(follow(a)));
    }

    TypeId mkTable(std::map<Name, TypeId> propTypes)
    {
        TableType::Props props;
        for (const auto& [name, ty] : propTypes)
            props[name] = Property{ty};

        return arena->addType(TableType{props, {}, TypeLevel{}, TableState::Sealed});
    }

    TypeId mkNegation(TypeId ty)
    {
        return arena->addType(NegationType{ty});
    }

    TypeId mkFunction(TypeId arg, TypeId ret)
    {
        return arena->addType(FunctionType{arena->addTypePack({arg}), arena->addTypePack({ret})});
    }

    TypeId union_(TypeId a, TypeId b)
    {
        return simplifyUnion(builtinTypes, arena, a, b).result;
    }
};

} // namespace

TEST_SUITE_BEGIN("Simplify");

TEST_CASE_FIXTURE(SimplifyFixture, "unknown_and_other_tops_and_bottom_types")
|
||||
{
|
||||
CHECK(unknownTy == intersect(unknownTy, unknownTy));
|
||||
|
||||
CHECK(unknownTy == intersect(unknownTy, anyTy));
|
||||
CHECK(unknownTy == intersect(anyTy, unknownTy));
|
||||
|
||||
CHECK(neverTy == intersect(unknownTy, neverTy));
|
||||
CHECK(neverTy == intersect(neverTy, unknownTy));
|
||||
|
||||
CHECK(neverTy == intersect(unknownTy, errorTy));
|
||||
CHECK(neverTy == intersect(errorTy, unknownTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "nil")
|
||||
{
|
||||
CHECK(nilTy == intersect(nilTy, nilTy));
|
||||
CHECK(neverTy == intersect(nilTy, numberTy));
|
||||
CHECK(neverTy == intersect(nilTy, trueTy));
|
||||
CHECK(neverTy == intersect(nilTy, tableTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "boolean_singletons")
|
||||
{
|
||||
CHECK(trueTy == intersect(trueTy, booleanTy));
|
||||
CHECK(trueTy == intersect(booleanTy, trueTy));
|
||||
|
||||
CHECK(falseTy == intersect(falseTy, booleanTy));
|
||||
CHECK(falseTy == intersect(booleanTy, falseTy));
|
||||
|
||||
CHECK(neverTy == intersect(falseTy, trueTy));
|
||||
CHECK(neverTy == intersect(trueTy, falseTy));
|
||||
|
||||
CHECK(booleanTy == union_(trueTy, booleanTy));
|
||||
CHECK(booleanTy == union_(booleanTy, trueTy));
|
||||
CHECK(booleanTy == union_(falseTy, booleanTy));
|
||||
CHECK(booleanTy == union_(booleanTy, falseTy));
|
||||
CHECK(booleanTy == union_(falseTy, trueTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "boolean_and_truthy_and_falsy")
|
||||
{
|
||||
TypeId optionalBooleanTy = arena->addType(UnionType{{booleanTy, nilTy}});
|
||||
|
||||
CHECK(trueTy == intersect(booleanTy, truthyTy));
|
||||
|
||||
CHECK(trueTy == intersect(optionalBooleanTy, truthyTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "any_and_indeterminate_types")
|
||||
{
|
||||
CHECK("a" == intersectStr(anyTy, freeTy));
|
||||
CHECK("a" == intersectStr(freeTy, anyTy));
|
||||
|
||||
CHECK("b" == intersectStr(anyTy, genericTy));
|
||||
CHECK("b" == intersectStr(genericTy, anyTy));
|
||||
|
||||
CHECK(blockedTy == intersect(anyTy, blockedTy));
|
||||
CHECK(blockedTy == intersect(blockedTy, anyTy));
|
||||
|
||||
CHECK(pendingTy == intersect(anyTy, pendingTy));
|
||||
CHECK(pendingTy == intersect(pendingTy, anyTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "unknown_and_indeterminate_types")
|
||||
{
|
||||
CHECK(isIntersection(intersect(unknownTy, freeTy)));
|
||||
CHECK(isIntersection(intersect(freeTy, unknownTy)));
|
||||
|
||||
CHECK(isIntersection(intersect(unknownTy, genericTy)));
|
||||
CHECK(isIntersection(intersect(genericTy, unknownTy)));
|
||||
|
||||
CHECK(isIntersection(intersect(unknownTy, blockedTy)));
|
||||
CHECK(isIntersection(intersect(blockedTy, unknownTy)));
|
||||
|
||||
CHECK(isIntersection(intersect(unknownTy, pendingTy)));
|
||||
CHECK(isIntersection(intersect(pendingTy, unknownTy)));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "unknown_and_concrete")
|
||||
{
|
||||
CHECK(numberTy == intersect(numberTy, unknownTy));
|
||||
CHECK(numberTy == intersect(unknownTy, numberTy));
|
||||
CHECK(trueTy == intersect(trueTy, unknownTy));
|
||||
CHECK(trueTy == intersect(unknownTy, trueTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "error_and_other_tops_and_bottom_types")
|
||||
{
|
||||
CHECK(errorTy == intersect(errorTy, errorTy));
|
||||
|
||||
CHECK(errorTy == intersect(errorTy, anyTy));
|
||||
CHECK(errorTy == intersect(anyTy, errorTy));
|
||||
|
||||
CHECK(neverTy == intersect(errorTy, neverTy));
|
||||
CHECK(neverTy == intersect(neverTy, errorTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "error_and_indeterminate_types")
|
||||
{
|
||||
CHECK("*error-type* & a" == intersectStr(errorTy, freeTy));
|
||||
CHECK("*error-type* & a" == intersectStr(freeTy, errorTy));
|
||||
|
||||
CHECK("*error-type* & b" == intersectStr(errorTy, genericTy));
|
||||
CHECK("*error-type* & b" == intersectStr(genericTy, errorTy));
|
||||
|
||||
CHECK(isIntersection(intersect(errorTy, blockedTy)));
|
||||
CHECK(isIntersection(intersect(blockedTy, errorTy)));
|
||||
|
||||
CHECK(isIntersection(intersect(errorTy, pendingTy)));
|
||||
CHECK(isIntersection(intersect(pendingTy, errorTy)));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "unknown_and_concrete")
|
||||
{
|
||||
CHECK(neverTy == intersect(numberTy, errorTy));
|
||||
CHECK(neverTy == intersect(errorTy, numberTy));
|
||||
CHECK(neverTy == intersect(trueTy, errorTy));
|
||||
CHECK(neverTy == intersect(errorTy, trueTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "primitives")
|
||||
{
|
||||
// This shouldn't be possible, but we'll make it work even if it is.
|
||||
TypeId numberTyDuplicate = arena->addType(PrimitiveType{PrimitiveType::Number});
|
||||
|
||||
CHECK(numberTy == intersect(numberTy, numberTyDuplicate));
|
||||
CHECK(neverTy == intersect(numberTy, stringTy));
|
||||
|
||||
CHECK(neverTy == intersect(neverTy, numberTy));
|
||||
CHECK(neverTy == intersect(numberTy, neverTy));
|
||||
|
||||
CHECK(neverTy == intersect(neverTy, functionTy));
|
||||
CHECK(neverTy == intersect(functionTy, neverTy));
|
||||
|
||||
CHECK(neverTy == intersect(neverTy, tableTy));
|
||||
CHECK(neverTy == intersect(tableTy, neverTy));
|
||||
|
||||
CHECK(numberTy == intersect(anyTy, numberTy));
|
||||
CHECK(numberTy == intersect(numberTy, anyTy));
|
||||
|
||||
CHECK(neverTy == intersect(stringTy, nilTy));
|
||||
CHECK(neverTy == intersect(nilTy, stringTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "primitives_and_falsy")
|
||||
{
|
||||
CHECK(neverTy == intersect(numberTy, falsyTy));
|
||||
CHECK(neverTy == intersect(falsyTy, numberTy));
|
||||
|
||||
CHECK(nilTy == intersect(nilTy, falsyTy));
|
||||
CHECK(nilTy == intersect(falsyTy, nilTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "primitives_and_singletons")
|
||||
{
|
||||
CHECK(helloTy == intersect(helloTy, stringTy));
|
||||
CHECK(helloTy == intersect(stringTy, helloTy));
|
||||
|
||||
CHECK(neverTy == intersect(worldTy, helloTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "functions")
|
||||
{
|
||||
CHECK(fn1Ty == intersect(fn1Ty, functionTy));
|
||||
CHECK(fn1Ty == intersect(functionTy, fn1Ty));
|
||||
|
||||
// Intersections of functions are super weird if you think about it.
|
||||
CHECK("(() -> ()) & ((...any) -> ())" == intersectStr(fn1Ty, fn2Ty));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "negated_top_function_type")
|
||||
{
|
||||
TypeId negatedFunctionTy = mkNegation(functionTy);
|
||||
|
||||
CHECK(numberTy == intersect(numberTy, negatedFunctionTy));
|
||||
CHECK(numberTy == intersect(negatedFunctionTy, numberTy));
|
||||
|
||||
CHECK(falsyTy == intersect(falsyTy, negatedFunctionTy));
|
||||
CHECK(falsyTy == intersect(negatedFunctionTy, falsyTy));
|
||||
|
||||
TypeId f = mkFunction(stringTy, numberTy);
|
||||
|
||||
CHECK(neverTy == intersect(f, negatedFunctionTy));
|
||||
CHECK(neverTy == intersect(negatedFunctionTy, f));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "optional_overloaded_function_and_top_function")
|
||||
{
|
||||
// (((number) -> string) & ((string) -> number))? & ~function
|
||||
|
||||
TypeId f1 = mkFunction(numberTy, stringTy);
|
||||
TypeId f2 = mkFunction(stringTy, numberTy);
|
||||
|
||||
TypeId f12 = arena->addType(IntersectionType{{f1, f2}});
|
||||
|
||||
TypeId t = arena->addType(UnionType{{f12, nilTy}});
|
||||
|
||||
TypeId notFunctionTy = mkNegation(functionTy);
|
||||
|
||||
CHECK(nilTy == intersect(t, notFunctionTy));
|
||||
CHECK(nilTy == intersect(notFunctionTy, t));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "negated_function_does_not_intersect_cleanly_with_truthy")
|
||||
{
|
||||
// ~function & ~(false?)
|
||||
// ~function & ~(false | nil)
|
||||
// ~function & ~false & ~nil
|
||||
|
||||
TypeId negatedFunctionTy = mkNegation(functionTy);
|
||||
CHECK(isIntersection(intersect(negatedFunctionTy, truthyTy)));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "tables")
|
||||
{
|
||||
TypeId t1 = mkTable({{"tag", stringTy}});
|
||||
|
||||
CHECK(t1 == intersect(t1, tableTy));
|
||||
CHECK(neverTy == intersect(t1, functionTy));
|
||||
|
||||
TypeId t2 = mkTable({{"tag", helloTy}});
|
||||
|
||||
CHECK(t2 == intersect(t1, t2));
|
||||
CHECK(t2 == intersect(t2, t1));
|
||||
|
||||
TypeId t3 = mkTable({});
|
||||
|
||||
CHECK(t1 == intersect(t1, t3));
|
||||
CHECK(t1 == intersect(t3, t1));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "tables_and_top_table")
|
||||
{
|
||||
TypeId notTableType = mkNegation(tableTy);
|
||||
TypeId t1 = mkTable({{"prop", stringTy}, {"another", numberTy}});
|
||||
|
||||
CHECK(t1 == intersect(t1, tableTy));
|
||||
CHECK(t1 == intersect(tableTy, t1));
|
||||
|
||||
CHECK(neverTy == intersect(t1, notTableType));
|
||||
CHECK(neverTy == intersect(notTableType, t1));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "tables_and_truthy")
|
||||
{
|
||||
TypeId t1 = mkTable({{"prop", stringTy}, {"another", numberTy}});
|
||||
|
||||
CHECK(t1 == intersect(t1, truthyTy));
|
||||
CHECK(t1 == intersect(truthyTy, t1));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "table_with_a_tag")
|
||||
{
|
||||
// {tag: string, prop: number} & {tag: "hello"}
|
||||
// I think we can decline to simplify this:
|
||||
TypeId t1 = mkTable({{"tag", stringTy}, {"prop", numberTy}});
|
||||
TypeId t2 = mkTable({{"tag", helloTy}});
|
||||
|
||||
CHECK("{| prop: number, tag: string |} & {| tag: \"hello\" |}" == intersectStr(t1, t2));
|
||||
CHECK("{| prop: number, tag: string |} & {| tag: \"hello\" |}" == intersectStr(t2, t1));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "nested_table_tag_test")
|
||||
{
|
||||
TypeId t1 = mkTable({
|
||||
{"subtable", mkTable({
|
||||
{"tag", helloTy},
|
||||
{"subprop", numberTy},
|
||||
})},
|
||||
{"prop", stringTy},
|
||||
});
|
||||
TypeId t2 = mkTable({
|
||||
{"subtable", mkTable({
|
||||
{"tag", helloTy},
|
||||
})},
|
||||
});
|
||||
|
||||
CHECK(t1 == intersect(t1, t2));
|
||||
CHECK(t1 == intersect(t2, t1));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "union")
|
||||
{
|
||||
TypeId t1 = arena->addType(UnionType{{numberTy, stringTy, nilTy, tableTy}});
|
||||
|
||||
CHECK(nilTy == intersect(t1, nilTy));
|
||||
// CHECK(nilTy == intersect(nilTy, t1)); // TODO?
|
||||
|
||||
CHECK(builtinTypes->stringType == intersect(builtinTypes->optionalStringType, truthyTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "two_unions")
|
||||
{
|
||||
TypeId t1 = arena->addType(UnionType{{numberTy, booleanTy, stringTy, nilTy, tableTy}});
|
||||
|
||||
CHECK("false?" == intersectStr(t1, falsyTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "curious_union")
|
||||
{
|
||||
// (a & false) | (a & nil)
|
||||
TypeId curious =
|
||||
arena->addType(UnionType{{arena->addType(IntersectionType{{freeTy, falseTy}}), arena->addType(IntersectionType{{freeTy, nilTy}})}});
|
||||
|
||||
CHECK("(a & false) | (a & nil) | number" == toString(union_(curious, numberTy)));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "negations")
|
||||
{
|
||||
TypeId notNumberTy = mkNegation(numberTy);
|
||||
TypeId notStringTy = mkNegation(stringTy);
|
||||
|
||||
CHECK(neverTy == intersect(numberTy, notNumberTy));
|
||||
|
||||
CHECK(numberTy == intersect(numberTy, notStringTy));
|
||||
CHECK(numberTy == intersect(notStringTy, numberTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "top_class_type")
|
||||
{
|
||||
CHECK(neverTy == intersect(classTy, stringTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "classes")
|
||||
{
|
||||
CHECK(childClassTy == intersect(childClassTy, parentClassTy));
|
||||
CHECK(childClassTy == intersect(parentClassTy, childClassTy));
|
||||
|
||||
CHECK(parentClassTy == union_(childClassTy, parentClassTy));
|
||||
CHECK(parentClassTy == union_(parentClassTy, childClassTy));
|
||||
|
||||
CHECK(neverTy == intersect(childClassTy, unrelatedClassTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "negations_of_classes")
|
||||
{
|
||||
TypeId notChildClassTy = mkNegation(childClassTy);
|
||||
TypeId notParentClassTy = mkNegation(parentClassTy);
|
||||
|
||||
CHECK(neverTy == intersect(childClassTy, notParentClassTy));
|
||||
CHECK(neverTy == intersect(notParentClassTy, childClassTy));
|
||||
|
||||
CHECK("Parent & ~Child" == intersectStr(notChildClassTy, parentClassTy));
|
||||
CHECK("Parent & ~Child" == intersectStr(parentClassTy, notChildClassTy));
|
||||
|
||||
CHECK(notParentClassTy == intersect(notChildClassTy, notParentClassTy));
|
||||
CHECK(notParentClassTy == intersect(notParentClassTy, notChildClassTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "intersection_of_intersection_of_a_free_type_can_result_in_removal_of_that_free_type")
|
||||
{
|
||||
// a & string and number
|
||||
// (a & number) & (string & number)
|
||||
|
||||
TypeId t1 = arena->addType(IntersectionType{{freeTy, stringTy}});
|
||||
|
||||
CHECK(neverTy == intersect(t1, numberTy));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "some_tables_are_really_never")
|
||||
{
|
||||
TypeId notAnyTy = mkNegation(anyTy);
|
||||
|
||||
TypeId t1 = mkTable({{"someKey", notAnyTy}});
|
||||
|
||||
CHECK(neverTy == intersect(t1, numberTy));
|
||||
CHECK(neverTy == intersect(numberTy, t1));
|
||||
CHECK(neverTy == intersect(t1, t1));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(SimplifyFixture, "simplify_stops_at_cycles")
|
||||
{
|
||||
TypeId t = mkTable({});
|
||||
TableType* tt = getMutable<TableType>(t);
|
||||
REQUIRE(tt);
|
||||
|
||||
TypeId t2 = mkTable({});
|
||||
TableType* t2t = getMutable<TableType>(t2);
|
||||
REQUIRE(t2t);
|
||||
|
||||
tt->props["cyclic"] = Property{t2};
|
||||
t2t->props["cyclic"] = Property{t};
|
||||
|
||||
CHECK(t == intersect(t, anyTy));
|
||||
CHECK(t == intersect(anyTy, t));
|
||||
|
||||
CHECK(t2 == intersect(t2, anyTy));
|
||||
CHECK(t2 == intersect(anyTy, t2));
|
||||
}
|
||||
|
||||
TEST_SUITE_END();
|
@ -291,9 +291,9 @@ TEST_CASE_FIXTURE(Fixture, "quit_stringifying_type_when_length_is_exceeded")
    {
        o.maxTypeLength = 30;
        CHECK_EQ(toString(requireType("f0"), o), "() -> ()");
        CHECK_EQ(toString(requireType("f1"), o), "<a>(a) -> (() -> ()) | (a & ~false & ~nil)... *TRUNCATED*");
        CHECK_EQ(toString(requireType("f2"), o), "<b>(b) -> (<a>(a) -> (() -> ()) | (a & ~false & ~nil)... *TRUNCATED*");
        CHECK_EQ(toString(requireType("f3"), o), "<c>(c) -> (<b>(b) -> (<a>(a) -> (() -> ()) | (a & ~false & ~nil)... *TRUNCATED*");
        CHECK_EQ(toString(requireType("f1"), o), "<a>(a) -> (() -> ()) | (a & ~(false?))... *TRUNCATED*");
        CHECK_EQ(toString(requireType("f2"), o), "<b>(b) -> (<a>(a) -> (() -> ()) | (a & ~(false?))... *TRUNCATED*");
        CHECK_EQ(toString(requireType("f3"), o), "<c>(c) -> (<b>(b) -> (<a>(a) -> (() -> ()) | (a & ~(false?))... *TRUNCATED*");
    }
    else
    {
@ -321,9 +321,9 @@ TEST_CASE_FIXTURE(Fixture, "stringifying_type_is_still_capped_when_exhaustive")
    {
        o.maxTypeLength = 30;
        CHECK_EQ(toString(requireType("f0"), o), "() -> ()");
        CHECK_EQ(toString(requireType("f1"), o), "<a>(a) -> (() -> ()) | (a & ~false & ~nil)... *TRUNCATED*");
        CHECK_EQ(toString(requireType("f2"), o), "<b>(b) -> (<a>(a) -> (() -> ()) | (a & ~false & ~nil)... *TRUNCATED*");
        CHECK_EQ(toString(requireType("f3"), o), "<c>(c) -> (<b>(b) -> (<a>(a) -> (() -> ()) | (a & ~false & ~nil)... *TRUNCATED*");
        CHECK_EQ(toString(requireType("f1"), o), "<a>(a) -> (() -> ()) | (a & ~(false?))... *TRUNCATED*");
        CHECK_EQ(toString(requireType("f2"), o), "<b>(b) -> (<a>(a) -> (() -> ()) | (a & ~(false?))... *TRUNCATED*");
        CHECK_EQ(toString(requireType("f3"), o), "<c>(c) -> (<b>(b) -> (<a>(a) -> (() -> ()) | (a & ~(false?))... *TRUNCATED*");
    }
    else
    {
@ -507,25 +507,25 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "toStringDetailed2")
    CHECK_EQ("{ @metatable { __index: { @metatable {| __index: base |}, child } }, inst }", r.name);
    CHECK(0 == opts.nameMap.types.size());

    const MetatableType* tMeta = get<MetatableType>(tType);
    const MetatableType* tMeta = get<MetatableType>(follow(tType));
    REQUIRE(tMeta);

    TableType* tMeta2 = getMutable<TableType>(tMeta->metatable);
    TableType* tMeta2 = getMutable<TableType>(follow(tMeta->metatable));
    REQUIRE(tMeta2);
    REQUIRE(tMeta2->props.count("__index"));

    const MetatableType* tMeta3 = get<MetatableType>(tMeta2->props["__index"].type());
    const MetatableType* tMeta3 = get<MetatableType>(follow(tMeta2->props["__index"].type()));
    REQUIRE(tMeta3);

    TableType* tMeta4 = getMutable<TableType>(tMeta3->metatable);
    TableType* tMeta4 = getMutable<TableType>(follow(tMeta3->metatable));
    REQUIRE(tMeta4);
    REQUIRE(tMeta4->props.count("__index"));

    TableType* tMeta5 = getMutable<TableType>(tMeta4->props["__index"].type());
    TableType* tMeta5 = getMutable<TableType>(follow(tMeta4->props["__index"].type()));
    REQUIRE(tMeta5);
    REQUIRE(tMeta5->props.count("one") > 0);

    TableType* tMeta6 = getMutable<TableType>(tMeta3->table);
    TableType* tMeta6 = getMutable<TableType>(follow(tMeta3->table));
    REQUIRE(tMeta6);
    REQUIRE(tMeta6->props.count("two") > 0);

@ -25,6 +25,8 @@ struct TxnLogFixture
    TypeId a = arena.freshType(globalScope.get());
    TypeId b = arena.freshType(globalScope.get());
    TypeId c = arena.freshType(childScope.get());

    TypeId g = arena.addType(GenericType{"G"});
};

TEST_SUITE_BEGIN("TxnLog");
@ -110,4 +112,13 @@ TEST_CASE_FIXTURE(TxnLogFixture, "colliding_coincident_logs_do_not_create_degene
    CHECK("a" == toString(b));
}

TEST_CASE_FIXTURE(TxnLogFixture, "replacing_persistent_types_is_allowed_but_makes_the_log_radioactive")
{
    persist(g);

    log.replace(g, BoundType{a});

    CHECK(log.radioactive);
}

TEST_SUITE_END();
@ -20,7 +20,7 @@ struct FamilyFixture : Fixture
    swapFamily = TypeFamily{/* name */ "Swap",
        /* reducer */
        [](std::vector<TypeId> tys, std::vector<TypePackId> tps, NotNull<TypeArena> arena, NotNull<BuiltinTypes> builtins,
            NotNull<const TxnLog> log) -> TypeFamilyReductionResult<TypeId> {
            NotNull<const TxnLog> log, NotNull<Scope> scope, NotNull<Normalizer> normalizer) -> TypeFamilyReductionResult<TypeId> {
            LUAU_ASSERT(tys.size() == 1);
            TypeId param = log->follow(tys.at(0));

@ -78,18 +78,6 @@ TEST_CASE_FIXTURE(FamilyFixture, "basic_type_family")
    CHECK("Type family instance Swap<boolean> is uninhabited" == toString(result.errors[0]));
};

TEST_CASE_FIXTURE(FamilyFixture, "type_reduction_reduces_families")
{
    if (!FFlag::DebugLuauDeferredConstraintResolution)
        return;

    CheckResult result = check(R"(
        local x: Swap<string> & nil
    )");

    CHECK("never" == toString(requireType("x")));
}

TEST_CASE_FIXTURE(FamilyFixture, "family_as_fn_ret")
{
    if (!FFlag::DebugLuauDeferredConstraintResolution)
@ -202,4 +190,27 @@ TEST_CASE_FIXTURE(FamilyFixture, "function_internal_families")
    CHECK(toString(result.errors[0]) == "Type family instance Swap<boolean> is uninhabited");
}

TEST_CASE_FIXTURE(Fixture, "add_family_at_work")
{
    if (!FFlag::DebugLuauDeferredConstraintResolution)
        return;

    CheckResult result = check(R"(
        local function add(a, b)
            return a + b
        end

        local a = add(1, 2)
        local b = add(1, "foo")
        local c = add("foo", 1)
    )");

    LUAU_REQUIRE_ERROR_COUNT(2, result);
    CHECK(toString(requireType("a")) == "number");
    CHECK(toString(requireType("b")) == "Add<number, string>");
    CHECK(toString(requireType("c")) == "Add<string, number>");
    CHECK(toString(result.errors[0]) == "Type family instance Add<number, string> is uninhabited");
    CHECK(toString(result.errors[1]) == "Type family instance Add<string, number> is uninhabited");
}

TEST_SUITE_END();
@ -736,6 +736,18 @@ TEST_CASE_FIXTURE(Fixture, "luau_print_is_not_special_without_the_flag")
    LUAU_REQUIRE_ERROR_COUNT(1, result);
}

TEST_CASE_FIXTURE(Fixture, "luau_print_incomplete")
{
    ScopedFastFlag sffs{"DebugLuauMagicTypes", true};

    CheckResult result = check(R"(
        local a: _luau_print
    )");

    LUAU_REQUIRE_ERROR_COUNT(1, result);
    CHECK_EQ("_luau_print requires one generic parameter", toString(result.errors[0]));
}

TEST_CASE_FIXTURE(Fixture, "instantiate_type_fun_should_not_trip_rbxassert")
{
    CheckResult result = check(R"(
@ -352,10 +352,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "tagged_unions")
    CHECK_EQ("\"err\"", toString(requireTypeAtPosition({13, 31})));
    CHECK_EQ("E", toString(requireTypeAtPosition({14, 31})));

    if (FFlag::DebugLuauDeferredConstraintResolution)
        CHECK_EQ("{| error: E, tag: \"err\" |}", toString(requireTypeAtPosition({16, 19})));
    else
        CHECK_EQ("Err<E>", toString(requireTypeAtPosition({16, 19})));
    CHECK_EQ("Err<E>", toString(requireTypeAtPosition({16, 19})));
}

TEST_CASE_FIXTURE(BuiltinsFixture, "do_assert_x")
@ -552,6 +552,8 @@ TEST_CASE_FIXTURE(ClassFixture, "indexable_classes")
        local x : IndexableClass
        local y = x[true]
    )");


    CHECK_EQ(
        toString(result.errors[0]), "Type 'boolean' could not be converted into 'number | string'; none of the union options are compatible");
}
@ -560,6 +562,7 @@ TEST_CASE_FIXTURE(ClassFixture, "indexable_classes")
        local x : IndexableClass
        x[true] = 42
    )");

    CHECK_EQ(
        toString(result.errors[0]), "Type 'boolean' could not be converted into 'number | string'; none of the union options are compatible");
}
@ -593,7 +596,10 @@ TEST_CASE_FIXTURE(ClassFixture, "indexable_classes")
        local x : IndexableNumericKeyClass
        x["key"] = 1
    )");
    CHECK_EQ(toString(result.errors[0]), "Type 'string' could not be converted into 'number'");
    if (FFlag::DebugLuauDeferredConstraintResolution)
        CHECK_EQ(toString(result.errors[0]), "Key 'key' not found in class 'IndexableNumericKeyClass'");
    else
        CHECK_EQ(toString(result.errors[0]), "Type 'string' could not be converted into 'number'");
}
{
    CheckResult result = check(R"(
@ -615,7 +621,10 @@ TEST_CASE_FIXTURE(ClassFixture, "indexable_classes")
        local x : IndexableNumericKeyClass
        local y = x["key"]
    )");
    CHECK_EQ(toString(result.errors[0]), "Type 'string' could not be converted into 'number'");
    if (FFlag::DebugLuauDeferredConstraintResolution)
        CHECK_EQ(toString(result.errors[0]), "Key 'key' not found in class 'IndexableNumericKeyClass'");
    else
        CHECK_EQ(toString(result.errors[0]), "Type 'string' could not be converted into 'number'");
}
{
    CheckResult result = check(R"(
@ -358,6 +358,22 @@ TEST_CASE_FIXTURE(Fixture, "another_recursive_local_function")
    LUAU_REQUIRE_NO_ERRORS(result);
}

// We had a bug where we'd look up the type of a recursive call using the DFG,
// not the bindings tables. As a result, we would erroneously use the
// generalized type of foo() in this recursive fragment. This creates a
// constraint cycle that doesn't always work itself out.
//
// The fix is for the DFG node within the scope of foo() to retain the
// ungeneralized type of foo.
TEST_CASE_FIXTURE(BuiltinsFixture, "recursive_calls_must_refer_to_the_ungeneralized_type")
{
    CheckResult result = check(R"(
        function foo()
            string.format('%s: %s', "51", foo())
        end
    )");
}

TEST_CASE_FIXTURE(Fixture, "cyclic_function_type_in_rets")
{
    CheckResult result = check(R"(
@ -1029,7 +1045,7 @@ TEST_CASE_FIXTURE(Fixture, "no_lossy_function_type")
    LUAU_REQUIRE_NO_ERRORS(result);
    TypeId type = requireTypeAtPosition(Position(6, 14));
    CHECK_EQ("(tbl, number, number) -> number", toString(type));
    auto ftv = get<FunctionType>(type);
    auto ftv = get<FunctionType>(follow(type));
    REQUIRE(ftv);
    CHECK(ftv->hasSelf);
}
@ -1967,7 +1983,7 @@ TEST_CASE_FIXTURE(Fixture, "inner_frees_become_generic_in_dcr")
    LUAU_REQUIRE_NO_ERRORS(result);
    std::optional<TypeId> ty = findTypeAtPosition(Position{3, 19});
    REQUIRE(ty);
    CHECK(get<GenericType>(*ty));
    CHECK(get<GenericType>(follow(*ty)));
}

TEST_CASE_FIXTURE(Fixture, "function_exprs_are_generalized_at_signature_scope_not_enclosing")
@ -132,40 +132,23 @@ TEST_CASE_FIXTURE(Fixture, "should_still_pick_an_overload_whose_arguments_are_un

TEST_CASE_FIXTURE(Fixture, "propagates_name")
{
    if (FFlag::DebugLuauDeferredConstraintResolution)
    {
        CheckResult result = check(R"(
            type A={a:number}
            type B={b:string}
    const std::string code = R"(
        type A={a:number}
        type B={b:string}

            local c:A&B
            local b = c
        )");
        local c:A&B
        local b = c
    )";

        LUAU_REQUIRE_NO_ERRORS(result);
    const std::string expected = R"(
        type A={a:number}
        type B={b:string}

        CHECK("{| a: number, b: string |}" == toString(requireType("b")));
    }
    else
    {
        const std::string code = R"(
            type A={a:number}
            type B={b:string}
            local c:A&B
            local b:A&B=c
        )";

        local c:A&B
        local b = c
    )";

        const std::string expected = R"(
            type A={a:number}
            type B={b:string}

            local c:A&B
            local b:A&B=c
        )";

        CHECK_EQ(expected, decorateWithTypes(code));
    }
    CHECK_EQ(expected, decorateWithTypes(code));
}

TEST_CASE_FIXTURE(Fixture, "index_on_an_intersection_type_with_property_guaranteed_to_exist")
@ -328,11 +311,7 @@ TEST_CASE_FIXTURE(Fixture, "table_intersection_write_sealed")

    LUAU_REQUIRE_ERROR_COUNT(1, result);
    auto e = toString(result.errors[0]);
    // In DCR, because of type normalization, we print a different error message
    if (FFlag::DebugLuauDeferredConstraintResolution)
        CHECK_EQ("Cannot add property 'z' to table '{| x: number, y: number |}'", e);
    else
        CHECK_EQ("Cannot add property 'z' to table 'X & Y'", e);
    CHECK_EQ("Cannot add property 'z' to table 'X & Y'", e);
}

TEST_CASE_FIXTURE(Fixture, "table_intersection_write_sealed_indirect")
@ -406,10 +385,7 @@ local a: XYZ = 3
)");

    LUAU_REQUIRE_ERROR_COUNT(1, result);
    if (FFlag::DebugLuauDeferredConstraintResolution)
        CHECK_EQ(toString(result.errors[0]), R"(Type 'number' could not be converted into '{| x: number, y: number, z: number |}')");
    else
        CHECK_EQ(toString(result.errors[0]), R"(Type 'number' could not be converted into 'X & Y & Z'
    CHECK_EQ(toString(result.errors[0]), R"(Type 'number' could not be converted into 'X & Y & Z'
caused by:
  Not all intersection parts are compatible. Type 'number' could not be converted into 'X')");
}
@ -426,11 +402,7 @@ local b: number = a
)");

    LUAU_REQUIRE_ERROR_COUNT(1, result);
    if (FFlag::DebugLuauDeferredConstraintResolution)
        CHECK_EQ(toString(result.errors[0]), R"(Type '{| x: number, y: number, z: number |}' could not be converted into 'number')");
    else
        CHECK_EQ(
            toString(result.errors[0]), R"(Type 'X & Y & Z' could not be converted into 'number'; none of the intersection parts are compatible)");
    CHECK_EQ(toString(result.errors[0]), R"(Type 'X & Y & Z' could not be converted into 'number'; none of the intersection parts are compatible)");
}

TEST_CASE_FIXTURE(Fixture, "overload_is_not_a_function")
@ -470,11 +442,7 @@ TEST_CASE_FIXTURE(Fixture, "intersect_bool_and_false")
    )");

    LUAU_REQUIRE_ERROR_COUNT(1, result);
    if (FFlag::DebugLuauDeferredConstraintResolution)
        CHECK_EQ(toString(result.errors[0]), "Type 'false' could not be converted into 'true'");
    else
        CHECK_EQ(
            toString(result.errors[0]), "Type 'boolean & false' could not be converted into 'true'; none of the intersection parts are compatible");
    CHECK_EQ(toString(result.errors[0]), "Type 'boolean & false' could not be converted into 'true'; none of the intersection parts are compatible");
}

TEST_CASE_FIXTURE(Fixture, "intersect_false_and_bool_and_false")
@ -486,14 +454,9 @@ TEST_CASE_FIXTURE(Fixture, "intersect_false_and_bool_and_false")
    )");

    LUAU_REQUIRE_ERROR_COUNT(1, result);
    if (FFlag::DebugLuauDeferredConstraintResolution)
        CHECK_EQ(toString(result.errors[0]), "Type 'false' could not be converted into 'true'");
    else
    {
        // TODO: odd stringification of `false & (boolean & false)`.)
        CHECK_EQ(toString(result.errors[0]),
            "Type 'boolean & false & false' could not be converted into 'true'; none of the intersection parts are compatible");
    }
    // TODO: odd stringification of `false & (boolean & false)`.)
    CHECK_EQ(toString(result.errors[0]),
        "Type 'boolean & false & false' could not be converted into 'true'; none of the intersection parts are compatible");
}

TEST_CASE_FIXTURE(Fixture, "intersect_saturate_overloaded_functions")
@ -531,21 +494,8 @@ TEST_CASE_FIXTURE(Fixture, "intersection_of_tables")
    )");

    LUAU_REQUIRE_ERROR_COUNT(1, result);
    if (FFlag::DebugLuauDeferredConstraintResolution)
    {
        CHECK_EQ(toString(result.errors[0]),
            "Type '{| p: number?, q: nil, r: number? |}' could not be converted into '{| p: nil |}'\n"
            "caused by:\n"
            "  Property 'p' is not compatible. Type 'number?' could not be converted into 'nil'\n"
            "caused by:\n"
            "  Not all union options are compatible. Type 'number' could not be converted into 'nil' in an invariant context");
    }
    else
    {
        CHECK_EQ(toString(result.errors[0]),
            "Type '{| p: number?, q: number?, r: number? |} & {| p: number?, q: string? |}' could not be converted into "
            "'{| p: nil |}'; none of the intersection parts are compatible");
    }
    CHECK_EQ(toString(result.errors[0]), "Type '{| p: number?, q: number?, r: number? |} & {| p: number?, q: string? |}' could not be converted into "
        "'{| p: nil |}'; none of the intersection parts are compatible");
}

TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_top_properties")
|
||||
@ -558,27 +508,9 @@ TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_top_properties")
|
||||
local z : { p : string?, q : number? } = x -- Not OK
|
||||
)");
|
||||
|
||||
if (FFlag::DebugLuauDeferredConstraintResolution)
|
||||
{
|
||||
LUAU_REQUIRE_ERROR_COUNT(2, result);
|
||||
|
||||
CHECK_EQ(toString(result.errors[0]),
|
||||
"Type '{| p: number?, q: string? |}' could not be converted into '{| p: string?, q: number? |}'\n"
|
||||
"caused by:\n"
|
||||
" Property 'p' is not compatible. Type 'number' could not be converted into 'string' in an invariant context");
|
||||
|
||||
CHECK_EQ(toString(result.errors[1]),
|
||||
"Type '{| p: number?, q: string? |}' could not be converted into '{| p: string?, q: number? |}'\n"
|
||||
"caused by:\n"
|
||||
" Property 'q' is not compatible. Type 'string' could not be converted into 'number' in an invariant context");
|
||||
}
|
||||
else
|
||||
{
|
||||
LUAU_REQUIRE_ERROR_COUNT(1, result);
|
||||
CHECK_EQ(toString(result.errors[0]),
|
||||
"Type '{| p: number?, q: any |} & {| p: unknown, q: string? |}' could not be converted into "
|
||||
"'{| p: string?, q: number? |}'; none of the intersection parts are compatible");
|
||||
}
|
||||
LUAU_REQUIRE_ERROR_COUNT(1, result);
|
||||
CHECK_EQ(toString(result.errors[0]), "Type '{| p: number?, q: any |} & {| p: unknown, q: string? |}' could not be converted into "
|
||||
"'{| p: string?, q: number? |}'; none of the intersection parts are compatible");
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(Fixture, "intersection_of_tables_with_never_properties")
|
||||
@ -605,18 +537,9 @@ TEST_CASE_FIXTURE(Fixture, "overloaded_functions_returning_intersections")
|
||||
)");
|
||||
|
||||
LUAU_REQUIRE_ERROR_COUNT(1, result);
|
||||
if (FFlag::DebugLuauDeferredConstraintResolution)
|
||||
{
|
||||
CHECK_EQ(toString(result.errors[0]),
|
||||
"Type '((number?) -> {| p: number, q: number |}) & ((string?) -> {| p: number, r: number |})' could not be converted into "
|
||||
"'(number?) -> {| p: number, q: number, r: number |}'; none of the intersection parts are compatible");
|
||||
}
|
||||
else
|
||||
{
|
||||
CHECK_EQ(toString(result.errors[0]),
|
||||
"Type '((number?) -> {| p: number |} & {| q: number |}) & ((string?) -> {| p: number |} & {| r: number |})' could not be converted into "
|
||||
"'(number?) -> {| p: number, q: number, r: number |}'; none of the intersection parts are compatible");
|
||||
}
|
||||
CHECK_EQ(toString(result.errors[0]),
|
||||
"Type '((number?) -> {| p: number |} & {| q: number |}) & ((string?) -> {| p: number |} & {| r: number |})' could not be converted into "
|
||||
"'(number?) -> {| p: number, q: number, r: number |}'; none of the intersection parts are compatible");
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(Fixture, "overloaded_functions_mentioning_generic")
|
||||
@ -917,7 +840,8 @@ TEST_CASE_FIXTURE(Fixture, "less_greedy_unification_with_intersection_types")
|
||||
|
||||
LUAU_REQUIRE_NO_ERRORS(result);
|
||||
|
||||
CHECK_EQ("(never) -> never", toString(requireType("f")));
|
||||
// TODO? We do not simplify types from explicit annotations.
|
||||
CHECK_EQ("({| x: number |} & {| x: string |}) -> {| x: number |} & {| x: string |}", toString(requireType("f")));
|
||||
}
|
||||
|
||||
TEST_CASE_FIXTURE(Fixture, "less_greedy_unification_with_intersection_types_2")
|
||||
@ -933,7 +857,7 @@ TEST_CASE_FIXTURE(Fixture, "less_greedy_unification_with_intersection_types_2")
|
||||
|
||||
LUAU_REQUIRE_NO_ERRORS(result);
|
||||
|
||||
CHECK_EQ("(never) -> never", toString(requireType("f")));
|
||||
CHECK_EQ("({| x: number |} & {| x: string |}) -> never", toString(requireType("f")));
|
||||
}
|
||||
|
||||
TEST_SUITE_END();
|
||||
|
@ -676,9 +676,19 @@ TEST_CASE_FIXTURE(Fixture, "strict_binary_op_where_lhs_unknown")
    src += "end";

    CheckResult result = check(src);
    LUAU_REQUIRE_ERROR_COUNT(ops.size(), result);

    CHECK_EQ("Unknown type used in + operation; consider adding a type annotation to 'a'", toString(result.errors[0]));
    if (FFlag::DebugLuauDeferredConstraintResolution)
    {
        // TODO: This will eventually entirely go away, but for now the Add
        // family will ensure there's one less error.
        LUAU_REQUIRE_ERROR_COUNT(ops.size() - 1, result);
        CHECK_EQ("Unknown type used in - operation; consider adding a type annotation to 'a'", toString(result.errors[0]));
    }
    else
    {
        LUAU_REQUIRE_ERROR_COUNT(ops.size(), result);
        CHECK_EQ("Unknown type used in + operation; consider adding a type annotation to 'a'", toString(result.errors[0]));
    }
}

TEST_CASE_FIXTURE(BuiltinsFixture, "and_binexps_dont_unify")
@ -889,8 +899,16 @@ TEST_CASE_FIXTURE(Fixture, "infer_any_in_all_modes_when_lhs_is_unknown")
        end
    )");

    LUAU_REQUIRE_ERROR_COUNT(1, result);
    CHECK_EQ(toString(result.errors[0]), "Unknown type used in + operation; consider adding a type annotation to 'x'");
    if (FFlag::DebugLuauDeferredConstraintResolution)
    {
        LUAU_REQUIRE_NO_ERRORS(result);
        CHECK(toString(requireType("f")) == "<a, b>(a, b) -> Add<a, b>");
    }
    else
    {
        LUAU_REQUIRE_ERROR_COUNT(1, result);
        CHECK_EQ(toString(result.errors[0]), "Unknown type used in + operation; consider adding a type annotation to 'x'");
    }

    result = check(Mode::Nonstrict, R"(
        local function f(x, y)
@ -985,31 +1003,6 @@ TEST_CASE_FIXTURE(Fixture, "unrelated_primitives_cannot_be_compared")
    LUAU_REQUIRE_ERROR_COUNT(1, result);
}

TEST_CASE_FIXTURE(BuiltinsFixture, "mm_ops_must_return_a_value")
{
    if (!FFlag::DebugLuauDeferredConstraintResolution)
        return;

    CheckResult result = check(R"(
        local mm = {
            __add = function(self, other)
                return
            end,
        }

        local x = setmetatable({}, mm)
        local y = x + 123
    )");

    LUAU_REQUIRE_ERROR_COUNT(2, result);

    CHECK(requireType("y") == builtinTypes->errorRecoveryType());

    const GenericError* ge = get<GenericError>(result.errors[1]);
    REQUIRE(ge);
    CHECK(ge->message == "Metamethod '__add' must return a value");
}

TEST_CASE_FIXTURE(BuiltinsFixture, "mm_comparisons_must_return_a_boolean")
{
    if (!FFlag::DebugLuauDeferredConstraintResolution)
@ -1179,6 +1172,38 @@ end
    LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_CASE_FIXTURE(BuiltinsFixture, "luau-polyfill.String.slice")
{

    CheckResult result = check(R"(
        --!strict
        local function slice(str: string, startIndexStr: string | number, lastIndexStr: (string | number)?): string
            local strLen, invalidBytePosition = utf8.len(str)
            assert(strLen ~= nil, ("string `%s` has an invalid byte at position %s"):format(str, tostring(invalidBytePosition)))
            local startIndex = tonumber(startIndexStr)


            -- if no last index length set, go to str length + 1
            local lastIndex = strLen + 1

            assert(typeof(lastIndex) == "number", "lastIndexStr should convert to number")

            if lastIndex > strLen then
                lastIndex = strLen + 1
            end

            local startIndexByte = utf8.offset(str, startIndex)

            return string.sub(str, startIndexByte, startIndexByte)
        end

        return slice


    )");
    LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_CASE_FIXTURE(BuiltinsFixture, "luau-polyfill.Array.startswith")
{
    // This test also exercises whether the binary operator == passes the correct expected type
@ -1204,5 +1229,24 @@ return startsWith
    LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_CASE_FIXTURE(Fixture, "add_type_family_works")
{
    if (!FFlag::DebugLuauDeferredConstraintResolution)
        return;

    CheckResult result = check(R"(
        local function add(x, y)
            return x + y
        end

        local a = add(1, 2)
        local b = add("foo", "bar")
    )");

    LUAU_REQUIRE_ERROR_COUNT(1, result);
    CHECK(toString(requireType("a")) == "number");
    CHECK(toString(requireType("b")) == "Add<string, string>");
    CHECK(toString(result.errors[0]) == "Type family instance Add<string, string> is uninhabited");
}

TEST_SUITE_END();
@ -532,7 +532,7 @@ return wrapStrictTable(Constants, "Constants")
    std::optional<TypeId> result = first(m->returnType);
    REQUIRE(result);
    if (FFlag::DebugLuauDeferredConstraintResolution)
        CHECK_EQ("(any?) & ~table", toString(*result));
        CHECK_EQ("(any & ~table)?", toString(*result));
    else
        CHECK_MESSAGE(get<AnyType>(*result), *result);
}
@ -819,4 +819,61 @@ TEST_CASE_FIXTURE(Fixture, "lookup_prop_of_intersection_containing_unions_of_tab
    // CHECK("variable" == unknownProp->key);
}

TEST_CASE_FIXTURE(Fixture, "expected_type_should_be_a_helpful_deduction_guide_for_function_calls")
{
    ScopedFastFlag sffs[]{
        {"LuauUnifyTwoOptions", true},
        {"LuauTypeMismatchInvarianceInError", true},
    };

    CheckResult result = check(R"(
        type Ref<T> = { val: T }

        local function useRef<T>(x: T): Ref<T?>
            return { val = x }
        end

        local x: Ref<number?> = useRef(nil)
    )");

    if (FFlag::DebugLuauDeferredConstraintResolution)
    {
        // This is actually wrong! Sort of. It's doing the wrong thing, it's actually asking whether
        // `{| val: number? |} <: {| val: nil |}`
        // instead of the correct way, which is
        // `{| val: nil |} <: {| val: number? |}`
        LUAU_REQUIRE_NO_ERRORS(result);
    }
    else
    {
        LUAU_REQUIRE_ERROR_COUNT(1, result);

        CHECK_EQ(toString(result.errors[0]), R"(Type 'Ref<nil>' could not be converted into 'Ref<number?>'
caused by:
  Property 'val' is not compatible. Type 'nil' could not be converted into 'number' in an invariant context)");
    }
}

TEST_CASE_FIXTURE(Fixture, "floating_generics_should_not_be_allowed")
|
||||
{
|
||||
CheckResult result = check(R"(
|
||||
local assign : <T, U, V, W>(target: T, source0: U?, source1: V?, source2: W?, ...any) -> T & U & V & W = (nil :: any)
|
||||
|
||||
-- We have a big problem here: The generics U, V, and W are not bound to anything!
|
||||
-- Things get strange because of this.
|
||||
local benchmark = assign({})
|
||||
local options = benchmark.options
|
||||
do
|
||||
local resolve2: any = nil
|
||||
options.fn({
|
||||
resolve = function(...)
|
||||
resolve2(...)
|
||||
end,
|
||||
})
|
||||
end
|
||||
)");
|
||||
|
||||
LUAU_REQUIRE_NO_ERRORS(result);
|
||||
}
|
||||
|
||||
TEST_SUITE_END();
|
||||
|
@ -1020,16 +1020,8 @@ TEST_CASE_FIXTURE(Fixture, "discriminate_tag")

    LUAU_REQUIRE_NO_ERRORS(result);

    if (FFlag::DebugLuauDeferredConstraintResolution)
    {
        CHECK_EQ(R"({| catfood: string, name: string, tag: "Cat" |})", toString(requireTypeAtPosition({7, 33})));
        CHECK_EQ(R"({| dogfood: string, name: string, tag: "Dog" |})", toString(requireTypeAtPosition({9, 33})));
    }
    else
    {
        CHECK_EQ("Cat", toString(requireTypeAtPosition({7, 33})));
        CHECK_EQ("Dog", toString(requireTypeAtPosition({9, 33})));
    }
    CHECK_EQ("Cat", toString(requireTypeAtPosition({7, 33})));
    CHECK_EQ("Dog", toString(requireTypeAtPosition({9, 33})));
}

TEST_CASE_FIXTURE(Fixture, "discriminate_tag_with_implicit_else")
@ -1050,16 +1042,8 @@ TEST_CASE_FIXTURE(Fixture, "discriminate_tag_with_implicit_else")

    LUAU_REQUIRE_NO_ERRORS(result);

    if (FFlag::DebugLuauDeferredConstraintResolution)
    {
        CHECK_EQ(R"({| catfood: string, name: string, tag: "Cat" |})", toString(requireTypeAtPosition({7, 33})));
        CHECK_EQ(R"({| dogfood: string, name: string, tag: "Dog" |})", toString(requireTypeAtPosition({9, 33})));
    }
    else
    {
        CHECK_EQ("Cat", toString(requireTypeAtPosition({7, 33})));
        CHECK_EQ("Dog", toString(requireTypeAtPosition({9, 33})));
    }
    CHECK_EQ("Cat", toString(requireTypeAtPosition({7, 33})));
    CHECK_EQ("Dog", toString(requireTypeAtPosition({9, 33})));
}

TEST_CASE_FIXTURE(Fixture, "and_or_peephole_refinement")
@ -1403,7 +1387,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "refine_unknowns")
    if (FFlag::DebugLuauDeferredConstraintResolution)
    {
        CHECK_EQ("string", toString(requireTypeAtPosition({3, 28})));
        CHECK_EQ("~string", toString(requireTypeAtPosition({5, 28})));
        CHECK_EQ("unknown & ~string", toString(requireTypeAtPosition({5, 28})));
    }
    else
    {
@ -1508,14 +1492,7 @@ local _ = _ ~= _ or _ or _
    end
)");

    if (FFlag::DebugLuauDeferredConstraintResolution)
    {
        // Without a realistic motivating case, it's hard to tell if it's important for this to work without errors.
        LUAU_REQUIRE_ERROR_COUNT(1, result);
        CHECK(get<NormalizationTooComplex>(result.errors[0]));
    }
    else
        LUAU_REQUIRE_NO_ERRORS(result);
    LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_CASE_FIXTURE(BuiltinsFixture, "refine_unknown_to_table_then_take_the_length")
@ -1615,7 +1592,7 @@ TEST_CASE_FIXTURE(Fixture, "refine_a_property_of_some_global")

    LUAU_REQUIRE_ERROR_COUNT(3, result);

    CHECK_EQ("~false & ~nil", toString(requireTypeAtPosition({4, 30})));
    CHECK_EQ("~(false?)", toString(requireTypeAtPosition({4, 30})));
}

TEST_CASE_FIXTURE(BuiltinsFixture, "dataflow_analysis_can_tell_refinements_when_its_appropriate_to_refine_into_nil_or_never")
@ -1059,11 +1059,11 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "unification_of_unions_in_a_self_referential_

    const MetatableType* amtv = get<MetatableType>(requireType("a"));
    REQUIRE(amtv);
    CHECK_EQ(amtv->metatable, requireType("amt"));
    CHECK_EQ(follow(amtv->metatable), follow(requireType("amt")));

    const MetatableType* bmtv = get<MetatableType>(requireType("b"));
    REQUIRE(bmtv);
    CHECK_EQ(bmtv->metatable, requireType("bmt"));
    CHECK_EQ(follow(bmtv->metatable), follow(requireType("bmt")));
}

TEST_CASE_FIXTURE(BuiltinsFixture, "oop_polymorphic")
@ -267,10 +267,7 @@ TEST_CASE_FIXTURE(Fixture, "should_be_able_to_infer_this_without_stack_overflowi
    end
)");

    if (FFlag::DebugLuauDeferredConstraintResolution)
        LUAU_REQUIRE_ERROR_COUNT(1, result);
    else
        LUAU_REQUIRE_NO_ERRORS(result);
    LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_CASE_FIXTURE(Fixture, "exponential_blowup_from_copying_types")
@ -1060,4 +1060,14 @@ end
    LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_CASE_FIXTURE(Fixture, "type_param_overflow")
{
    CheckResult result = check(R"(
        type Two<T,U> = { a: T, b: U }
        local x: Two<number, string, number> = { a = 1, b = 'c' }
    )");

    LUAU_REQUIRE_ERROR_COUNT(1, result);
}

TEST_SUITE_END();
@ -354,10 +354,7 @@ a.x = 2

    LUAU_REQUIRE_ERROR_COUNT(1, result);
    auto s = toString(result.errors[0]);
    if (FFlag::DebugLuauDeferredConstraintResolution)
        CHECK_EQ("Value of type '{| x: number, y: number |}?' could be nil", s);
    else
        CHECK_EQ("Value of type '({| x: number |} & {| y: number |})?' could be nil", s);
    CHECK_EQ("Value of type '({| x: number |} & {| y: number |})?' could be nil", s);
}

TEST_CASE_FIXTURE(Fixture, "optional_length_error")
@ -870,4 +867,50 @@ TEST_CASE_FIXTURE(Fixture, "optional_class_instances_are_invariant")
    CHECK(expectedError == toString(result.errors[0]));
}

TEST_CASE_FIXTURE(BuiltinsFixture, "luau-polyfill.Map.entries")
{

    fileResolver.source["Module/Map"] = R"(
        --!strict

        type Object = { [any]: any }
        type Array<T> = { [number]: T }
        type Table<T, V> = { [T]: V }
        type Tuple<T, V> = Array<T | V>

        local Map = {}

        export type Map<K, V> = {
            size: number,
            -- method definitions
            set: (self: Map<K, V>, K, V) -> Map<K, V>,
            get: (self: Map<K, V>, K) -> V | nil,
            clear: (self: Map<K, V>) -> (),
            delete: (self: Map<K, V>, K) -> boolean,
            has: (self: Map<K, V>, K) -> boolean,
            keys: (self: Map<K, V>) -> Array<K>,
            values: (self: Map<K, V>) -> Array<V>,
            entries: (self: Map<K, V>) -> Array<Tuple<K, V>>,
            ipairs: (self: Map<K, V>) -> any,
            [K]: V,
            _map: { [K]: V },
            _array: { [number]: K },
        }

        function Map:entries()
            return {}
        end

        local function coerceToTable(mapLike: Map<any, any> | Table<any, any>): Array<Tuple<any, any>>
            local e = mapLike:entries();
            return e
        end

    )";

    CheckResult result = frontend.check("Module/Map");

    LUAU_REQUIRE_NO_ERRORS(result);
}

TEST_SUITE_END();
(File diff suppressed because it is too large)
@ -2,7 +2,6 @@
#include "Luau/Scope.h"
#include "Luau/Type.h"
#include "Luau/TypeInfer.h"
#include "Luau/TypeReduction.h"
#include "Luau/VisitType.h"

#include "Fixture.h"
@ -1,13 +1,10 @@
AnnotationTests.too_many_type_params
AstQuery.last_argument_function_call_type
AutocompleteTest.autocomplete_response_perf1
BuiltinTests.aliased_string_format
BuiltinTests.assert_removes_falsy_types
BuiltinTests.assert_removes_falsy_types2
BuiltinTests.assert_removes_falsy_types_even_from_type_pack_tail_but_only_for_the_first_type
BuiltinTests.assert_returns_false_and_string_iff_it_knows_the_first_argument_cannot_be_truthy
BuiltinTests.bad_select_should_not_crash
BuiltinTests.dont_add_definitions_to_persistent_types
BuiltinTests.gmatch_definition
BuiltinTests.math_max_checks_for_numbers
BuiltinTests.select_slightly_out_of_range
@ -22,7 +19,6 @@ BuiltinTests.string_format_tostring_specifier_type_constraint
BuiltinTests.string_format_use_correct_argument2
DefinitionTests.class_definition_overload_metamethods
DefinitionTests.class_definition_string_props
GenericsTests.apply_type_function_nested_generics2
GenericsTests.better_mismatch_error_messages
GenericsTests.bound_tables_do_not_clone_original_fields
GenericsTests.check_mutual_generic_functions
@ -35,6 +31,7 @@ GenericsTests.generic_functions_should_be_memory_safe
GenericsTests.generic_type_pack_parentheses
GenericsTests.higher_rank_polymorphism_should_not_accept_instantiated_arguments
GenericsTests.infer_generic_function_function_argument_2
GenericsTests.infer_generic_function_function_argument_3
GenericsTests.infer_generic_function_function_argument_overloaded
GenericsTests.infer_generic_lib_function_function_argument
GenericsTests.instantiated_function_argument_names
@ -42,23 +39,24 @@ GenericsTests.no_stack_overflow_from_quantifying
GenericsTests.self_recursive_instantiated_param
IntersectionTypes.table_intersection_write_sealed_indirect
IntersectionTypes.table_write_sealed_indirect
isSubtype.any_is_unknown_union_error
ProvisionalTests.assign_table_with_refined_property_with_a_similar_type_is_illegal
ProvisionalTests.bail_early_if_unification_is_too_complicated
ProvisionalTests.do_not_ice_when_trying_to_pick_first_of_generic_type_pack
ProvisionalTests.error_on_eq_metamethod_returning_a_type_other_than_boolean
ProvisionalTests.generic_type_leak_to_module_interface_variadic
ProvisionalTests.expected_type_should_be_a_helpful_deduction_guide_for_function_calls
ProvisionalTests.greedy_inference_with_shared_self_triggers_function_with_no_returns
ProvisionalTests.luau-polyfill.Array.filter
ProvisionalTests.setmetatable_constrains_free_type_into_free_table
ProvisionalTests.specialization_binds_with_prototypes_too_early
ProvisionalTests.table_insert_with_a_singleton_argument
ProvisionalTests.typeguard_inference_incomplete
RefinementTest.type_guard_can_filter_for_intersection_of_tables
RefinementTest.discriminate_from_truthiness_of_x
RefinementTest.not_t_or_some_prop_of_t
RefinementTest.truthy_constraint_on_properties
RefinementTest.type_narrow_to_vector
RefinementTest.typeguard_cast_free_table_to_vector
RefinementTest.typeguard_in_assert_position
RefinementTest.x_as_any_if_x_is_instance_elseif_x_is_table
RuntimeLimits.typescript_port_of_Result_type
TableTests.a_free_shape_cannot_turn_into_a_scalar_if_it_is_not_compatible
TableTests.checked_prop_too_early
TableTests.disallow_indexing_into_an_unsealed_table_with_no_indexer_in_strict_mode
@ -71,9 +69,6 @@ TableTests.expected_indexer_value_type_extra
TableTests.expected_indexer_value_type_extra_2
TableTests.explicitly_typed_table
TableTests.explicitly_typed_table_with_indexer
TableTests.found_like_key_in_table_function_call
TableTests.found_like_key_in_table_property_access
TableTests.found_multiple_like_keys
TableTests.fuzz_table_unify_instantiated_table
TableTests.generic_table_instantiation_potential_regression
TableTests.give_up_after_one_metatable_index_look_up
@ -92,7 +87,6 @@ TableTests.oop_polymorphic
TableTests.quantify_even_that_table_was_never_exported_at_all
TableTests.quantify_metatables_of_metatables_of_table
TableTests.reasonable_error_when_adding_a_nonexistent_property_to_an_array_like_table
TableTests.result_is_always_any_if_lhs_is_any
TableTests.result_is_bool_for_equality_operators_if_lhs_is_any
TableTests.right_table_missing_key2
TableTests.shared_selfs
@ -101,7 +95,6 @@ TableTests.shared_selfs_through_metatables
TableTests.table_call_metamethod_basic
TableTests.table_simple_call
TableTests.table_subtyping_with_missing_props_dont_report_multiple_errors
TableTests.table_unification_4
TableTests.used_colon_instead_of_dot
TableTests.used_dot_instead_of_colon
ToString.toStringDetailed2
@ -122,7 +115,6 @@ TypeAliases.type_alias_local_mutation
TypeAliases.type_alias_local_rename
TypeAliases.type_alias_locations
TypeAliases.type_alias_of_an_imported_recursive_generic_type
TypeFamilyTests.function_internal_families
TypeInfer.check_type_infer_recursion_count
TypeInfer.cli_50041_committing_txnlog_in_apollo_client_error
TypeInfer.dont_report_type_errors_within_an_AstExprError
@ -131,18 +123,14 @@ TypeInfer.follow_on_new_types_in_substitution
TypeInfer.fuzz_free_table_type_change_during_index_check
TypeInfer.infer_assignment_value_types_mutable_lval
TypeInfer.no_stack_overflow_from_isoptional
TypeInfer.no_stack_overflow_from_isoptional2
TypeInfer.recursive_function_that_invokes_itself_with_a_refinement_of_its_parameter_2
TypeInfer.tc_after_error_recovery_no_replacement_name_in_error
TypeInfer.type_infer_recursion_limit_no_ice
TypeInfer.type_infer_recursion_limit_normalizer
TypeInferAnyError.for_in_loop_iterator_is_any2
TypeInferClasses.class_type_mismatch_with_name_conflict
TypeInferClasses.classes_without_overloaded_operators_cannot_be_added
TypeInferClasses.index_instance_property
TypeInferClasses.table_class_unification_reports_sane_errors_for_missing_properties
TypeInferClasses.warn_when_prop_almost_matches
TypeInferFunctions.cannot_hoist_interior_defns_into_signature
TypeInferFunctions.dont_infer_parameter_types_for_functions_from_their_call_site
TypeInferFunctions.function_cast_error_uses_correct_language
TypeInferFunctions.function_decl_non_self_sealed_overwrite_2
TypeInferFunctions.function_decl_non_self_unsealed_overwrite
@ -177,6 +165,8 @@ TypeInferOperators.CallOrOfFunctions
TypeInferOperators.cli_38355_recursive_union
TypeInferOperators.compound_assign_mismatch_metatable
TypeInferOperators.disallow_string_and_types_without_metatables_from_arithmetic_binary_ops
TypeInferOperators.luau-polyfill.String.slice
TypeInferOperators.luau_polyfill_is_array
TypeInferOperators.operator_eq_completely_incompatible
TypeInferOperators.typecheck_overloaded_multiply_that_is_an_intersection
TypeInferOperators.typecheck_overloaded_multiply_that_is_an_intersection_on_rhs
@ -191,7 +181,6 @@ TypePackTests.detect_cyclic_typepacks2
TypePackTests.pack_tail_unification_check
TypePackTests.type_alias_backwards_compatible
TypePackTests.type_alias_default_type_errors
TypePackTests.type_alias_type_packs_errors
TypePackTests.unify_variadic_tails_in_arguments
TypePackTests.variadic_packs
TypeSingletons.function_call_with_singletons
@ -202,6 +191,7 @@ TypeSingletons.return_type_of_f_is_not_widened
TypeSingletons.table_properties_type_error_escapes
TypeSingletons.widen_the_supertype_if_it_is_free_and_subtype_has_singleton
TypeSingletons.widening_happens_almost_everywhere
UnionTypes.dont_allow_cyclic_unions_to_be_inferred
UnionTypes.generic_function_with_optional_arg
UnionTypes.index_on_a_union_type_with_missing_property
UnionTypes.optional_union_follow
@ -1,112 +0,0 @@
#!/usr/bin/python3
# This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details

# This code can be used to split lvmexecute.cpp VM switch into separate functions for use as native code generation fallbacks
import sys
import re

input = sys.stdin.readlines()

inst = ""

header = """// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This file was generated by 'tools/lvmexecute_split.py' script, do not modify it by hand
#pragma once

#include <stdint.h>

struct lua_State;
struct Closure;
typedef uint32_t Instruction;
typedef struct lua_TValue TValue;
typedef TValue* StkId;

"""

source = """// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
// This code is based on Lua 5.x implementation licensed under MIT License; see lua_LICENSE.txt for details
// This file was generated by 'tools/lvmexecute_split.py' script, do not modify it by hand
#include "Fallbacks.h"
#include "FallbacksProlog.h"

"""

function = ""
signature = ""

includeInsts = ["LOP_NEWCLOSURE", "LOP_NAMECALL", "LOP_FORGPREP", "LOP_GETVARARGS", "LOP_DUPCLOSURE", "LOP_PREPVARARGS", "LOP_BREAK", "LOP_GETGLOBAL", "LOP_SETGLOBAL", "LOP_GETTABLEKS", "LOP_SETTABLEKS", "LOP_SETLIST"]

state = 0

# parse with the state machine
for line in input:
    # find the start of an instruction
    if state == 0:
        match = re.match(r"\s+VM_CASE\((LOP_[A-Z_0-9]+)\)", line)

        if match:
            inst = match[1]
            signature = "const Instruction* execute_" + inst + "(lua_State* L, const Instruction* pc, StkId base, TValue* k)"
            function = signature + "\n"
            function += "{\n"
            function += "    [[maybe_unused]] Closure* cl = clvalue(L->ci->func);\n"
            state = 1

    # first line of the instruction which is "{"
    elif state == 1:
        assert(line == "    {\n")
        state = 2

    # find the end of an instruction
    elif state == 2:
        # remove jumps back into the native code
        if line == "#if LUA_CUSTOM_EXECUTION\n":
            state = 3
            continue

        if line[0] == ' ':
            finalline = line[12:-1] + "\n"
        else:
            finalline = line

        finalline = finalline.replace("VM_NEXT();", "return pc;")
        finalline = finalline.replace("goto exit;", "return NULL;")
        finalline = finalline.replace("return;", "return NULL;")

        function += finalline
        match = re.match("    }", line)

        if match:
            # break is not supported
            if inst == "LOP_BREAK":
                function = "const Instruction* execute_" + inst + "(lua_State* L, const Instruction* pc, StkId base, TValue* k)\n"
                function += "{\n    LUAU_ASSERT(!\"Unsupported deprecated opcode\");\n    LUAU_UNREACHABLE();\n}\n"
            # handle fallthrough
            elif inst == "LOP_NAMECALL":
                function = function[:-len(finalline)]
                function += "    return pc;\n}\n"

            if inst in includeInsts:
                header += signature + ";\n"
                source += function + "\n"

            state = 0

    # skip LUA_CUSTOM_EXECUTION code blocks
    elif state == 3:
        if line == "#endif\n":
            state = 4
        continue

    # skip extra line
    elif state == 4:
        state = 2

# make sure we found the ending
assert(state == 0)

with open("Fallbacks.h", "w") as fp:
    fp.writelines(header)

with open("Fallbacks.cpp", "w") as fp:
    fp.writelines(source)
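For reference, the deleted script read lvmexecute.cpp on stdin and wrote Fallbacks.h and Fallbacks.cpp into the current directory, turning each VM_CASE body into a standalone function such as `const Instruction* execute_LOP_GETGLOBAL(lua_State* L, const Instruction* pc, StkId base, TValue* k)`, with `VM_NEXT()` rewritten to `return pc;` and exits rewritten to `return NULL;`. A plausible invocation (the exact input path is an assumption, not stated in the diff) would be `python3 tools/lvmexecute_split.py < VM/src/lvmexecute.cpp`.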