Sync to upstream/release/620

Vighnesh 2024-04-05 10:41:05 -07:00
parent fb90dc083b
commit c730a51ca8
42 changed files with 1371 additions and 791 deletions

View File

@ -216,6 +216,20 @@ struct NormalizedFunctionType
struct NormalizedType;
using NormalizedTyvars = std::unordered_map<TypeId, std::unique_ptr<NormalizedType>>;
// Operations provided by `Normalizer` can have ternary results:
// 1. The operation returned true.
// 2. The operation returned false.
// 3. The operation hit resource limits, which invalidates _all normalized types_.
enum class NormalizationResult
{
// The operation returned true or succeeded.
True,
// The operation returned false or failed.
False,
// Resource limits were hit, invalidating all normalized types.
HitLimits,
};
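Throughout this commit, callers adopt one convention for these ternary results: only True means "keep going", while both False and HitLimits are returned as-is so that a resource failure is never collapsed into an ordinary false. A minimal sketch of the pattern, using a hypothetical helper that is not part of this change:

NormalizationResult normalizeUnionOfTwo(Normalizer& n, NormalizedType& here, TypeId a, TypeId b, Set<TypeId>& seen)
{
    NormalizationResult res = n.unionNormalWithTy(here, a, seen);
    if (res != NormalizationResult::True)
        return res; // propagate False and HitLimits alike
    return n.unionNormalWithTy(here, b, seen);
}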
// A normalized type is either any, unknown, or one of the form P | T | F | G where
// * P is a union of primitive types (including singletons, classes and the error type)
// * T is a union of table types
@ -366,8 +380,8 @@ public:
void unionFunctions(NormalizedFunctionType& heress, const NormalizedFunctionType& theress);
void unionTablesWithTable(TypeIds& heres, TypeId there);
void unionTables(TypeIds& heres, const TypeIds& theres);
bool unionNormals(NormalizedType& here, const NormalizedType& there, int ignoreSmallerTyvars = -1);
bool unionNormalWithTy(NormalizedType& here, TypeId there, Set<TypeId>& seenSetTypes, int ignoreSmallerTyvars = -1);
NormalizationResult unionNormals(NormalizedType& here, const NormalizedType& there, int ignoreSmallerTyvars = -1);
NormalizationResult unionNormalWithTy(NormalizedType& here, TypeId there, Set<TypeId>& seenSetTypes, int ignoreSmallerTyvars = -1);
// ------- Negations
std::optional<NormalizedType> negateNormal(const NormalizedType& here);
@ -389,19 +403,19 @@ public:
std::optional<TypeId> intersectionOfFunctions(TypeId here, TypeId there);
void intersectFunctionsWithFunction(NormalizedFunctionType& heress, TypeId there);
void intersectFunctions(NormalizedFunctionType& heress, const NormalizedFunctionType& theress);
bool intersectTyvarsWithTy(NormalizedTyvars& here, TypeId there, Set<TypeId>& seenSetTypes);
bool intersectNormals(NormalizedType& here, const NormalizedType& there, int ignoreSmallerTyvars = -1);
bool intersectNormalWithTy(NormalizedType& here, TypeId there, Set<TypeId>& seenSetTypes);
bool normalizeIntersections(const std::vector<TypeId>& intersections, NormalizedType& outType);
NormalizationResult intersectTyvarsWithTy(NormalizedTyvars& here, TypeId there, Set<TypeId>& seenSetTypes);
NormalizationResult intersectNormals(NormalizedType& here, const NormalizedType& there, int ignoreSmallerTyvars = -1);
NormalizationResult intersectNormalWithTy(NormalizedType& here, TypeId there, Set<TypeId>& seenSetTypes);
NormalizationResult normalizeIntersections(const std::vector<TypeId>& intersections, NormalizedType& outType);
// Check for inhabitance
bool isInhabited(TypeId ty);
bool isInhabited(TypeId ty, Set<TypeId>& seen);
bool isInhabited(const NormalizedType* norm);
bool isInhabited(const NormalizedType* norm, Set<TypeId>& seen);
NormalizationResult isInhabited(TypeId ty);
NormalizationResult isInhabited(TypeId ty, Set<TypeId>& seen);
NormalizationResult isInhabited(const NormalizedType* norm);
NormalizationResult isInhabited(const NormalizedType* norm, Set<TypeId>& seen);
// Check for intersections being inhabited
bool isIntersectionInhabited(TypeId left, TypeId right);
NormalizationResult isIntersectionInhabited(TypeId left, TypeId right);
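Call sites that still need a boolean or optional view convert explicitly; for instance, `areEqComparable` in TypeInfer.cpp (changed later in this commit) maps HitLimits to std::nullopt. A standalone sketch of such an adapter (hypothetical name):

#include <optional>

std::optional<bool> toOptionalBool(NormalizationResult r)
{
    switch (r)
    {
    case NormalizationResult::HitLimits:
        return std::nullopt; // too complex to decide either way
    case NormalizationResult::False:
        return false;
    case NormalizationResult::True:
        return true;
    }
    return std::nullopt; // unreachable, but keeps MSVC's flow analysis happy
}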
// -------- Convert back from a normalized type to a type
TypeId typeFromNormal(const NormalizedType& norm);

View File

@ -103,6 +103,14 @@ struct SubtypingEnvironment
DenseHashMap<TypeId, GenericBounds> mappedGenerics{nullptr};
DenseHashMap<TypePackId, TypePackId> mappedGenericPacks{nullptr};
/*
* See the test cyclic_tables_are_assumed_to_be_compatible_with_classes for
* details.
*
* An empty value is equivalent to a nonexistent key.
*/
DenseHashMap<TypeId, TypeId> substitutions{nullptr};
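The "empty value behaves like a missing key" convention matters because entries are cleared by overwriting with nullptr rather than erasing (see the ClassType/TableType path in Subtyping.cpp below). A toy illustration of a lookup honoring that convention, with std::unordered_map standing in for DenseHashMap and an opaque pointer standing in for the real TypeId:

#include <unordered_map>

using TypeId = const void*; // opaque identity, as in the real code

TypeId resolve(const std::unordered_map<TypeId, TypeId>& substitutions, TypeId ty)
{
    auto it = substitutions.find(ty);
    // A nullptr value is equivalent to a nonexistent key.
    return (it != substitutions.end() && it->second) ? it->second : ty;
}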
DenseHashMap<std::pair<TypeId, TypeId>, SubtypingResult, TypePairHash> ephemeralCache{{}};
/// Applies `mappedGenerics` to the given type.
@ -192,7 +200,7 @@ private:
SubtypingResult isCovariantWith(SubtypingEnvironment& env, const MetatableType* subMt, const MetatableType* superMt);
SubtypingResult isCovariantWith(SubtypingEnvironment& env, const MetatableType* subMt, const TableType* superTable);
SubtypingResult isCovariantWith(SubtypingEnvironment& env, const ClassType* subClass, const ClassType* superClass);
SubtypingResult isCovariantWith(SubtypingEnvironment& env, const ClassType* subClass, const TableType* superTable);
SubtypingResult isCovariantWith(SubtypingEnvironment& env, TypeId subTy, const ClassType* subClass, TypeId superTy, const TableType* superTable);
SubtypingResult isCovariantWith(SubtypingEnvironment& env, const FunctionType* subFunction, const FunctionType* superFunction);
SubtypingResult isCovariantWith(SubtypingEnvironment& env, const PrimitiveType* subPrim, const TableType* superTable);
SubtypingResult isCovariantWith(SubtypingEnvironment& env, const SingletonType* subSingleton, const TableType* superTable);

View File

@ -14,7 +14,6 @@
#include <utility>
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAGVARIABLE(LuauAutocompleteStringLiteralBounds, false);
static const std::unordered_set<std::string> kStatementStartingKeywords = {
"while", "if", "local", "repeat", "function", "do", "for", "return", "break", "continue", "type", "export"};
@ -465,15 +464,12 @@ AutocompleteEntryMap autocompleteModuleTypes(const Module& module, Position posi
static void autocompleteStringSingleton(TypeId ty, bool addQuotes, AstNode* node, Position position, AutocompleteEntryMap& result)
{
if (FFlag::LuauAutocompleteStringLiteralBounds)
if (position == node->location.begin || position == node->location.end)
{
if (position == node->location.begin || position == node->location.end)
{
if (auto str = node->as<AstExprConstantString>(); str && str->quoteStyle == AstExprConstantString::Quoted)
return;
else if (node->is<AstExprInterpString>())
return;
}
if (auto str = node->as<AstExprConstantString>(); str && str->quoteStyle == AstExprConstantString::Quoted)
return;
else if (node->is<AstExprInterpString>())
return;
}
auto formatKey = [addQuotes](const std::string& key) {

View File

@ -738,14 +738,18 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatLocal* stat
scope->lvalueTypes[def] = assignee;
}
TypePackId resultPack = checkPack(scope, statLocal->values, expectedTypes).tp;
addConstraint(scope, statLocal->location, UnpackConstraint{arena->addTypePack(std::move(assignees)), resultPack, /*resultIsLValue*/ true});
TypePackId rvaluePack = checkPack(scope, statLocal->values, expectedTypes).tp;
// Types must flow between whatever annotations were provided and the rhs expression.
if (hasAnnotation)
addConstraint(scope, statLocal->location, PackSubtypeConstraint{resultPack, arena->addTypePack(std::move(annotatedTypes))});
{
TypePackId annotatedPack = arena->addTypePack(std::move(annotatedTypes));
addConstraint(scope, statLocal->location, UnpackConstraint{arena->addTypePack(std::move(assignees)), annotatedPack, /*resultIsLValue*/ true});
addConstraint(scope, statLocal->location, PackSubtypeConstraint{rvaluePack, annotatedPack});
}
else
addConstraint(scope, statLocal->location, UnpackConstraint{arena->addTypePack(std::move(assignees)), rvaluePack, /*resultIsLValue*/ true});
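For a concrete picture, consider `local x: number = f()` (a hypothetical program, not from this change): the local is now unpacked from its annotation, while the right-hand side is checked against that annotation separately. A simplified, self-contained model of the two constraints emitted in the annotated case:

#include <string>
#include <vector>

struct Constraint
{
    std::string kind;
    std::string subject;
    std::string target;
};

std::vector<Constraint> annotatedLocalConstraints()
{
    return {
        // The binding of x comes from the annotation pack (number), not from f().
        {"Unpack", "(typeof x)", "(number)"},
        // The rvalue pack must separately be a subtype of the annotation pack.
        {"PackSubtype", "(returns of f)", "(number)"},
    };
}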
if (statLocal->vars.size == 1 && statLocal->values.size == 1 && firstValueType && scope.get() == rootScope)
if (statLocal->vars.size == 1 && statLocal->values.size == 1 && firstValueType && scope.get() == rootScope && !hasAnnotation)
{
AstLocal* var = statLocal->vars.data[0];
AstExpr* value = statLocal->values.data[0];

View File

@ -27,7 +27,7 @@
#include <utility>
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolver, false);
LUAU_FASTFLAGVARIABLE(DebugLuauLogBindings, false);
LUAU_FASTINTVARIABLE(LuauSolverRecursionLimit, 500);
namespace Luau
@ -465,10 +465,8 @@ void ConstraintSolver::run()
reduceFamilies(instance, Location{}, TypeFamilyContext{arena, builtinTypes, rootScope, normalizer, NotNull{&iceReporter}, NotNull{&limits}}, false);
}
if (FFlag::DebugLuauLogSolver)
{
if (FFlag::DebugLuauLogSolver || FFlag::DebugLuauLogBindings)
dumpBindings(rootScope, opts);
}
if (logger)
{
@ -1761,7 +1759,7 @@ bool ConstraintSolver::tryDispatchUnpack1(NotNull<const Constraint> constraint,
else
{
LUAU_ASSERT(resultIsLValue);
unify(constraint, resultTy, srcTy);
unify(constraint, srcTy, resultTy);
}
unblock(resultTy, constraint->location);
@ -1812,7 +1810,7 @@ bool ConstraintSolver::tryDispatch(const UnpackConstraint& c, NotNull<const Cons
tryDispatchUnpack1(constraint, resultTy, srcTy, c.resultIsLValue);
}
else
unify(constraint, resultTy, srcTy);
unify(constraint, srcTy, resultTy);
++resultIter;
++i;

View File

@ -314,9 +314,9 @@ type DateTypeResult = {
}
declare os: {
time: @checked (time: DateTypeArg?) -> number,
time: (time: DateTypeArg?) -> number,
date: ((formatString: "*t" | "!*t", time: number?) -> DateTypeResult) & ((formatString: string?, time: number?) -> string),
difftime: @checked (t2: DateTypeResult | number, t1: DateTypeResult | number) -> number,
difftime: (t2: DateTypeResult | number, t1: DateTypeResult | number) -> number,
clock: () -> number,
}

View File

@ -38,6 +38,8 @@ LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolverToJson, false)
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolverToJsonFile, false)
LUAU_FASTFLAGVARIABLE(DebugLuauForbidInternalTypes, false)
LUAU_FASTFLAGVARIABLE(DebugLuauForceStrictMode, false)
LUAU_FASTFLAGVARIABLE(DebugLuauForceNonStrictMode, false)
namespace Luau
{
@ -891,7 +893,13 @@ void Frontend::checkBuildQueueItem(BuildQueueItem& item)
SourceNode& sourceNode = *item.sourceNode;
const SourceModule& sourceModule = *item.sourceModule;
const Config& config = item.config;
Mode mode = sourceModule.mode.value_or(config.mode);
Mode mode;
if (FFlag::DebugLuauForceStrictMode)
mode = Mode::Strict;
else if (FFlag::DebugLuauForceNonStrictMode)
mode = Mode::Nonstrict;
else
mode = sourceModule.mode.value_or(config.mode);
ScopePtr environmentScope = item.environmentScope;
double timestamp = getTimestamp();
const std::vector<RequireCycle>& requireCycles = item.requireCycles;

View File

@ -421,77 +421,77 @@ static bool isShallowInhabited(const NormalizedType& norm)
!get<NeverType>(norm.buffers) || !norm.functions.isNever() || !norm.tables.empty() || !norm.tyvars.empty();
}
bool Normalizer::isInhabited(const NormalizedType* norm)
NormalizationResult Normalizer::isInhabited(const NormalizedType* norm)
{
Set<TypeId> seen{nullptr};
return isInhabited(norm, seen);
}
bool Normalizer::isInhabited(const NormalizedType* norm, Set<TypeId>& seen)
NormalizationResult Normalizer::isInhabited(const NormalizedType* norm, Set<TypeId>& seen)
{
RecursionCounter _rc(&sharedState->counters.recursionCount);
if (!withinResourceLimits())
return false;
// If normalization failed, the type is complex, and so is more likely than not to be inhabited.
if (!norm)
return true;
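// A null `norm` means an earlier normalization already failed, so treat it the
// same as running out of resources here.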
if (!withinResourceLimits() || !norm)
return NormalizationResult::HitLimits;
if (!get<NeverType>(norm->tops) || !get<NeverType>(norm->booleans) || !get<NeverType>(norm->errors) || !get<NeverType>(norm->nils) ||
!get<NeverType>(norm->numbers) || !get<NeverType>(norm->threads) || !get<NeverType>(norm->buffers) || !norm->classes.isNever() ||
!norm->strings.isNever() || !norm->functions.isNever())
return true;
return NormalizationResult::True;
for (const auto& [_, intersect] : norm->tyvars)
{
if (isInhabited(intersect.get(), seen))
return true;
NormalizationResult res = isInhabited(intersect.get(), seen);
if (res != NormalizationResult::False)
return res;
}
for (TypeId table : norm->tables)
{
if (isInhabited(table, seen))
return true;
NormalizationResult res = isInhabited(table, seen);
if (res != NormalizationResult::False)
return res;
}
return false;
return NormalizationResult::False;
}
bool Normalizer::isInhabited(TypeId ty)
NormalizationResult Normalizer::isInhabited(TypeId ty)
{
if (cacheInhabitance)
{
if (bool* result = cachedIsInhabited.find(ty))
return *result;
return *result ? NormalizationResult::True : NormalizationResult::False;
}
Set<TypeId> seen{nullptr};
bool result = isInhabited(ty, seen);
NormalizationResult result = isInhabited(ty, seen);
if (cacheInhabitance)
cachedIsInhabited[ty] = result;
if (cacheInhabitance && result == NormalizationResult::True)
cachedIsInhabited[ty] = true;
else if (cacheInhabitance && result == NormalizationResult::False)
cachedIsInhabited[ty] = false;
return result;
}
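Note that only definite answers make it into the cache: a HitLimits result says something about resources, not about the type, so caching it would be wrong if the query is retried under different limits. The guard in miniature, with a hypothetical std::unordered_map cache:

#include <unordered_map>

void cacheDefiniteAnswer(std::unordered_map<const void*, bool>& cache, const void* ty, NormalizationResult result)
{
    if (result == NormalizationResult::True)
        cache[ty] = true;
    else if (result == NormalizationResult::False)
        cache[ty] = false;
    // HitLimits: cache nothing.
}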
bool Normalizer::isInhabited(TypeId ty, Set<TypeId>& seen)
NormalizationResult Normalizer::isInhabited(TypeId ty, Set<TypeId>& seen)
{
RecursionCounter _rc(&sharedState->counters.recursionCount);
if (!withinResourceLimits())
return false;
return NormalizationResult::HitLimits;
// TODO: use log.follow(ty), CLI-64291
ty = follow(ty);
if (get<NeverType>(ty))
return false;
return NormalizationResult::False;
if (!get<IntersectionType>(ty) && !get<UnionType>(ty) && !get<TableType>(ty) && !get<MetatableType>(ty))
return true;
return NormalizationResult::True;
if (seen.count(ty))
return true;
return NormalizationResult::True;
seen.insert(ty);
@ -503,26 +503,36 @@ bool Normalizer::isInhabited(TypeId ty, Set<TypeId>& seen)
{
// A table enclosing a read property whose type is uninhabitable is also itself uninhabitable,
// but not its write property. That just means the write property doesn't exist, and so is readonly.
if (auto ty = prop.readTy; ty && !isInhabited(*ty, seen))
return false;
if (auto ty = prop.readTy)
{
NormalizationResult res = isInhabited(*ty, seen);
if (res != NormalizationResult::True)
return res;
}
}
else
{
if (!isInhabited(prop.type(), seen))
return false;
NormalizationResult res = isInhabited(prop.type(), seen);
if (res != NormalizationResult::True)
return res;
}
}
return true;
return NormalizationResult::True;
}
if (const MetatableType* mtv = get<MetatableType>(ty))
return isInhabited(mtv->table, seen) && isInhabited(mtv->metatable, seen);
{
NormalizationResult res = isInhabited(mtv->table, seen);
if (res != NormalizationResult::True)
return res;
return isInhabited(mtv->metatable, seen);
}
const NormalizedType* norm = normalize(ty);
return isInhabited(norm, seen);
}
bool Normalizer::isIntersectionInhabited(TypeId left, TypeId right)
NormalizationResult Normalizer::isIntersectionInhabited(TypeId left, TypeId right)
{
left = follow(left);
right = follow(right);
@ -530,7 +540,7 @@ bool Normalizer::isIntersectionInhabited(TypeId left, TypeId right)
if (cacheInhabitance)
{
if (bool* result = cachedIsInhabitedIntersection.find({left, right}))
return *result;
return *result ? NormalizationResult::True : NormalizationResult::False;
}
Set<TypeId> seen{nullptr};
@ -538,18 +548,21 @@ bool Normalizer::isIntersectionInhabited(TypeId left, TypeId right)
seen.insert(right);
NormalizedType norm{builtinTypes};
if (!normalizeIntersections({left, right}, norm))
NormalizationResult res = normalizeIntersections({left, right}, norm);
if (res != NormalizationResult::True)
{
if (cacheInhabitance)
if (cacheInhabitance && res == NormalizationResult::False)
cachedIsInhabitedIntersection[{left, right}] = false;
return false;
return res;
}
bool result = isInhabited(&norm, seen);
NormalizationResult result = isInhabited(&norm, seen);
if (cacheInhabitance)
cachedIsInhabitedIntersection[{left, right}] = result;
if (cacheInhabitance && result == NormalizationResult::True)
cachedIsInhabitedIntersection[{left, right}] = true;
else if (cacheInhabitance && result == NormalizationResult::False)
cachedIsInhabitedIntersection[{left, right}] = false;
return result;
}
@ -827,7 +840,8 @@ const NormalizedType* Normalizer::normalize(TypeId ty)
NormalizedType norm{builtinTypes};
Set<TypeId> seenSetTypes{nullptr};
if (!unionNormalWithTy(norm, ty, seenSetTypes))
NormalizationResult res = unionNormalWithTy(norm, ty, seenSetTypes);
if (res != NormalizationResult::True)
return nullptr;
if (norm.isUnknown())
{
@ -840,7 +854,7 @@ const NormalizedType* Normalizer::normalize(TypeId ty)
return result;
}
bool Normalizer::normalizeIntersections(const std::vector<TypeId>& intersections, NormalizedType& outType)
NormalizationResult Normalizer::normalizeIntersections(const std::vector<TypeId>& intersections, NormalizedType& outType)
{
if (!arena)
sharedState->iceHandler->ice("Normalizing types outside a module");
@ -850,14 +864,16 @@ bool Normalizer::normalizeIntersections(const std::vector<TypeId>& intersections
Set<TypeId> seenSetTypes{nullptr};
for (auto ty : intersections)
{
if (!intersectNormalWithTy(norm, ty, seenSetTypes))
return false;
NormalizationResult res = intersectNormalWithTy(norm, ty, seenSetTypes);
if (res != NormalizationResult::True)
return res;
}
if (!unionNormals(outType, norm))
return false;
NormalizationResult res = unionNormals(outType, norm);
if (res != NormalizationResult::True)
return res;
return true;
return NormalizationResult::True;
}
void Normalizer::clearNormal(NormalizedType& norm)
@ -1521,7 +1537,7 @@ void Normalizer::unionTables(TypeIds& heres, const TypeIds& theres)
//
// And yes, this is essentially a SAT solver hidden inside a typechecker.
// That's what you get for having a type system with generics, intersection and union types.
bool Normalizer::unionNormals(NormalizedType& here, const NormalizedType& there, int ignoreSmallerTyvars)
NormalizationResult Normalizer::unionNormals(NormalizedType& here, const NormalizedType& there, int ignoreSmallerTyvars)
{
TypeId tops = unionOfTops(here.tops, there.tops);
if (FFlag::LuauTransitiveSubtyping && get<UnknownType>(tops) && (get<ErrorType>(here.errors) || get<ErrorType>(there.errors)))
@ -1530,7 +1546,7 @@ bool Normalizer::unionNormals(NormalizedType& here, const NormalizedType& there,
{
clearNormal(here);
here.tops = tops;
return true;
return NormalizationResult::True;
}
for (auto it = there.tyvars.begin(); it != there.tyvars.end(); it++)
@ -1542,10 +1558,15 @@ bool Normalizer::unionNormals(NormalizedType& here, const NormalizedType& there,
continue;
auto [emplaced, fresh] = here.tyvars.emplace(tyvar, std::make_unique<NormalizedType>(NormalizedType{builtinTypes}));
if (fresh)
if (!unionNormals(*emplaced->second, here, index))
return false;
if (!unionNormals(*emplaced->second, inter, index))
return false;
{
NormalizationResult res = unionNormals(*emplaced->second, here, index);
if (res != NormalizationResult::True)
return res;
}
NormalizationResult res = unionNormals(*emplaced->second, inter, index);
if (res != NormalizationResult::True)
return res;
}
here.booleans = unionOfBools(here.booleans, there.booleans);
@ -1559,7 +1580,7 @@ bool Normalizer::unionNormals(NormalizedType& here, const NormalizedType& there,
here.buffers = (get<NeverType>(there.buffers) ? here.buffers : there.buffers);
unionFunctions(here.functions, there.functions);
unionTables(here.tables, there.tables);
return true;
return NormalizationResult::True;
}
bool Normalizer::withinResourceLimits()
@ -1585,11 +1606,11 @@ bool Normalizer::withinResourceLimits()
}
// See above for an explanation of `ignoreSmallerTyvars`.
bool Normalizer::unionNormalWithTy(NormalizedType& here, TypeId there, Set<TypeId>& seenSetTypes, int ignoreSmallerTyvars)
NormalizationResult Normalizer::unionNormalWithTy(NormalizedType& here, TypeId there, Set<TypeId>& seenSetTypes, int ignoreSmallerTyvars)
{
RecursionCounter _rc(&sharedState->counters.recursionCount);
if (!withinResourceLimits())
return false;
return NormalizationResult::HitLimits;
there = follow(there);
@ -1600,34 +1621,35 @@ bool Normalizer::unionNormalWithTy(NormalizedType& here, TypeId there, Set<TypeI
tops = builtinTypes->anyType;
clearNormal(here);
here.tops = tops;
return true;
return NormalizationResult::True;
}
else if (!FFlag::LuauTransitiveSubtyping && (get<NeverType>(there) || !get<NeverType>(here.tops)))
return true;
return NormalizationResult::True;
else if (FFlag::LuauTransitiveSubtyping && (get<NeverType>(there) || get<AnyType>(here.tops)))
return true;
return NormalizationResult::True;
else if (FFlag::LuauTransitiveSubtyping && get<ErrorType>(there) && get<UnknownType>(here.tops))
{
here.tops = builtinTypes->anyType;
return true;
return NormalizationResult::True;
}
else if (const UnionType* utv = get<UnionType>(there))
{
if (seenSetTypes.count(there))
return true;
return NormalizationResult::True;
seenSetTypes.insert(there);
for (UnionTypeIterator it = begin(utv); it != end(utv); ++it)
{
if (!unionNormalWithTy(here, *it, seenSetTypes))
NormalizationResult res = unionNormalWithTy(here, *it, seenSetTypes);
if (res != NormalizationResult::True)
{
seenSetTypes.erase(there);
return false;
return res;
}
}
seenSetTypes.erase(there);
return true;
return NormalizationResult::True;
}
else if (const IntersectionType* itv = get<IntersectionType>(there))
{
@ -1635,18 +1657,19 @@ bool Normalizer::unionNormalWithTy(NormalizedType& here, TypeId there, Set<TypeI
norm.tops = builtinTypes->anyType;
for (IntersectionTypeIterator it = begin(itv); it != end(itv); ++it)
{
if (!intersectNormalWithTy(norm, *it, seenSetTypes))
return false;
NormalizationResult res = intersectNormalWithTy(norm, *it, seenSetTypes);
if (res != NormalizationResult::True)
return res;
}
return unionNormals(here, norm);
}
else if (FFlag::LuauTransitiveSubtyping && get<UnknownType>(here.tops))
return true;
return NormalizationResult::True;
else if (get<GenericType>(there) || get<FreeType>(there) || get<BlockedType>(there) || get<PendingExpansionType>(there) ||
get<TypeFamilyInstanceType>(there))
{
if (tyvarIndex(there) <= ignoreSmallerTyvars)
return true;
return NormalizationResult::True;
NormalizedType inter{builtinTypes};
inter.tops = builtinTypes->unknownType;
here.tyvars.insert_or_assign(there, std::make_unique<NormalizedType>(std::move(inter)));
@ -1714,10 +1737,11 @@ bool Normalizer::unionNormalWithTy(NormalizedType& here, TypeId there, Set<TypeI
const NormalizedType* thereNormal = normalize(ntv->ty);
std::optional<NormalizedType> tn = negateNormal(*thereNormal);
if (!tn)
return false;
return NormalizationResult::False;
if (!unionNormals(here, *tn))
return false;
NormalizationResult res = unionNormals(here, *tn);
if (res != NormalizationResult::True)
return res;
}
else if (get<PendingExpansionType>(there) || get<TypeFamilyInstanceType>(there))
{
@ -1727,11 +1751,14 @@ bool Normalizer::unionNormalWithTy(NormalizedType& here, TypeId there, Set<TypeI
LUAU_ASSERT(!"Unreachable");
for (auto& [tyvar, intersect] : here.tyvars)
if (!unionNormalWithTy(*intersect, there, seenSetTypes, tyvarIndex(tyvar)))
return false;
{
NormalizationResult res = unionNormalWithTy(*intersect, there, seenSetTypes, tyvarIndex(tyvar));
if (res != NormalizationResult::True)
return res;
}
assertInvariant(here);
return true;
return NormalizationResult::True;
}
// ------- Negations
@ -2740,28 +2767,29 @@ void Normalizer::intersectFunctions(NormalizedFunctionType& heres, const Normali
}
}
bool Normalizer::intersectTyvarsWithTy(NormalizedTyvars& here, TypeId there, Set<TypeId>& seenSetTypes)
NormalizationResult Normalizer::intersectTyvarsWithTy(NormalizedTyvars& here, TypeId there, Set<TypeId>& seenSetTypes)
{
for (auto it = here.begin(); it != here.end();)
{
NormalizedType& inter = *it->second;
if (!intersectNormalWithTy(inter, there, seenSetTypes))
return false;
NormalizationResult res = intersectNormalWithTy(inter, there, seenSetTypes);
if (res != NormalizationResult::True)
return res;
if (isShallowInhabited(inter))
++it;
else
it = here.erase(it);
}
return true;
return NormalizationResult::True;
}
// See above for an explanation of `ignoreSmallerTyvars`.
bool Normalizer::intersectNormals(NormalizedType& here, const NormalizedType& there, int ignoreSmallerTyvars)
NormalizationResult Normalizer::intersectNormals(NormalizedType& here, const NormalizedType& there, int ignoreSmallerTyvars)
{
if (!get<NeverType>(there.tops))
{
here.tops = intersectionOfTops(here.tops, there.tops);
return true;
return NormalizationResult::True;
}
else if (!get<NeverType>(here.tops))
{
@ -2789,8 +2817,9 @@ bool Normalizer::intersectNormals(NormalizedType& here, const NormalizedType& th
auto [found, fresh] = here.tyvars.emplace(tyvar, std::make_unique<NormalizedType>(NormalizedType{builtinTypes}));
if (fresh)
{
if (!unionNormals(*found->second, here, index))
return false;
NormalizationResult res = unionNormals(*found->second, here, index);
if (res != NormalizationResult::True)
return res;
}
}
}
@ -2803,34 +2832,36 @@ bool Normalizer::intersectNormals(NormalizedType& here, const NormalizedType& th
auto found = there.tyvars.find(tyvar);
if (found == there.tyvars.end())
{
if (!intersectNormals(inter, there, index))
return false;
NormalizationResult res = intersectNormals(inter, there, index);
if (res != NormalizationResult::True)
return res;
}
else
{
if (!intersectNormals(inter, *found->second, index))
return false;
NormalizationResult res = intersectNormals(inter, *found->second, index);
if (res != NormalizationResult::True)
return res;
}
if (isShallowInhabited(inter))
it++;
else
it = here.tyvars.erase(it);
}
return true;
return NormalizationResult::True;
}
bool Normalizer::intersectNormalWithTy(NormalizedType& here, TypeId there, Set<TypeId>& seenSetTypes)
NormalizationResult Normalizer::intersectNormalWithTy(NormalizedType& here, TypeId there, Set<TypeId>& seenSetTypes)
{
RecursionCounter _rc(&sharedState->counters.recursionCount);
if (!withinResourceLimits())
return false;
return NormalizationResult::HitLimits;
there = follow(there);
if (get<AnyType>(there) || get<UnknownType>(there))
{
here.tops = intersectionOfTops(here.tops, there);
return true;
return NormalizationResult::True;
}
else if (!get<NeverType>(here.tops))
{
@ -2841,16 +2872,22 @@ bool Normalizer::intersectNormalWithTy(NormalizedType& here, TypeId there, Set<T
{
NormalizedType norm{builtinTypes};
for (UnionTypeIterator it = begin(utv); it != end(utv); ++it)
if (!unionNormalWithTy(norm, *it, seenSetTypes))
return false;
{
NormalizationResult res = unionNormalWithTy(norm, *it, seenSetTypes);
if (res != NormalizationResult::True)
return res;
}
return intersectNormals(here, norm);
}
else if (const IntersectionType* itv = get<IntersectionType>(there))
{
for (IntersectionTypeIterator it = begin(itv); it != end(itv); ++it)
if (!intersectNormalWithTy(here, *it, seenSetTypes))
return false;
return true;
{
NormalizationResult res = intersectNormalWithTy(here, *it, seenSetTypes);
if (res != NormalizationResult::True)
return res;
}
return NormalizationResult::True;
}
else if (get<GenericType>(there) || get<FreeType>(there) || get<BlockedType>(there) || get<PendingExpansionType>(there) ||
get<TypeFamilyInstanceType>(there) || get<LocalType>(there))
@ -2956,7 +2993,7 @@ bool Normalizer::intersectNormalWithTy(NormalizedType& here, TypeId there, Set<T
const NormalizedType* normal = normalize(t);
std::optional<NormalizedType> negated = negateNormal(*normal);
if (!negated)
return false;
return NormalizationResult::False;
intersectNormals(here, *negated);
}
else if (const UnionType* itv = get<UnionType>(t))
@ -2966,7 +3003,7 @@ bool Normalizer::intersectNormalWithTy(NormalizedType& here, TypeId there, Set<T
const NormalizedType* normalPart = normalize(part);
std::optional<NormalizedType> negated = negateNormal(*normalPart);
if (!negated)
return false;
return NormalizationResult::False;
intersectNormals(here, *negated);
}
}
@ -2974,13 +3011,13 @@ bool Normalizer::intersectNormalWithTy(NormalizedType& here, TypeId there, Set<T
{
// HACK: Refinements sometimes intersect with ~any under the
// assumption that it is the same as any.
return true;
return NormalizationResult::True;
}
else if (get<NeverType>(t))
{
// if we're intersecting with `~never`, this is equivalent to intersecting with `unknown`
// this is a noop since an intersection with `unknown` is trivial.
return true;
return NormalizationResult::True;
}
else if (auto nt = get<NegationType>(t))
return intersectNormalWithTy(here, nt->ty, seenSetTypes);
@ -2998,11 +3035,12 @@ bool Normalizer::intersectNormalWithTy(NormalizedType& here, TypeId there, Set<T
else
LUAU_ASSERT(!"Unreachable");
if (!intersectTyvarsWithTy(tyvars, there, seenSetTypes))
return false;
NormalizationResult res = intersectTyvarsWithTy(tyvars, there, seenSetTypes);
if (res != NormalizationResult::True)
return res;
here.tyvars = std::move(tyvars);
return true;
return NormalizationResult::True;
}
void makeTableShared(TypeId ty)

View File

@ -347,9 +347,12 @@ SubtypingResult Subtyping::isSubtype(TypeId subTy, TypeId superTy)
TypeId upperBound = makeAggregateType<IntersectionType>(ub, builtinTypes->unknownType);
const NormalizedType* nt = normalizer->normalize(upperBound);
if (!nt)
// we say that the result is true if normalization failed because complex types are likely to be inhabited.
NormalizationResult res = nt ? normalizer->isInhabited(nt) : NormalizationResult::True;
if (!nt || res == NormalizationResult::HitLimits)
result.normalizationTooComplex = true;
else if (!normalizer->isInhabited(nt))
else if (res == NormalizationResult::False)
{
/* If the normalized upper bound we're mapping to a generic is
* uninhabited, then we must consider the subtyping relation not to
@ -433,6 +436,12 @@ SubtypingResult Subtyping::isCovariantWith(SubtypingEnvironment& env, TypeId sub
subTy = follow(subTy);
superTy = follow(superTy);
if (TypeId* subIt = env.substitutions.find(subTy); subIt && *subIt)
subTy = *subIt;
if (TypeId* superIt = env.substitutions.find(superTy); superIt && *superIt)
superTy = *superIt;
SubtypingResult* cachedResult = resultCache.find({subTy, superTy});
if (cachedResult)
return *cachedResult;
@ -612,7 +621,7 @@ SubtypingResult Subtyping::isCovariantWith(SubtypingEnvironment& env, TypeId sub
else if (auto p = get2<ClassType, ClassType>(subTy, superTy))
result = isCovariantWith(env, p);
else if (auto p = get2<ClassType, TableType>(subTy, superTy))
result = isCovariantWith(env, p);
result = isCovariantWith(env, subTy, p.first, superTy, p.second);
else if (auto p = get2<PrimitiveType, TableType>(subTy, superTy))
result = isCovariantWith(env, p);
else if (auto p = get2<SingletonType, TableType>(subTy, superTy))
@ -1301,10 +1310,12 @@ SubtypingResult Subtyping::isCovariantWith(SubtypingEnvironment& env, const Clas
return {isSubclass(subClass, superClass)};
}
SubtypingResult Subtyping::isCovariantWith(SubtypingEnvironment& env, const ClassType* subClass, const TableType* superTable)
SubtypingResult Subtyping::isCovariantWith(SubtypingEnvironment& env, TypeId subTy, const ClassType* subClass, TypeId superTy, const TableType* superTable)
{
SubtypingResult result{true};
env.substitutions[superTy] = subTy;
for (const auto& [name, prop] : superTable->props)
{
if (auto classProp = lookupClassProp(subClass, name))
@ -1312,9 +1323,14 @@ SubtypingResult Subtyping::isCovariantWith(SubtypingEnvironment& env, const Clas
result.andAlso(isCovariantWith(env, *classProp, prop, name));
}
else
return SubtypingResult{false};
{
result = {false};
break;
}
}
env.substitutions[superTy] = nullptr;
return result;
}

View File

@ -1352,12 +1352,15 @@ struct TypeChecker2
auto norm = normalizer.normalize(fnTy);
if (!norm)
reportError(NormalizationTooComplex{}, call->func->location);
auto isInhabited = normalizer.isInhabited(norm);
if (isInhabited == NormalizationResult::HitLimits)
reportError(NormalizationTooComplex{}, call->func->location);
if (norm && norm->shouldSuppressErrors())
return; // error suppressing function type!
else if (!resolver.ok.empty())
return; // We found a call that works, so this is ok.
else if (!norm || !normalizer.isInhabited(norm))
else if (!norm || isInhabited == NormalizationResult::False)
return; // Ok. Calling an uninhabited type is no-op.
else if (!resolver.nonviableOverloads.empty())
{
@ -1802,7 +1805,7 @@ struct TypeChecker2
return leftType;
}
bool typesHaveIntersection = normalizer.isIntersectionInhabited(leftType, rightType);
NormalizationResult typesHaveIntersection = normalizer.isIntersectionInhabited(leftType, rightType);
if (auto it = kBinaryOpMetamethods.find(expr->op); it != kBinaryOpMetamethods.end())
{
std::optional<TypeId> leftMt = getMetatable(leftType, builtinTypes);
@ -1836,11 +1839,11 @@ struct TypeChecker2
// If we're working with things that are not tables, the metatable comparisons above are a little excessive
// It's ok for one type to have a meta table and the other to not. In that case, we should fall back on
// checking if the intersection of the types is inhabited.
// checking if the intersection of the types is inhabited. If `typesHaveIntersection` failed due to limits, we conservatively assume the types do intersect.
// TODO: Maybe add more checks here (e.g. for functions, classes, etc)
if (!(get<TableType>(leftType) || get<TableType>(rightType)))
if (!leftMt.has_value() || !rightMt.has_value())
matches = matches || typesHaveIntersection;
matches = matches || typesHaveIntersection != NormalizationResult::False;
if (!matches && isComparison)
{
@ -2594,35 +2597,59 @@ struct TypeChecker2
bool foundOneProp = false;
std::vector<TypeId> typesMissingTheProp;
// this becomes `false` if we ever hit the resource limits during any of our uses of `fetch`.
bool normValid = true;
auto fetch = [&](TypeId ty) {
if (!normalizer.isInhabited(ty))
NormalizationResult result = normalizer.isInhabited(ty);
if (result == NormalizationResult::HitLimits)
normValid = false;
if (result != NormalizationResult::True)
return;
DenseHashSet<TypeId> seen{nullptr};
bool found = hasIndexTypeFromType(ty, prop, context, location, seen, astIndexExprType, errors);
foundOneProp |= found;
if (!found)
NormalizationResult found = hasIndexTypeFromType(ty, prop, context, location, seen, astIndexExprType, errors);
if (found == NormalizationResult::HitLimits)
{
normValid = false;
return;
}
foundOneProp |= found == NormalizationResult::True;
if (found == NormalizationResult::False)
typesMissingTheProp.push_back(ty);
};
fetch(norm->tops);
fetch(norm->booleans);
if (normValid)
fetch(norm->tops);
if (normValid)
fetch(norm->booleans);
for (const auto& [ty, _negations] : norm->classes.classes)
if (normValid)
{
fetch(ty);
for (const auto& [ty, _negations] : norm->classes.classes)
{
fetch(ty);
}
}
fetch(norm->errors);
fetch(norm->nils);
fetch(norm->numbers);
if (!norm->strings.isNever())
if (normValid)
fetch(norm->errors);
if (normValid)
fetch(norm->nils);
if (normValid)
fetch(norm->numbers);
if (normValid && !norm->strings.isNever())
fetch(builtinTypes->stringType);
fetch(norm->threads);
if (normValid)
fetch(norm->threads);
for (TypeId ty : norm->tables)
fetch(ty);
if (norm->functions.isTop)
if (normValid)
fetch(ty);
if (normValid && norm->functions.isTop)
fetch(builtinTypes->functionType);
else if (!norm->functions.isNever())
else if (normValid && !norm->functions.isNever())
{
if (norm->functions.parts.size() == 1)
fetch(norm->functions.parts.front());
@ -2633,15 +2660,19 @@ struct TypeChecker2
fetch(module->internalTypes.addType(IntersectionType{std::move(parts)}));
}
}
for (const auto& [tyvar, intersect] : norm->tyvars)
if (normValid)
{
if (get<NeverType>(intersect->tops))
for (const auto& [tyvar, intersect] : norm->tyvars)
{
TypeId ty = normalizer.typeFromNormal(*intersect);
fetch(module->internalTypes.addType(IntersectionType{{tyvar, ty}}));
if (get<NeverType>(intersect->tops))
{
TypeId ty = normalizer.typeFromNormal(*intersect);
fetch(module->internalTypes.addType(IntersectionType{{tyvar, ty}}));
}
else
fetch(tyvar);
}
else
fetch(tyvar);
}
return {foundOneProp, typesMissingTheProp};
@ -2695,18 +2726,18 @@ struct TypeChecker2
}
}
bool hasIndexTypeFromType(TypeId ty, const std::string& prop, ValueContext context, const Location& location, DenseHashSet<TypeId>& seen,
TypeId astIndexExprType, std::vector<TypeError>& errors)
NormalizationResult hasIndexTypeFromType(TypeId ty, const std::string& prop, ValueContext context, const Location& location,
DenseHashSet<TypeId>& seen, TypeId astIndexExprType, std::vector<TypeError>& errors)
{
// If we have already encountered this type, we must assume that some
// other codepath will do the right thing and signal false if the
// property is not present.
if (seen.contains(ty))
return true;
return NormalizationResult::True;
seen.insert(ty);
if (get<ErrorType>(ty) || get<AnyType>(ty) || get<NeverType>(ty))
return true;
return NormalizationResult::True;
if (isString(ty))
{
@ -2718,23 +2749,23 @@ struct TypeChecker2
if (auto tt = getTableType(ty))
{
if (findTablePropertyRespectingMeta(builtinTypes, errors, ty, prop, context, location))
return true;
return NormalizationResult::True;
if (tt->indexer)
{
TypeId indexType = follow(tt->indexer->indexType);
if (isPrim(indexType, PrimitiveType::String))
return true;
return NormalizationResult::True;
// If the indexer looks like { [any] : _} - the prop lookup should be allowed!
else if (get<AnyType>(indexType) || get<UnknownType>(indexType))
return true;
return NormalizationResult::True;
}
// if we are in a conditional context, we treat the property as present and `unknown` because
// we may be _refining_ `tableTy` to include that property. we will want to revisit this a bit
// in the future once luau has support for exact tables since this only applies when inexact.
return inConditional(typeContext);
return inConditional(typeContext) ? NormalizationResult::True : NormalizationResult::False;
}
else if (const ClassType* cls = get<ClassType>(ty))
{
@ -2743,26 +2774,40 @@ struct TypeChecker2
// is compatible with the indexer's indexType
// Construct the intersection and test inhabitedness!
if (auto property = lookupClassProp(cls, prop))
return true;
return NormalizationResult::True;
if (cls->indexer)
{
TypeId inhabitatedTestType = module->internalTypes.addType(IntersectionType{{cls->indexer->indexType, astIndexExprType}});
return normalizer.isInhabited(inhabitatedTestType);
}
return false;
return NormalizationResult::False;
}
else if (const UnionType* utv = get<UnionType>(ty))
return std::all_of(begin(utv), end(utv), [&](TypeId part) {
return hasIndexTypeFromType(part, prop, context, location, seen, astIndexExprType, errors);
});
{
for (TypeId part : utv)
{
NormalizationResult result = hasIndexTypeFromType(part, prop, context, location, seen, astIndexExprType, errors);
if (result != NormalizationResult::True)
return result;
}
return NormalizationResult::True;
}
else if (const IntersectionType* itv = get<IntersectionType>(ty))
return std::any_of(begin(itv), end(itv), [&](TypeId part) {
return hasIndexTypeFromType(part, prop, context, location, seen, astIndexExprType, errors);
});
{
for (TypeId part : itv)
{
NormalizationResult result = hasIndexTypeFromType(part, prop, context, location, seen, astIndexExprType, errors);
if (result != NormalizationResult::False)
return result;
}
return NormalizationResult::False;
}
else if (const PrimitiveType* pt = get<PrimitiveType>(ty))
return inConditional(typeContext) && pt->type == PrimitiveType::Table;
return (inConditional(typeContext) && pt->type == PrimitiveType::Table) ? NormalizationResult::True : NormalizationResult::False;
else
return false;
return NormalizationResult::False;
}
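std::all_of and std::any_of no longer fit here: with ternary results, the first non-True (for unions) or non-False (for intersections) outcome, including HitLimits, has to be surfaced to the caller rather than flattened to a bool, which is why both branches became explicit loops. A generic helper expressing the union case might look like this (hypothetical, not in the source):

template<typename Iter, typename F>
NormalizationResult allTrue(Iter first, Iter last, F f)
{
    for (; first != last; ++first)
    {
        NormalizationResult r = f(*first);
        if (r != NormalizationResult::True)
            return r; // surfaces False and HitLimits alike
    }
    return NormalizationResult::True;
}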
void diagnoseMissingTableKey(UnknownProperty* utk, TypeErrorData& data) const

View File

@ -1124,7 +1124,8 @@ TypeFamilyReductionResult<TypeId> eqFamilyFn(
mmType = findMetatableEntry(ctx->builtins, dummy, rhsTy, "__eq", Location{});
// if neither type has a metatable entry for `__eq`, then we'll check for inhabitance of the intersection!
if (!mmType && ctx->normalizer->isIntersectionInhabited(lhsTy, rhsTy))
NormalizationResult intersectInhabited = ctx->normalizer->isIntersectionInhabited(lhsTy, rhsTy);
if (!mmType && intersectInhabited == NormalizationResult::True)
return {ctx->builtins->booleanType, false, {}, {}}; // if it's inhabited, everything is okay!
else if (!mmType)
return {std::nullopt, true, {}, {}}; // if it's not, then this family is irreducible!

View File

@ -2653,7 +2653,18 @@ static std::optional<bool> areEqComparable(NotNull<TypeArena> arena, NotNull<Nor
if (!n)
return std::nullopt;
return normalizer->isInhabited(n);
switch (normalizer->isInhabited(n))
{
case NormalizationResult::HitLimits:
return std::nullopt;
case NormalizationResult::False:
return false;
case NormalizationResult::True:
return true;
}
// n.b. msvc can never figure this stuff out.
LUAU_UNREACHABLE();
}
TypeId TypeChecker::checkRelationalOperation(

View File

@ -732,7 +732,8 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
else if (log.get<NegationType>(superTy) || log.get<NegationType>(subTy))
tryUnifyNegations(subTy, superTy);
else if (checkInhabited && !normalizer->isInhabited(subTy))
// If the normalizer hits resource limits, we can't show that the type is uninhabited, so we should error.
else if (checkInhabited && normalizer->isInhabited(subTy) == NormalizationResult::False)
{
}
else
@ -2378,7 +2379,8 @@ void Unifier::tryUnifyScalarShape(TypeId subTy, TypeId superTy, bool reversed)
TypeId osubTy = subTy;
TypeId osuperTy = superTy;
if (checkInhabited && !normalizer->isInhabited(subTy))
// If the normalizer hits resource limits, we can't show that the type is uninhabited, so we should continue.
if (checkInhabited && normalizer->isInhabited(subTy) == NormalizationResult::False)
return;
if (reversed)

View File

@ -584,8 +584,11 @@ struct FreeTypeSearcher : TypeVisitor
}
}
DenseHashMap<TypeId, size_t> negativeTypes{0};
DenseHashMap<TypeId, size_t> positiveTypes{0};
// The keys in these maps are either TypeIds or TypePackIds. It's safe to
// mix them because we only use these pointers as unique keys. We never
// indirect them.
DenseHashMap<const void*, size_t> negativeTypes{0};
DenseHashMap<const void*, size_t> positiveTypes{0};
bool visit(TypeId ty) override
{
@ -673,6 +676,28 @@ struct FreeTypeSearcher : TypeVisitor
{
return false;
}
bool visit(TypePackId tp, const FreeTypePack& ftp) override
{
if (!subsumes(scope, ftp.scope))
return true;
switch (polarity)
{
case Positive:
positiveTypes[tp]++;
break;
case Negative:
negativeTypes[tp]++;
break;
case Both:
positiveTypes[tp]++;
negativeTypes[tp]++;
break;
}
return true;
}
};
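The widened key type is safe for exactly the reason the comment on these maps gives: both TypeId and TypePackId are opaque pointers that are hashed and compared but never dereferenced through the map. A toy version of the idea, with std::unordered_map standing in for DenseHashMap:

#include <unordered_map>

struct Type {};        // stand-ins for the real Type / TypePackVar
struct TypePackVar {};
using TypeId = const Type*;
using TypePackId = const TypePackVar*;

void recordPolarity(std::unordered_map<const void*, size_t>& positive, TypeId ty, TypePackId tp)
{
    // Both pointer kinds share one key space; only identity is used.
    positive[ty]++;
    positive[tp]++;
}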
struct MutatingGeneralizer : TypeOnceVisitor
@ -680,15 +705,15 @@ struct MutatingGeneralizer : TypeOnceVisitor
NotNull<BuiltinTypes> builtinTypes;
NotNull<Scope> scope;
DenseHashMap<TypeId, size_t> positiveTypes;
DenseHashMap<TypeId, size_t> negativeTypes;
DenseHashMap<const void*, size_t> positiveTypes;
DenseHashMap<const void*, size_t> negativeTypes;
std::vector<TypeId> generics;
std::vector<TypePackId> genericPacks;
bool isWithinFunction = false;
MutatingGeneralizer(NotNull<BuiltinTypes> builtinTypes, NotNull<Scope> scope, DenseHashMap<TypeId, size_t> positiveTypes,
DenseHashMap<TypeId, size_t> negativeTypes)
MutatingGeneralizer(NotNull<BuiltinTypes> builtinTypes, NotNull<Scope> scope, DenseHashMap<const void*, size_t> positiveTypes,
DenseHashMap<const void*, size_t> negativeTypes)
: TypeOnceVisitor(/* skipBoundTypes */ true)
, builtinTypes(builtinTypes)
, scope(scope)
@ -816,7 +841,7 @@ struct MutatingGeneralizer : TypeOnceVisitor
return false;
}
size_t getCount(const DenseHashMap<TypeId, size_t>& map, TypeId ty)
size_t getCount(const DenseHashMap<const void*, size_t>& map, const void* ty)
{
if (const size_t* count = map.find(ty))
return *count;
@ -849,9 +874,18 @@ struct MutatingGeneralizer : TypeOnceVisitor
if (!subsumes(scope, ftp.scope))
return true;
asMutable(tp)->ty.emplace<GenericTypePack>(scope);
tp = follow(tp);
genericPacks.push_back(tp);
const size_t positiveCount = getCount(positiveTypes, tp);
const size_t negativeCount = getCount(negativeTypes, tp);
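// A pack that occurs exactly once in the signature cannot relate two positions,
// so generalizing it would add a generic for nothing; bind it to the unknown pack instead.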
if (1 == positiveCount + negativeCount)
asMutable(tp)->ty.emplace<BoundTypePack>(builtinTypes->unknownTypePack);
else
{
asMutable(tp)->ty.emplace<GenericTypePack>(scope);
genericPacks.push_back(tp);
}
return true;
}

View File

@ -188,6 +188,12 @@ if(MSVC_IDE)
target_sources(Luau.VM PRIVATE tools/natvis/VM.natvis)
endif()
# On Windows and Android, threads are provided by the system; on Linux/Mac/iOS we use pthreads
add_library(osthreads INTERFACE)
if(CMAKE_SYSTEM_NAME MATCHES "Linux|Darwin|iOS")
target_link_libraries(osthreads INTERFACE "-lpthread")
endif ()
if(LUAU_BUILD_CLI)
target_compile_options(Luau.Repl.CLI PRIVATE ${LUAU_OPTIONS})
target_compile_options(Luau.Reduce.CLI PRIVATE ${LUAU_OPTIONS})
@ -200,13 +206,8 @@ if(LUAU_BUILD_CLI)
target_link_libraries(Luau.Repl.CLI PRIVATE Luau.Compiler Luau.Config Luau.CodeGen Luau.VM Luau.CLI.lib isocline)
if(UNIX)
find_library(LIBPTHREAD pthread)
if (LIBPTHREAD)
target_link_libraries(Luau.Repl.CLI PRIVATE pthread)
target_link_libraries(Luau.Analyze.CLI PRIVATE pthread)
endif()
endif()
target_link_libraries(Luau.Repl.CLI PRIVATE osthreads)
target_link_libraries(Luau.Analyze.CLI PRIVATE osthreads)
target_link_libraries(Luau.Analyze.CLI PRIVATE Luau.Analysis Luau.CLI.lib)
@ -230,18 +231,17 @@ if(LUAU_BUILD_TESTS)
target_compile_options(Luau.Conformance PRIVATE ${LUAU_OPTIONS})
target_include_directories(Luau.Conformance PRIVATE extern)
target_link_libraries(Luau.Conformance PRIVATE Luau.Analysis Luau.Compiler Luau.CodeGen Luau.VM)
file(REAL_PATH "tests/conformance" LUAU_CONFORMANCE_SOURCE_DIR)
if(CMAKE_SYSTEM_NAME MATCHES "Android|iOS")
set(LUAU_CONFORMANCE_SOURCE_DIR "Client/Luau/tests/conformance")
else ()
file(REAL_PATH "tests/conformance" LUAU_CONFORMANCE_SOURCE_DIR)
endif ()
target_compile_definitions(Luau.Conformance PRIVATE LUAU_CONFORMANCE_SOURCE_DIR="${LUAU_CONFORMANCE_SOURCE_DIR}")
target_compile_options(Luau.CLI.Test PRIVATE ${LUAU_OPTIONS})
target_include_directories(Luau.CLI.Test PRIVATE extern CLI)
target_link_libraries(Luau.CLI.Test PRIVATE Luau.Compiler Luau.Config Luau.CodeGen Luau.VM Luau.CLI.lib isocline)
if(UNIX)
find_library(LIBPTHREAD pthread)
if (LIBPTHREAD)
target_link_libraries(Luau.CLI.Test PRIVATE pthread)
endif()
endif()
target_link_libraries(Luau.CLI.Test PRIVATE osthreads)
endif()

View File

@ -21,6 +21,13 @@ struct NativeProtoExecDataHeader
// when the NativeProto is bound to the NativeModule via assignToModule().
NativeModule* nativeModule = nullptr;
// We store the native code offset until the code is allocated in executable
// pages, after which point we store the actual address.
const uint8_t* entryOffsetOrAddress = nullptr;
// The bytecode id of the proto
uint32_t bytecodeId = 0;
// The number of bytecode instructions in the proto. This is the number of
// elements in the instruction offsets array following this header.
uint32_t bytecodeInstructionCount = 0;

View File

@ -28,68 +28,11 @@ namespace CodeGen
using ModuleId = std::array<uint8_t, 16>;
class NativeProto;
struct CodeAllocator;
class NativeModule;
class NativeModuleRef;
class SharedCodeAllocator;
// A NativeProto represents a single natively-compiled function. A NativeProto
// should be constructed for each function as it is compiled. When compilation
// of all of the functions in a module is complete, the set of NativeProtos
// representing those functions should be passed to the NativeModule constructor.
class NativeProto
{
public:
NativeProto(uint32_t bytecodeId, NativeProtoExecDataPtr nativeExecData);
NativeProto(const NativeProto&) = delete;
NativeProto(NativeProto&&) noexcept = default;
NativeProto& operator=(const NativeProto&) = delete;
NativeProto& operator=(NativeProto&&) noexcept = default;
// This should be called to initialize the NativeProto state prior to
// passing the NativeProto to the NativeModule constructor.
void setEntryOffset(uint32_t entryOffset) noexcept;
// This will be called by the NativeModule constructor to bind this
// NativeProto to the NativeModule.
void assignToModule(NativeModule* nativeModule) noexcept;
// Gets the bytecode id for the Proto that was compiled into this NativeProto
[[nodiscard]] uint32_t getBytecodeId() const noexcept;
// Gets the address of the entry point for this function
[[nodiscard]] const uint8_t* getEntryAddress() const noexcept;
// Gets the native exec data for this function
[[nodiscard]] const NativeProtoExecDataHeader& getNativeExecDataHeader() const noexcept;
// The NativeProto stores an array that maps bytecode instruction indices to
// native code offsets relative to the native entry point. When compilation
// and code allocation is complete, we store a pointer to this data in the
// Luau VM Proto object for this function. When we do this, we must acquire
// a reference to the NativeModule that owns this NativeProto. The
// getOwning-version of this function acquires that reference and gets the
// instruction offsets pointer. When the Proto object is destroyed, this
// pointer must be passed to releaseOwningPointerToInstructionOffsets to
// release the reference.
//
// (This structure is designed to make it much more difficult to "forget"
// to acquire a reference.)
[[nodiscard]] const uint32_t* getNonOwningPointerToInstructionOffsets() const noexcept;
[[nodiscard]] const uint32_t* getOwningPointerToInstructionOffsets() const noexcept;
static void releaseOwningPointerToInstructionOffsets(const uint32_t* ownedInstructionOffsets) noexcept;
private:
uint32_t bytecodeId = 0;
// We store the native code offset until assignToModule() is called, after
// which point we store the actual address.
const uint8_t* entryOffsetOrAddress = nullptr;
NativeProtoExecDataPtr nativeExecData = {};
};
// A NativeModule represents a single natively-compiled module (script). It is
// the unit of shared ownership and is thus where the reference count is
@ -98,8 +41,8 @@ private:
class NativeModule
{
public:
NativeModule(
SharedCodeAllocator* allocator, const ModuleId& moduleId, const uint8_t* moduleBaseAddress, std::vector<NativeProto> nativeProtos) noexcept;
NativeModule(SharedCodeAllocator* allocator, const ModuleId& moduleId, const uint8_t* moduleBaseAddress,
std::vector<NativeProtoExecDataPtr> nativeProtos) noexcept;
NativeModule(const NativeModule&) = delete;
NativeModule(NativeModule&&) = delete;
@ -112,6 +55,7 @@ public:
~NativeModule() noexcept;
size_t addRef() const noexcept;
size_t addRefs(size_t count) const noexcept;
size_t release() const noexcept;
[[nodiscard]] size_t getRefcount() const noexcept;
@ -120,7 +64,9 @@ public:
// Attempts to find the NativeProto with the given bytecode id. If no
// NativeProto for that bytecode id exists, a null pointer is returned.
[[nodiscard]] const NativeProto* tryGetNativeProto(uint32_t bytecodeId) const noexcept;
[[nodiscard]] const uint32_t* tryGetNativeProto(uint32_t bytecodeId) const noexcept;
[[nodiscard]] const std::vector<NativeProtoExecDataPtr>& getNativeProtos() const noexcept;
private:
mutable std::atomic<size_t> refcount = 0;
@ -129,7 +75,7 @@ private:
ModuleId moduleId = {};
const uint8_t* moduleBaseAddress = nullptr;
std::vector<NativeProto> nativeProtos = {};
std::vector<NativeProtoExecDataPtr> nativeProtos = {};
};
// A NativeModuleRef is an owning reference to a NativeModule. (Note: We do
@ -164,7 +110,7 @@ private:
class SharedCodeAllocator
{
public:
SharedCodeAllocator() = default;
SharedCodeAllocator(CodeAllocator* codeAllocator) noexcept;
SharedCodeAllocator(const SharedCodeAllocator&) = delete;
SharedCodeAllocator(SharedCodeAllocator&&) = delete;
@ -180,9 +126,11 @@ public:
// If we have a NativeModule for the given ModuleId, an owning reference to
// it is returned. Otherwise, a new NativeModule is created for that ModuleId
// using the provided NativeProtos, data, and code (space is allocated for the
// data and code such that it can be executed).
NativeModuleRef getOrInsertNativeModule(
const ModuleId& moduleId, std::vector<NativeProto> nativeProtos, const std::vector<uint8_t>& data, const std::vector<uint8_t>& code);
// data and code such that it can be executed). Like std::map::insert, the
// bool result is true if a new module was created; false if an existing
// module is being returned.
std::pair<NativeModuleRef, bool> getOrInsertNativeModule(const ModuleId& moduleId, std::vector<NativeProtoExecDataPtr> nativeProtos,
const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize);
// If a NativeModule exists for the given ModuleId and that NativeModule
// is no longer referenced, the NativeModule is destroyed. This should
@ -200,10 +148,9 @@ private:
mutable std::mutex mutex;
// Will be removed when backend allocator is integrated
const uint8_t* baseAddress = reinterpret_cast<const uint8_t*>(0x0f00'0000);
std::unordered_map<ModuleId, std::unique_ptr<NativeModule>, ModuleIdHash, std::equal_to<>> nativeModules;
CodeAllocator* codeAllocator = nullptr;
};
} // namespace CodeGen

View File

@ -67,8 +67,8 @@ namespace CodeGen
static const Instruction kCodeEntryInsn = LOP_NATIVECALL;
static void* gPerfLogContext = nullptr;
static PerfLogFn gPerfLogFn = nullptr;
void* gPerfLogContext = nullptr;
PerfLogFn gPerfLogFn = nullptr;
struct OldNativeProto
{

View File

@ -2,6 +2,7 @@
#include "CodeGenContext.h"
#include "CodeGenA64.h"
#include "CodeGenLower.h"
#include "CodeGenX64.h"
#include "Luau/CodeBlockUnwind.h"
@ -9,7 +10,8 @@
#include "Luau/UnwindBuilderDwarf2.h"
#include "Luau/UnwindBuilderWin.h"
LUAU_FASTFLAG(LuauCodegenHeapSizeReport)
#include "lapi.h"
LUAU_FASTINT(LuauCodeGenBlockSize)
LUAU_FASTINT(LuauCodeGenMaxTotalSize)
@ -19,10 +21,99 @@ namespace Luau
namespace CodeGen
{
static const Instruction kCodeEntryInsn = LOP_NATIVECALL;
// From CodeGen.cpp
extern void* gPerfLogContext;
extern PerfLogFn gPerfLogFn;
unsigned int getCpuFeaturesA64();
static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size)
{
CODEGEN_ASSERT(p->source);
const char* source = getstr(p->source);
source = (source[0] == '=' || source[0] == '@') ? source + 1 : "[string]";
char name[256];
snprintf(name, sizeof(name), "<luau> %s:%d %s", source, p->linedefined, p->debugname ? getstr(p->debugname) : "");
if (gPerfLogFn)
gPerfLogFn(gPerfLogContext, addr, size, name);
}
static void logPerfFunctions(
const std::vector<Proto*>& moduleProtos, const uint8_t* nativeModuleBaseAddress, const std::vector<NativeProtoExecDataPtr>& nativeProtos)
{
if (gPerfLogFn == nullptr)
return;
if (nativeProtos.size() > 0)
gPerfLogFn(gPerfLogContext, uintptr_t(nativeModuleBaseAddress),
unsigned(getNativeProtoExecDataHeader(nativeProtos[0].get()).entryOffsetOrAddress - nativeModuleBaseAddress), "<luau helpers>");
auto protoIt = moduleProtos.begin();
for (const NativeProtoExecDataPtr& nativeProto : nativeProtos)
{
const NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeProto.get());
while (protoIt != moduleProtos.end() && uint32_t((**protoIt).bytecodeid) != header.bytecodeId)
{
++protoIt;
}
CODEGEN_ASSERT(protoIt != moduleProtos.end());
logPerfFunction(*protoIt, uintptr_t(header.entryOffsetOrAddress), uint32_t(header.nativeCodeSize));
}
}
// If Release is true, the native proto will be removed from the vector and
// ownership will be assigned to the Proto object (for use with the
// StandaloneCodeContext). If Release is false, the native proto will not be
// removed from the vector (for use with the SharedCodeContext).
template<bool Release, typename NativeProtosVector>
static size_t bindNativeProtos(const std::vector<Proto*>& moduleProtos, NativeProtosVector& nativeProtos)
{
size_t protosBound = 0;
auto protoIt = moduleProtos.begin();
for (auto& nativeProto : nativeProtos)
{
const NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeProto.get());
while (protoIt != moduleProtos.end() && uint32_t((**protoIt).bytecodeid) != header.bytecodeId)
{
++protoIt;
}
CODEGEN_ASSERT(protoIt != moduleProtos.end());
// In the Release case, the NativeProtoExecData is handed over to the VM and
// will be destroyed via onDestroyFunction.
Proto* proto = *protoIt;
if constexpr (Release)
{
proto->execdata = nativeProto.release();
}
else
{
proto->execdata = nativeProto.get();
}
proto->exectarget = reinterpret_cast<uintptr_t>(header.entryOffsetOrAddress);
proto->codeentry = &kCodeEntryInsn;
++protosBound;
}
return protosBound;
}
BaseCodeGenContext::BaseCodeGenContext(size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
: codeAllocator{blockSize, maxTotalSize, allocationCallback, allocationCallbackContext}
{
@ -64,7 +155,37 @@ StandaloneCodeGenContext::StandaloneCodeGenContext(
{
}
void StandaloneCodeGenContext::compileOrBindModule(const ModuleId&, lua_State*, int, unsigned int, CompilationStats*) {}
[[nodiscard]] std::optional<CodeGenCompilationResult> StandaloneCodeGenContext::tryBindExistingModule(const ModuleId&, const std::vector<Proto*>&)
{
// The StandaloneCodeGenContext does not support sharing of native code
return {};
}
[[nodiscard]] CodeGenCompilationResult StandaloneCodeGenContext::bindModule(const ModuleId&, const std::vector<Proto*>& moduleProtos,
std::vector<NativeProtoExecDataPtr> nativeProtos, const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize)
{
uint8_t* nativeData = nullptr;
size_t sizeNativeData = 0;
uint8_t* codeStart = nullptr;
if (!codeAllocator.allocate(data, int(dataSize), code, int(codeSize), nativeData, sizeNativeData, codeStart))
{
return CodeGenCompilationResult::AllocationFailed;
}
// Relocate the entry offsets to their final executable addresses:
for (const NativeProtoExecDataPtr& nativeProto : nativeProtos)
{
NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeProto.get());
header.entryOffsetOrAddress = codeStart + reinterpret_cast<uintptr_t>(header.entryOffsetOrAddress);
}
logPerfFunctions(moduleProtos, codeStart, nativeProtos);
bindNativeProtos<true>(moduleProtos, nativeProtos);
return CodeGenCompilationResult::Success;
}
void StandaloneCodeGenContext::onCloseState() noexcept
{
@ -82,10 +203,44 @@ void StandaloneCodeGenContext::onDestroyFunction(void* execdata) noexcept
SharedCodeGenContext::SharedCodeGenContext(
size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext)
: BaseCodeGenContext{blockSize, maxTotalSize, allocationCallback, allocationCallbackContext}
, sharedAllocator{&codeAllocator}
{
}
void SharedCodeGenContext::compileOrBindModule(const ModuleId&, lua_State*, int, unsigned int, CompilationStats*) {}
[[nodiscard]] std::optional<CodeGenCompilationResult> SharedCodeGenContext::tryBindExistingModule(
const ModuleId& moduleId, const std::vector<Proto*>& moduleProtos)
{
NativeModuleRef nativeModule = sharedAllocator.tryGetNativeModule(moduleId);
if (nativeModule.empty())
{
return {};
}
// Bind the native protos and acquire an owning reference for each:
nativeModule->addRefs(bindNativeProtos<false>(moduleProtos, nativeModule->getNativeProtos()));
return CodeGenCompilationResult::Success;
}
[[nodiscard]] CodeGenCompilationResult SharedCodeGenContext::bindModule(const ModuleId& moduleId, const std::vector<Proto*>& moduleProtos,
std::vector<NativeProtoExecDataPtr> nativeProtos, const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize)
{
const std::pair<NativeModuleRef, bool> insertionResult =
sharedAllocator.getOrInsertNativeModule(moduleId, std::move(nativeProtos), data, dataSize, code, codeSize);
// If we did not get a NativeModule back, allocation failed:
if (insertionResult.first.empty())
return CodeGenCompilationResult::AllocationFailed;
// If we allocated a new module, log the function code ranges for perf:
if (insertionResult.second)
logPerfFunctions(moduleProtos, insertionResult.first->getModuleBaseAddress(), insertionResult.first->getNativeProtos());
// Bind the native protos and acquire an owning reference for each:
insertionResult.first->addRefs(bindNativeProtos<false>(moduleProtos, insertionResult.first->getNativeProtos()));
return CodeGenCompilationResult::Success;
}
void SharedCodeGenContext::onCloseState() noexcept
{
@ -165,13 +320,16 @@ static int onEnter(lua_State* L, Proto* proto)
return GateFn(codeGenContext->context.gateEntry)(L, proto, target, &codeGenContext->context);
}
static int onEnterDisabled(lua_State* L, Proto* proto)
{
return 1;
}
// Defined in CodeGen.cpp
void onDisable(lua_State* L, Proto* proto);
static size_t getMemorySize(lua_State* L, Proto* proto)
{
CODEGEN_ASSERT(FFlag::LuauCodegenHeapSizeReport);
const NativeProtoExecDataHeader& execDataHeader = getNativeProtoExecDataHeader(static_cast<const uint32_t*>(proto->execdata));
const size_t execDataSize = sizeof(NativeProtoExecDataHeader) + execDataHeader.bytecodeInstructionCount * sizeof(Instruction);
@ -191,9 +349,7 @@ static void initializeExecutionCallbacks(lua_State* L, BaseCodeGenContext* codeG
ecb->destroy = onDestroyFunction;
ecb->enter = onEnter;
ecb->disable = onDisable;
if (FFlag::LuauCodegenHeapSizeReport)
ecb->getmemorysize = getMemorySize;
}
void create_NEW(lua_State* L)
@ -222,5 +378,186 @@ void create_NEW(lua_State* L, SharedCodeGenContext* codeGenContext)
initializeExecutionCallbacks(L, codeGenContext);
}
[[nodiscard]] static NativeProtoExecDataPtr createNativeProtoExecData(Proto* proto, const IrBuilder& ir)
{
NativeProtoExecDataPtr nativeExecData = createNativeProtoExecData(proto->sizecode);
uint32_t instTarget = ir.function.entryLocation;
for (int i = 0; i < proto->sizecode; ++i)
{
CODEGEN_ASSERT(ir.function.bcMapping[i].asmLocation >= instTarget);
nativeExecData[i] = ir.function.bcMapping[i].asmLocation - instTarget;
}
// Set first instruction offset to 0 so that entering this function still
// executes any generated entry code.
nativeExecData[0] = 0;
NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeExecData.get());
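// entryOffsetOrAddress holds a module-relative offset at this point; it is
// relocated to an absolute address once executable memory has been allocated
// (see bindModule above and the NativeModule constructor below).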
header.entryOffsetOrAddress = reinterpret_cast<const uint8_t*>(static_cast<uintptr_t>(instTarget));
header.bytecodeId = uint32_t(proto->bytecodeid);
header.bytecodeInstructionCount = proto->sizecode;
return nativeExecData;
}
template<typename AssemblyBuilder>
[[nodiscard]] static NativeProtoExecDataPtr createNativeFunction(
AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto, uint32_t& totalIrInstCount, CodeGenCompilationResult& result)
{
IrBuilder ir;
ir.buildFunctionIr(proto);
unsigned instCount = unsigned(ir.function.instructions.size());
if (totalIrInstCount + instCount >= unsigned(FInt::CodegenHeuristicsInstructionLimit.value))
{
result = CodeGenCompilationResult::CodeGenOverflowInstructionLimit;
return {};
}
totalIrInstCount += instCount;
if (!lowerFunction(ir, build, helpers, proto, {}, /* stats */ nullptr, result))
{
return {};
}
return createNativeProtoExecData(proto, ir);
}
CompilationResult compile_NEW(const ModuleId& moduleId, lua_State* L, int idx, unsigned int flags, CompilationStats* stats)
{
CODEGEN_ASSERT(lua_isLfunction(L, idx));
const TValue* func = luaA_toobject(L, idx);
Proto* root = clvalue(func)->l.p;
if ((flags & CodeGen_OnlyNativeModules) != 0 && (root->flags & LPF_NATIVE_MODULE) == 0)
return CompilationResult{CodeGenCompilationResult::NotNativeModule};
BaseCodeGenContext* codeGenContext = getCodeGenContext(L);
if (codeGenContext == nullptr)
return CompilationResult{CodeGenCompilationResult::CodeGenNotInitialized};
std::vector<Proto*> protos;
gatherFunctions(protos, root, flags);
// Skip protos that have been compiled during previous invocations of CodeGen::compile
protos.erase(std::remove_if(protos.begin(), protos.end(),
[](Proto* p) {
return p == nullptr || p->execdata != nullptr;
}),
protos.end());
if (protos.empty())
return CompilationResult{CodeGenCompilationResult::NothingToCompile};
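// A shared code context may already hold compiled code for this module (e.g.
// compiled on behalf of another VM); if so, binding it is all that is needed.
// The standalone context always returns nullopt here.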
if (std::optional<CodeGenCompilationResult> existingModuleBindResult = codeGenContext->tryBindExistingModule(moduleId, protos))
return CompilationResult{*existingModuleBindResult};
if (stats != nullptr)
stats->functionsTotal = uint32_t(protos.size());
#if defined(__aarch64__)
static unsigned int cpuFeatures = getCpuFeaturesA64();
A64::AssemblyBuilderA64 build(/* logText= */ false, cpuFeatures);
#else
X64::AssemblyBuilderX64 build(/* logText= */ false);
#endif
ModuleHelpers helpers;
#if defined(__aarch64__)
A64::assembleHelpers(build, helpers);
#else
X64::assembleHelpers(build, helpers);
#endif
CompilationResult compilationResult;
std::vector<NativeProtoExecDataPtr> nativeProtos;
nativeProtos.reserve(protos.size());
uint32_t totalIrInstCount = 0;
for (size_t i = 0; i != protos.size(); ++i)
{
CodeGenCompilationResult protoResult = CodeGenCompilationResult::Success;
NativeProtoExecDataPtr nativeExecData = createNativeFunction(build, helpers, protos[i], totalIrInstCount, protoResult);
if (nativeExecData != nullptr)
{
nativeProtos.push_back(std::move(nativeExecData));
}
else
{
compilationResult.protoFailures.push_back(
{protoResult, protos[i]->debugname ? getstr(protos[i]->debugname) : "", protos[i]->linedefined});
}
}
// Very large modules might result in overflowing a jump offset; in this
// case we currently abandon the entire module
if (!build.finalize())
{
compilationResult.result = CodeGenCompilationResult::CodeGenAssemblerFinalizationFailure;
return compilationResult;
}
// If no functions were assembled, we don't need to allocate/copy executable pages for helpers
if (nativeProtos.empty())
return compilationResult;
if (stats != nullptr)
{
for (const NativeProtoExecDataPtr& nativeExecData : nativeProtos)
{
NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeExecData.get());
stats->bytecodeSizeBytes += header.bytecodeInstructionCount * sizeof(Instruction);
// Account for the native -> bytecode instruction offsets mapping:
stats->nativeMetadataSizeBytes += header.bytecodeInstructionCount * sizeof(uint32_t);
}
stats->functionsCompiled += uint32_t(nativeProtos.size());
stats->nativeCodeSizeBytes += build.code.size();
stats->nativeDataSizeBytes += build.data.size();
}
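// Compute each function's native code size from the distance between
// consecutive entry offsets; the last function extends to the end of the
// generated code.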
for (size_t i = 0; i < nativeProtos.size(); ++i)
{
NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeProtos[i].get());
uint32_t begin = uint32_t(reinterpret_cast<uintptr_t>(header.entryOffsetOrAddress));
uint32_t end = i + 1 < nativeProtos.size() ? uint32_t(uintptr_t(getNativeProtoExecDataHeader(nativeProtos[i + 1].get()).entryOffsetOrAddress))
: uint32_t(build.code.size());
CODEGEN_ASSERT(begin < end);
header.nativeCodeSize = end - begin;
}
const CodeGenCompilationResult bindResult =
codeGenContext->bindModule(moduleId, protos, std::move(nativeProtos), reinterpret_cast<const uint8_t*>(build.data.data()), build.data.size(),
reinterpret_cast<const uint8_t*>(build.code.data()), build.code.size());
if (bindResult != CodeGenCompilationResult::Success)
compilationResult.result = bindResult;
return compilationResult;
}
[[nodiscard]] bool isNativeExecutionEnabled_NEW(lua_State* L)
{
return getCodeGenContext(L) != nullptr && L->global->ecb.enter == onEnter;
}
void setNativeExecutionEnabled_NEW(lua_State* L, bool enabled)
{
if (getCodeGenContext(L) != nullptr)
L->global->ecb.enter = enabled ? onEnter : onEnterDisabled;
}
} // namespace CodeGen
} // namespace Luau

View File

@ -6,6 +6,7 @@
#include "NativeState.h"
#include <memory>
#include <optional>
#include <stdint.h>
namespace Luau
@ -27,7 +28,11 @@ public:
[[nodiscard]] bool initHeaderFunctions();
virtual void compileOrBindModule(const ModuleId& moduleId, lua_State* L, int idx, unsigned int flags, CompilationStats* stats) = 0;
[[nodiscard]] virtual std::optional<CodeGenCompilationResult> tryBindExistingModule(
const ModuleId& moduleId, const std::vector<Proto*>& moduleProtos) = 0;
[[nodiscard]] virtual CodeGenCompilationResult bindModule(const ModuleId& moduleId, const std::vector<Proto*>& moduleProtos,
std::vector<NativeProtoExecDataPtr> nativeExecDatas, const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize) = 0;
virtual void onCloseState() noexcept = 0;
virtual void onDestroyFunction(void* execdata) noexcept = 0;
@ -46,7 +51,11 @@ class StandaloneCodeGenContext final : public BaseCodeGenContext
public:
StandaloneCodeGenContext(size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext);
virtual void compileOrBindModule(const ModuleId& moduleId, lua_State* L, int idx, unsigned int flags, CompilationStats* stats) override;
[[nodiscard]] virtual std::optional<CodeGenCompilationResult> tryBindExistingModule(
const ModuleId& moduleId, const std::vector<Proto*>& moduleProtos) override;
[[nodiscard]] virtual CodeGenCompilationResult bindModule(const ModuleId& moduleId, const std::vector<Proto*>& moduleProtos,
std::vector<NativeProtoExecDataPtr> nativeExecDatas, const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize) override;
virtual void onCloseState() noexcept override;
virtual void onDestroyFunction(void* execdata) noexcept override;
@ -59,7 +68,11 @@ class SharedCodeGenContext final : public BaseCodeGenContext
public:
SharedCodeGenContext(size_t blockSize, size_t maxTotalSize, AllocationCallback* allocationCallback, void* allocationCallbackContext);
virtual void compileOrBindModule(const ModuleId& moduleId, lua_State* L, int idx, unsigned int flags, CompilationStats* stats) override;
[[nodiscard]] virtual std::optional<CodeGenCompilationResult> tryBindExistingModule(
const ModuleId& moduleId, const std::vector<Proto*>& moduleProtos) override;
[[nodiscard]] virtual CodeGenCompilationResult bindModule(const ModuleId& moduleId, const std::vector<Proto*>& moduleProtos,
std::vector<NativeProtoExecDataPtr> nativeExecDatas, const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize) override;
virtual void onCloseState() noexcept override;
virtual void onDestroyFunction(void* execdata) noexcept override;
@ -110,5 +123,13 @@ void create_NEW(lua_State* L, size_t blockSize, size_t maxTotalSize, AllocationC
// destroyed via lua_close.
void create_NEW(lua_State* L, SharedCodeGenContext* codeGenContext);
CompilationResult compile_NEW(const ModuleId& moduleId, lua_State* L, int idx, unsigned int flags, CompilationStats* stats);
// Returns true if native execution is currently enabled for this VM
[[nodiscard]] bool isNativeExecutionEnabled_NEW(lua_State* L);
// Enables or disables native execution for this VM
void setNativeExecutionEnabled_NEW(lua_State* L, bool enabled);
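// A minimal usage sketch (illustrative only; assumes codegen is supported on
// the target and that a compiled chunk has been loaded):
//   lua_State* L = luaL_newstate();
//   Luau::CodeGen::create_NEW(L);
//   // ...luau_load + compile_NEW...
//   Luau::CodeGen::setNativeExecutionEnabled_NEW(L, false); // fall back to the interpreter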
} // namespace CodeGen
} // namespace Luau

View File

@ -15,8 +15,6 @@
#include "lstate.h"
#include "lgc.h"
LUAU_FASTFLAGVARIABLE(LuauCodegenVectorOptAnd, false)
LUAU_FASTFLAGVARIABLE(LuauCodegenSmallerUnm, false)
LUAU_FASTFLAGVARIABLE(LuauCodegenCheckTruthyFormB, false)
namespace Luau
@ -543,24 +541,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a});
if (FFlag::LuauCodegenSmallerUnm)
{
build.vxorpd(inst.regX64, regOp(inst.a), build.f64(-0.0));
}
else
{
RegisterX64 src = regOp(inst.a);
if (inst.regX64 == src)
{
build.vxorpd(inst.regX64, inst.regX64, build.f64(-0.0));
}
else
{
build.vmovsd(inst.regX64, src, src);
build.vxorpd(inst.regX64, inst.regX64, build.f64(-0.0));
}
}
break;
}
case IrCmd::FLOOR_NUM:
@ -611,128 +592,59 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});
if (FFlag::LuauCodegenVectorOptAnd)
{
ScopedRegX64 tmp1{regs};
ScopedRegX64 tmp2{regs};
RegisterX64 tmpa = vecOp(inst.a, tmp1);
RegisterX64 tmpb = (inst.a == inst.b) ? tmpa : vecOp(inst.b, tmp2);
build.vaddps(inst.regX64, tmpa, tmpb);
}
else
{
ScopedRegX64 tmp1{regs, SizeX64::xmmword};
ScopedRegX64 tmp2{regs, SizeX64::xmmword};
// Fourth component is the tag number which is interpreted as a denormal and has to be filtered out
build.vandps(tmp1.reg, regOp(inst.a), vectorAndMaskOp());
build.vandps(tmp2.reg, regOp(inst.b), vectorAndMaskOp());
build.vaddps(inst.regX64, tmp1.reg, tmp2.reg);
}
break;
}
case IrCmd::SUB_VEC:
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});
if (FFlag::LuauCodegenVectorOptAnd)
{
ScopedRegX64 tmp1{regs};
ScopedRegX64 tmp2{regs};
RegisterX64 tmpa = vecOp(inst.a, tmp1);
RegisterX64 tmpb = (inst.a == inst.b) ? tmpa : vecOp(inst.b, tmp2);
build.vsubps(inst.regX64, tmpa, tmpb);
}
else
{
ScopedRegX64 tmp1{regs, SizeX64::xmmword};
ScopedRegX64 tmp2{regs, SizeX64::xmmword};
// Fourth component is the tag number which is interpreted as a denormal and has to be filtered out
build.vandps(tmp1.reg, regOp(inst.a), vectorAndMaskOp());
build.vandps(tmp2.reg, regOp(inst.b), vectorAndMaskOp());
build.vsubps(inst.regX64, tmp1.reg, tmp2.reg);
}
break;
}
case IrCmd::MUL_VEC:
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});
if (FFlag::LuauCodegenVectorOptAnd)
{
ScopedRegX64 tmp1{regs};
ScopedRegX64 tmp2{regs};
RegisterX64 tmpa = vecOp(inst.a, tmp1);
RegisterX64 tmpb = (inst.a == inst.b) ? tmpa : vecOp(inst.b, tmp2);
build.vmulps(inst.regX64, tmpa, tmpb);
}
else
{
ScopedRegX64 tmp1{regs, SizeX64::xmmword};
ScopedRegX64 tmp2{regs, SizeX64::xmmword};
// Fourth component is the tag number which is interpreted as a denormal and has to be filtered out
build.vandps(tmp1.reg, regOp(inst.a), vectorAndMaskOp());
build.vandps(tmp2.reg, regOp(inst.b), vectorAndMaskOp());
build.vmulps(inst.regX64, tmp1.reg, tmp2.reg);
}
break;
}
case IrCmd::DIV_VEC:
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a, inst.b});
if (FFlag::LuauCodegenVectorOptAnd)
{
ScopedRegX64 tmp1{regs};
ScopedRegX64 tmp2{regs};
RegisterX64 tmpa = vecOp(inst.a, tmp1);
RegisterX64 tmpb = (inst.a == inst.b) ? tmpa : vecOp(inst.b, tmp2);
build.vdivps(inst.regX64, tmpa, tmpb);
}
else
{
ScopedRegX64 tmp1{regs, SizeX64::xmmword};
ScopedRegX64 tmp2{regs, SizeX64::xmmword};
// Fourth component is the tag number which is interpreted as a denormal and has to be filtered out
build.vandps(tmp1.reg, regOp(inst.a), vectorAndMaskOp());
build.vandps(tmp2.reg, regOp(inst.b), vectorAndMaskOp());
build.vdivps(inst.regX64, tmp1.reg, tmp2.reg);
}
break;
}
case IrCmd::UNM_VEC:
{
inst.regX64 = regs.allocRegOrReuse(SizeX64::xmmword, index, {inst.a});
if (FFlag::LuauCodegenSmallerUnm)
{
build.vxorpd(inst.regX64, regOp(inst.a), build.f32x4(-0.0, -0.0, -0.0, -0.0));
}
else
{
RegisterX64 src = regOp(inst.a);
if (inst.regX64 == src)
{
build.vxorpd(inst.regX64, inst.regX64, build.f32x4(-0.0, -0.0, -0.0, -0.0));
}
else
{
build.vmovsd(inst.regX64, src, src);
build.vxorpd(inst.regX64, inst.regX64, build.f32x4(-0.0, -0.0, -0.0, -0.0));
}
}
break;
}
case IrCmd::NOT_ANY:
@ -2293,17 +2205,15 @@ OperandX64 IrLoweringX64::bufferAddrOp(IrOp bufferOp, IrOp indexOp)
RegisterX64 IrLoweringX64::vecOp(IrOp op, ScopedRegX64& tmp)
{
if (FFlag::LuauCodegenVectorOptAnd)
{
IrInst source = function.instOp(op);
CODEGEN_ASSERT(source.cmd != IrCmd::SUBSTITUTE); // we don't process substitutions
// source that comes from memory or from tag instruction has .w = TVECTOR, which is denormal
// to avoid performance degradation on some CPUs we mask this component to produce zero
// otherwise we conservatively assume the vector is a result of a well formed math op so .w is a normal number or zero
if (source.cmd != IrCmd::LOAD_TVALUE && source.cmd != IrCmd::TAG_VECTOR)
return regOp(op);
}
tmp.alloc(SizeX64::xmmword);
build.vandps(tmp.reg, regOp(op), vectorAndMaskOp());
return tmp.reg;

View File

@ -17,7 +17,6 @@
LUAU_FASTINTVARIABLE(LuauCodeGenMinLinearBlockPath, 3)
LUAU_FASTINTVARIABLE(LuauCodeGenReuseSlotLimit, 64)
LUAU_FASTFLAGVARIABLE(DebugLuauAbortingChecks, false)
LUAU_DYNAMIC_FASTFLAGVARIABLE(LuauCodeGenCoverForgprepEffect, false)
LUAU_FASTFLAG(LuauCodegenRemoveDeadStores4)
LUAU_FASTFLAG(LuauCodegenLoadTVTag)
LUAU_FASTFLAGVARIABLE(LuauCodegenInferNumTag, false)
@ -1462,9 +1461,7 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
state.invalidate(IrOp{inst.b.kind, vmRegOp(inst.b) + 0u});
state.invalidate(IrOp{inst.b.kind, vmRegOp(inst.b) + 1u});
state.invalidate(IrOp{inst.b.kind, vmRegOp(inst.b) + 2u});
if (DFFlag::LuauCodeGenCoverForgprepEffect)
state.invalidateUserCall();
break;
}
}

View File

@ -1,6 +1,8 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/SharedCodeAllocator.h"
#include "Luau/CodeAllocator.h"
#include <algorithm>
#include <string_view>
#include <utility>
@ -11,85 +13,34 @@ namespace CodeGen
{
NativeProto::NativeProto(uint32_t bytecodeId, NativeProtoExecDataPtr nativeExecData)
: bytecodeId{bytecodeId}
, nativeExecData{std::move(nativeExecData)}
{
}
void NativeProto::setEntryOffset(uint32_t entryOffset) noexcept
{
entryOffsetOrAddress = reinterpret_cast<const uint8_t*>(static_cast<uintptr_t>(entryOffset));
}
void NativeProto::assignToModule(NativeModule* nativeModule) noexcept
{
getNativeProtoExecDataHeader(nativeExecData.get()).nativeModule = nativeModule;
entryOffsetOrAddress = nativeModule->getModuleBaseAddress() + reinterpret_cast<uintptr_t>(entryOffsetOrAddress);
}
[[nodiscard]] uint32_t NativeProto::getBytecodeId() const noexcept
{
return bytecodeId;
}
[[nodiscard]] const uint8_t* NativeProto::getEntryAddress() const noexcept
{
return entryOffsetOrAddress;
}
[[nodiscard]] const NativeProtoExecDataHeader& NativeProto::getNativeExecDataHeader() const noexcept
{
return getNativeProtoExecDataHeader(nativeExecData.get());
}
[[nodiscard]] const uint32_t* NativeProto::getNonOwningPointerToInstructionOffsets() const noexcept
{
return nativeExecData.get();
}
[[nodiscard]] const uint32_t* NativeProto::getOwningPointerToInstructionOffsets() const noexcept
{
getNativeProtoExecDataHeader(nativeExecData.get()).nativeModule->addRef();
return nativeExecData.get();
}
void NativeProto::releaseOwningPointerToInstructionOffsets(const uint32_t* ownedInstructionOffsets) noexcept
{
getNativeProtoExecDataHeader(ownedInstructionOffsets).nativeModule->release();
}
struct NativeProtoBytecodeIdEqual
{
[[nodiscard]] bool operator()(const NativeProto& left, const NativeProto& right) const noexcept
[[nodiscard]] bool operator()(const NativeProtoExecDataPtr& left, const NativeProtoExecDataPtr& right) const noexcept
{
return left.getBytecodeId() == right.getBytecodeId();
return getNativeProtoExecDataHeader(left.get()).bytecodeId == getNativeProtoExecDataHeader(right.get()).bytecodeId;
}
};
struct NativeProtoBytecodeIdLess
{
[[nodiscard]] bool operator()(const NativeProto& left, const NativeProto& right) const noexcept
[[nodiscard]] bool operator()(const NativeProtoExecDataPtr& left, const NativeProtoExecDataPtr& right) const noexcept
{
return left.getBytecodeId() < right.getBytecodeId();
return getNativeProtoExecDataHeader(left.get()).bytecodeId < getNativeProtoExecDataHeader(right.get()).bytecodeId;
}
[[nodiscard]] bool operator()(const NativeProto& left, uint32_t right) const noexcept
[[nodiscard]] bool operator()(const NativeProtoExecDataPtr& left, uint32_t right) const noexcept
{
return left.getBytecodeId() < right;
return getNativeProtoExecDataHeader(left.get()).bytecodeId < right;
}
[[nodiscard]] bool operator()(uint32_t left, const NativeProto& right) const noexcept
[[nodiscard]] bool operator()(uint32_t left, const NativeProtoExecDataPtr& right) const noexcept
{
return left < right.getBytecodeId();
return left < getNativeProtoExecDataHeader(right.get()).bytecodeId;
}
};
NativeModule::NativeModule(
SharedCodeAllocator* allocator, const ModuleId& moduleId, const uint8_t* moduleBaseAddress, std::vector<NativeProto> nativeProtos) noexcept
NativeModule::NativeModule(SharedCodeAllocator* allocator, const ModuleId& moduleId, const uint8_t* moduleBaseAddress,
std::vector<NativeProtoExecDataPtr> nativeProtos) noexcept
: allocator{allocator}
, moduleId{moduleId}
, moduleBaseAddress{moduleBaseAddress}
@ -99,9 +50,11 @@ NativeModule::NativeModule(
LUAU_ASSERT(moduleBaseAddress != nullptr);
// Bind all of the NativeProtos to this module:
for (NativeProto& nativeProto : this->nativeProtos)
for (const NativeProtoExecDataPtr& nativeProto : this->nativeProtos)
{
nativeProto.assignToModule(this);
NativeProtoExecDataHeader& header = getNativeProtoExecDataHeader(nativeProto.get());
header.nativeModule = this;
header.entryOffsetOrAddress = moduleBaseAddress + reinterpret_cast<uintptr_t>(header.entryOffsetOrAddress);
}
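// Sort by bytecode id so that tryGetNativeProto can binary-search via
// std::equal_range below.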
std::sort(this->nativeProtos.begin(), this->nativeProtos.end(), NativeProtoBytecodeIdLess{});
@ -120,6 +73,11 @@ size_t NativeModule::addRef() const noexcept
return refcount.fetch_add(1) + 1;
}
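// addRefs acquires N owning references in a single atomic operation; callers
// take one reference per proto bound (see the bindNativeProtos call sites).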
size_t NativeModule::addRefs(size_t count) const noexcept
{
return refcount.fetch_add(count) + count;
}
size_t NativeModule::release() const noexcept
{
size_t newRefcount = refcount.fetch_sub(1) - 1;
@ -143,7 +101,7 @@ size_t NativeModule::release() const noexcept
return moduleBaseAddress;
}
[[nodiscard]] const NativeProto* NativeModule::tryGetNativeProto(uint32_t bytecodeId) const noexcept
[[nodiscard]] const uint32_t* NativeModule::tryGetNativeProto(uint32_t bytecodeId) const noexcept
{
const auto range = std::equal_range(nativeProtos.begin(), nativeProtos.end(), bytecodeId, NativeProtoBytecodeIdLess{});
if (range.first == range.second)
@ -151,7 +109,12 @@ size_t NativeModule::release() const noexcept
LUAU_ASSERT(std::next(range.first) == range.second);
return &*range.first;
return range.first->get();
}
[[nodiscard]] const std::vector<NativeProtoExecDataPtr>& NativeModule::getNativeProtos() const noexcept
{
return nativeProtos;
}
@ -226,6 +189,11 @@ NativeModuleRef::operator bool() const noexcept
}
SharedCodeAllocator::SharedCodeAllocator(CodeAllocator* codeAllocator) noexcept
: codeAllocator{codeAllocator}
{
}
SharedCodeAllocator::~SharedCodeAllocator() noexcept
{
// The allocator should not be destroyed until all outstanding references
@ -240,22 +208,26 @@ SharedCodeAllocator::~SharedCodeAllocator() noexcept
return tryGetNativeModuleWithLockHeld(moduleId);
}
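// Returns the module plus a flag that is true only when this call inserted a
// new module; callers use the flag to avoid logging perf ranges twice.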
NativeModuleRef SharedCodeAllocator::getOrInsertNativeModule(
const ModuleId& moduleId, std::vector<NativeProto> nativeProtos, const std::vector<uint8_t>& data, const std::vector<uint8_t>& code)
std::pair<NativeModuleRef, bool> SharedCodeAllocator::getOrInsertNativeModule(const ModuleId& moduleId,
std::vector<NativeProtoExecDataPtr> nativeProtos, const uint8_t* data, size_t dataSize, const uint8_t* code, size_t codeSize)
{
std::unique_lock lock{mutex};
if (NativeModuleRef existingModule = tryGetNativeModuleWithLockHeld(moduleId))
return existingModule;
return {std::move(existingModule), false};
// We simulate allocation until the backend allocator is integrated
uint8_t* nativeData = nullptr;
size_t sizeNativeData = 0;
uint8_t* codeStart = nullptr;
if (!codeAllocator->allocate(data, int(dataSize), code, int(codeSize), nativeData, sizeNativeData, codeStart))
{
return {};
}
std::unique_ptr<NativeModule>& nativeModule = nativeModules[moduleId];
nativeModule = std::make_unique<NativeModule>(this, moduleId, baseAddress, std::move(nativeProtos));
nativeModule = std::make_unique<NativeModule>(this, moduleId, codeStart, std::move(nativeProtos));
baseAddress += data.size() + code.size();
return NativeModuleRef{nativeModule.get()};
return {NativeModuleRef{nativeModule.get()}, true};
}
void SharedCodeAllocator::eraseNativeModuleIfUnreferenced(const ModuleId& moduleId)

View File

@ -6,85 +6,50 @@
#include "lmem.h"
#include "lgc.h"
LUAU_FASTFLAGVARIABLE(LuauNewProtoInitAll, false)
Proto* luaF_newproto(lua_State* L)
{
Proto* f = luaM_newgco(L, Proto, sizeof(Proto), L->activememcat);
luaC_init(L, f, LUA_TPROTO);
if (FFlag::LuauNewProtoInitAll)
{
f->nups = 0;
f->numparams = 0;
f->is_vararg = 0;
f->maxstacksize = 0;
f->flags = 0;
f->k = NULL;
f->code = NULL;
f->p = NULL;
f->codeentry = NULL;
f->execdata = NULL;
f->exectarget = 0;
f->lineinfo = NULL;
f->abslineinfo = NULL;
f->locvars = NULL;
f->upvalues = NULL;
f->source = NULL;
f->debugname = NULL;
f->debuginsn = NULL;
f->typeinfo = NULL;
f->userdata = NULL;
f->gclist = NULL;
f->sizecode = 0;
f->sizep = 0;
f->sizelocvars = 0;
f->sizeupvalues = 0;
f->sizek = 0;
f->sizelineinfo = 0;
f->linegaplog2 = 0;
f->linedefined = 0;
f->bytecodeid = 0;
}
else
{
f->k = NULL;
f->sizek = 0;
f->p = NULL;
f->sizep = 0;
f->code = NULL;
f->sizecode = 0;
f->sizeupvalues = 0;
f->nups = 0;
f->upvalues = NULL;
f->numparams = 0;
f->is_vararg = 0;
f->maxstacksize = 0;
f->flags = 0;
f->sizelineinfo = 0;
f->linegaplog2 = 0;
f->lineinfo = NULL;
f->abslineinfo = NULL;
f->sizelocvars = 0;
f->locvars = NULL;
f->source = NULL;
f->debugname = NULL;
f->debuginsn = NULL;
f->codeentry = NULL;
f->execdata = NULL;
f->exectarget = 0;
f->typeinfo = NULL;
f->userdata = NULL;
}
return f;
}

View File

@ -13,8 +13,6 @@
#include <string.h>
LUAU_FASTFLAGVARIABLE(LuauLoadExceptionSafe, false)
// TODO: RAII deallocation doesn't work for longjmp builds if a memory error happens
template<typename T>
struct TempBuffer
@ -54,11 +52,8 @@ public:
ScopedSetGCThreshold(global_State* global, size_t newThreshold) noexcept
: global{global}
{
if (FFlag::LuauLoadExceptionSafe)
{
originalThreshold = global->GCthreshold;
global->GCthreshold = newThreshold;
}
}
ScopedSetGCThreshold(const ScopedSetGCThreshold&) = delete;
@ -69,10 +64,7 @@ public:
~ScopedSetGCThreshold() noexcept
{
if (FFlag::LuauLoadExceptionSafe)
{
global->GCthreshold = originalThreshold;
}
}
private:
@ -222,12 +214,6 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
// pause GC for the duration of deserialization - some objects we're creating aren't rooted
const ScopedSetGCThreshold pauseGC{L->global, SIZE_MAX};
size_t GCthreshold = L->global->GCthreshold;
if (!FFlag::LuauLoadExceptionSafe)
{
L->global->GCthreshold = SIZE_MAX;
}
// env is 0 for current environment and a stack index otherwise
Table* envt = (env == 0) ? L->gt : hvalue(luaA_toobject(L, env));
@ -289,44 +275,25 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
offset += typesize;
}
if (FFlag::LuauLoadExceptionSafe)
{
const int sizecode = readVarInt(data, size, offset);
p->code = luaM_newarray(L, sizecode, Instruction, p->memcat);
p->sizecode = sizecode;
}
else
{
p->sizecode = readVarInt(data, size, offset);
p->code = luaM_newarray(L, p->sizecode, Instruction, p->memcat);
}
for (int j = 0; j < p->sizecode; ++j)
p->code[j] = read<uint32_t>(data, size, offset);
p->codeentry = p->code;
if (FFlag::LuauLoadExceptionSafe)
{
const int sizek = readVarInt(data, size, offset);
p->k = luaM_newarray(L, sizek, TValue, p->memcat);
p->sizek = sizek;
}
else
{
p->sizek = readVarInt(data, size, offset);
p->k = luaM_newarray(L, p->sizek, TValue, p->memcat);
}
if (FFlag::LuauLoadExceptionSafe)
{
// Initialize the constants to nil to ensure they have a valid state
// in the event that some operation in the following loop fails with
// an exception.
for (int j = 0; j < p->sizek; ++j)
{
setnilvalue(&p->k[j]);
}
}
for (int j = 0; j < p->sizek; ++j)
@ -334,10 +301,7 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
switch (read<uint8_t>(data, size, offset))
{
case LBC_CONSTANT_NIL:
if (!FFlag::LuauLoadExceptionSafe)
{
setnilvalue(&p->k[j]);
}
// All constants have already been pre-initialized to nil
break;
case LBC_CONSTANT_BOOLEAN:
@ -409,17 +373,9 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
}
}
if (FFlag::LuauLoadExceptionSafe)
{
const int sizep = readVarInt(data, size, offset);
p->p = luaM_newarray(L, sizep, Proto*, p->memcat);
p->sizep = sizep;
}
else
{
p->sizep = readVarInt(data, size, offset);
p->p = luaM_newarray(L, p->sizep, Proto*, p->memcat);
}
for (int j = 0; j < p->sizep; ++j)
{
@ -439,17 +395,9 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
int intervals = ((p->sizecode - 1) >> p->linegaplog2) + 1;
int absoffset = (p->sizecode + 3) & ~3;
if (FFlag::LuauLoadExceptionSafe)
{
const int sizelineinfo = absoffset + intervals * sizeof(int);
p->lineinfo = luaM_newarray(L, sizelineinfo, uint8_t, p->memcat);
p->sizelineinfo = sizelineinfo;
}
else
{
p->sizelineinfo = absoffset + intervals * sizeof(int);
p->lineinfo = luaM_newarray(L, p->sizelineinfo, uint8_t, p->memcat);
}
p->abslineinfo = (int*)(p->lineinfo + absoffset);
@ -472,17 +420,9 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
if (debuginfo)
{
if (FFlag::LuauLoadExceptionSafe)
{
const int sizelocvars = readVarInt(data, size, offset);
p->locvars = luaM_newarray(L, sizelocvars, LocVar, p->memcat);
p->sizelocvars = sizelocvars;
}
else
{
p->sizelocvars = readVarInt(data, size, offset);
p->locvars = luaM_newarray(L, p->sizelocvars, LocVar, p->memcat);
}
for (int j = 0; j < p->sizelocvars; ++j)
{
@ -492,17 +432,9 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
p->locvars[j].reg = read<uint8_t>(data, size, offset);
}
if (FFlag::LuauLoadExceptionSafe)
{
const int sizeupvalues = readVarInt(data, size, offset);
p->upvalues = luaM_newarray(L, sizeupvalues, TString*, p->memcat);
p->sizeupvalues = sizeupvalues;
}
else
{
p->sizeupvalues = readVarInt(data, size, offset);
p->upvalues = luaM_newarray(L, p->sizeupvalues, TString*, p->memcat);
}
for (int j = 0; j < p->sizeupvalues; ++j)
{
@ -523,10 +455,5 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
setclvalue(L, L->top, cl);
incr_top(L);
if (!FFlag::LuauLoadExceptionSafe)
{
L->global->GCthreshold = GCthreshold;
}
return 0;
}

View File

@ -15,7 +15,6 @@
LUAU_FASTFLAG(LuauTraceTypesInNonstrictMode2)
LUAU_FASTFLAG(LuauSetMetatableDoesNotTimeTravel)
LUAU_FASTFLAG(LuauAutocompleteStringLiteralBounds);
using namespace Luau;
@ -3188,7 +3187,6 @@ TEST_CASE_FIXTURE(ACFixture, "string_singleton_as_table_key")
TEST_CASE_FIXTURE(ACFixture, "string_singleton_in_if_statement")
{
ScopedFastFlag sff[]{
{FFlag::LuauAutocompleteStringLiteralBounds, true},
{FFlag::DebugLuauDeferredConstraintResolution, true},
};
@ -3215,7 +3213,93 @@ TEST_CASE_FIXTURE(ACFixture, "string_singleton_in_if_statement")
ac = autocomplete('2');
CHECK(ac.entryMap.count("left"));
LUAU_CHECK_HAS_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_KEY(ac.entryMap, "right");
ac = autocomplete('3');
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "right");
ac = autocomplete('4');
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "right");
ac = autocomplete('5');
LUAU_CHECK_HAS_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_KEY(ac.entryMap, "right");
ac = autocomplete('6');
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "right");
ac = autocomplete('7');
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "right");
ac = autocomplete('8');
LUAU_CHECK_HAS_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_KEY(ac.entryMap, "right");
ac = autocomplete('9');
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "right");
ac = autocomplete('A');
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "right");
ac = autocomplete('B');
LUAU_CHECK_HAS_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_KEY(ac.entryMap, "right");
ac = autocomplete('C');
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "right");
}
// https://github.com/Roblox/luau/issues/858
TEST_CASE_FIXTURE(ACFixture, "string_singleton_in_if_statement2")
{
ScopedFastFlag sff[]{
{FFlag::DebugLuauDeferredConstraintResolution, true},
};
check(R"(
--!strict
type Direction = "left" | "right"
local dir: Direction
-- typestate here means dir is actually typed as `"left"`
dir = "left"
if dir == @1"@2"@3 then end
local a: {[Direction]: boolean} = {[@4"@5"@6]}
if dir == @7`@8`@9 then end
local a: {[Direction]: boolean} = {[@A`@B`@C]}
)");
Luau::AutocompleteResult ac;
ac = autocomplete('1');
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "right");
ac = autocomplete('2');
LUAU_CHECK_HAS_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "right");
ac = autocomplete('3');
@ -3231,7 +3315,7 @@ TEST_CASE_FIXTURE(ACFixture, "string_singleton_in_if_statement")
ac = autocomplete('5');
LUAU_CHECK_HAS_KEY(ac.entryMap, "left");
CHECK(ac.entryMap.count("right"));
LUAU_CHECK_HAS_KEY(ac.entryMap, "right");
ac = autocomplete('6');
@ -3245,7 +3329,7 @@ TEST_CASE_FIXTURE(ACFixture, "string_singleton_in_if_statement")
ac = autocomplete('8');
CHECK(ac.entryMap.count("left"));
LUAU_CHECK_HAS_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_NO_KEY(ac.entryMap, "right");
ac = autocomplete('9');
@ -3260,8 +3344,8 @@ TEST_CASE_FIXTURE(ACFixture, "string_singleton_in_if_statement")
ac = autocomplete('B');
CHECK(ac.entryMap.count("left"));
CHECK(ac.entryMap.count("right"));
LUAU_CHECK_HAS_KEY(ac.entryMap, "left");
LUAU_CHECK_HAS_KEY(ac.entryMap, "right");
ac = autocomplete('C');

View File

@ -22,10 +22,10 @@ ClassFixture::ClassFixture()
TypeId baseClassInstanceType = arena.addType(ClassType{"BaseClass", {}, nullopt, nullopt, {}, {}, "Test"});
getMutable<ClassType>(baseClassInstanceType)->props = {
{"BaseMethod", {makeFunction(arena, baseClassInstanceType, {numberType}, {})}},
{"BaseMethod", Property::readonly(makeFunction(arena, baseClassInstanceType, {numberType}, {}))},
{"BaseField", {numberType}},
{"Touched", {connectionType}},
{"Touched", Property::readonly(connectionType)},
};
getMutable<ClassType>(connectionType)->props = {

View File

@ -32,7 +32,6 @@ void luaC_validate(lua_State* L);
LUAU_FASTFLAG(DebugLuauAbortingChecks)
LUAU_FASTINT(CodegenHeuristicsInstructionLimit)
LUAU_FASTFLAG(LuauLoadExceptionSafe)
LUAU_DYNAMIC_FASTFLAG(LuauDebugInfoDupArgLeftovers)
LUAU_FASTFLAG(LuauCompileRepeatUntilSkippedLocals)
LUAU_FASTFLAG(LuauCodegenInferNumTag)
@ -2171,8 +2170,6 @@ TEST_CASE("HugeFunctionLoadFailure")
// luau_load. This should require two "large" allocations: One for the
// code array and one for the constants array (k). We run this test twice
// and fail each of these two allocations.
ScopedFastFlag luauLoadExceptionSafe{FFlag::LuauLoadExceptionSafe, true};
std::string source = makeHugeFunctionSource();
static const size_t expectedTotalLargeAllocations = 2;

View File

@ -92,6 +92,20 @@ declare foo: {
declare function @checked optionalArgsAtTheEnd1(x: string, y: number?, z: number?) : number
declare function @checked optionalArgsAtTheEnd2(x: string, y: number?, z: string) : number
type DateTypeArg = {
year: number,
month: number,
day: number,
hour: number?,
min: number?,
sec: number?,
isdst: boolean?,
}
declare os : {
time: @checked (time: DateTypeArg?) -> number
}
)BUILTIN_SRC";
};
@ -505,4 +519,12 @@ optionalArgsAtTheEnd2("a", "b", "c") -- error
CHECK_EQ(2, r1->actual);
}
TEST_CASE_FIXTURE(NonStrictTypeCheckerFixture, "non_testable_type_throws_ice")
{
CHECK_THROWS_AS(checkNonStrict(R"(
os.time({year = 0, month = 0, day = 0, min = 0, isdst = nil})
)"),
Luau::InternalCompilerError);
}
TEST_SUITE_END();

View File

@ -731,7 +731,7 @@ TEST_CASE_FIXTURE(NormalizeFixture, "trivial_intersection_inhabited")
const NormalizedType* n = normalizer.normalize(c);
REQUIRE(n);
CHECK(normalizer.isInhabited(n));
CHECK(normalizer.isInhabited(n) == NormalizationResult::True);
}
TEST_CASE_FIXTURE(NormalizeFixture, "bare_negated_boolean")

View File

@ -13,6 +13,46 @@
#include <initializer_list>
#include <memory>
#if __APPLE__
#include <TargetConditionals.h>
#if TARGET_OS_IPHONE
#include <CoreFoundation/CoreFoundation.h>
std::optional<std::string> getResourcePath0()
{
CFBundleRef mainBundle = CFBundleGetMainBundle();
if (mainBundle == NULL)
{
return std::nullopt;
}
CFURLRef mainBundleURL = CFBundleCopyBundleURL(mainBundle);
if (mainBundleURL == NULL)
{
CFRelease(mainBundle);
return std::nullopt;
}
char pathBuffer[PATH_MAX];
if (!CFURLGetFileSystemRepresentation(mainBundleURL, true, (UInt8*)pathBuffer, PATH_MAX))
{
CFRelease(mainBundleURL);
CFRelease(mainBundle);
return std::nullopt;
}
CFRelease(mainBundleURL);
CFRelease(mainBundle);
return std::string(pathBuffer);
}
std::optional<std::string> getResourcePath()
{
static std::optional<std::string> path0 = getResourcePath0();
return path0;
}
#endif
#endif
LUAU_FASTFLAG(LuauUpdatedRequireByStringSemantics)
class ReplWithPathFixture
@ -49,7 +89,24 @@ public:
std::string luauDirRel = ".";
std::string luauDirAbs;
#if TARGET_OS_IPHONE
std::optional<std::string> cwd0 = getCurrentWorkingDirectory();
std::optional<std::string> cwd = getResourcePath();
if (cwd && cwd0)
{
// When running under Xcode, cwd0 is "/"; however, that is not always the case
const auto& _res = *cwd;
const auto& _cwd = *cwd0;
if (_res.find(_cwd) == 0)
{
// We need a relative path, so we subtract cwd0 from cwd
luauDirRel = "./" + _res.substr(_cwd.length());
}
}
#else
std::optional<std::string> cwd = getCurrentWorkingDirectory();
#endif
REQUIRE_MESSAGE(cwd, "Error getting Luau path");
std::replace((*cwd).begin(), (*cwd).end(), '\\', '/');
luauDirAbs = *cwd;

View File

@ -1,6 +1,10 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/SharedCodeAllocator.h"
#include "Luau/CodeAllocator.h"
#include "luacodegen.h"
#include "doctest.h"
// We explicitly test correctness of self-assignment for some types
@ -10,15 +14,25 @@
using namespace Luau::CodeGen;
constexpr size_t kBlockSize = 1024 * 1024;
constexpr size_t kMaxTotalSize = 1024 * 1024;
static const uint8_t fakeCode[1] = {0x00};
TEST_SUITE_BEGIN("SharedCodeAllocator");
TEST_CASE("NativeModuleRefRefcounting")
{
SharedCodeAllocator allocator{};
if (!luau_codegen_supported())
return;
CodeAllocator codeAllocator{kBlockSize, kMaxTotalSize};
SharedCodeAllocator allocator{&codeAllocator};
REQUIRE(allocator.tryGetNativeModule(ModuleId{0x0a}).empty());
NativeModuleRef modRefA = allocator.getOrInsertNativeModule(ModuleId{0x0a}, {}, {}, {});
NativeModuleRef modRefA = allocator.getOrInsertNativeModule(ModuleId{0x0a}, {}, nullptr, 0, fakeCode, std::size(fakeCode)).first;
REQUIRE(!modRefA.empty());
// If we attempt to get the module again, we should get the same module back:
@ -26,14 +40,14 @@ TEST_CASE("NativeModuleRefRefcounting")
// If we try to insert another instance of the module, we should get the
// existing module back:
REQUIRE(allocator.getOrInsertNativeModule(ModuleId{0x0a}, {}, {}, {}).get() == modRefA.get());
REQUIRE(allocator.getOrInsertNativeModule(ModuleId{0x0a}, {}, nullptr, 0, fakeCode, std::size(fakeCode)).first.get() == modRefA.get());
// If we try to look up a different module, we should not get the existing
// module back:
REQUIRE(allocator.tryGetNativeModule(ModuleId{0x0b}).empty());
// (Insert a second module to help with validation below)
NativeModuleRef modRefB = allocator.getOrInsertNativeModule(ModuleId{0x0b}, {}, {}, {});
NativeModuleRef modRefB = allocator.getOrInsertNativeModule(ModuleId{0x0b}, {}, nullptr, 0, fakeCode, std::size(fakeCode)).first;
REQUIRE(!modRefB.empty());
REQUIRE(modRefB.get() != modRefA.get());
@ -226,41 +240,40 @@ TEST_CASE("NativeModuleRefRefcounting")
TEST_CASE("NativeProtoRefcounting")
{
SharedCodeAllocator allocator{};
if (!luau_codegen_supported())
return;
std::vector<NativeProto> nativeProtos;
CodeAllocator codeAllocator{kBlockSize, kMaxTotalSize};
SharedCodeAllocator allocator{&codeAllocator};
std::vector<NativeProtoExecDataPtr> nativeProtos;
nativeProtos.reserve(1);
nativeProtos.push_back(NativeProto{0x01, createNativeProtoExecData(0)});
NativeProtoExecDataPtr nativeProto = createNativeProtoExecData(0);
getNativeProtoExecDataHeader(nativeProto.get()).bytecodeId = 0x01;
nativeProtos.push_back(std::move(nativeProto));
NativeModuleRef modRefA = allocator.getOrInsertNativeModule(ModuleId{0x0a}, std::move(nativeProtos), {}, {});
NativeModuleRef modRefA =
allocator.getOrInsertNativeModule(ModuleId{0x0a}, std::move(nativeProtos), nullptr, 0, fakeCode, std::size(fakeCode)).first;
REQUIRE(!modRefA.empty());
REQUIRE(modRefA->getRefcount());
const NativeProto* proto1 = modRefA->tryGetNativeProto(0x01);
REQUIRE(proto1 != nullptr);
// getNonOwningPointerToInstructionOffsets should not acquire ownership:
const uint32_t* unownedInstructionOffsets = proto1->getNonOwningPointerToInstructionOffsets();
REQUIRE(unownedInstructionOffsets != nullptr);
REQUIRE(modRefA->getRefcount() == 1);
// getOwningPointerToInstructionOffsets should acquire ownership:
const uint32_t* ownedInstructionOffsets = proto1->getOwningPointerToInstructionOffsets();
REQUIRE(ownedInstructionOffsets == unownedInstructionOffsets);
// Verify behavior of addRef:
modRefA->addRef();
REQUIRE(modRefA->getRefcount() == 2);
// We should be able to call it multiple times to get multiple references:
const uint32_t* ownedInstructionOffsets2 = proto1->getOwningPointerToInstructionOffsets();
REQUIRE(ownedInstructionOffsets2 == unownedInstructionOffsets);
// Verify behavior of addRefs:
modRefA->addRefs(2);
REQUIRE(modRefA->getRefcount() == 4);
// Undo two of our addRef(s):
modRefA->release();
REQUIRE(modRefA->getRefcount() == 3);
// releaseOwningPointerToInstructionOffsets should be callable to release
// the reference:
NativeProto::releaseOwningPointerToInstructionOffsets(ownedInstructionOffsets2);
modRefA->release();
REQUIRE(modRefA->getRefcount() == 2);
// If we release our NativeModuleRef, the module should be kept alive by
// the owning instruction offsets pointer:
// the owning reference we acquired:
modRefA.reset();
modRefA = allocator.tryGetNativeModule(ModuleId{0x0a});
@ -269,62 +282,65 @@ TEST_CASE("NativeProtoRefcounting")
// If the last "release" comes via releaseOwningPointerToInstructionOffsets,
// the module should be successfully destroyed:
const NativeModule* rawModA = modRefA.get();
modRefA.reset();
NativeProto::releaseOwningPointerToInstructionOffsets(ownedInstructionOffsets);
rawModA->release();
REQUIRE(allocator.tryGetNativeModule(ModuleId{0x0a}).empty());
}
TEST_CASE("NativeProtoState")
{
SharedCodeAllocator allocator{};
if (!luau_codegen_supported())
return;
CodeAllocator codeAllocator{kBlockSize, kMaxTotalSize};
SharedCodeAllocator allocator{&codeAllocator};
const std::vector<uint8_t> data(16);
const std::vector<uint8_t> code(16);
std::vector<NativeProto> nativeProtos;
std::vector<NativeProtoExecDataPtr> nativeProtos;
nativeProtos.reserve(2);
{
NativeProtoExecDataPtr nativeExecData = createNativeProtoExecData(2);
nativeExecData[0] = 0;
nativeExecData[1] = 4;
NativeProtoExecDataPtr nativeProto = createNativeProtoExecData(2);
getNativeProtoExecDataHeader(nativeProto.get()).bytecodeId = 1;
getNativeProtoExecDataHeader(nativeProto.get()).entryOffsetOrAddress = reinterpret_cast<const uint8_t*>(0x00);
nativeProto[0] = 0;
nativeProto[1] = 4;
NativeProto proto{1, std::move(nativeExecData)};
proto.setEntryOffset(0x00);
nativeProtos.push_back(std::move(proto));
nativeProtos.push_back(std::move(nativeProto));
}
{
NativeProtoExecDataPtr nativeExecData = createNativeProtoExecData(2);
nativeExecData[0] = 8;
nativeExecData[1] = 12;
NativeProtoExecDataPtr nativeProto = createNativeProtoExecData(2);
getNativeProtoExecDataHeader(nativeProto.get()).bytecodeId = 3;
getNativeProtoExecDataHeader(nativeProto.get()).entryOffsetOrAddress = reinterpret_cast<const uint8_t*>(0x08);
nativeProto[0] = 8;
nativeProto[1] = 12;
NativeProto proto{3, std::move(nativeExecData)};
proto.setEntryOffset(0x08);
nativeProtos.push_back(std::move(proto));
nativeProtos.push_back(std::move(nativeProto));
}
NativeModuleRef modRefA = allocator.getOrInsertNativeModule(ModuleId{0x0a}, std::move(nativeProtos), data, code);
NativeModuleRef modRefA =
allocator.getOrInsertNativeModule(ModuleId{0x0a}, std::move(nativeProtos), data.data(), data.size(), code.data(), code.size()).first;
REQUIRE(!modRefA.empty());
REQUIRE(modRefA->getModuleBaseAddress() != nullptr);
const NativeProto* proto1 = modRefA->tryGetNativeProto(1);
const uint32_t* proto1 = modRefA->tryGetNativeProto(1);
REQUIRE(proto1 != nullptr);
REQUIRE(proto1->getBytecodeId() == 1);
REQUIRE(proto1->getEntryAddress() == modRefA->getModuleBaseAddress() + 0x00);
const uint32_t* proto1Offsets = proto1->getNonOwningPointerToInstructionOffsets();
REQUIRE(proto1Offsets != nullptr);
REQUIRE(proto1Offsets[0] == 0);
REQUIRE(proto1Offsets[1] == 4);
REQUIRE(getNativeProtoExecDataHeader(proto1).bytecodeId == 1);
REQUIRE(getNativeProtoExecDataHeader(proto1).entryOffsetOrAddress == modRefA->getModuleBaseAddress() + 0x00);
REQUIRE(proto1[0] == 0);
REQUIRE(proto1[1] == 4);
const NativeProto* proto3 = modRefA->tryGetNativeProto(3);
const uint32_t* proto3 = modRefA->tryGetNativeProto(3);
REQUIRE(proto3 != nullptr);
REQUIRE(proto3->getBytecodeId() == 3);
REQUIRE(proto3->getEntryAddress() == modRefA->getModuleBaseAddress() + 0x08);
const uint32_t* proto3Offsets = proto3->getNonOwningPointerToInstructionOffsets();
REQUIRE(proto3Offsets != nullptr);
REQUIRE(proto3Offsets[0] == 8);
REQUIRE(proto3Offsets[1] == 12);
REQUIRE(getNativeProtoExecDataHeader(proto3).bytecodeId == 3);
REQUIRE(getNativeProtoExecDataHeader(proto3).entryOffsetOrAddress == modRefA->getModuleBaseAddress() + 0x08);
REQUIRE(proto3[0] == 8);
REQUIRE(proto3[1] == 12);
// Ensure that non-existent native protos cannot be found:
REQUIRE(modRefA->tryGetNativeProto(0) == nullptr);

View File

@ -202,7 +202,7 @@ TEST_CASE_FIXTURE(Fixture, "generic_aliases")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
const std::string expected = R"(Type 'bad' could not be converted into 'T<number>'; at [read "v"], string is not exactly number)";
const std::string expected = R"(Type '{ v: string }' could not be converted into 'T<number>'; at [read "v"], string is not exactly number)";
CHECK(result.errors[0].location == Location{{4, 31}, {4, 44}});
CHECK_EQ(expected, toString(result.errors[0]));
}
@ -221,7 +221,7 @@ TEST_CASE_FIXTURE(Fixture, "dependent_generic_aliases")
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
const std::string expected = R"(Type 'bad' could not be converted into 'U<number>'; at [read "t"][read "v"], string is not exactly number)";
const std::string expected = R"(Type '{ t: { v: string } }' could not be converted into 'U<number>'; at [read "t"][read "v"], string is not exactly number)";
CHECK(result.errors[0].location == Location{{4, 31}, {4, 52}});
CHECK_EQ(expected, toString(result.errors[0]));

View File

@ -463,7 +463,7 @@ local b: B = a
LUAU_REQUIRE_ERRORS(result);
if (FFlag::DebugLuauDeferredConstraintResolution)
CHECK(toString(result.errors.at(0)) == "Type 'a' could not be converted into 'B'; at [read \"x\"], ChildClass is not exactly BaseClass");
CHECK(toString(result.errors.at(0)) == "Type 'A' could not be converted into 'B'; at [read \"x\"], ChildClass is not exactly BaseClass");
else
{
const std::string expected = R"(Type 'A' could not be converted into 'B'
@ -707,4 +707,64 @@ TEST_CASE_FIXTURE(ClassFixture, "cannot_index_a_class_with_no_indexer")
CHECK(builtinTypes->errorType == requireType("c"));
}
TEST_CASE_FIXTURE(ClassFixture, "cyclic_tables_are_assumed_to_be_compatible_with_classes")
{
/*
* This is technically documenting a case where we are intentionally
* unsound.
*
* Our builtins are essentially defined like so:
*
* declare class BaseClass
* BaseField: number
* function BaseMethod(self, number): ()
* read Touched: Connection
* end
*
* declare class Connection
* Connect: (Connection, (BaseClass) -> ()) -> ()
* end
*
* The type we infer for `onTouch` is
*
* (t1) -> () where t1 = { read BaseField: unknown, read BaseMethod: (t1, number) -> () }
*
* In order to validate that onTouch can be passed to Connect, we must
* verify the following relation:
*
* BaseClass <: t1 where t1 = { read BaseField: unknown, read BaseMethod: (t1, number) -> () }
*
* However, the cycle between the table and the function gums up the works
* here and the worst thing is that it's perfectly reasonable in principle.
* Just from these types, we cannot see that BaseMethod will only be passed
* t1. Without that guarantee, BaseClass cannot be used as a subtype of t1.
*
* I think the theoretically-correct way to untangle this would be to infer
* t1 as a bounded existential type.
*
* For now, subtyping has a rule that provisionally substitutes
* the table for the class type when performing the subtyping test. We
* essentially assume that, for all cyclic functions, the table and the
* class are mutually subtypes of one another.
*
* For more information, read uses of Subtyping::substitutions.
*/
CheckResult result = check(R"(
local c = BaseClass.New()
function requiresNothing() end
function onTouch(other)
requiresNothing(other:BaseMethod(0))
print(other.BaseField)
end
c.Touched:Connect(onTouch)
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_SUITE_END();

View File

@ -2439,9 +2439,41 @@ TEST_CASE_FIXTURE(Fixture, "dont_infer_overloaded_functions")
LUAU_REQUIRE_NO_ERRORS(result);
if (FFlag::DebugLuauDeferredConstraintResolution)
CHECK("<a...>(t1) -> () where t1 = { read FindFirstChild: (t1, string) -> (a...) }" == toString(requireType("getR6Attachments")));
CHECK("(t1) -> () where t1 = { read FindFirstChild: (t1, string) -> (...unknown) }" == toString(requireType("getR6Attachments")));
else
CHECK("<a...>(t1) -> () where t1 = {+ FindFirstChild: (t1, string) -> (a...) +}" == toString(requireType("getR6Attachments")));
}
TEST_CASE_FIXTURE(Fixture, "param_y_is_bounded_by_x_of_type_string")
{
CheckResult result = check(R"(
local function f(x: string, y)
x = y
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK("(string, string) -> ()" == toString(requireType("f")));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "function_that_could_return_anything_is_compatible_with_function_that_is_expected_to_return_nothing")
{
CheckResult result = check(R"(
-- We infer foo : (g: (number) -> (...unknown)) -> ()
function foo(g)
g(0)
end
-- a requires a function that returns no values
function a(f: ((number) -> ()) -> ())
end
-- "Returns an unknown number of values" is close enough to "returns no values."
a(foo)
)");
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_SUITE_END();

View File

@ -828,7 +828,7 @@ y.a.c = y
if (FFlag::DebugLuauDeferredConstraintResolution)
CHECK(
toString(result.errors.at(0)) ==
R"(Type 'x' could not be converted into 'T<number>'; type x[read "a"][read "c"] (nil) is not exactly T<number>[read "a"][read "c"][0] (T<number>))");
R"(Type '{ a: { c: nil, d: number }, b: number }' could not be converted into 'T<number>'; type { a: { c: nil, d: number }, b: number }[read "a"][read "c"] (nil) is not exactly T<number>[read "a"][read "c"][0] (T<number>))");
else
{
const std::string expected = R"(Type 'y' could not be converted into 'T<string>'

View File

@ -414,7 +414,7 @@ local b: B.T = a
LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::DebugLuauDeferredConstraintResolution)
CHECK(toString(result.errors.at(0)) == "Type 'a' could not be converted into 'T'; at [read \"x\"], number is not exactly string");
CHECK(toString(result.errors.at(0)) == "Type 'T' could not be converted into 'T'; at [read \"x\"], number is not exactly string");
else
{
const std::string expected = R"(Type 'T' from 'game/A' could not be converted into 'T' from 'game/B'
@ -455,7 +455,7 @@ local b: B.T = a
LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::DebugLuauDeferredConstraintResolution)
CHECK(toString(result.errors.at(0)) == "Type 'a' could not be converted into 'T'; at [read \"x\"], number is not exactly string");
CHECK(toString(result.errors.at(0)) == "Type 'T' could not be converted into 'T'; at [read \"x\"], number is not exactly string");
else
{
const std::string expected = R"(Type 'T' from 'game/B' could not be converted into 'T' from 'game/C'


@ -1105,6 +1105,45 @@ foo(1 :: any)
LUAU_REQUIRE_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "luau_roact_useState_nilable_state_1")
{
ScopedFastFlag sff{FFlag::DebugLuauDeferredConstraintResolution, true};
CheckResult result = check(R"(
type Dispatch<A> = (A) -> ()
type BasicStateAction<S> = ((S) -> S) | S
type ScriptConnection = { Disconnect: (ScriptConnection) -> () }
local blah = nil :: any
local function useState<S>(
initialState: (() -> S) | S,
...
): (S, Dispatch<BasicStateAction<S>>)
return blah, blah
end
local a, b = useState(nil :: ScriptConnection?)
if a then
a:Disconnect()
b(nil :: ScriptConnection?)
end
)");
if (FFlag::DebugLuauDeferredConstraintResolution)
LUAU_REQUIRE_NO_ERRORS(result);
else
{
// This is a known bug in the old solver.
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK(Location{{19, 14}, {19, 41}} == result.errors[0].location);
}
}
TEST_CASE_FIXTURE(BuiltinsFixture, "luau_roact_useState_minimization")
{
// We don't expect this test to work on the old solver, but it also does not yet work on the new solver.


@ -228,7 +228,7 @@ TEST_CASE_FIXTURE(Fixture, "tagged_unions_immutable_tag")
type Dog = { tag: "Dog", howls: boolean }
type Cat = { tag: "Cat", meows: boolean }
type Animal = Dog | Cat
local a : Animal = { tag = "Cat", meows = true }
local a: Animal = { tag = "Cat", meows = true }
a.tag = "Dog"
)");
@ -365,8 +365,10 @@ TEST_CASE_FIXTURE(Fixture, "parametric_tagged_union_alias")
LUAU_REQUIRE_ERROR_COUNT(1, result);
// FIXME: This could be improved by expanding the contents of `a`
const std::string expectedError = "Type 'a' could not be converted into 'Err<number> | Ok<string>'";
const std::string expectedError = R"(Type
'{ result: string, success: boolean }'
could not be converted into
'Err<number> | Ok<string>')";
CHECK(toString(result.errors[0]) == expectedError);
}
@ -539,4 +541,26 @@ TEST_CASE_FIXTURE(Fixture, "no_widening_from_callsites")
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(BuiltinsFixture, "singletons_stick_around_under_assignment")
{
CheckResult result = check(R"(
type Foo = {
kind: "Foo",
}
local foo = (nil :: any) :: Foo
print(foo.kind == "Bar") -- TypeError: Type "Foo" cannot be compared with "Bar"
local kind = foo.kind
print(kind == "Bar") -- SHOULD BE: TypeError: Type "Foo" cannot be compared with "Bar"
)");
// FIXME: Under the new solver, we get both the errors we expect, but they're
// duplicated because of how we are currently running type family reduction.
if (FFlag::DebugLuauDeferredConstraintResolution)
LUAU_REQUIRE_ERROR_COUNT(4, result);
else
LUAU_REQUIRE_ERROR_COUNT(1, result);
}
TEST_SUITE_END();


@ -2174,7 +2174,7 @@ local b: B = a
LUAU_REQUIRE_ERRORS(result);
if (FFlag::DebugLuauDeferredConstraintResolution)
CHECK(toString(result.errors.at(0)) == R"(Type 'a' could not be converted into 'B'; at [read "y"], number is not exactly string)");
CHECK(toString(result.errors.at(0)) == R"(Type 'A' could not be converted into 'B'; at [read "y"], number is not exactly string)");
else
{
const std::string expected = R"(Type 'A' could not be converted into 'B'
@ -2201,7 +2201,7 @@ local b: B = a
LUAU_REQUIRE_ERRORS(result);
if (FFlag::DebugLuauDeferredConstraintResolution)
CHECK(toString(result.errors.at(0)) == R"(Type 'a' could not be converted into 'B'; at [read "b"][read "y"], number is not exactly string)");
CHECK(toString(result.errors.at(0)) == R"(Type 'A' could not be converted into 'B'; at [read "b"][read "y"], number is not exactly string)");
else
{
const std::string expected = R"(Type 'A' could not be converted into 'B'
@ -3994,7 +3994,7 @@ TEST_CASE_FIXTURE(Fixture, "identify_all_problematic_table_fields")
LUAU_REQUIRE_ERROR_COUNT(1, result);
std::string expected = "Type 'a' could not be converted into 'T'; at [read \"a\"], string is not exactly number"
std::string expected = "Type '{ a: string, b: boolean, c: number }' could not be converted into 'T'; at [read \"a\"], string is not exactly number"
"\n\tat [read \"b\"], boolean is not exactly string"
"\n\tat [read \"c\"], number is not exactly boolean";
CHECK(toString(result.errors[0]) == expected);


@ -25,7 +25,7 @@ TEST_CASE_FIXTURE(TypeStateFixture, "initialize_x_of_type_string_or_nil_with_nil
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK("nil" == toString(requireType("a")));
CHECK("string?" == toString(requireType("a")));
}
TEST_CASE_FIXTURE(TypeStateFixture, "extraneous_lvalues_are_populated_with_nil")
@ -55,7 +55,7 @@ TEST_CASE_FIXTURE(TypeStateFixture, "assign_different_values_to_x")
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK("nil" == toString(requireType("a")));
CHECK("string?" == toString(requireType("a")));
CHECK("string" == toString(requireType("b")));
}
@ -73,8 +73,28 @@ TEST_CASE_FIXTURE(TypeStateFixture, "parameter_x_was_constrained_by_two_types")
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK("(string) -> string?" == toString(requireType("f")));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
// `y` is annotated `string | number`, which is explicitly not compatible with `string?`;
// as such, we produce an error here for that mismatch.
//
// this is not necessarily the best inference here, since we could indeed produce `string`
// as a type for `x`, but it's a limitation we can accept for now.
LUAU_REQUIRE_ERRORS(result);
TypePackMismatch* tpm = get<TypePackMismatch>(result.errors[0]);
REQUIRE(tpm);
CHECK("string?" == toString(tpm->wantedTp));
CHECK("number | string" == toString(tpm->givenTp));
CHECK("(number | string) -> string?" == toString(requireType("f")));
}
else
{
LUAU_REQUIRE_NO_ERRORS(result);
CHECK("(string) -> string?" == toString(requireType("f")));
}
}
#if 0
@ -451,4 +471,24 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "typestates_preserve_error_suppression_proper
CHECK("*error-type* | string" == toString(requireTypeAtPosition({3, 16}), {true}));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "typestates_do_not_apply_to_the_initial_local_definition")
{
// early return if the flag isn't set, since this test would otherwise block gated commits
if (!FFlag::DebugLuauDeferredConstraintResolution)
return;
CheckResult result = check(R"(
type MyType = number | string
local foo: MyType = 5
print(foo)
foo = 7
print(foo)
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK("number | string" == toString(requireTypeAtPosition({3, 14}), {true}));
CHECK("number" == toString(requireTypeAtPosition({5, 14}), {true}));
}
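A rough model of what the two requireTypeAtPosition checks exercise, as a simplified sketch (invented names; not the solver's actual data-flow machinery): the annotation governs the type observed at the initial definition, while each later assignment contributes its own, possibly narrower, typestate.

#include <cassert>
#include <string>
#include <vector>

// Simplified sketch, not the real solver: a local carries its declared type
// plus one "state" type per later assignment.
struct LocalTypeStates
{
    std::string declared;                 // from the annotation at the definition
    std::vector<std::string> assignments; // inferred type of each later assignment

    // Type observed at a use site: the annotation at the initial definition,
    // otherwise the state produced by the most recent assignment.
    std::string observedAfter(size_t assignmentCount) const
    {
        if (assignmentCount == 0)
            return declared;
        return assignments[assignmentCount - 1];
    }
};

int main()
{
    // Mirrors the test above: `local foo: MyType = 5` then `foo = 7`.
    LocalTypeStates foo{"number | string", {"number"}};
    assert(foo.observedAfter(0) == "number | string"); // first print(foo)
    assert(foo.observedAfter(1) == "number");          // print(foo) after foo = 7
}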
TEST_SUITE_END();


@ -1,7 +1,6 @@
AstQuery.last_argument_function_call_type
AutocompleteTest.anonymous_autofilled_generic_on_argument_type_pack_vararg
AutocompleteTest.anonymous_autofilled_generic_type_pack_vararg
AutocompleteTest.autocomplete_string_singleton_equality
AutocompleteTest.autocomplete_string_singletons
AutocompleteTest.do_wrong_compatible_nonself_calls
AutocompleteTest.string_singleton_as_table_key
@ -34,11 +33,7 @@ BuiltinTests.string_format_use_correct_argument2
BuiltinTests.table_freeze_is_generic
BuiltinTests.tonumber_returns_optional_number_type
ControlFlowAnalysis.tagged_unions
DefinitionTests.class_definition_indexer
DefinitionTests.class_definition_overload_metamethods
DefinitionTests.class_definition_string_props
DefinitionTests.declaring_generic_functions
DefinitionTests.definition_file_classes
Differ.metatable_metamissing_left
Differ.metatable_metamissing_right
Differ.metatable_metanormal
@ -81,7 +76,6 @@ GenericsTests.properties_can_be_instantiated_polytypes
GenericsTests.quantify_functions_even_if_they_have_an_explicit_generic
GenericsTests.self_recursive_instantiated_param
GenericsTests.type_parameters_can_be_polytypes
GenericsTests.typefuns_sharing_types
IntersectionTypes.CLI-44817
IntersectionTypes.error_detailed_intersection_all
IntersectionTypes.error_detailed_intersection_part
@ -108,10 +102,8 @@ IntersectionTypes.overloadeded_functions_with_weird_typepacks_3
IntersectionTypes.overloadeded_functions_with_weird_typepacks_4
IntersectionTypes.table_write_sealed_indirect
IntersectionTypes.union_saturate_overloaded_functions
Linter.FormatStringTyped
Linter.TableOperationsIndexer
ModuleTests.clone_self_property
Negations.negated_string_is_a_subtype_of_string
NonstrictModeTests.inconsistent_module_return_types_are_ok
NonstrictModeTests.infer_nullary_function
NonstrictModeTests.infer_the_maximum_number_of_values_the_function_could_return
@ -143,7 +135,6 @@ ProvisionalTests.table_insert_with_a_singleton_argument
ProvisionalTests.table_unification_infinite_recursion
ProvisionalTests.typeguard_inference_incomplete
ProvisionalTests.while_body_are_also_refined
RefinementTest.assert_a_to_be_truthy_then_assert_a_to_be_number
RefinementTest.call_an_incompatible_function_after_using_typeguard
RefinementTest.dataflow_analysis_can_tell_refinements_when_its_appropriate_to_refine_into_nil_or_never
RefinementTest.discriminate_from_isa_of_x
@ -159,8 +150,6 @@ RefinementTest.refine_a_param_that_got_resolved_during_constraint_solving_stage
RefinementTest.refine_a_property_of_some_global
RefinementTest.refine_param_of_type_folder_or_part_without_using_typeof
RefinementTest.refine_unknown_to_table_then_clone_it
RefinementTest.refinements_should_preserve_error_suppression
RefinementTest.string_not_equal_to_string_or_nil
RefinementTest.truthy_constraint_on_properties
RefinementTest.type_annotations_arent_relevant_when_doing_dataflow_analysis
RefinementTest.type_guard_narrowed_into_nothingness
@ -196,17 +185,13 @@ TableTests.explicitly_typed_table_error
TableTests.explicitly_typed_table_with_indexer
TableTests.generalize_table_argument
TableTests.generic_table_instantiation_potential_regression
TableTests.indexer_mismatch
TableTests.indexer_on_sealed_table_must_unify_with_free_table
TableTests.indexers_get_quantified_too
TableTests.infer_indexer_from_array_like_table
TableTests.infer_indexer_from_its_variable_type_and_unifiable
TableTests.inferred_return_type_of_free_table
TableTests.instantiate_table_cloning_3
TableTests.invariant_table_properties_means_instantiating_tables_in_assignment_is_unsound
TableTests.invariant_table_properties_means_instantiating_tables_in_call_is_unsound
TableTests.length_operator_intersection
TableTests.length_operator_non_table_union
TableTests.length_operator_union
TableTests.less_exponential_blowup_please
TableTests.meta_add
@ -226,13 +211,10 @@ TableTests.parameter_was_set_an_indexer_and_bounded_by_string
TableTests.pass_a_union_of_tables_to_a_function_that_requires_a_table
TableTests.pass_a_union_of_tables_to_a_function_that_requires_a_table_2
TableTests.persistent_sealed_table_is_immutable
TableTests.prop_access_on_key_whose_types_mismatches
TableTests.prop_access_on_unions_of_indexers_where_key_whose_types_mismatches
TableTests.quantify_even_that_table_was_never_exported_at_all
TableTests.quantify_metatables_of_metatables_of_table
TableTests.reasonable_error_when_adding_a_nonexistent_property_to_an_array_like_table
TableTests.recursive_metatable_type_call
TableTests.result_is_always_any_if_lhs_is_any
TableTests.right_table_missing_key2
TableTests.scalar_is_a_subtype_of_a_compatible_polymorphic_shape_type
TableTests.scalar_is_not_a_subtype_of_a_compatible_polymorphic_shape_type
@ -251,7 +233,6 @@ TableTests.table_subtyping_with_missing_props_dont_report_multiple_errors2
TableTests.table_unification_4
TableTests.table_unifies_into_map
TableTests.type_mismatch_on_massive_table_is_cut_short
TableTests.unification_of_unions_in_a_self_referential_type
TableTests.used_colon_instead_of_dot
TableTests.used_dot_instead_of_colon
TableTests.when_augmenting_an_unsealed_table_with_an_indexer_apply_the_correct_scope_to_the_indexer_type
@ -262,7 +243,6 @@ ToString.named_metatable_toStringNamedFunction
ToString.no_parentheses_around_cyclic_function_type_in_intersection
ToString.pick_distinct_names_for_mixed_explicit_and_implicit_generics
ToString.primitive
ToString.tostring_unsee_ttv_if_array
ToString.toStringDetailed2
ToString.toStringErrorPack
TryUnifyTests.members_of_failed_typepack_unification_are_unified_with_errorType
@ -278,19 +258,13 @@ TypeAliases.mutually_recursive_types_restriction_not_ok_2
TypeAliases.mutually_recursive_types_swapsies_not_ok
TypeAliases.recursive_types_restriction_not_ok
TypeAliases.report_shadowed_aliases
TypeAliases.saturate_to_first_type_pack
TypeAliases.type_alias_local_mutation
TypeAliases.type_alias_local_rename
TypeAliases.type_alias_locations
TypeAliases.type_alias_of_an_imported_recursive_generic_type
TypeAliases.use_table_name_and_generic_params_in_errors
TypeFamilyTests.add_family_at_work
TypeFamilyTests.family_as_fn_arg
TypeFamilyTests.family_as_fn_ret
TypeFamilyTests.function_internal_families
TypeFamilyTests.internal_families_raise_errors
TypeFamilyTests.table_internal_families
TypeFamilyTests.type_families_inhabited_with_normalization
TypeFamilyTests.unsolvable_family
TypeInfer.be_sure_to_use_active_txnlog_when_evaluating_a_variadic_overload
TypeInfer.check_type_infer_recursion_count
@ -305,34 +279,24 @@ TypeInfer.globals2
TypeInfer.globals_are_banned_in_strict_mode
TypeInfer.infer_through_group_expr
TypeInfer.no_stack_overflow_from_isoptional
TypeInfer.promote_tail_type_packs
TypeInfer.recursive_function_that_invokes_itself_with_a_refinement_of_its_parameter
TypeInfer.recursive_function_that_invokes_itself_with_a_refinement_of_its_parameter_2
TypeInfer.stringify_nested_unions_with_optionals
TypeInfer.tc_after_error_recovery_no_replacement_name_in_error
TypeInfer.type_infer_recursion_limit_no_ice
TypeInfer.type_infer_recursion_limit_normalizer
TypeInfer.unify_nearly_identical_recursive_types
TypeInferAnyError.any_type_propagates
TypeInferAnyError.assign_prop_to_table_by_calling_any_yields_any
TypeInferAnyError.call_to_any_yields_any
TypeInferAnyError.can_subscript_any
TypeInferAnyError.for_in_loop_iterator_is_error
TypeInferAnyError.for_in_loop_iterator_is_error2
TypeInferAnyError.metatable_of_any_can_be_a_table
TypeInferAnyError.quantify_any_does_not_bind_to_itself
TypeInferAnyError.replace_every_free_type_when_unifying_a_complex_function_with_any
TypeInferClasses.callable_classes
TypeInferClasses.cannot_unify_class_instance_with_primitive
TypeInferClasses.class_type_mismatch_with_name_conflict
TypeInferClasses.class_unification_type_mismatch_is_correct_order
TypeInferClasses.detailed_class_unification_error
TypeInferClasses.indexable_classes
TypeInferClasses.intersections_of_unions_of_classes
TypeInferClasses.optional_class_field_access_error
TypeInferClasses.table_class_unification_reports_sane_errors_for_missing_properties
TypeInferClasses.table_indexers_are_invariant
TypeInferClasses.unions_of_intersections_of_classes
TypeInferClasses.we_can_report_when_someone_is_trying_to_use_a_table_rather_than_a_class
TypeInferFunctions.another_other_higher_order_function
TypeInferFunctions.bidirectional_checking_of_callback_property
@ -364,11 +328,9 @@ TypeInferFunctions.infer_anonymous_function_arguments
TypeInferFunctions.infer_anonymous_function_arguments_outside_call
TypeInferFunctions.infer_generic_function_function_argument
TypeInferFunctions.infer_generic_function_function_argument_overloaded
TypeInferFunctions.infer_return_type_from_selected_overload
TypeInferFunctions.infer_return_value_type
TypeInferFunctions.inferred_higher_order_functions_are_quantified_at_the_right_time3
TypeInferFunctions.instantiated_type_packs_must_have_a_non_null_scope
TypeInferFunctions.list_all_overloads_if_no_overload_takes_given_argument_count
TypeInferFunctions.list_only_alternative_overloads_that_match_argument_count
TypeInferFunctions.luau_subtyping_is_np_hard
TypeInferFunctions.no_lossy_function_type
@ -376,7 +338,6 @@ TypeInferFunctions.occurs_check_failure_in_function_return_type
TypeInferFunctions.other_things_are_not_related_to_function
TypeInferFunctions.param_1_and_2_both_takes_the_same_generic_but_their_arguments_are_incompatible
TypeInferFunctions.param_1_and_2_both_takes_the_same_generic_but_their_arguments_are_incompatible_2
TypeInferFunctions.record_matching_overload
TypeInferFunctions.report_exiting_without_return_nonstrict
TypeInferFunctions.return_type_by_overload
TypeInferFunctions.too_few_arguments_variadic
@ -396,13 +357,10 @@ TypeInferLoops.for_in_loop_error_on_iterator_requiring_args_but_none_given
TypeInferLoops.for_in_loop_on_error
TypeInferLoops.for_in_loop_on_non_function
TypeInferLoops.for_in_loop_with_next
TypeInferLoops.for_in_with_an_iterator_of_type_any
TypeInferLoops.for_loop
TypeInferLoops.ipairs_produces_integral_indices
TypeInferLoops.iterate_over_free_table
TypeInferLoops.iterate_over_properties
TypeInferLoops.iteration_no_table_passed
TypeInferLoops.iteration_regression_issue_69967
TypeInferLoops.iteration_regression_issue_69967_alt
TypeInferLoops.loop_iter_metamethod_nil
TypeInferLoops.loop_iter_metamethod_ok
@ -415,11 +373,8 @@ TypeInferLoops.repeat_loop
TypeInferLoops.varlist_declared_by_for_in_loop_should_be_free
TypeInferLoops.while_loop
TypeInferModules.custom_require_global
TypeInferModules.do_not_modify_imported_types
TypeInferModules.do_not_modify_imported_types_5
TypeInferModules.require
TypeInferOOP.CheckMethodsOfSealed
TypeInferOOP.dont_suggest_using_colon_rather_than_dot_if_another_overload_works
TypeInferOOP.dont_suggest_using_colon_rather_than_dot_if_it_wont_help_2
TypeInferOOP.dont_suggest_using_colon_rather_than_dot_if_not_defined_with_colon
TypeInferOOP.inferring_hundreds_of_self_calls_should_not_suffocate_memory
@ -432,12 +387,9 @@ TypeInferOperators.compound_assign_result_must_be_compatible_with_var
TypeInferOperators.concat_op_on_free_lhs_and_string_rhs
TypeInferOperators.concat_op_on_string_lhs_and_free_rhs
TypeInferOperators.disallow_string_and_types_without_metatables_from_arithmetic_binary_ops
TypeInferOperators.equality_operations_succeed_if_any_union_branch_succeeds
TypeInferOperators.error_on_invalid_operand_types_to_relational_operators2
TypeInferOperators.luau_polyfill_is_array
TypeInferOperators.mm_comparisons_must_return_a_boolean
TypeInferOperators.reworked_and
TypeInferOperators.reworked_or
TypeInferOperators.strict_binary_op_where_lhs_unknown
TypeInferOperators.typecheck_overloaded_multiply_that_is_an_intersection
TypeInferOperators.typecheck_overloaded_multiply_that_is_an_intersection_on_rhs
@ -445,7 +397,6 @@ TypeInferOperators.typecheck_unary_len_error
TypeInferOperators.typecheck_unary_minus_error
TypeInferOperators.UnknownGlobalCompoundAssign
TypeInferPrimitives.CheckMethodsOfNumber
TypeInferPrimitives.string_function_indirect
TypeInferPrimitives.string_index
TypeInferUnknownNever.assign_to_local_which_is_never
TypeInferUnknownNever.index_on_union_of_tables_for_properties_that_is_never
@ -457,7 +408,6 @@ TypePackTests.fuzz_typepack_iter_follow_2
TypePackTests.pack_tail_unification_check
TypePackTests.type_alias_backwards_compatible
TypePackTests.type_alias_default_type_errors
TypePackTests.type_alias_type_packs_import
TypePackTests.unify_variadic_tails_in_arguments
TypeSingletons.enums_using_singletons_mismatch
TypeSingletons.error_detailed_tagged_union_mismatch_bool
@ -465,6 +415,7 @@ TypeSingletons.error_detailed_tagged_union_mismatch_string
TypeSingletons.overloaded_function_call_with_singletons_mismatch
TypeSingletons.return_type_of_f_is_not_widened
TypeSingletons.table_properties_type_error_escapes
TypeSingletons.tagged_unions_immutable_tag
TypeSingletons.widen_the_supertype_if_it_is_free_and_subtype_has_singleton
TypeStatesTest.prototyped_recursive_functions_but_has_future_assignments
TypeStatesTest.typestates_preserve_error_suppression_properties