Sync to upstream/release/617 (#1204)

# What's Changed

* Fix a case where the stack wasn't completely cleaned up when
`debug.info` errored after being passed the `"f"` option and a thread (see
the sketch after this list).
* Fix a case of an uninitialized field in `luaF_newproto`.
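
A minimal Luau sketch of the affected `debug.info` call, assuming the duplicate-option error path shown in the `ldblib.cpp` hunk later in this diff; the coroutine body and option string are illustrative:

```lua
local co = coroutine.create(function()
    coroutine.yield()
end)
coroutine.resume(co) -- leave the coroutine suspended so level 1 is a real frame

-- "ff" repeats the "f" option and raises a "duplicate option" argument error;
-- the fix restores `co`'s stack top so the slot reserved for the "f" result
-- is not left behind when the error is thrown
print(pcall(debug.info, co, 1, "ff"))
```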

### New Type Solver

* When a local is captured in a function, don't add a new entry to
`DfgScope::bindings` if the capture occurs within a loop.
* Fix a poor performance characteristic during unification by not trying
to simplify an intersection.
* Fix a case of multiple constraints mutating the same blocked type
causing incorrect inferences.
* Fix a case of assertion failure when overload resolution encounters a
return type pack mismatch.
* When refining a property of the top `table` type, we no longer signal
an unknown property error (see the example after this list).
* Fix a misuse of free types when trying to infer the type of a
subscript expression.
* Fix a case of assertion failure when trying to resolve an overload
from `never`.
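
A small Luau sketch of the `table` refinement change above; the property name is illustrative:

```lua
--!strict
local function getTag(value: unknown)
    -- `typeof(value) == "table"` refines `value` to the top `table` type; the
    -- `value.tag` test in this condition is now treated as a refinement whose
    -- type is `unknown` instead of reporting an unknown-property error
    if typeof(value) == "table" and value.tag ~= nil then
        return value.tag
    end
    return nil
end
```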

### Native Code Generation

* Fix dead store optimization issues caused by partial stores.
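
The partial stores in question come from builtin lowerings such as `math.frexp`, `math.modf`, and `math.sign`, whose result tags are now written explicitly; a trivial Luau snippet that exercises those paths (running it under native codegen is assumed):

```lua
local function exerciseBuiltins(x: number): number
    local m, e = math.frexp(x)
    local i, f = math.modf(x)
    -- each result is a number whose tag store could previously be dropped as
    -- dead even though only part of the TValue had been written
    return m + e + i + f + math.sign(x)
end

print(exerciseBuiltins(3.75))
```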

---

### Internal Contributors

Co-authored-by: Aaron Weiss <aaronweiss@roblox.com>
Co-authored-by: Andy Friesen <afriesen@roblox.com>
Co-authored-by: Aviral Goel <agoel@roblox.com>
Co-authored-by: David Cope <dcope@roblox.com>
Co-authored-by: Lily Brown <lbrown@roblox.com>
Co-authored-by: Vyacheslav Egorov <vegorov@roblox.com>

---------

Co-authored-by: Aaron Weiss <aaronweiss@roblox.com>
Co-authored-by: Andy Friesen <afriesen@roblox.com>
Co-authored-by: Vighnesh <vvijay@roblox.com>
Co-authored-by: Aviral Goel <agoel@roblox.com>
Co-authored-by: David Cope <dcope@roblox.com>
Co-authored-by: Lily Brown <lbrown@roblox.com>
Co-authored-by: Vyacheslav Egorov <vegorov@roblox.com>
Alexander McCord 2024-03-15 16:37:39 -07:00 committed by GitHub
parent a7683110d7
commit d21b6fdb93
54 changed files with 1688 additions and 266 deletions

View File

@ -157,6 +157,15 @@ struct HasPropConstraint
std::string prop;
ValueContext context;
// We want to track if this `HasPropConstraint` comes from a conditional.
// If it does, we're going to change the behavior of property look-up a bit.
// In particular, we're going to return `unknownType` for property lookups
// on `table` or inexact table types where the property is not present.
//
// This allows us to refine table types to have additional properties
// without reporting errors in typechecking on the property tests.
bool inConditional = false;
// HACK: We presently need types like true|false or string|"hello" when
// deciding whether a particular literal expression should have a singleton
// type. This boolean is set to true when extracting the property type of a
@ -193,6 +202,19 @@ struct SetPropConstraint
TypeId propType;
};
// resultType ~ hasIndexer subjectType indexType
//
// If the subject type is a table or table-like thing that supports indexing,
// populate the type result with the result type of such an index operation.
//
// If the subject is not indexable, resultType is bound to errorType.
struct HasIndexerConstraint
{
TypeId resultType;
TypeId subjectType;
TypeId indexType;
};
// result ~ setIndexer subjectType indexType propType
//
// If the subject is a table or table-like thing that already has an indexer,
@ -267,7 +289,7 @@ struct ReducePackConstraint
using ConstraintV = Variant<SubtypeConstraint, PackSubtypeConstraint, GeneralizationConstraint, InstantiationConstraint, IterableConstraint,
NameConstraint, TypeAliasExpansionConstraint, FunctionCallConstraint, FunctionCheckConstraint, PrimitiveTypeConstraint, HasPropConstraint,
SetPropConstraint, SetIndexerConstraint, SingletonOrTopTypeConstraint, UnpackConstraint, SetOpConstraint, ReduceConstraint, ReducePackConstraint,
SetPropConstraint, HasIndexerConstraint, SetIndexerConstraint, SingletonOrTopTypeConstraint, UnpackConstraint, SetOpConstraint, ReduceConstraint, ReducePackConstraint,
EqualityConstraint>;
struct Constraint
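
A minimal Luau illustration of the code shape the new `HasIndexerConstraint` describes; the function is illustrative:

```lua
--!strict
-- typing `xs[1]` now emits `resultType ~ hasIndexer typeof(xs) number` instead
-- of unifying `xs` with a fresh table type that carries the indexer
local function firstOf(xs: {string}): string
    return xs[1]
end

print(firstOf({"a", "b"}))
```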

View File

@ -71,6 +71,8 @@ struct ConstraintGenerator
// This is null when the CG is initially constructed.
Scope* rootScope;
TypeContext typeContext = TypeContext::Default;
struct InferredBinding
{
Scope* scope;

View File

@ -131,6 +131,10 @@ struct ConstraintSolver
bool tryDispatch(const PrimitiveTypeConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const HasPropConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const SetPropConstraint& c, NotNull<const Constraint> constraint, bool force);
bool tryDispatchHasIndexer(int& recursionDepth, NotNull<const Constraint> constraint, TypeId subjectType, TypeId indexType, TypeId resultType);
bool tryDispatch(const HasIndexerConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const SetIndexerConstraint& c, NotNull<const Constraint> constraint, bool force);
bool tryDispatch(const SingletonOrTopTypeConstraint& c, NotNull<const Constraint> constraint);
bool tryDispatch(const UnpackConstraint& c, NotNull<const Constraint> constraint);
@ -148,9 +152,9 @@ struct ConstraintSolver
TypeId nextTy, TypeId tableTy, TypeId firstIndexTy, const IterableConstraint& c, NotNull<const Constraint> constraint, bool force);
std::pair<std::vector<TypeId>, std::optional<TypeId>> lookupTableProp(
TypeId subjectType, const std::string& propName, ValueContext context, bool suppressSimplification = false);
TypeId subjectType, const std::string& propName, ValueContext context, bool inConditional = false, bool suppressSimplification = false);
std::pair<std::vector<TypeId>, std::optional<TypeId>> lookupTableProp(
TypeId subjectType, const std::string& propName, ValueContext context, bool suppressSimplification, DenseHashSet<TypeId>& seen);
TypeId subjectType, const std::string& propName, ValueContext context, bool inConditional, bool suppressSimplification, DenseHashSet<TypeId>& seen);
void block(NotNull<const Constraint> target, NotNull<const Constraint> constraint);
/**

View File

@ -1,8 +1,12 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once
#include "Luau/Common.h"
#include "Luau/DenseHash.h"
LUAU_FASTFLAG(LuauFixSetIter)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
namespace Luau
{
@ -124,10 +128,15 @@ public:
using difference_type = ptrdiff_t;
using iterator_category = std::forward_iterator_tag;
const_iterator(typename Impl::const_iterator impl, typename Impl::const_iterator end)
: impl(impl)
, end(end)
const_iterator(typename Impl::const_iterator impl_, typename Impl::const_iterator end_)
: impl(impl_)
, end(end_)
{
if (FFlag::LuauFixSetIter || FFlag::DebugLuauDeferredConstraintResolution)
{
while (impl != end && impl->second == false)
++impl;
}
}
const T& operator*() const
@ -140,7 +149,6 @@ public:
return &impl->first;
}
bool operator==(const const_iterator& other) const
{
return impl == other.impl;

View File

@ -33,6 +33,7 @@ struct Scope;
using ScopePtr = std::shared_ptr<Scope>;
struct TypeFamily;
struct Constraint;
/**
* There are three kinds of type variables:
@ -144,6 +145,10 @@ struct BlockedType
{
BlockedType();
int index;
// The constraint that is intended to unblock this type. Other constraints
// should block on this constraint if present.
Constraint* owner = nullptr;
};
struct PrimitiveType

View File

@ -22,6 +22,37 @@ enum class ValueContext
RValue
};
/// the current context of the type checker
enum class TypeContext
{
/// the default context
Default,
/// inside of a condition
Condition,
};
bool inConditional(const TypeContext& context);
// sets the given type context to `Condition` and restores it to its original
// value when the struct drops out of scope
struct InConditionalContext
{
TypeContext* typeContext;
TypeContext oldValue;
InConditionalContext(TypeContext* c)
: typeContext(c)
, oldValue(*c)
{
*typeContext = TypeContext::Condition;
}
~InConditionalContext()
{
*typeContext = oldValue;
}
};
using ScopePtr = std::shared_ptr<struct Scope>;
std::optional<TypeId> findMetatableEntry(

View File

@ -15,7 +15,6 @@
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
LUAU_FASTFLAGVARIABLE(LuauAutocompleteStringLiteralBounds, false);
LUAU_FASTFLAGVARIABLE(LuauAutocompleteTableKeysNoInitialCharacter, false);
static const std::unordered_set<std::string> kStatementStartingKeywords = {
"while", "if", "local", "repeat", "function", "do", "for", "return", "break", "continue", "type", "export"};
@ -1741,7 +1740,7 @@ static AutocompleteResult autocomplete(const SourceModule& sourceModule, const M
}
}
}
else if (AstExprTable* exprTable = node->as<AstExprTable>(); exprTable && FFlag::LuauAutocompleteTableKeysNoInitialCharacter)
else if (AstExprTable* exprTable = node->as<AstExprTable>())
{
AutocompleteEntryMap result;

View File

@ -1175,7 +1175,10 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatCompoundAss
ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatIf* ifStatement)
{
RefinementId refinement = check(scope, ifStatement->condition, std::nullopt).refinement;
RefinementId refinement = [&](){
InConditionalContext flipper{&typeContext};
return check(scope, ifStatement->condition, std::nullopt).refinement;
}();
ScopePtr thenScope = childScope(ifStatement->thenbody, scope);
applyRefinements(thenScope, ifStatement->condition->location, refinement);
@ -1910,7 +1913,7 @@ Inference ConstraintGenerator::checkIndexName(const ScopePtr& scope, const Refin
scope->rvalueRefinements[key->def] = result;
}
addConstraint(scope, indexee->location, HasPropConstraint{result, obj, std::move(index), ValueContext::RValue});
addConstraint(scope, indexee->location, HasPropConstraint{result, obj, std::move(index), ValueContext::RValue, inConditional(typeContext)});
if (key)
return Inference{result, refinementArena.proposition(key, builtinTypes->truthyType)};
@ -1935,7 +1938,7 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprIndexExpr* in
TypeId obj = check(scope, indexExpr->expr).ty;
TypeId indexType = check(scope, indexExpr->index).ty;
TypeId result = freshType(scope);
TypeId result = arena->addType(BlockedType{});
const RefinementKey* key = dfg->getRefinementKey(indexExpr);
if (key)
@ -1946,10 +1949,7 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprIndexExpr* in
scope->rvalueRefinements[key->def] = result;
}
TableIndexer indexer{indexType, result};
TypeId tableType = arena->addType(TableType{TableType::Props{}, TableIndexer{indexType, result}, TypeLevel{}, scope.get(), TableState::Free});
addConstraint(scope, indexExpr->expr->location, SubtypeConstraint{obj, tableType});
addConstraint(scope, indexExpr->expr->location, HasIndexerConstraint{result, obj, indexType});
if (key)
return Inference{result, refinementArena.proposition(key, builtinTypes->truthyType)};
@ -2200,8 +2200,11 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprBinary* binar
Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprIfElse* ifElse, std::optional<TypeId> expectedType)
{
ScopePtr condScope = childScope(ifElse->condition, scope);
RefinementId refinement = check(condScope, ifElse->condition).refinement;
RefinementId refinement = [&](){
InConditionalContext flipper{&typeContext};
ScopePtr condScope = childScope(ifElse->condition, scope);
return check(condScope, ifElse->condition).refinement;
}();
ScopePtr thenScope = childScope(ifElse->trueExpr, scope);
applyRefinements(thenScope, ifElse->trueExpr->location, refinement);
@ -2406,10 +2409,17 @@ std::optional<TypeId> ConstraintGenerator::checkLValue(const ScopePtr& scope, As
if (transform)
{
addConstraint(scope, local->location,
Constraint* owner = nullptr;
if (auto blocked = get<BlockedType>(*ty))
owner = blocked->owner;
auto unpackC = addConstraint(scope, local->location,
UnpackConstraint{arena->addTypePack({*ty}), arena->addTypePack({assignedTy}),
/*resultIsLValue*/ true});
if (owner)
unpackC->dependencies.push_back(NotNull{owner});
recordInferredBinding(local->local, *ty);
}
@ -2538,6 +2548,7 @@ TypeId ConstraintGenerator::updateProperty(const ScopePtr& scope, AstExpr* expr,
TypeId updatedType = arena->addType(BlockedType{});
auto setC = addConstraint(scope, expr->location, SetPropConstraint{updatedType, subjectType, std::move(segmentStrings), assignedTy});
getMutable<BlockedType>(updatedType)->owner = setC.get();
TypeId prevSegmentTy = updatedType;
for (size_t i = 0; i < segments.size(); ++i)
@ -2545,7 +2556,7 @@ TypeId ConstraintGenerator::updateProperty(const ScopePtr& scope, AstExpr* expr,
TypeId segmentTy = arena->addType(BlockedType{});
module->astTypes[exprs[i]] = segmentTy;
ValueContext ctx = i == segments.size() - 1 ? ValueContext::LValue : ValueContext::RValue;
auto hasC = addConstraint(scope, expr->location, HasPropConstraint{segmentTy, prevSegmentTy, segments[i], ctx});
auto hasC = addConstraint(scope, expr->location, HasPropConstraint{segmentTy, prevSegmentTy, segments[i], ctx, inConditional(typeContext)});
setC->dependencies.push_back(hasC);
prevSegmentTy = segmentTy;
}
@ -2582,16 +2593,12 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprTable* expr,
interiorTypes.back().push_back(ty);
auto createIndexer = [this, scope, ttv](const Location& location, TypeId currentIndexType, TypeId currentResultType) {
if (!ttv->indexer)
{
TypeId indexType = this->freshType(scope);
TypeId resultType = this->freshType(scope);
ttv->indexer = TableIndexer{indexType, resultType};
}
TypeIds indexKeyLowerBound;
TypeIds indexValueLowerBound;
addConstraint(scope, location, SubtypeConstraint{ttv->indexer->indexType, currentIndexType});
addConstraint(scope, location, SubtypeConstraint{ttv->indexer->indexResultType, currentResultType});
auto createIndexer = [&indexKeyLowerBound, &indexValueLowerBound](const Location& location, TypeId currentIndexType, TypeId currentResultType) {
indexKeyLowerBound.insert(follow(currentIndexType));
indexValueLowerBound.insert(follow(currentResultType));
};
std::optional<TypeId> annotatedKeyType;
@ -2633,7 +2640,7 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprTable* expr,
expectedValueType = arena->addType(BlockedType{});
addConstraint(scope, item.value->location,
HasPropConstraint{
*expectedValueType, *expectedType, stringKey->value.data, ValueContext::RValue, /*suppressSimplification*/ true});
*expectedValueType, *expectedType, stringKey->value.data, ValueContext::RValue, /*inConditional*/ inConditional(typeContext), /*suppressSimplification*/ true});
}
}
}
@ -2705,6 +2712,23 @@ Inference ConstraintGenerator::check(const ScopePtr& scope, AstExprTable* expr,
}
}
if (!indexKeyLowerBound.empty())
{
LUAU_ASSERT(!indexValueLowerBound.empty());
TypeId indexKey = indexKeyLowerBound.size() == 1
? *indexKeyLowerBound.begin()
: arena->addType(UnionType{std::vector(indexKeyLowerBound.begin(), indexKeyLowerBound.end())})
;
TypeId indexValue = indexValueLowerBound.size() == 1
? *indexValueLowerBound.begin()
: arena->addType(UnionType{std::vector(indexValueLowerBound.begin(), indexValueLowerBound.end())})
;
ttv->indexer = TableIndexer{indexKey, indexValue};
}
return Inference{ty};
}
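
A hedged sketch of the table-literal change in the hunk above: the indexer is now assembled once from the union of the observed key and value types instead of constraining fresh types per entry (the inferred type in the comment is approximate):

```lua
--!strict
-- keys observed: number and string; values observed: string and number
local mixed = { [1] = "a", [2] = "b", ["three"] = 3 }
-- the collected lower bounds produce roughly { [number | string]: string | number }
```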

View File

@ -11,11 +11,13 @@
#include "Luau/ModuleResolver.h"
#include "Luau/OverloadResolution.h"
#include "Luau/Quantify.h"
#include "Luau/RecursionCounter.h"
#include "Luau/Simplify.h"
#include "Luau/TimeTrace.h"
#include "Luau/ToString.h"
#include "Luau/Type.h"
#include "Luau/TypeFamily.h"
#include "Luau/TypeFwd.h"
#include "Luau/TypeUtils.h"
#include "Luau/Unifier2.h"
#include "Luau/VecDeque.h"
@ -25,6 +27,8 @@
LUAU_FASTFLAGVARIABLE(DebugLuauLogSolver, false);
LUAU_FASTINTVARIABLE(LuauSolverRecursionLimit, 500);
namespace Luau
{
@ -512,6 +516,8 @@ bool ConstraintSolver::tryDispatch(NotNull<const Constraint> constraint, bool fo
success = tryDispatch(*hpc, constraint);
else if (auto spc = get<SetPropConstraint>(*constraint))
success = tryDispatch(*spc, constraint, force);
else if (auto spc = get<HasIndexerConstraint>(*constraint))
success = tryDispatch(*spc, constraint);
else if (auto spc = get<SetIndexerConstraint>(*constraint))
success = tryDispatch(*spc, constraint, force);
else if (auto sottc = get<SingletonOrTopTypeConstraint>(*constraint))
@ -979,6 +985,13 @@ bool ConstraintSolver::tryDispatch(const FunctionCallConstraint& c, NotNull<cons
return true;
}
if (get<NeverType>(fn))
{
asMutable(c.result)->ty.emplace<BoundTypePack>(builtinTypes->neverTypePack);
unblock(c.result, constraint->location);
return true;
}
auto [argsHead, argsTail] = flatten(argsPack);
bool blocked = false;
@ -1258,7 +1271,7 @@ bool ConstraintSolver::tryDispatch(const HasPropConstraint& c, NotNull<const Con
if (isBlocked(subjectType) || get<PendingExpansionType>(subjectType) || get<TypeFamilyInstanceType>(subjectType))
return block(subjectType, constraint);
auto [blocked, result] = lookupTableProp(subjectType, c.prop, c.context, c.suppressSimplification);
auto [blocked, result] = lookupTableProp(subjectType, c.prop, c.context, c.inConditional, c.suppressSimplification);
if (!blocked.empty())
{
for (TypeId blocked : blocked)
@ -1432,6 +1445,186 @@ bool ConstraintSolver::tryDispatch(const SetPropConstraint& c, NotNull<const Con
return true;
}
bool ConstraintSolver::tryDispatchHasIndexer(int& recursionDepth, NotNull<const Constraint> constraint, TypeId subjectType, TypeId indexType, TypeId resultType)
{
RecursionLimiter _rl{&recursionDepth, FInt::LuauSolverRecursionLimit};
subjectType = follow(subjectType);
indexType = follow(indexType);
LUAU_ASSERT(get<BlockedType>(resultType));
if (auto ft = get<FreeType>(subjectType))
{
FreeType freeResult{ft->scope, builtinTypes->neverType, builtinTypes->unknownType};
asMutable(resultType)->ty.emplace<FreeType>(freeResult);
TypeId upperBound = arena->addType(TableType{
/* props */ {},
TableIndexer{indexType, resultType},
TypeLevel{},
TableState::Unsealed
});
unify(constraint, subjectType, upperBound);
return true;
}
else if (auto tt = getMutable<TableType>(subjectType))
{
if (auto indexer = tt->indexer)
{
unify(constraint, indexType, indexer->indexType);
LUAU_ASSERT(get<BlockedType>(resultType));
bindBlockedType(resultType, indexer->indexResultType, subjectType, constraint->location);
return true;
}
else if (tt->state == TableState::Unsealed)
{
// FIXME this is greedy.
FreeType freeResult{tt->scope, builtinTypes->neverType, builtinTypes->unknownType};
asMutable(resultType)->ty.emplace<FreeType>(freeResult);
tt->indexer = TableIndexer{indexType, resultType};
return true;
}
}
else if (auto mt = get<MetatableType>(subjectType))
return tryDispatchHasIndexer(recursionDepth, constraint, mt->table, indexType, resultType);
else if (auto ct = get<ClassType>(subjectType))
{
if (auto indexer = ct->indexer)
{
unify(constraint, indexType, indexer->indexType);
asMutable(resultType)->ty.emplace<BoundType>(indexer->indexResultType);
return true;
}
else if (isString(indexType))
{
asMutable(resultType)->ty.emplace<BoundType>(builtinTypes->unknownType);
return true;
}
}
else if (auto it = get<IntersectionType>(subjectType))
{
// subjectType <: {[indexType]: resultType}
//
// 'a & ~(false | nil) <: {[indexType]: resultType}
//
// 'a <: {[indexType]: resultType}
// ~(false | nil) <: {[indexType]: resultType}
Set<TypeId> parts{nullptr};
for (TypeId part: it)
parts.insert(follow(part));
Set<TypeId> results{nullptr};
for (TypeId part: parts)
{
TypeId r = arena->addType(BlockedType{});
bool ok = tryDispatchHasIndexer(recursionDepth, constraint, part, indexType, r);
// FIXME: It's too late to stop and block now I think? We should
// scan for blocked types before we actually do anything.
LUAU_ASSERT(ok);
r = follow(r);
if (!get<ErrorType>(r))
results.insert(r);
}
if (0 == results.size())
asMutable(resultType)->ty.emplace<BoundType>(builtinTypes->errorType);
else if (1 == results.size())
asMutable(resultType)->ty.emplace<BoundType>(*results.begin());
else
asMutable(resultType)->ty.emplace<IntersectionType>(std::vector(results.begin(), results.end()));
return true;
}
else if (auto ut = get<UnionType>(subjectType))
{
Set<TypeId> parts{nullptr};
for (TypeId part: ut)
parts.insert(follow(part));
Set<TypeId> results{nullptr};
for (TypeId part: parts)
{
TypeId r = arena->addType(BlockedType{});
bool ok = tryDispatchHasIndexer(recursionDepth, constraint, part, indexType, r);
// We should have found all the blocked types ahead of time (see BlockedTypeFinder below)
LUAU_ASSERT(ok);
r = follow(r);
if (!get<ErrorType>(r))
results.insert(r);
}
if (0 == results.size())
asMutable(resultType)->ty.emplace<BoundType>(builtinTypes->errorType);
else if (1 == results.size())
asMutable(resultType)->ty.emplace<BoundType>(*results.begin());
else
asMutable(resultType)->ty.emplace<UnionType>(std::vector(results.begin(), results.end()));
return true;
}
bindBlockedType(resultType, builtinTypes->errorType, subjectType, constraint->location);
return true;
}
namespace
{
struct BlockedTypeFinder : TypeOnceVisitor
{
std::optional<TypeId> blocked;
bool visit(TypeId ty) override
{
// If we've already found one, stop traversing.
return !blocked.has_value();
}
bool visit(TypeId ty, const BlockedType&) override
{
blocked = ty;
return false;
}
};
}
bool ConstraintSolver::tryDispatch(const HasIndexerConstraint& c, NotNull<const Constraint> constraint)
{
const TypeId subjectType = follow(c.subjectType);
const TypeId indexType = follow(c.indexType);
if (isBlocked(subjectType))
return block(subjectType, constraint);
if (isBlocked(indexType))
return block(indexType, constraint);
BlockedTypeFinder btf;
btf.visit(subjectType);
if (btf.blocked)
return block(*btf.blocked, constraint);
int recursionDepth = 0;
return tryDispatchHasIndexer(recursionDepth, constraint, subjectType, indexType, c.resultType);
}
bool ConstraintSolver::tryDispatch(const SetIndexerConstraint& c, NotNull<const Constraint> constraint, bool force)
{
TypeId subjectType = follow(c.subjectType);
@ -1456,6 +1649,9 @@ bool ConstraintSolver::tryDispatch(const SetIndexerConstraint& c, NotNull<const
{
if (tt->indexer)
{
if (isBlocked(tt->indexer->indexResultType))
return block(tt->indexer->indexResultType, constraint);
// TODO This probably has to be invariant.
unify(constraint, c.indexType, tt->indexer->indexType);
asMutable(c.propType)->ty.emplace<BoundType>(tt->indexer->indexResultType);
@ -1530,6 +1726,7 @@ bool ConstraintSolver::tryDispatch(const UnpackConstraint& c, NotNull<const Cons
auto resultEnd = end(resultPack);
auto apply = [&](TypeId resultTy, TypeId srcTy) {
resultTy = follow(resultTy);
if (auto lt = getMutable<LocalType>(resultTy); c.resultIsLValue && lt)
{
lt->domain = simplifyUnion(builtinTypes, arena, lt->domain, srcTy).result;
@ -1899,14 +2096,14 @@ bool ConstraintSolver::tryDispatchIterableFunction(
}
std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTableProp(
TypeId subjectType, const std::string& propName, ValueContext context, bool suppressSimplification)
TypeId subjectType, const std::string& propName, ValueContext context, bool inConditional, bool suppressSimplification)
{
DenseHashSet<TypeId> seen{nullptr};
return lookupTableProp(subjectType, propName, context, suppressSimplification, seen);
return lookupTableProp(subjectType, propName, context, inConditional, suppressSimplification, seen);
}
std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTableProp(
TypeId subjectType, const std::string& propName, ValueContext context, bool suppressSimplification, DenseHashSet<TypeId>& seen)
TypeId subjectType, const std::string& propName, ValueContext context, bool inConditional, bool suppressSimplification, DenseHashSet<TypeId>& seen)
{
if (seen.contains(subjectType))
return {};
@ -1971,10 +2168,16 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
}
return {{}, result};
}
// if we are in a conditional context, we treat the property as present and `unknown` because
// we may be _refining_ a table to include that property. we will want to revisit this a bit
// in the future once luau has support for exact tables since this only applies when inexact.
if (inConditional)
return {{}, builtinTypes->unknownType};
}
else if (auto mt = get<MetatableType>(subjectType); mt && context == ValueContext::RValue)
{
auto [blocked, result] = lookupTableProp(mt->table, propName, context, suppressSimplification, seen);
auto [blocked, result] = lookupTableProp(mt->table, propName, context, inConditional, suppressSimplification, seen);
if (!blocked.empty() || result)
return {blocked, result};
@ -2005,10 +2208,10 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
}
}
else
return lookupTableProp(indexType, propName, context, suppressSimplification, seen);
return lookupTableProp(indexType, propName, context, inConditional, suppressSimplification, seen);
}
else if (get<MetatableType>(mtt))
return lookupTableProp(mtt, propName, context, suppressSimplification, seen);
return lookupTableProp(mtt, propName, context, inConditional, suppressSimplification, seen);
}
else if (auto ct = get<ClassType>(subjectType))
{
@ -2028,14 +2231,14 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
if (indexProp == metatable->props.end())
return {{}, std::nullopt};
return lookupTableProp(indexProp->second.type(), propName, context, suppressSimplification, seen);
return lookupTableProp(indexProp->second.type(), propName, context, inConditional, suppressSimplification, seen);
}
else if (auto ft = get<FreeType>(subjectType))
{
const TypeId upperBound = follow(ft->upperBound);
if (get<TableType>(upperBound) || get<PrimitiveType>(upperBound))
return lookupTableProp(upperBound, propName, context, suppressSimplification, seen);
return lookupTableProp(upperBound, propName, context, inConditional, suppressSimplification, seen);
// TODO: The upper bound could be an intersection that contains suitable tables or classes.
@ -2067,7 +2270,7 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
for (TypeId ty : utv)
{
auto [innerBlocked, innerResult] = lookupTableProp(ty, propName, context, suppressSimplification, seen);
auto [innerBlocked, innerResult] = lookupTableProp(ty, propName, context, inConditional, suppressSimplification, seen);
blocked.insert(blocked.end(), innerBlocked.begin(), innerBlocked.end());
if (innerResult)
options.insert(*innerResult);
@ -2096,7 +2299,7 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
for (TypeId ty : itv)
{
auto [innerBlocked, innerResult] = lookupTableProp(ty, propName, context, suppressSimplification, seen);
auto [innerBlocked, innerResult] = lookupTableProp(ty, propName, context, inConditional, suppressSimplification, seen);
blocked.insert(blocked.end(), innerBlocked.begin(), innerBlocked.end());
if (innerResult)
options.insert(*innerResult);
@ -2118,6 +2321,14 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
else
return {{}, arena->addType(IntersectionType{std::vector<TypeId>(begin(options), end(options))})};
}
else if (auto pt = get<PrimitiveType>(subjectType))
{
// if we are in a conditional context, we treat the property as present and `unknown` because
// we may be _refining_ a table to include that property. we will want to revisit this a bit
// in the future once luau has support for exact tables since this only applies when inexact.
if (inConditional && pt->type == PrimitiveType::Table)
return {{}, builtinTypes->unknownType};
}
return {{}, std::nullopt};
}
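
A minimal Luau sketch of the `never` call handling added in this hunk:

```lua
--!strict
local function callNever(f: never)
    -- resolving a call through a `never`-typed function previously tripped an
    -- assertion; the call's result pack is now simply `never`
    return f(1, 2)
end
```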

View File

@ -251,8 +251,12 @@ void DataFlowGraphBuilder::joinProps(DfgScope* result, const DfgScope& a, const
DefId DataFlowGraphBuilder::lookup(DfgScope* scope, Symbol symbol)
{
// true if any of the considered scopes are a loop.
bool outsideLoopScope = false;
for (DfgScope* current = scope; current; current = current->parent)
{
outsideLoopScope = outsideLoopScope || current->scopeType == DfgScope::Loop;
if (auto found = current->bindings.find(symbol))
return NotNull{*found};
else if (current->scopeType == DfgScope::Function)
@ -260,7 +264,12 @@ DefId DataFlowGraphBuilder::lookup(DfgScope* scope, Symbol symbol)
FunctionCapture& capture = captures[symbol];
DefId captureDef = defArena->phi({});
capture.captureDefs.push_back(captureDef);
scope->bindings[symbol] = captureDef;
// If we are outside of a loop scope, then we don't want to actually bind
// uses of `symbol` to this new phi node since it will not get populated.
if (!outsideLoopScope)
scope->bindings[symbol] = captureDef;
return NotNull{captureDef};
}
}
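
A Luau sketch of the loop-capture scenario this change addresses; the closure body is illustrative:

```lua
local callbacks = {}
local count = 0

for i = 1, 3 do
    -- `count` is captured by a closure inside a loop; the data-flow graph no
    -- longer records a loop-scope binding for this capture, since the phi node
    -- created for it would never be populated
    table.insert(callbacks, function()
        count += 1
        return count
    end)
end
```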

View File

@ -292,8 +292,13 @@ std::pair<OverloadResolver::Analysis, ErrorVec> OverloadResolver::checkOverload_
if (failedSubPack && failedSuperPack)
{
LUAU_ASSERT(!argExprs->empty());
argLocation = argExprs->at(argExprs->size() - 1)->location;
// If a bug in type inference occurs, we may have a mismatch in the return packs.
// This happens when inference incorrectly leaves the result type of a function free.
// If this happens, we don't want to explode, so we'll use the function's location.
if (argExprs->empty())
argLocation = fnExpr->location;
else
argLocation = argExprs->at(argExprs->size() - 1)->location;
// TODO extract location from the SubtypingResult path and argExprs
switch (reason.variance)

Analysis/src/Set.cpp (new file, 5 lines)
View File

@ -0,0 +1,5 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/Common.h"
LUAU_FASTFLAGVARIABLE(LuauFixSetIter, false)

View File

@ -13,6 +13,7 @@
LUAU_FASTINT(LuauTypeReductionRecursionLimit)
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_DYNAMIC_FASTINTVARIABLE(LuauSimplificationComplexityLimit, 8);
namespace Luau
{
@ -237,16 +238,12 @@ Relation relateTables(TypeId left, TypeId right, SimplifierSeenSet& seen)
LUAU_ASSERT(1 == rightTable->props.size());
// Disjoint props have nothing in common
// t1 with props p1's cannot appear in t2 and t2 with props p2's cannot appear in t1
bool foundPropFromLeftInRight = std::any_of(begin(leftTable->props), end(leftTable->props),
[&](auto prop)
{
return rightTable->props.count(prop.first) > 0;
});
bool foundPropFromRightInLeft = std::any_of(begin(rightTable->props), end(rightTable->props),
[&](auto prop)
{
return leftTable->props.count(prop.first) > 0;
});
bool foundPropFromLeftInRight = std::any_of(begin(leftTable->props), end(leftTable->props), [&](auto prop) {
return rightTable->props.count(prop.first) > 0;
});
bool foundPropFromRightInLeft = std::any_of(begin(rightTable->props), end(rightTable->props), [&](auto prop) {
return leftTable->props.count(prop.first) > 0;
});
if (!foundPropFromLeftInRight && !foundPropFromRightInLeft && leftTable->props.size() >= 1 && rightTable->props.size() >= 1)
return Relation::Disjoint;
@ -689,6 +686,9 @@ TypeId TypeSimplifier::intersectUnionWithType(TypeId left, TypeId right)
bool changed = false;
std::set<TypeId> newParts;
if (leftUnion->options.size() > (size_t)DFInt::LuauSimplificationComplexityLimit)
return arena->addType(IntersectionType{{left, right}});
for (TypeId part : leftUnion)
{
TypeId simplified = intersect(right, part);
@ -723,6 +723,15 @@ TypeId TypeSimplifier::intersectUnions(TypeId left, TypeId right)
std::set<TypeId> newParts;
// Combinatorial blowup moment!!
// combination size
size_t optionSize = (int)leftUnion->options.size() * rightUnion->options.size();
size_t maxSize = DFInt::LuauSimplificationComplexityLimit;
if (optionSize > maxSize)
return arena->addType(IntersectionType{{left, right}});
for (TypeId leftPart : leftUnion)
{
for (TypeId rightPart : rightUnion)
@ -986,6 +995,9 @@ TypeId TypeSimplifier::intersectIntersectionWithType(TypeId left, TypeId right)
const IntersectionType* leftIntersection = get<IntersectionType>(left);
LUAU_ASSERT(leftIntersection);
if (leftIntersection->parts.size() > (size_t)DFInt::LuauSimplificationComplexityLimit)
return arena->addType(IntersectionType{{left, right}});
bool changed = false;
std::set<TypeId> newParts;
@ -1135,6 +1147,10 @@ TypeId TypeSimplifier::intersect(TypeId left, TypeId right)
return right;
if (get<AnyType>(right) && get<ErrorType>(left))
return left;
if (get<UnknownType>(left) && !get<ErrorType>(right))
return right;
if (get<UnknownType>(right) && !get<ErrorType>(left))
return left;
if (get<AnyType>(left))
return arena->addType(UnionType{{right, builtinTypes->errorType}});
if (get<AnyType>(right))
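
A hedged Luau sketch of the new simplification guard: with the default `LuauSimplificationComplexityLimit` of 8, intersecting a union that has more than eight options now keeps the intersection unexpanded instead of distributing over every option (the alias below is illustrative):

```lua
--!strict
type Key = "a" | "b" | "c" | "d" | "e" | "f" | "g" | "h" | "i"

-- simplifying `Key & string` no longer expands into nine per-option
-- intersections during unification
local function accept(k: Key & string): string
    return k
end
```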

View File

@ -1615,7 +1615,6 @@ std::pair<TypeId, ErrorVec> Subtyping::handleTypeFamilyReductionResult(const Typ
{
TypeFamilyContext context{arena, builtinTypes, scope, normalizer, iceReporter, NotNull{&limits}};
TypeId family = arena->addType(*familyInstance);
std::string familyString = toString(family);
FamilyGraphReductionResult result = reduceFamilies(family, {}, context, true);
ErrorVec errors;
if (result.blockedTypes.size() != 0 || result.blockedPacks.size() != 0)

View File

@ -1776,6 +1776,10 @@ std::string toString(const Constraint& constraint, ToStringOptions& opts)
const std::string pathStr = c.path.size() == 1 ? "\"" + c.path[0] + "\"" : "[\"" + join(c.path, "\", \"") + "\"]";
return tos(c.resultType) + " ~ setProp " + tos(c.subjectType) + ", " + pathStr + " " + tos(c.propType);
}
else if constexpr (std::is_same_v<T, HasIndexerConstraint>)
{
return tos(c.resultType) + " ~ hasIndexer " + tos(c.subjectType) + " " + tos(c.indexType);
}
else if constexpr (std::is_same_v<T, SetIndexerConstraint>)
{
return tos(c.resultType) + " ~ setIndexer " + tos(c.subjectType) + " [ " + tos(c.indexType) + " ] " + tos(c.propType);

View File

@ -242,6 +242,7 @@ struct TypeChecker2
Module* module;
TypeArena testArena;
TypeContext typeContext = TypeContext::Default;
std::vector<NotNull<Scope>> stack;
std::vector<TypeId> functionDeclStack;
@ -619,7 +620,11 @@ struct TypeChecker2
void visit(AstStatIf* ifStatement)
{
visit(ifStatement->condition, ValueContext::RValue);
{
InConditionalContext flipper{&typeContext};
visit(ifStatement->condition, ValueContext::RValue);
}
visit(ifStatement->thenbody);
if (ifStatement->elsebody)
visit(ifStatement->elsebody);
@ -1254,20 +1259,8 @@ struct TypeChecker2
if (!originalCallTy)
return;
TypeId fnTy = *originalCallTy;
if (selectedOverloadTy)
{
SubtypingResult result = subtyping->isSubtype(*originalCallTy, *selectedOverloadTy);
if (result.isSubtype)
fnTy = *selectedOverloadTy;
TypeId fnTy = follow(*originalCallTy);
if (result.normalizationTooComplex)
{
reportError(NormalizationTooComplex{}, call->func->location);
return;
}
}
fnTy = follow(fnTy);
if (get<AnyType>(fnTy) || get<ErrorType>(fnTy) || get<NeverType>(fnTy))
return;
@ -1286,6 +1279,19 @@ struct TypeChecker2
return;
}
if (selectedOverloadTy)
{
SubtypingResult result = subtyping->isSubtype(*originalCallTy, *selectedOverloadTy);
if (result.isSubtype)
fnTy = follow(*selectedOverloadTy);
if (result.normalizationTooComplex)
{
reportError(NormalizationTooComplex{}, call->func->location);
return;
}
}
if (call->self)
{
AstExprIndexName* indexExpr = call->func->as<AstExprIndexName>();
@ -1323,6 +1329,8 @@ struct TypeChecker2
args.head.push_back(builtinTypes->anyType);
}
OverloadResolver resolver{
builtinTypes,
NotNull{&testArena},
@ -1332,8 +1340,8 @@ struct TypeChecker2
limits,
call->location,
};
resolver.resolve(fnTy, &args, call->func, &argExprs);
auto norm = normalizer.normalize(fnTy);
if (!norm)
reportError(NormalizationTooComplex{}, call->func->location);
@ -1505,8 +1513,13 @@ struct TypeChecker2
else
reportError(CannotExtendTable{exprType, CannotExtendTable::Indexer, "indexer??"}, indexExpr->location);
}
else if (auto cls = get<ClassType>(exprType); cls && cls->indexer)
testIsSubtype(indexType, cls->indexer->indexType, indexExpr->index->location);
else if (auto cls = get<ClassType>(exprType))
{
if (cls->indexer)
testIsSubtype(indexType, cls->indexer->indexType, indexExpr->index->location);
else
reportError(DynamicPropertyLookupOnClassesUnsafe{exprType}, indexExpr->location);
}
else if (get<UnionType>(exprType) && isOptional(exprType))
{
switch (shouldSuppressErrors(NotNull{&normalizer}, exprType))
@ -2710,7 +2723,11 @@ struct TypeChecker2
return true;
}
return false;
// if we are in a conditional context, we treat the property as present and `unknown` because
// we may be _refining_ `tableTy` to include that property. we will want to revisit this a bit
// in the future once luau has support for exact tables since this only applies when inexact.
return inConditional(typeContext);
}
else if (const ClassType* cls = get<ClassType>(ty))
{
@ -2735,6 +2752,8 @@ struct TypeChecker2
return std::any_of(begin(itv), end(itv), [&](TypeId part) {
return hasIndexTypeFromType(part, prop, context, location, seen, astIndexExprType, errors);
});
else if (const PrimitiveType* pt = get<PrimitiveType>(ty))
return inConditional(typeContext) && pt->type == PrimitiveType::Table;
else
return false;
}

View File

@ -14,6 +14,11 @@ LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution);
namespace Luau
{
bool inConditional(const TypeContext& context)
{
return context == TypeContext::Condition;
}
std::optional<TypeId> findMetatableEntry(
NotNull<BuiltinTypes> builtinTypes, ErrorVec& errors, TypeId type, const std::string& entry, Location location)
{

View File

@ -58,6 +58,12 @@ bool isSupported();
void create(lua_State* L, AllocationCallback* allocationCallback, void* allocationCallbackContext);
void create(lua_State* L);
// Check if native execution is enabled
[[nodiscard]] bool isNativeExecutionEnabled(lua_State* L);
// Enable or disable native execution according to `enabled` argument
void setNativeExecutionEnabled(lua_State* L, bool enabled);
// Builds target function and all inner functions
CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags = 0, CompilationStats* stats = nullptr);

View File

@ -322,7 +322,7 @@ enum class IrCmd : uint8_t
ADJUST_STACK_TO_TOP,
// Execute fastcall builtin function in-place
// A: builtin
// A: unsigned int (builtin id)
// B: Rn (result start)
// C: Rn (argument start)
// D: Rn or Kn or undef (optional second argument)
@ -331,7 +331,7 @@ enum class IrCmd : uint8_t
FASTCALL,
// Call the fastcall builtin function
// A: builtin
// A: unsigned int (builtin id)
// B: Rn (result start)
// C: Rn (argument start)
// D: Rn or Kn or undef (optional second argument)

View File

@ -4,7 +4,7 @@
#include "Luau/Common.h"
#include "Luau/IrData.h"
LUAU_FASTFLAG(LuauCodegenRemoveDeadStores2)
LUAU_FASTFLAG(LuauCodegenRemoveDeadStores3)
namespace Luau
{
@ -188,7 +188,7 @@ static void visitVmRegDefsUses(T& visitor, IrFunction& function, const IrInst& i
visitor.def(inst.b);
break;
case IrCmd::FALLBACK_FORGPREP:
if (FFlag::LuauCodegenRemoveDeadStores2)
if (FFlag::LuauCodegenRemoveDeadStores3)
{
// This instruction doesn't always redefine Rn, Rn+1, Rn+2, so we have to mark it as implicit use
visitor.useRange(vmRegOp(inst.b), 3);

View File

@ -7,6 +7,8 @@
#include <stdarg.h>
#include <stdio.h>
LUAU_FASTFLAG(LuauCodeGenOptVecA64)
namespace Luau
{
namespace CodeGen
@ -557,26 +559,42 @@ void AssemblyBuilderA64::fmov(RegisterA64 dst, RegisterA64 src)
void AssemblyBuilderA64::fmov(RegisterA64 dst, double src)
{
CODEGEN_ASSERT(dst.kind == KindA64::d || dst.kind == KindA64::q);
int imm = getFmovImm(src);
CODEGEN_ASSERT(imm >= 0 && imm <= 256);
// fmov can't encode 0, but movi can; movi is otherwise not useful for fp immediates because it encodes repeating patterns
if (dst.kind == KindA64::d)
if (FFlag::LuauCodeGenOptVecA64)
{
CODEGEN_ASSERT(dst.kind == KindA64::d || dst.kind == KindA64::q);
int imm = getFmovImm(src);
CODEGEN_ASSERT(imm >= 0 && imm <= 256);
// fmov can't encode 0, but movi can; movi is otherwise not useful for fp immediates because it encodes repeating patterns
if (dst.kind == KindA64::d)
{
if (imm == 256)
placeFMOV("movi", dst, src, 0b001'0111100000'000'1110'01'00000);
else
placeFMOV("fmov", dst, src, 0b000'11110'01'1'00000000'100'00000 | (imm << 8));
}
else
{
if (imm == 256)
placeFMOV("movi.4s", dst, src, 0b010'0111100000'000'0000'01'00000);
else
placeFMOV("fmov.4s", dst, src, 0b010'0111100000'000'1111'0'1'00000 | ((imm >> 5) << 11) | (imm & 31));
}
}
else
{
CODEGEN_ASSERT(dst.kind == KindA64::d);
int imm = getFmovImm(src);
CODEGEN_ASSERT(imm >= 0 && imm <= 256);
// fmov can't encode 0, but movi can; movi is otherwise not useful for 64-bit fp immediates because it encodes repeating patterns
if (imm == 256)
placeFMOV("movi", dst, src, 0b001'0111100000'000'1110'01'00000);
else
placeFMOV("fmov", dst, src, 0b000'11110'01'1'00000000'100'00000 | (imm << 8));
}
else
{
if (imm == 256)
placeFMOV("movi.4s", dst, src, 0b010'0111100000'000'0000'01'00000);
else
placeFMOV("fmov.4s", dst, src, 0b010'0111100000'000'1111'0'1'00000 | ((imm >> 5) << 11) | (imm & 31));
}
}
void AssemblyBuilderA64::fabs(RegisterA64 dst, RegisterA64 src)

View File

@ -57,6 +57,8 @@ LUAU_FASTINTVARIABLE(CodegenHeuristicsBlockLimit, 32'768) // 32 K
// Current value is based on some member variables being limited to 16 bits
LUAU_FASTINTVARIABLE(CodegenHeuristicsBlockInstructionLimit, 65'536) // 64 K
LUAU_FASTFLAG(LuauCodegenHeapSizeReport)
namespace Luau
{
namespace CodeGen
@ -74,25 +76,94 @@ struct NativeProto
uintptr_t exectarget;
};
// Additional data attached to Proto::execdata
// Guaranteed to be aligned to 16 bytes
struct ExtraExecData
{
size_t execDataSize;
size_t codeSize;
};
static int alignTo(int value, int align)
{
CODEGEN_ASSERT(FFlag::LuauCodegenHeapSizeReport);
CODEGEN_ASSERT(align > 0 && (align & (align - 1)) == 0);
return (value + (align - 1)) & ~(align - 1);
}
// Returns the size of execdata required to store all code offsets and ExtraExecData structure at proper alignment
// Always a multiple of 4 bytes
static int calculateExecDataSize(Proto* proto)
{
CODEGEN_ASSERT(FFlag::LuauCodegenHeapSizeReport);
int size = proto->sizecode * sizeof(uint32_t);
size = alignTo(size, 16);
size += sizeof(ExtraExecData);
return size;
}
// Returns pointer to the ExtraExecData inside the Proto::execdata
// Even though 'execdata' is a field in Proto, we require it to support cases where it's not attached to Proto during construction
ExtraExecData* getExtraExecData(Proto* proto, void* execdata)
{
CODEGEN_ASSERT(FFlag::LuauCodegenHeapSizeReport);
int size = proto->sizecode * sizeof(uint32_t);
size = alignTo(size, 16);
return reinterpret_cast<ExtraExecData*>(reinterpret_cast<char*>(execdata) + size);
}
static NativeProto createNativeProto(Proto* proto, const IrBuilder& ir)
{
int sizecode = proto->sizecode;
uint32_t* instOffsets = new uint32_t[sizecode];
uint32_t instTarget = ir.function.entryLocation;
for (int i = 0; i < sizecode; i++)
if (FFlag::LuauCodegenHeapSizeReport)
{
CODEGEN_ASSERT(ir.function.bcMapping[i].asmLocation >= instTarget);
int execDataSize = calculateExecDataSize(proto);
CODEGEN_ASSERT(execDataSize % 4 == 0);
instOffsets[i] = ir.function.bcMapping[i].asmLocation - instTarget;
uint32_t* execData = new uint32_t[execDataSize / 4];
uint32_t instTarget = ir.function.entryLocation;
for (int i = 0; i < proto->sizecode; i++)
{
CODEGEN_ASSERT(ir.function.bcMapping[i].asmLocation >= instTarget);
execData[i] = ir.function.bcMapping[i].asmLocation - instTarget;
}
// Set first instruction offset to 0 so that entering this function still executes any generated entry code.
execData[0] = 0;
ExtraExecData* extra = getExtraExecData(proto, execData);
memset(extra, 0, sizeof(ExtraExecData));
extra->execDataSize = execDataSize;
// entry target will be relocated when assembly is finalized
return {proto, execData, instTarget};
}
else
{
int sizecode = proto->sizecode;
// Set first instruction offset to 0 so that entering this function still executes any generated entry code.
instOffsets[0] = 0;
uint32_t* instOffsets = new uint32_t[sizecode];
uint32_t instTarget = ir.function.entryLocation;
// entry target will be relocated when assembly is finalized
return {proto, instOffsets, instTarget};
for (int i = 0; i < sizecode; i++)
{
CODEGEN_ASSERT(ir.function.bcMapping[i].asmLocation >= instTarget);
instOffsets[i] = ir.function.bcMapping[i].asmLocation - instTarget;
}
// Set first instruction offset to 0 so that entering this function still executes any generated entry code.
instOffsets[0] = 0;
// entry target will be relocated when assembly is finalized
return {proto, instOffsets, instTarget};
}
}
static void destroyExecData(void* execdata)
@ -168,6 +239,12 @@ static int onEnter(lua_State* L, Proto* proto)
return GateFn(data->context.gateEntry)(L, proto, target, &data->context);
}
// used to disable native execution, unconditionally
static int onEnterDisabled(lua_State* L, Proto* proto)
{
return 1;
}
void onDisable(lua_State* L, Proto* proto)
{
// do nothing if proto already uses bytecode
@ -207,6 +284,17 @@ void onDisable(lua_State* L, Proto* proto)
});
}
size_t getMemorySize(lua_State* L, Proto* proto)
{
CODEGEN_ASSERT(FFlag::LuauCodegenHeapSizeReport);
ExtraExecData* extra = getExtraExecData(proto, proto->execdata);
// While execDataSize is exactly the size of the allocation we made and hold for 'execdata' field, the code size is approximate
// This is because code+data page is shared and owned by all Proto from a single module and each one can keep the whole region alive
// So individual Proto being freed by GC will not reflect memory use by native code correctly
return extra->execDataSize + extra->codeSize;
}
#if defined(__aarch64__)
unsigned int getCpuFeaturesA64()
{
@ -301,6 +389,9 @@ void create(lua_State* L, AllocationCallback* allocationCallback, void* allocati
ecb->destroy = onDestroyFunction;
ecb->enter = onEnter;
ecb->disable = onDisable;
if (FFlag::LuauCodegenHeapSizeReport)
ecb->getmemorysize = getMemorySize;
}
void create(lua_State* L)
@ -308,6 +399,17 @@ void create(lua_State* L)
create(L, nullptr, nullptr);
}
[[nodiscard]] bool isNativeExecutionEnabled(lua_State* L)
{
return getNativeState(L) ? (L->global->ecb.enter == onEnter) : false;
}
void setNativeExecutionEnabled(lua_State* L, bool enabled)
{
if (getNativeState(L))
L->global->ecb.enter = enabled ? onEnter : onEnterDisabled;
}
CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, CompilationStats* stats)
{
CODEGEN_ASSERT(lua_isLfunction(L, idx));
@ -401,9 +503,10 @@ CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, Comp
return CodeGenCompilationResult::AllocationFailed;
}
if (gPerfLogFn && results.size() > 0)
if (FFlag::LuauCodegenHeapSizeReport)
{
gPerfLogFn(gPerfLogContext, uintptr_t(codeStart), uint32_t(results[0].exectarget), "<luau helpers>");
if (gPerfLogFn && results.size() > 0)
gPerfLogFn(gPerfLogContext, uintptr_t(codeStart), uint32_t(results[0].exectarget), "<luau helpers>");
for (size_t i = 0; i < results.size(); ++i)
{
@ -411,7 +514,27 @@ CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, Comp
uint32_t end = i + 1 < results.size() ? uint32_t(results[i + 1].exectarget) : uint32_t(build.code.size() * sizeof(build.code[0]));
CODEGEN_ASSERT(begin < end);
logPerfFunction(results[i].p, uintptr_t(codeStart) + begin, end - begin);
if (gPerfLogFn)
logPerfFunction(results[i].p, uintptr_t(codeStart) + begin, end - begin);
ExtraExecData* extra = getExtraExecData(results[i].p, results[i].execdata);
extra->codeSize = end - begin;
}
}
else
{
if (gPerfLogFn && results.size() > 0)
{
gPerfLogFn(gPerfLogContext, uintptr_t(codeStart), uint32_t(results[0].exectarget), "<luau helpers>");
for (size_t i = 0; i < results.size(); ++i)
{
uint32_t begin = uint32_t(results[i].exectarget);
uint32_t end = i + 1 < results.size() ? uint32_t(results[i + 1].exectarget) : uint32_t(build.code.size() * sizeof(build.code[0]));
CODEGEN_ASSERT(begin < end);
logPerfFunction(results[i].p, uintptr_t(codeStart) + begin, end - begin);
}
}
}
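
A quick sanity check of the execdata sizing arithmetic above, transcribed into Luau; the 16-byte `ExtraExecData` size assumes two 8-byte fields:

```lua
local function alignTo(value: number, align: number): number
    -- mirrors the C++ alignTo: round `value` up to the next multiple of `align`
    return bit32.band(value + (align - 1), bit32.bnot(align - 1))
end

-- a proto with 10 instructions: 10 * 4 = 40 bytes of offsets, aligned up to 48,
-- plus 16 bytes of ExtraExecData, giving 64 bytes of execdata
local sizecode = 10
local execDataSize = alignTo(sizecode * 4, 16) + 16
print(execDataSize) -- 64
```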

View File

@ -27,7 +27,7 @@ LUAU_FASTFLAG(DebugCodegenSkipNumbering)
LUAU_FASTINT(CodegenHeuristicsInstructionLimit)
LUAU_FASTINT(CodegenHeuristicsBlockLimit)
LUAU_FASTINT(CodegenHeuristicsBlockInstructionLimit)
LUAU_FASTFLAG(LuauCodegenRemoveDeadStores2)
LUAU_FASTFLAG(LuauCodegenRemoveDeadStores3)
namespace Luau
{
@ -312,7 +312,7 @@ inline bool lowerFunction(IrBuilder& ir, AssemblyBuilder& build, ModuleHelpers&
}
}
if (FFlag::LuauCodegenRemoveDeadStores2)
if (FFlag::LuauCodegenRemoveDeadStores3)
markDeadStoresInBlockChains(ir);
}

View File

@ -12,6 +12,8 @@
#include "lstate.h"
LUAU_FASTFLAG(LuauCodegenRemoveDeadStores3)
namespace Luau
{
namespace CodeGen
@ -28,10 +30,16 @@ static void emitBuiltinMathFrexp(IrRegAllocX64& regs, AssemblyBuilderX64& build,
build.vmovsd(luauRegValue(ra), xmm0);
if (FFlag::LuauCodegenRemoveDeadStores3)
build.mov(luauRegTag(ra), LUA_TNUMBER);
if (nresults > 1)
{
build.vcvtsi2sd(xmm0, xmm0, dword[sTemporarySlot + 0]);
build.vmovsd(luauRegValue(ra + 1), xmm0);
if (FFlag::LuauCodegenRemoveDeadStores3)
build.mov(luauRegTag(ra + 1), LUA_TNUMBER);
}
}
@ -45,8 +53,16 @@ static void emitBuiltinMathModf(IrRegAllocX64& regs, AssemblyBuilderX64& build,
build.vmovsd(xmm1, qword[sTemporarySlot + 0]);
build.vmovsd(luauRegValue(ra), xmm1);
if (FFlag::LuauCodegenRemoveDeadStores3)
build.mov(luauRegTag(ra), LUA_TNUMBER);
if (nresults > 1)
{
build.vmovsd(luauRegValue(ra + 1), xmm0);
if (FFlag::LuauCodegenRemoveDeadStores3)
build.mov(luauRegTag(ra + 1), LUA_TNUMBER);
}
}
static void emitBuiltinMathSign(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int arg)
@ -74,6 +90,9 @@ static void emitBuiltinMathSign(IrRegAllocX64& regs, AssemblyBuilderX64& build,
build.vblendvpd(tmp0.reg, tmp2.reg, build.f64x2(1, 1), tmp0.reg);
build.vmovsd(luauRegValue(ra), tmp0.reg);
if (FFlag::LuauCodegenRemoveDeadStores3)
build.mov(luauRegTag(ra), LUA_TNUMBER);
}
void emitBuiltin(IrRegAllocX64& regs, AssemblyBuilderX64& build, int bfid, int ra, int arg, OperandX64 arg2, int nparams, int nresults)

View File

@ -15,6 +15,7 @@ LUAU_FASTFLAGVARIABLE(LuauCodeGenVectorA64, false)
LUAU_FASTFLAGVARIABLE(LuauCodeGenOptVecA64, false)
LUAU_FASTFLAG(LuauCodegenVectorTag2)
LUAU_FASTFLAG(LuauCodegenRemoveDeadStores3)
namespace Luau
{
@ -202,25 +203,71 @@ static bool emitBuiltin(
switch (bfid)
{
case LBF_MATH_FREXP:
CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
emitInvokeLibm1P(build, offsetof(NativeContext, libm_frexp), arg);
build.str(d0, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.n)));
if (nresults == 2)
{
if (FFlag::LuauCodegenRemoveDeadStores3)
{
build.ldr(w0, sTemporary);
build.scvtf(d1, w0);
build.str(d1, mem(rBase, (res + 1) * sizeof(TValue) + offsetof(TValue, value.n)));
CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
emitInvokeLibm1P(build, offsetof(NativeContext, libm_frexp), arg);
build.str(d0, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.n)));
RegisterA64 temp = regs.allocTemp(KindA64::w);
build.mov(temp, LUA_TNUMBER);
build.str(temp, mem(rBase, res * sizeof(TValue) + offsetof(TValue, tt)));
if (nresults == 2)
{
build.ldr(w0, sTemporary);
build.scvtf(d1, w0);
build.str(d1, mem(rBase, (res + 1) * sizeof(TValue) + offsetof(TValue, value.n)));
build.str(temp, mem(rBase, (res + 1) * sizeof(TValue) + offsetof(TValue, tt)));
}
}
else
{
CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
emitInvokeLibm1P(build, offsetof(NativeContext, libm_frexp), arg);
build.str(d0, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.n)));
if (nresults == 2)
{
build.ldr(w0, sTemporary);
build.scvtf(d1, w0);
build.str(d1, mem(rBase, (res + 1) * sizeof(TValue) + offsetof(TValue, value.n)));
}
}
return true;
}
case LBF_MATH_MODF:
CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
emitInvokeLibm1P(build, offsetof(NativeContext, libm_modf), arg);
build.ldr(d1, sTemporary);
build.str(d1, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.n)));
if (nresults == 2)
build.str(d0, mem(rBase, (res + 1) * sizeof(TValue) + offsetof(TValue, value.n)));
{
if (FFlag::LuauCodegenRemoveDeadStores3)
{
CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
emitInvokeLibm1P(build, offsetof(NativeContext, libm_modf), arg);
build.ldr(d1, sTemporary);
build.str(d1, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.n)));
RegisterA64 temp = regs.allocTemp(KindA64::w);
build.mov(temp, LUA_TNUMBER);
build.str(temp, mem(rBase, res * sizeof(TValue) + offsetof(TValue, tt)));
if (nresults == 2)
{
build.str(d0, mem(rBase, (res + 1) * sizeof(TValue) + offsetof(TValue, value.n)));
build.str(temp, mem(rBase, (res + 1) * sizeof(TValue) + offsetof(TValue, tt)));
}
}
else
{
CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
emitInvokeLibm1P(build, offsetof(NativeContext, libm_modf), arg);
build.ldr(d1, sTemporary);
build.str(d1, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.n)));
if (nresults == 2)
build.str(d0, mem(rBase, (res + 1) * sizeof(TValue) + offsetof(TValue, value.n)));
}
return true;
}
case LBF_MATH_SIGN:
{
CODEGEN_ASSERT(nparams == 1 && nresults == 1);
build.ldr(d0, mem(rBase, arg * sizeof(TValue) + offsetof(TValue, value.n)));
build.fcmpz(d0);
@ -230,7 +277,15 @@ static bool emitBuiltin(
build.fmov(d1, -1.0);
build.fcsel(d0, d1, d0, getConditionFP(IrCondition::Less));
build.str(d0, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.n)));
if (FFlag::LuauCodegenRemoveDeadStores3)
{
RegisterA64 temp = regs.allocTemp(KindA64::w);
build.mov(temp, LUA_TNUMBER);
build.str(temp, mem(rBase, res * sizeof(TValue) + offsetof(TValue, tt)));
}
return true;
}
default:
CODEGEN_ASSERT(!"Missing A64 lowering");
@ -1192,7 +1247,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
{
RegisterA64 temp = regs.allocTemp(KindA64::x);
uint32_t vec[4] = { asU32, asU32, asU32, 0 };
uint32_t vec[4] = {asU32, asU32, asU32, 0};
build.adr(temp, vec, sizeof(vec));
build.ldr(inst.regA64, temp);
}

View File

@ -8,6 +8,8 @@
#include <math.h>
LUAU_FASTFLAG(LuauCodegenRemoveDeadStores3)
// TODO: when nresults is less than our actual result count, we can skip computing/writing unused results
static const int kMinMaxUnrolledParams = 5;
@ -46,8 +48,11 @@ static BuiltinImplResult translateBuiltinNumberToNumber(
builtinCheckDouble(build, build.vmReg(arg), pcpos);
build.inst(IrCmd::FASTCALL, build.constUint(bfid), build.vmReg(ra), build.vmReg(arg), args, build.constInt(1), build.constInt(1));
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
if (!FFlag::LuauCodegenRemoveDeadStores3)
{
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
}
return {BuiltinImplType::Full, 1};
}
@ -107,11 +112,14 @@ static BuiltinImplResult translateBuiltinNumberTo2Number(
build.inst(
IrCmd::FASTCALL, build.constUint(bfid), build.vmReg(ra), build.vmReg(arg), args, build.constInt(1), build.constInt(nresults == 1 ? 1 : 2));
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
if (!FFlag::LuauCodegenRemoveDeadStores3)
{
if (ra != arg)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra), build.constTag(LUA_TNUMBER));
if (nresults != 1)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra + 1), build.constTag(LUA_TNUMBER));
if (nresults != 1)
build.inst(IrCmd::STORE_TAG, build.vmReg(ra + 1), build.constTag(LUA_TNUMBER));
}
return {BuiltinImplType::Full, 2};
}

View File

@ -3,8 +3,6 @@
#include "Luau/IrUtils.h"
LUAU_DYNAMIC_FASTFLAGVARIABLE(LuauCodegenTrackingMultilocationFix, false)
namespace Luau
{
namespace CodeGen
@ -161,7 +159,7 @@ void IrValueLocationTracking::afterInstLowering(IrInst& inst, uint32_t instIdx)
case IrCmd::LOAD_DOUBLE:
case IrCmd::LOAD_INT:
case IrCmd::LOAD_TVALUE:
if (DFFlag::LuauCodegenTrackingMultilocationFix && inst.a.kind == IrOpKind::VmReg)
if (inst.a.kind == IrOpKind::VmReg)
invalidateRestoreOp(inst.a, /*skipValueInvalidation*/ false);
recordRestoreOp(instIdx, inst.a);

View File

@ -20,6 +20,7 @@ LUAU_FASTFLAGVARIABLE(DebugLuauAbortingChecks, false)
LUAU_FASTFLAG(LuauCodegenVectorTag2)
LUAU_DYNAMIC_FASTFLAGVARIABLE(LuauCodeGenCoverForgprepEffect, false)
LUAU_FASTFLAG(LuauCodegenLoadTVTag)
LUAU_FASTFLAG(LuauCodegenRemoveDeadStores3)
namespace Luau
{
@ -1072,8 +1073,39 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
}
break;
// TODO: FASTCALL is more restrictive than INVOKE_FASTCALL; we should either determine the exact semantics, or rework it
case IrCmd::FASTCALL:
{
if (FFlag::LuauCodegenRemoveDeadStores3)
{
LuauBuiltinFunction bfid = LuauBuiltinFunction(function.uintOp(inst.a));
int firstReturnReg = vmRegOp(inst.b);
int nresults = function.intOp(inst.f);
// TODO: FASTCALL is more restrictive than INVOKE_FASTCALL; we should either determine the exact semantics, or rework it
handleBuiltinEffects(state, bfid, firstReturnReg, nresults);
switch (bfid)
{
case LBF_MATH_MODF:
case LBF_MATH_FREXP:
state.updateTag(IrOp{IrOpKind::VmReg, uint8_t(firstReturnReg)}, LUA_TNUMBER);
if (nresults > 1)
state.updateTag(IrOp{IrOpKind::VmReg, uint8_t(firstReturnReg + 1)}, LUA_TNUMBER);
break;
case LBF_MATH_SIGN:
state.updateTag(IrOp{IrOpKind::VmReg, uint8_t(firstReturnReg)}, LUA_TNUMBER);
break;
default:
break;
}
}
else
{
handleBuiltinEffects(state, LuauBuiltinFunction(function.uintOp(inst.a)), vmRegOp(inst.b), function.intOp(inst.f));
}
break;
}
case IrCmd::INVOKE_FASTCALL:
handleBuiltinEffects(state, LuauBuiltinFunction(function.uintOp(inst.a)), vmRegOp(inst.b), function.intOp(inst.f));
break;
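Illustrative sketch (not part of the change; names are hypothetical): the reason the optimizer records result tags for `modf`/`frexp`/`sign` is that a later tag check on the same register can then be folded away instead of emitted. A minimal model of that idea:

```cpp
#include <array>
#include <cstdint>
#include <optional>

struct TagState
{
    std::array<std::optional<uint8_t>, 256> knownTag; // statically known tag per VM register, if any

    // Record that an instruction is known to have written 'tag' into 'reg'.
    void updateTag(uint8_t reg, uint8_t tag)
    {
        knownTag[reg] = tag;
    }

    // True when a tag guard is statically known to pass and can be dropped.
    bool foldCheckTag(uint8_t reg, uint8_t expected) const
    {
        return knownTag[reg].has_value() && *knownTag[reg] == expected;
    }
};
```

This folding is what the FastCallEffects1/FastCallEffects2 IR tests later in this change assert: the CHECK_TAG on the builtin's number result disappears from the optimized block.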

View File

@ -9,7 +9,7 @@
#include "lobject.h"
LUAU_FASTFLAGVARIABLE(LuauCodegenRemoveDeadStores2, false)
LUAU_FASTFLAGVARIABLE(LuauCodegenRemoveDeadStores3, false)
LUAU_FASTFLAG(LuauCodegenVectorTag2)
// TODO: optimization can be improved by knowing which registers are live in at each VM exit
@ -265,6 +265,12 @@ static void markDeadStoresInInst(RemoveDeadStoreState& state, IrBuilder& build,
uint8_t tag = function.tagOp(inst.b);
// Storing 'nil' TValue doesn't update the value part because we don't care about that part of 'nil'
// This, however, prevents unused value stores from being eliminated and has an impact on GC
// To solve this issue, we invalidate the value part of a 'nil' store as well
if (tag == LUA_TNIL)
state.killValueStore(regInfo);
regInfo.tagInstIdx = index;
regInfo.maybeGco = isGCO(tag);
state.hasGcoToClear |= regInfo.maybeGco;
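Illustrative sketch (not part of the change; names are hypothetical): the rule described by the comment above, that a 'nil' tag store retires any still-pending value store for the register, can be modeled generically like this:

```cpp
#include <vector>

struct PendingStores
{
    int tagInstIdx = -1;   // last unobserved tag store for this register, or -1
    int valueInstIdx = -1; // last unobserved value store for this register, or -1
};

// Record a new tag store; when the stored tag is nil, the pending value store
// can never be observed again, so it is reported as dead.
inline void onTagStore(PendingStores& reg, std::vector<int>& deadStores, int instIdx, bool storesNil)
{
    if (storesNil && reg.valueInstIdx >= 0)
    {
        deadStores.push_back(reg.valueInstIdx);
        reg.valueInstIdx = -1;
    }

    reg.tagInstIdx = instIdx;
}
```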

View File

@ -259,6 +259,7 @@ target_sources(Luau.Analysis PRIVATE
Analysis/src/Refinement.cpp
Analysis/src/RequireTracer.cpp
Analysis/src/Scope.cpp
Analysis/src/Set.cpp
Analysis/src/Simplify.cpp
Analysis/src/Substitution.cpp
Analysis/src/Subtyping.cpp

View File

@ -8,6 +8,8 @@
#include <stdio.h>
#include <stdlib.h>
LUAU_DYNAMIC_FASTFLAGVARIABLE(LuauDebugInfoDupArgLeftovers, false)
static lua_State* getthread(lua_State* L, int* arg)
{
if (lua_isthread(L, 1))
@ -26,10 +28,17 @@ static int db_info(lua_State* L)
{
int arg;
lua_State* L1 = getthread(L, &arg);
int l1top = 0;
// If L1 != L, L1 can be in any state, and therefore there are no guarantees about its stack space
// if L1 != L, L1 can be in any state, and therefore there are no guarantees about its stack space
if (L != L1)
lua_rawcheckstack(L1, 1); // for 'f' option
{
// for the 'f' option, we reserve one slot and also record the stack top
lua_rawcheckstack(L1, 1);
if (DFFlag::LuauDebugInfoDupArgLeftovers)
l1top = lua_gettop(L1);
}
int level;
if (lua_isnumber(L, arg + 1))
@ -59,7 +68,13 @@ static int db_info(lua_State* L)
if (unsigned(*it - 'a') < 26)
{
if (occurs[*it - 'a'])
{
// restore stack state of another thread as 'f' option might not have been visited yet
if (DFFlag::LuauDebugInfoDupArgLeftovers && L != L1)
lua_settop(L1, l1top);
luaL_argerror(L, arg + 2, "duplicate option");
}
occurs[*it - 'a'] = true;
}

View File

@ -104,6 +104,11 @@ public:
return status;
}
const lua_State* getThread() const
{
return L;
}
private:
lua_State* L;
int status;
@ -120,7 +125,12 @@ int luaD_rawrunprotected(lua_State* L, Pfunc f, void* ud)
}
catch (lua_exception& e)
{
// lua_exception means that luaD_throw was called and an exception object is on stack if status is ERRRUN
// It is assumed/required that the exception caught here was thrown from the same Luau state.
// If this assert fires, it indicates a lua_exception was not properly caught and propagated
// to the exception handler for a different Luau state. Report this issue to the Luau team if
// you need more information or assistance resolving this assert.
LUAU_ASSERT(e.getThread() == L);
status = e.getStatus();
}
catch (std::exception& e)

View File

@ -6,37 +6,85 @@
#include "lmem.h"
#include "lgc.h"
LUAU_FASTFLAGVARIABLE(LuauNewProtoInitAll, false)
Proto* luaF_newproto(lua_State* L)
{
Proto* f = luaM_newgco(L, Proto, sizeof(Proto), L->activememcat);
luaC_init(L, f, LUA_TPROTO);
f->k = NULL;
f->sizek = 0;
f->p = NULL;
f->sizep = 0;
f->code = NULL;
f->sizecode = 0;
f->sizeupvalues = 0;
f->nups = 0;
f->upvalues = NULL;
f->numparams = 0;
f->is_vararg = 0;
f->maxstacksize = 0;
f->flags = 0;
f->sizelineinfo = 0;
f->linegaplog2 = 0;
f->lineinfo = NULL;
f->abslineinfo = NULL;
f->sizelocvars = 0;
f->locvars = NULL;
f->source = NULL;
f->debugname = NULL;
f->debuginsn = NULL;
f->codeentry = NULL;
f->execdata = NULL;
f->exectarget = 0;
f->typeinfo = NULL;
f->userdata = NULL;
if (FFlag::LuauNewProtoInitAll)
{
f->nups = 0;
f->numparams = 0;
f->is_vararg = 0;
f->maxstacksize = 0;
f->flags = 0;
f->k = NULL;
f->code = NULL;
f->p = NULL;
f->codeentry = NULL;
f->execdata = NULL;
f->exectarget = 0;
f->lineinfo = NULL;
f->abslineinfo = NULL;
f->locvars = NULL;
f->upvalues = NULL;
f->source = NULL;
f->debugname = NULL;
f->debuginsn = NULL;
f->typeinfo = NULL;
f->userdata = NULL;
f->gclist = NULL;
f->sizecode = 0;
f->sizep = 0;
f->sizelocvars = 0;
f->sizeupvalues = 0;
f->sizek = 0;
f->sizelineinfo = 0;
f->linegaplog2 = 0;
f->linedefined = 0;
f->bytecodeid = 0;
}
else
{
f->k = NULL;
f->sizek = 0;
f->p = NULL;
f->sizep = 0;
f->code = NULL;
f->sizecode = 0;
f->sizeupvalues = 0;
f->nups = 0;
f->upvalues = NULL;
f->numparams = 0;
f->is_vararg = 0;
f->maxstacksize = 0;
f->flags = 0;
f->sizelineinfo = 0;
f->linegaplog2 = 0;
f->lineinfo = NULL;
f->abslineinfo = NULL;
f->sizelocvars = 0;
f->locvars = NULL;
f->source = NULL;
f->debugname = NULL;
f->debuginsn = NULL;
f->codeentry = NULL;
f->execdata = NULL;
f->exectarget = 0;
f->typeinfo = NULL;
f->userdata = NULL;
}
return f;
}

View File

@ -14,6 +14,8 @@
#include <string.h>
#include <stdio.h>
LUAU_FASTFLAGVARIABLE(LuauCodegenHeapSizeReport, false)
static void validateobjref(global_State* g, GCObject* f, GCObject* t)
{
LUAU_ASSERT(!isdead(g, t));
@ -824,6 +826,17 @@ static void enumproto(EnumContext* ctx, Proto* p)
size_t size = sizeof(Proto) + sizeof(Instruction) * p->sizecode + sizeof(Proto*) * p->sizep + sizeof(TValue) * p->sizek + p->sizelineinfo +
sizeof(LocVar) * p->sizelocvars + sizeof(TString*) * p->sizeupvalues;
if (FFlag::LuauCodegenHeapSizeReport)
{
if (p->execdata && ctx->L->global->ecb.getmemorysize)
{
size_t nativesize = ctx->L->global->ecb.getmemorysize(ctx->L, p);
ctx->node(ctx->context, p->execdata, uint8_t(LUA_TNONE), p->memcat, nativesize, NULL);
ctx->edge(ctx->context, enumtopointer(obj2gco(p)), p->execdata, "[native]");
}
}
enumnode(ctx, obj2gco(p), size, p->source ? getstr(p->source) : NULL);
if (p->sizek)

View File

@ -155,6 +155,7 @@ struct lua_ExecutionCallbacks
void (*destroy)(lua_State* L, Proto* proto); // called when function is destroyed
int (*enter)(lua_State* L, Proto* proto); // called when function is about to start/resume (when execdata is present), return 0 to exit VM
void (*disable)(lua_State* L, Proto* proto); // called when function has to be switched from native to bytecode in the debugger
size_t (*getmemorysize)(lua_State* L, Proto* proto); // called to request the size of memory associated with the native part of the Proto
};
/*

View File

@ -13,6 +13,8 @@
#include <string.h>
LUAU_FASTFLAGVARIABLE(LuauLoadExceptionSafe, false)
// TODO: RAII deallocation doesn't work for longjmp builds if a memory error happens
template<typename T>
struct TempBuffer
@ -28,7 +30,13 @@ struct TempBuffer
{
}
~TempBuffer()
TempBuffer(const TempBuffer&) = delete;
TempBuffer(TempBuffer&&) = delete;
TempBuffer& operator=(const TempBuffer&) = delete;
TempBuffer& operator=(TempBuffer&&) = delete;
~TempBuffer() noexcept
{
luaM_freearray(L, data, count, T, 0);
}
@ -40,6 +48,38 @@ struct TempBuffer
}
};
struct ScopedSetGCThreshold
{
public:
ScopedSetGCThreshold(global_State* global, size_t newThreshold) noexcept
: global{global}
{
if (FFlag::LuauLoadExceptionSafe)
{
originalThreshold = global->GCthreshold;
global->GCthreshold = newThreshold;
}
}
ScopedSetGCThreshold(const ScopedSetGCThreshold&) = delete;
ScopedSetGCThreshold(ScopedSetGCThreshold&&) = delete;
ScopedSetGCThreshold& operator=(const ScopedSetGCThreshold&) = delete;
ScopedSetGCThreshold& operator=(ScopedSetGCThreshold&&) = delete;
~ScopedSetGCThreshold() noexcept
{
if (FFlag::LuauLoadExceptionSafe)
{
global->GCthreshold = originalThreshold;
}
}
private:
global_State* global = nullptr;
size_t originalThreshold = 0;
};
void luaV_getimport(lua_State* L, Table* env, TValue* k, StkId res, uint32_t id, bool propagatenil)
{
int count = id >> 30;
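Illustrative sketch (not part of the change; names are hypothetical): `ScopedSetGCThreshold` follows the ordinary save-and-restore RAII idiom, so the threshold is restored even when deserialization throws partway through. A stand-alone version of the same pattern:

```cpp
#include <cstddef>

// Save a value on construction, restore it in the destructor; the restore also
// runs during stack unwinding, so the value is put back even if the guarded
// code throws.
struct ScopedValue
{
    ScopedValue(std::size_t& slot, std::size_t newValue) noexcept
        : slot(slot)
        , saved(slot)
    {
        slot = newValue;
    }

    ScopedValue(const ScopedValue&) = delete;
    ScopedValue& operator=(const ScopedValue&) = delete;

    ~ScopedValue() noexcept
    {
        slot = saved;
    }

private:
    std::size_t& slot;
    std::size_t saved;
};
```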
@ -181,9 +221,13 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
luaC_checkGC(L);
// pause GC for the duration of deserialization - some objects we're creating aren't rooted
// TODO: if an allocation error happens mid-load, we do not unpause GC!
const ScopedSetGCThreshold pauseGC{L->global, SIZE_MAX};
size_t GCthreshold = L->global->GCthreshold;
L->global->GCthreshold = SIZE_MAX;
if (!FFlag::LuauLoadExceptionSafe)
{
L->global->GCthreshold = SIZE_MAX;
}
// env is 0 for current environment and a stack index otherwise
Table* envt = (env == 0) ? L->gt : hvalue(luaA_toobject(L, env));
@ -245,31 +289,55 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
offset += typesize;
}
p->sizecode = readVarInt(data, size, offset);
p->code = luaM_newarray(L, p->sizecode, Instruction, p->memcat);
if (FFlag::LuauLoadExceptionSafe)
{
const int sizecode = readVarInt(data, size, offset);
p->code = luaM_newarray(L, sizecode, Instruction, p->memcat);
p->sizecode = sizecode;
}
else
{
p->sizecode = readVarInt(data, size, offset);
p->code = luaM_newarray(L, p->sizecode, Instruction, p->memcat);
}
for (int j = 0; j < p->sizecode; ++j)
p->code[j] = read<uint32_t>(data, size, offset);
p->codeentry = p->code;
p->sizek = readVarInt(data, size, offset);
p->k = luaM_newarray(L, p->sizek, TValue, p->memcat);
#ifdef HARDMEMTESTS
// this is redundant during normal runs, but resolveImportSafe can trigger GC checks under HARDMEMTESTS
// because p->k isn't fully formed at this point, we pre-fill it with nil to make subsequent setup safe
for (int j = 0; j < p->sizek; ++j)
if (FFlag::LuauLoadExceptionSafe)
{
setnilvalue(&p->k[j]);
const int sizek = readVarInt(data, size, offset);
p->k = luaM_newarray(L, sizek, TValue, p->memcat);
p->sizek = sizek;
}
else
{
p->sizek = readVarInt(data, size, offset);
p->k = luaM_newarray(L, p->sizek, TValue, p->memcat);
}
if (FFlag::LuauLoadExceptionSafe)
{
// Initialize the constants to nil to ensure they have a valid state
// in the event that some operation in the following loop fails with
// an exception.
for (int j = 0; j < p->sizek; ++j)
{
setnilvalue(&p->k[j]);
}
}
#endif
for (int j = 0; j < p->sizek; ++j)
{
switch (read<uint8_t>(data, size, offset))
{
case LBC_CONSTANT_NIL:
setnilvalue(&p->k[j]);
if (!FFlag::LuauLoadExceptionSafe)
{
setnilvalue(&p->k[j]);
}
break;
case LBC_CONSTANT_BOOLEAN:
@ -341,8 +409,18 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
}
}
p->sizep = readVarInt(data, size, offset);
p->p = luaM_newarray(L, p->sizep, Proto*, p->memcat);
if (FFlag::LuauLoadExceptionSafe)
{
const int sizep = readVarInt(data, size, offset);
p->p = luaM_newarray(L, sizep, Proto*, p->memcat);
p->sizep = sizep;
}
else
{
p->sizep = readVarInt(data, size, offset);
p->p = luaM_newarray(L, p->sizep, Proto*, p->memcat);
}
for (int j = 0; j < p->sizep; ++j)
{
uint32_t fid = readVarInt(data, size, offset);
@ -361,8 +439,18 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
int intervals = ((p->sizecode - 1) >> p->linegaplog2) + 1;
int absoffset = (p->sizecode + 3) & ~3;
p->sizelineinfo = absoffset + intervals * sizeof(int);
p->lineinfo = luaM_newarray(L, p->sizelineinfo, uint8_t, p->memcat);
if (FFlag::LuauLoadExceptionSafe)
{
const int sizelineinfo = absoffset + intervals * sizeof(int);
p->lineinfo = luaM_newarray(L, sizelineinfo, uint8_t, p->memcat);
p->sizelineinfo = sizelineinfo;
}
else
{
p->sizelineinfo = absoffset + intervals * sizeof(int);
p->lineinfo = luaM_newarray(L, p->sizelineinfo, uint8_t, p->memcat);
}
p->abslineinfo = (int*)(p->lineinfo + absoffset);
uint8_t lastoffset = 0;
@ -384,8 +472,17 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
if (debuginfo)
{
p->sizelocvars = readVarInt(data, size, offset);
p->locvars = luaM_newarray(L, p->sizelocvars, LocVar, p->memcat);
if (FFlag::LuauLoadExceptionSafe)
{
const int sizelocvars = readVarInt(data, size, offset);
p->locvars = luaM_newarray(L, sizelocvars, LocVar, p->memcat);
p->sizelocvars = sizelocvars;
}
else
{
p->sizelocvars = readVarInt(data, size, offset);
p->locvars = luaM_newarray(L, p->sizelocvars, LocVar, p->memcat);
}
for (int j = 0; j < p->sizelocvars; ++j)
{
@ -395,8 +492,17 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
p->locvars[j].reg = read<uint8_t>(data, size, offset);
}
p->sizeupvalues = readVarInt(data, size, offset);
p->upvalues = luaM_newarray(L, p->sizeupvalues, TString*, p->memcat);
if (FFlag::LuauLoadExceptionSafe)
{
const int sizeupvalues = readVarInt(data, size, offset);
p->upvalues = luaM_newarray(L, sizeupvalues, TString*, p->memcat);
p->sizeupvalues = sizeupvalues;
}
else
{
p->sizeupvalues = readVarInt(data, size, offset);
p->upvalues = luaM_newarray(L, p->sizeupvalues, TString*, p->memcat);
}
for (int j = 0; j < p->sizeupvalues; ++j)
{
@ -417,7 +523,10 @@ int luau_load(lua_State* L, const char* chunkname, const char* data, size_t size
setclvalue(L, L->top, cl);
incr_top(L);
L->global->GCthreshold = GCthreshold;
if (!FFlag::LuauLoadExceptionSafe)
{
L->global->GCthreshold = GCthreshold;
}
return 0;
}
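Illustrative sketch (not part of the change; names are hypothetical): the repeated reorderings above all follow one rule, publish a size field only after its allocation has succeeded, so an allocation failure can never leave an object whose size claims storage its pointer does not back:

```cpp
#include <cstddef>

// A container whose 'size' must never claim elements that 'data' cannot back,
// because other code (a GC sweep, a destructor) trusts 'size' unconditionally.
struct Buffer
{
    int* data = nullptr;
    std::size_t size = 0;
};

inline void growSafely(Buffer& b, std::size_t n)
{
    int* fresh = new int[n]; // may throw std::bad_alloc; 'b' is still self-consistent here

    delete[] b.data;
    b.data = fresh;
    b.size = n; // published last, only after the allocation is known to have succeeded
}
```

The HugeFunctionLoadFailure test further down exercises exactly this property by injecting allocation failures into the large array allocations made by `luau_load`.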

View File

@ -1,11 +1,14 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "Luau/AssemblyBuilderA64.h"
#include "Luau/StringUtils.h"
#include "ScopedFlags.h"
#include "doctest.h"
#include <string.h>
LUAU_FASTFLAG(LuauCodeGenOptVecA64)
using namespace Luau::CodeGen;
using namespace Luau::CodeGen::A64;
@ -448,6 +451,8 @@ TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "FPCompare")
TEST_CASE_FIXTURE(AssemblyBuilderA64Fixture, "FPImm")
{
ScopedFastFlag luauCodeGenOptVecA64{FFlag::LuauCodeGenOptVecA64, true};
SINGLE_COMPARE(fmov(d0, 0), 0x2F00E400);
SINGLE_COMPARE(fmov(d0, 0.125), 0x1E681000);
SINGLE_COMPARE(fmov(d0, -0.125), 0x1E781000);

View File

@ -16,7 +16,6 @@
LUAU_FASTFLAG(LuauTraceTypesInNonstrictMode2)
LUAU_FASTFLAG(LuauSetMetatableDoesNotTimeTravel)
LUAU_FASTFLAG(LuauAutocompleteStringLiteralBounds);
LUAU_FASTFLAG(LuauAutocompleteTableKeysNoInitialCharacter)
using namespace Luau;
@ -2696,8 +2695,6 @@ local t = {
TEST_CASE_FIXTURE(ACFixture, "suggest_table_keys_no_initial_character")
{
ScopedFastFlag sff{FFlag::LuauAutocompleteTableKeysNoInitialCharacter, true};
check(R"(
type Test = { first: number, second: number }
local t: Test = { @1 }
@ -2711,8 +2708,6 @@ local t: Test = { @1 }
TEST_CASE_FIXTURE(ACFixture, "suggest_table_keys_no_initial_character_2")
{
ScopedFastFlag sff{FFlag::LuauAutocompleteTableKeysNoInitialCharacter, true};
check(R"(
type Test = { first: number, second: number }
local t: Test = { first = 1, @1 }
@ -2726,8 +2721,6 @@ local t: Test = { first = 1, @1 }
TEST_CASE_FIXTURE(ACFixture, "suggest_table_keys_no_initial_character_3")
{
ScopedFastFlag sff{FFlag::LuauAutocompleteTableKeysNoInitialCharacter, true};
check(R"(
type Properties = { TextScaled: boolean, Text: string }
local function create(props: Properties) end
@ -3642,6 +3635,9 @@ TEST_CASE_FIXTURE(ACFixture, "string_contents_is_available_to_callback")
TEST_CASE_FIXTURE(ACFixture, "autocomplete_response_perf1" * doctest::timeout(0.5))
{
if (FFlag::DebugLuauDeferredConstraintResolution)
return; // FIXME: This test is just barely at the threshold, which makes it very flaky under the new solver
// Build a function type with a large overload set
const int parts = 100;
std::string source;

View File

@ -26,8 +26,13 @@ extern bool verbose;
extern bool codegen;
extern int optimizationLevel;
// internal functions, declared in lgc.h - not exposed via lua.h
void luaC_fullgc(lua_State* L);
void luaC_validate(lua_State* L);
LUAU_FASTINT(CodegenHeuristicsInstructionLimit)
LUAU_DYNAMIC_FASTFLAG(LuauCodegenTrackingMultilocationFix)
LUAU_FASTFLAG(LuauLoadExceptionSafe)
LUAU_DYNAMIC_FASTFLAG(LuauDebugInfoDupArgLeftovers)
static lua_CompileOptions defaultOptions()
{
@ -241,7 +246,6 @@ static StateRef runConformance(const char* name, void (*setup)(lua_State* L) = n
status = lua_resume(L, nullptr, 0);
}
extern void luaC_validate(lua_State * L); // internal function, declared in lgc.h - not exposed via lua.h
luaC_validate(L);
if (status == 0)
@ -626,6 +630,8 @@ TEST_CASE("DateTime")
TEST_CASE("Debug")
{
ScopedFastFlag luauDebugInfoDupArgLeftovers{DFFlag::LuauDebugInfoDupArgLeftovers, true};
runConformance("debug.lua");
}
@ -2030,8 +2036,6 @@ TEST_CASE("SafeEnv")
TEST_CASE("Native")
{
ScopedFastFlag luauCodegenTrackingMultilocationFix{DFFlag::LuauCodegenTrackingMultilocationFix, true};
// This test requires code to run natively; otherwise all 'is_native' checks will fail
if (!codegen || !luau_codegen_supported())
return;
@ -2053,7 +2057,7 @@ TEST_CASE("NativeTypeAnnotations")
});
}
TEST_CASE("HugeFunction")
[[nodiscard]] static std::string makeHugeFunctionSource()
{
std::string source;
@ -2074,6 +2078,13 @@ TEST_CASE("HugeFunction")
// use failed fast-calls with imports and constants to exercise all of the more complex fallback sequences
source += "return bit32.lshift('84', -1)";
return source;
}
TEST_CASE("HugeFunction")
{
std::string source = makeHugeFunctionSource();
StateRef globalState(luaL_newstate(), lua_close);
lua_State* L = globalState.get();
@ -2100,6 +2111,78 @@ TEST_CASE("HugeFunction")
CHECK(lua_tonumber(L, -1) == 42);
}
TEST_CASE("HugeFunctionLoadFailure")
{
// This test case verifies that if an out-of-memory error occurs inside of
// luau_load, we are not left with any GC objects in inconsistent states
// that would cause issues during garbage collection.
//
// We create a script with a huge function in it, then pass this to
// luau_load. This should require two "large" allocations: One for the
// code array and one for the constants array (k). We run this test twice
// and fail each of these two allocations.
ScopedFastFlag luauLoadExceptionSafe{FFlag::LuauLoadExceptionSafe, true};
std::string source = makeHugeFunctionSource();
static const size_t expectedTotalLargeAllocations = 2;
static size_t largeAllocationToFail = 0;
static size_t largeAllocationCount = 0;
const auto testAllocate = [](void* ud, void* ptr, size_t osize, size_t nsize) -> void*
{
if (nsize == 0)
{
free(ptr);
return nullptr;
}
else if (nsize > 32768)
{
if (largeAllocationCount == largeAllocationToFail)
return nullptr;
++largeAllocationCount;
return realloc(ptr, nsize);
}
else
{
return realloc(ptr, nsize);
}
};
size_t bytecodeSize = 0;
char* const bytecode = luau_compile(source.data(), source.size(), nullptr, &bytecodeSize);
for (largeAllocationToFail = 0; largeAllocationToFail != expectedTotalLargeAllocations; ++largeAllocationToFail)
{
largeAllocationCount = 0;
StateRef globalState(lua_newstate(testAllocate, nullptr), lua_close);
lua_State* L = globalState.get();
luaL_openlibs(L);
luaL_sandbox(L);
luaL_sandboxthread(L);
try
{
luau_load(L, "=HugeFunction", bytecode, bytecodeSize, 0);
REQUIRE(false); // The luau_load should fail with an exception
}
catch (const std::exception& ex)
{
REQUIRE(strcmp(ex.what(), "lua_exception: not enough memory") == 0);
}
luaC_fullgc(L);
}
free(bytecode);
REQUIRE_EQ(largeAllocationToFail, expectedTotalLargeAllocations);
}
TEST_CASE("IrInstructionLimit")
{
if (!codegen || !luau_codegen_supported())

View File

@ -13,13 +13,11 @@
#include <limits.h>
LUAU_FASTFLAG(LuauCodegenVectorTag2)
LUAU_FASTFLAG(LuauCodegenRemoveDeadStores3)
LUAU_FASTFLAG(DebugLuauAbortingChecks)
using namespace Luau::CodeGen;
LUAU_FASTFLAG(LuauCodegenRemoveDeadStores2)
LUAU_FASTFLAG(DebugLuauAbortingChecks)
class IrBuilderFixture
{
public:
@ -2543,7 +2541,7 @@ bb_0: ; useCount: 0
TEST_CASE_FIXTURE(IrBuilderFixture, "ForgprepInvalidation")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp block = build.block(IrBlockKind::Internal);
IrOp followup = build.block(IrBlockKind::Internal);
@ -2582,6 +2580,57 @@ bb_1:
)");
}
TEST_CASE_FIXTURE(IrBuilderFixture, "FastCallEffects1")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);
build.beginBlock(entry);
build.inst(IrCmd::FASTCALL, build.constUint(LBF_MATH_FREXP), build.vmReg(1), build.vmReg(2), build.undef(), build.constInt(1), build.constInt(2));
build.inst(IrCmd::CHECK_TAG, build.vmReg(1), build.constTag(tnumber), build.vmExit(1));
build.inst(IrCmd::CHECK_TAG, build.vmReg(2), build.constTag(tnumber), build.vmExit(1));
build.inst(IrCmd::RETURN, build.vmReg(1), build.constInt(2));
updateUseCounts(build.function);
computeCfgInfo(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
; in regs: R2
FASTCALL 14u, R1, R2, undef, 1i, 2i
RETURN R1, 2i
)");
}
TEST_CASE_FIXTURE(IrBuilderFixture, "FastCallEffects2")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);
build.beginBlock(entry);
build.inst(IrCmd::FASTCALL, build.constUint(LBF_MATH_MODF), build.vmReg(1), build.vmReg(2), build.undef(), build.constInt(1), build.constInt(1));
build.inst(IrCmd::CHECK_TAG, build.vmReg(1), build.constTag(tnumber), build.vmExit(1));
build.inst(IrCmd::CHECK_TAG, build.vmReg(2), build.constTag(tnumber), build.vmExit(1));
build.inst(IrCmd::RETURN, build.vmReg(1), build.constInt(2));
updateUseCounts(build.function);
computeCfgInfo(build.function);
constPropInBlockChains(build, true);
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
; in regs: R2
FASTCALL 20u, R1, R2, undef, 1i, 1i
CHECK_TAG R2, tnumber, exit(1)
RETURN R1, 2i
)");
}
TEST_SUITE_END();
TEST_SUITE_BEGIN("Analysis");
@ -2893,7 +2942,7 @@ bb_1:
TEST_CASE_FIXTURE(IrBuilderFixture, "ForgprepImplicitUse")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);
IrOp direct = build.block(IrBlockKind::Internal);
@ -3403,7 +3452,7 @@ TEST_SUITE_BEGIN("DeadStoreRemoval");
TEST_CASE_FIXTURE(IrBuilderFixture, "SimpleDoubleStore")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);
@ -3449,7 +3498,7 @@ bb_0:
TEST_CASE_FIXTURE(IrBuilderFixture, "UnusedAtReturn")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);
@ -3479,7 +3528,7 @@ bb_0:
TEST_CASE_FIXTURE(IrBuilderFixture, "HiddenPointerUse1")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);
@ -3509,7 +3558,7 @@ bb_0:
TEST_CASE_FIXTURE(IrBuilderFixture, "HiddenPointerUse2")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);
@ -3543,7 +3592,7 @@ bb_0:
TEST_CASE_FIXTURE(IrBuilderFixture, "HiddenPointerUse3")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);
@ -3571,9 +3620,68 @@ bb_0:
)");
}
TEST_CASE_FIXTURE(IrBuilderFixture, "HiddenPointerUse4")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);
build.beginBlock(entry);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(0), build.constDouble(1.0));
build.inst(IrCmd::STORE_TAG, build.vmReg(0), build.constTag(tnumber));
build.inst(IrCmd::CHECK_GC);
build.inst(IrCmd::STORE_TAG, build.vmReg(0), build.constTag(tnil));
build.inst(IrCmd::RETURN, build.vmReg(0), build.constInt(1));
updateUseCounts(build.function);
computeCfgInfo(build.function);
constPropInBlockChains(build, true);
markDeadStoresInBlockChains(build);
// It is important for a tag overwrite to TNIL to kill not only the previous tag store, but the value store as well
// This matters in the following scenario:
// - R0 might have been a GCO on entry to bb_0
// - R0 is overwritten by a number
// - Stack is visited by GC assist
// - R0 is overwritten by nil
// If only the number tag write had been killed, a GCO tag would be left on the stack alongside a double value
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
CHECK_GC
STORE_TAG R0, tnil
RETURN R0, 1i
)");
}
TEST_CASE_FIXTURE(IrBuilderFixture, "PartialVsFullStoresWithRecombination")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);
build.beginBlock(entry);
build.inst(IrCmd::STORE_DOUBLE, build.vmReg(1), build.constDouble(1.0));
build.inst(IrCmd::STORE_TAG, build.vmReg(1), build.constTag(tnumber));
build.inst(IrCmd::STORE_TVALUE, build.vmReg(0), build.inst(IrCmd::LOAD_TVALUE, build.vmReg(1)));
build.inst(IrCmd::RETURN, build.vmReg(0), build.constInt(1));
updateUseCounts(build.function);
computeCfgInfo(build.function);
constPropInBlockChains(build, true);
markDeadStoresInBlockChains(build);
CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
STORE_SPLIT_TVALUE R0, tnumber, 1
RETURN R0, 1i
)");
}
TEST_CASE_FIXTURE(IrBuilderFixture, "IgnoreFastcallAdjustment")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);
@ -3602,7 +3710,7 @@ bb_0:
TEST_CASE_FIXTURE(IrBuilderFixture, "JumpImplicitLiveOut")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);
IrOp next = build.block(IrBlockKind::Internal);
@ -3639,7 +3747,7 @@ bb_1:
TEST_CASE_FIXTURE(IrBuilderFixture, "KeepCapturedRegisterStores")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);
@ -3678,7 +3786,7 @@ bb_0:
TEST_CASE_FIXTURE(IrBuilderFixture, "AbortingChecksRequireStores")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
ScopedFastFlag debugLuauAbortingChecks{FFlag::DebugLuauAbortingChecks, true};
IrOp block = build.block(IrBlockKind::Internal);
@ -3719,7 +3827,7 @@ bb_0:
TEST_CASE_FIXTURE(IrBuilderFixture, "PartialOverFullValue")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
IrOp entry = build.block(IrBlockKind::Internal);

View File

@ -13,7 +13,7 @@
#include <memory>
LUAU_FASTFLAG(LuauCodegenVectorTag2)
LUAU_FASTFLAG(LuauCodegenRemoveDeadStores2)
LUAU_FASTFLAG(LuauCodegenRemoveDeadStores3)
static std::string getCodegenAssembly(const char* source)
{
@ -91,7 +91,7 @@ bb_bytecode_1:
TEST_CASE("VectorComponentRead")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function compsum(a: vector)
@ -175,7 +175,7 @@ bb_bytecode_1:
TEST_CASE("VectorSubMulDiv")
{
ScopedFastFlag luauCodegenVectorTag2{FFlag::LuauCodegenVectorTag2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vec3combo(a: vector, b: vector, c: vector, d: vector)
@ -210,7 +210,7 @@ bb_bytecode_1:
TEST_CASE("VectorSubMulDiv2")
{
ScopedFastFlag luauCodegenVectorTag2{FFlag::LuauCodegenVectorTag2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vec3combo(a: vector)
@ -241,7 +241,7 @@ bb_bytecode_1:
TEST_CASE("VectorMulDivMixed")
{
ScopedFastFlag luauCodegenVectorTag2{FFlag::LuauCodegenVectorTag2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vec3combo(a: vector, b: vector, c: vector, d: vector)
@ -283,7 +283,7 @@ bb_bytecode_1:
TEST_CASE("ExtraMathMemoryOperands")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores2, true};
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function foo(a: number, b: number, c: number, d: number, e: number)
@ -319,4 +319,86 @@ bb_bytecode_1:
)");
}
TEST_CASE("DseInitialStackState")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function foo()
while {} do
local _ = not _,{}
_ = nil
end
end
)"),
R"(
; function foo() line 2
bb_bytecode_0:
SET_SAVEDPC 1u
%1 = NEW_TABLE 0u, 0u
STORE_POINTER R0, %1
STORE_TAG R0, ttable
CHECK_GC
JUMP bb_2
bb_2:
CHECK_SAFE_ENV exit(3)
JUMP_EQ_TAG K1, tnil, bb_fallback_4, bb_3
bb_3:
%9 = LOAD_TVALUE K1
STORE_TVALUE R1, %9
JUMP bb_5
bb_5:
SET_SAVEDPC 7u
%21 = NEW_TABLE 0u, 0u
STORE_POINTER R1, %21
STORE_TAG R1, ttable
CHECK_GC
STORE_TAG R0, tnil
INTERRUPT 9u
JUMP bb_bytecode_0
)");
}
TEST_CASE("DseInitialStackState2")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function foo(a)
math.frexp(a)
return a
end
)"),
R"(
; function foo($arg0) line 2
bb_bytecode_0:
CHECK_SAFE_ENV exit(1)
CHECK_TAG R0, tnumber, exit(1)
FASTCALL 14u, R1, R0, undef, 1i, 2i
INTERRUPT 5u
RETURN R0, 1i
)");
}
TEST_CASE("DseInitialStackState3")
{
ScopedFastFlag luauCodegenRemoveDeadStores{FFlag::LuauCodegenRemoveDeadStores3, true};
CHECK_EQ("\n" + getCodegenAssembly(R"(
local function foo(a)
math.sign(a)
return a
end
)"),
R"(
; function foo($arg0) line 2
bb_bytecode_0:
CHECK_SAFE_ENV exit(1)
CHECK_TAG R0, tnumber, exit(1)
FASTCALL 47u, R1, R0, undef, 1i, 1i
INTERRUPT 5u
RETURN R0, 1i
)");
}
TEST_SUITE_END();

View File

@ -57,7 +57,8 @@ public:
for (int i = 0; i < 20; ++i)
{
bool engineTestDir = isDirectory(luauDirAbs + "/Client/Luau/tests");
bool luauTestDir = isDirectory(luauDirAbs + "/luau/tests");
bool luauTestDir = isDirectory(luauDirAbs + "/luau/tests/require");
if (engineTestDir || luauTestDir)
{
if (engineTestDir)

View File

@ -1,8 +1,14 @@
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#include "ScopedFlags.h"
#include "Luau/Set.h"
#include "doctest.h"
#include <string>
#include <vector>
LUAU_FASTFLAG(LuauFixSetIter);
TEST_SUITE_BEGIN("SetTests");
TEST_CASE("empty_set_size_0")
@ -99,4 +105,32 @@ TEST_CASE("iterate_over_set_skips_erased_elements")
CHECK(sum == 9);
}
TEST_CASE("iterate_over_set_skips_first_element_if_it_is_erased")
{
ScopedFastFlag sff{FFlag::LuauFixSetIter, true};
/*
* As of this writing, in the following set, the key "y" happens to occur
* before "x" in the underlying DenseHashSet. This is important because it
* surfaces something that Set::const_iterator needs to do: If the
* underlying iterator happens to start at a deleted element, we need to
* advance until we find the first live element (or the end of the set).
*/
Luau::Set<std::string> s1{{}};
s1.insert("x");
s1.insert("y");
s1.erase("y");
std::vector<std::string> out;
auto it = s1.begin();
auto endIt = s1.end();
while (it != endIt)
{
out.push_back(*it);
++it;
}
CHECK(1 == out.size());
}
TEST_SUITE_END();
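Illustrative sketch (not part of the change; names are hypothetical): the behavior pinned down by the new test is the usual tombstone-skipping iterator pattern, where dead slots are skipped both at construction and after each increment:

```cpp
#include <cstddef>
#include <vector>

struct Slot
{
    int value;
    bool dead;
};

// Iterator over a dense array with tombstones: dead slots are skipped both when
// the iterator is constructed (begin() may land on an erased element) and after
// every increment.
struct LiveIterator
{
    const std::vector<Slot>* slots;
    std::size_t idx;

    LiveIterator(const std::vector<Slot>* slots, std::size_t idx)
        : slots(slots)
        , idx(idx)
    {
        skipDead();
    }

    void skipDead()
    {
        while (idx < slots->size() && (*slots)[idx].dead)
            ++idx;
    }

    LiveIterator& operator++()
    {
        ++idx;
        skipDead();
        return *this;
    }

    const int& operator*() const
    {
        return (*slots)[idx].value;
    }

    bool operator!=(const LiveIterator& other) const
    {
        return idx != other.idx;
    }
};
```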

View File

@ -9,6 +9,7 @@
using namespace Luau;
LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_DYNAMIC_FASTINT(LuauSimplificationComplexityLimit)
namespace
{
@ -133,8 +134,8 @@ TEST_CASE_FIXTURE(SimplifyFixture, "unknown_and_other_tops_and_bottom_types")
CHECK(unknownTy == intersect(unknownTy, unknownTy));
CHECK("*error-type* | unknown" == intersectStr(unknownTy, anyTy));
CHECK("*error-type* | unknown" == intersectStr(anyTy, unknownTy));
CHECK("any" == intersectStr(unknownTy, anyTy));
CHECK("any" == intersectStr(anyTy, unknownTy));
CHECK(neverTy == intersect(unknownTy, neverTy));
CHECK(neverTy == intersect(neverTy, unknownTy));
@ -443,6 +444,7 @@ TEST_CASE_FIXTURE(SimplifyFixture, "union")
TEST_CASE_FIXTURE(SimplifyFixture, "two_unions")
{
ScopedFastInt sfi{DFInt::LuauSimplificationComplexityLimit, 10};
TypeId t1 = arena->addType(UnionType{{numberTy, booleanTy, stringTy, nilTy, tableTy}});
CHECK("false?" == intersectStr(t1, falsyTy));

View File

@ -693,4 +693,18 @@ TEST_CASE_FIXTURE(Fixture, "read_write_class_properties")
CHECK(builtinTypes->numberType == tm->givenType);
}
TEST_CASE_FIXTURE(ClassFixture, "cannot_index_a_class_with_no_indexer")
{
CheckResult result = check(R"(
local a = BaseClass.New()
local c = a[1]
)");
LUAU_REQUIRE_ERROR_COUNT(1, result);
CHECK_MESSAGE(get<DynamicPropertyLookupOnClassesUnsafe>(result.errors[0]), "Expected DynamicPropertyLookupOnClassesUnsafe but got " << result.errors[0]);
CHECK(builtinTypes->errorType == requireType("c"));
}
TEST_SUITE_END();

View File

@ -1141,4 +1141,42 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "luau_roact_useState_minimization")
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(BuiltinsFixture, "bin_prov")
{
CheckResult result = check(R"(
local Bin = {}
function Bin:add(item)
self.head = { item = item}
return item
end
function Bin:destroy()
while self.head do
local item = self.head.item
if type(item) == "function" then
item()
elseif item.Destroy ~= nil then
end
self.head = self.head.next
end
end
)");
}
TEST_CASE_FIXTURE(BuiltinsFixture, "update_phonemes_minimized")
{
CheckResult result = check(R"(
local video
function(response)
for index = 1, #response do
video = video
end
return video
end
)");
LUAU_REQUIRE_ERRORS(result);
}
TEST_SUITE_END();

View File

@ -323,6 +323,99 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "typeguard_in_assert_position")
REQUIRE_EQ("number", toString(requireType("b")));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "refine_unknown_to_table_then_test_a_prop")
{
CheckResult result = check(R"(
local function f(x: unknown): string?
if typeof(x) == "table" then
if typeof(x.foo) == "string" then
return x.foo
end
end
return nil
end
)");
if (FFlag::DebugLuauDeferredConstraintResolution)
LUAU_REQUIRE_NO_ERRORS(result);
else
{
LUAU_REQUIRE_ERROR_COUNT(2, result);
for (size_t i = 0; i < result.errors.size(); i++)
{
const UnknownProperty* up = get<UnknownProperty>(result.errors[i]);
REQUIRE_EQ("foo", up->key);
REQUIRE_EQ("unknown", toString(up->table));
}
}
}
TEST_CASE_FIXTURE(BuiltinsFixture, "refine_unknown_to_table_then_test_a_nested_prop")
{
CheckResult result = check(R"(
local function f(x: unknown): string?
if typeof(x) == "table" then
-- this should error, `x.foo` is an unknown property
if typeof(x.foo.bar) == "string" then
return x.foo.bar
end
end
return nil
end
)");
if (FFlag::DebugLuauDeferredConstraintResolution)
{
LUAU_REQUIRE_ERROR_COUNT(1, result);
const UnknownProperty* up = get<UnknownProperty>(result.errors[0]);
REQUIRE_EQ("bar", up->key);
REQUIRE_EQ("unknown", toString(up->table));
}
else
{
LUAU_REQUIRE_ERROR_COUNT(2, result);
for (size_t i = 0; i < result.errors.size(); i++)
{
const UnknownProperty* up = get<UnknownProperty>(result.errors[i]);
REQUIRE_EQ("foo", up->key);
REQUIRE_EQ("unknown", toString(up->table));
}
}
}
TEST_CASE_FIXTURE(BuiltinsFixture, "refine_unknown_to_table_then_test_a_tested_nested_prop")
{
CheckResult result = check(R"(
local function f(x: unknown): string?
if typeof(x) == "table" then
if typeof(x.foo) == "table" and typeof(x.foo.bar) == "string" then
return x.foo.bar
end
end
return nil
end
)");
if (FFlag::DebugLuauDeferredConstraintResolution)
LUAU_REQUIRE_NO_ERRORS(result);
else
{
LUAU_REQUIRE_ERROR_COUNT(3, result);
for (size_t i = 0; i < result.errors.size(); i++)
{
const UnknownProperty* up = get<UnknownProperty>(result.errors[i]);
REQUIRE_EQ("foo", up->key);
REQUIRE_EQ("unknown", toString(up->table));
}
}
}
TEST_CASE_FIXTURE(BuiltinsFixture, "call_an_incompatible_function_after_using_typeguard")
{
CheckResult result = check(R"(
@ -2057,4 +2150,5 @@ TEST_CASE_FIXTURE(RefinementClassFixture, "mutate_prop_of_some_refined_symbol_2"
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_SUITE_END();

View File

@ -25,6 +25,24 @@ LUAU_FASTFLAG(LuauReadWritePropertySyntax);
TEST_SUITE_BEGIN("TableTests");
TEST_CASE_FIXTURE(BuiltinsFixture, "LUAU_ASSERT_arg_exprs_doesnt_trigger_assert")
{
CheckResult result = check(R"(
local FadeValue = {}
function FadeValue.new(finalCallback)
local self = setmetatable({}, FadeValue)
self.finalCallback = finalCallback
return self
end
function FadeValue:destroy()
self.finalCallback()
self.finalCallback = nil
end
)");
}
TEST_CASE_FIXTURE(Fixture, "basic")
{
CheckResult result = check("local t = {foo = \"bar\", baz = 9, quux = nil}");
@ -2703,6 +2721,23 @@ TEST_CASE_FIXTURE(Fixture, "tables_get_names_from_their_locals")
CHECK_EQ("T", toString(requireType("T")));
}
TEST_CASE_FIXTURE(Fixture, "should_not_unblock_table_type_twice")
{
ScopedFastFlag sff(FFlag::DebugLuauDeferredConstraintResolution, true);
check(R"(
local timer = peek(timerQueue)
while timer ~= nil do
if timer.startTime <= currentTime then
timer.isQueued = true
end
timer = peek(timerQueue)
end
)");
// Simply running the check without crashing is enough to cover the original bug.
}
TEST_CASE_FIXTURE(Fixture, "generalize_table_argument")
{
CheckResult result = check(R"(
@ -4176,4 +4211,40 @@ TEST_CASE_FIXTURE(Fixture, "table_writes_introduce_write_properties")
"t1 = { read FindFirstChild: (t1, string) -> (a, b...) }" == toString(requireType("oc")));
}
TEST_CASE_FIXTURE(BuiltinsFixture, "tables_can_have_both_metatables_and_indexers")
{
CheckResult result = check(R"(
local a = {}
a[1] = 5
a[2] = 17
local t = {}
setmetatable(a, t)
local c = a[1]
print(a[1])
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK("number" == toString(requireType("c")));
}
TEST_CASE_FIXTURE(Fixture, "refined_thing_can_be_an_array")
{
CheckResult result = check(R"(
function foo(x, y)
if x then
return x[1]
else
return y
end
end
)");
LUAU_REQUIRE_NO_ERRORS(result);
CHECK("<a>({a}, a) -> a" == toString(requireType("foo")));
}
TEST_SUITE_END();

View File

@ -561,6 +561,41 @@ TEST_CASE_FIXTURE(Fixture, "dont_allow_cyclic_unions_to_be_inferred")
LUAU_REQUIRE_NO_ERRORS(result);
}
TEST_CASE_FIXTURE(Fixture, "indexing_into_a_cyclic_union_doesnt_crash")
{
// It shouldn't be possible to craft a cyclic union, but even if we do, we
// shouldn't blow up.
TypeArena& arena = frontend.globals.globalTypes;
unfreeze(arena);
TypeId badCyclicUnionTy = arena.freshType(frontend.globals.globalScope.get());
UnionType u;
u.options.push_back(badCyclicUnionTy);
u.options.push_back(arena.addType(TableType{{}, TableIndexer{builtinTypes->numberType, builtinTypes->numberType}, TypeLevel{}, frontend.globals.globalScope.get(), TableState::Sealed}));
asMutable(badCyclicUnionTy)->ty.emplace<UnionType>(std::move(u));
frontend.globals.globalScope->exportedTypeBindings["BadCyclicUnion"] = TypeFun{{}, badCyclicUnionTy};
freeze(arena);
CheckResult result = check(R"(
function f(x: BadCyclicUnion)
return x[0]
end
)");
// The old solver has a bug: It doesn't consider this goofy thing to be a
// table. It's not really important. What's important is that we don't
// crash, hang, or ICE.
if (FFlag::DebugLuauDeferredConstraintResolution)
LUAU_REQUIRE_NO_ERRORS(result);
else
LUAU_REQUIRE_ERROR_COUNT(1, result);
}
TEST_CASE_FIXTURE(BuiltinsFixture, "table_union_write_indirect")
{

View File

@ -111,4 +111,31 @@ end
testlinedefined()
-- don't leave garbage on the other thread
local wrapped1 = coroutine.create(function()
local thread = coroutine.create(function(target)
for i = 1, 100 do pcall(debug.info, target, 0, "llf") end
return 123
end)
local success, res = coroutine.resume(thread, coroutine.running())
assert(success)
assert(res == 123)
end)
coroutine.resume(wrapped1)
local wrapped2 = coroutine.create(function()
local thread = coroutine.create(function(target)
for i = 1, 100 do pcall(debug.info, target, 0, "ff") end
return 123
end)
local success, res = coroutine.resume(thread, coroutine.running())
assert(success)
assert(res == 123)
end)
coroutine.resume(wrapped2)
return 'OK'

View File

@ -427,4 +427,26 @@ end
assert(deadStoreChecks1() == 111)
local function extramath1(a)
return type(math.sign(a))
end
assert(extramath1(2) == "number")
assert(extramath1("2") == "number")
local function extramath2(a)
return type(math.modf(a))
end
assert(extramath2(2) == "number")
assert(extramath2("2") == "number")
local function extramath3(a)
local b, c = math.modf(a)
return type(c)
end
assert(extramath3(2) == "number")
assert(extramath3("2") == "number")
return('OK')

View File

@ -1,7 +1,6 @@
AstQuery.last_argument_function_call_type
AutocompleteTest.anonymous_autofilled_generic_on_argument_type_pack_vararg
AutocompleteTest.anonymous_autofilled_generic_type_pack_vararg
AutocompleteTest.autocomplete_response_perf1
AutocompleteTest.autocomplete_string_singleton_equality
AutocompleteTest.do_wrong_compatible_nonself_calls
AutocompleteTest.type_correct_suggestion_for_overloads
@ -18,7 +17,6 @@ BuiltinTests.gmatch_capture_types_default_capture
BuiltinTests.gmatch_capture_types_parens_in_sets_are_ignored
BuiltinTests.gmatch_capture_types_set_containing_lbracket
BuiltinTests.gmatch_definition
BuiltinTests.ipairs_iterator_should_infer_types_and_type_check
BuiltinTests.os_time_takes_optional_date_table
BuiltinTests.select_slightly_out_of_range
BuiltinTests.select_way_out_of_range
@ -32,22 +30,6 @@ BuiltinTests.string_format_report_all_type_errors_at_correct_positions
BuiltinTests.string_format_use_correct_argument2
BuiltinTests.table_freeze_is_generic
BuiltinTests.tonumber_returns_optional_number_type
ControlFlowAnalysis.if_not_x_break_elif_not_y_break
ControlFlowAnalysis.if_not_x_break_elif_not_y_continue
ControlFlowAnalysis.if_not_x_break_elif_not_y_fallthrough_elif_not_z_break
ControlFlowAnalysis.if_not_x_break_elif_rand_break_elif_not_y_break
ControlFlowAnalysis.if_not_x_break_elif_rand_break_elif_not_y_fallthrough
ControlFlowAnalysis.if_not_x_break_if_not_y_break
ControlFlowAnalysis.if_not_x_break_if_not_y_continue
ControlFlowAnalysis.if_not_x_continue_elif_not_y_continue
ControlFlowAnalysis.if_not_x_continue_elif_not_y_fallthrough_elif_not_z_continue
ControlFlowAnalysis.if_not_x_continue_elif_not_y_throw_elif_not_z_fallthrough
ControlFlowAnalysis.if_not_x_continue_elif_rand_continue_elif_not_y_continue
ControlFlowAnalysis.if_not_x_continue_elif_rand_continue_elif_not_y_fallthrough
ControlFlowAnalysis.if_not_x_continue_if_not_y_continue
ControlFlowAnalysis.if_not_x_continue_if_not_y_throw
ControlFlowAnalysis.if_not_x_return_elif_not_y_break
ControlFlowAnalysis.if_not_x_return_elif_not_y_fallthrough_elif_not_z_break
ControlFlowAnalysis.tagged_unions
DefinitionTests.class_definition_indexer
DefinitionTests.class_definition_overload_metamethods
@ -149,7 +131,6 @@ ProvisionalTests.do_not_ice_when_trying_to_pick_first_of_generic_type_pack
ProvisionalTests.error_on_eq_metamethod_returning_a_type_other_than_boolean
ProvisionalTests.expected_type_should_be_a_helpful_deduction_guide_for_function_calls
ProvisionalTests.floating_generics_should_not_be_allowed
ProvisionalTests.free_is_not_bound_to_any
ProvisionalTests.free_options_can_be_unified_together
ProvisionalTests.free_options_cannot_be_unified_together
ProvisionalTests.generic_type_leak_to_module_interface
@ -172,14 +153,10 @@ RefinementTest.discriminate_from_isa_of_x
RefinementTest.discriminate_from_truthiness_of_x
RefinementTest.discriminate_tag
RefinementTest.discriminate_tag_with_implicit_else
RefinementTest.fail_to_refine_a_property_of_subscript_expression
RefinementTest.falsiness_of_TruthyPredicate_narrows_into_nil
RefinementTest.globals_can_be_narrowed_too
RefinementTest.impossible_type_narrow_is_not_an_error
RefinementTest.index_on_a_refined_property
RefinementTest.isa_type_refinement_must_be_known_ahead_of_time
RefinementTest.narrow_property_of_a_bounded_variable
RefinementTest.nonoptional_type_can_narrow_to_nil_if_sense_is_true
RefinementTest.not_t_or_some_prop_of_t
RefinementTest.refine_a_param_that_got_resolved_during_constraint_solving_stage
RefinementTest.refine_a_property_of_some_global
@ -230,7 +207,7 @@ TableTests.generalize_table_argument
TableTests.generic_table_instantiation_potential_regression
TableTests.indexer_mismatch
TableTests.indexers_get_quantified_too
TableTests.indexing_from_a_table_should_prefer_properties_when_possible
TableTests.infer_indexer_from_array_like_table
TableTests.infer_indexer_from_its_variable_type_and_unifiable
TableTests.inferred_return_type_of_free_table
TableTests.instantiate_table_cloning_3
@ -335,10 +312,8 @@ TypeInfer.dont_report_type_errors_within_an_AstStatError
TypeInfer.globals
TypeInfer.globals2
TypeInfer.globals_are_banned_in_strict_mode
TypeInfer.infer_assignment_value_types
TypeInfer.infer_locals_via_assignment_from_its_call_site
TypeInfer.infer_through_group_expr
TypeInfer.infer_type_assertion_value_type
TypeInfer.no_stack_overflow_from_isoptional
TypeInfer.promote_tail_type_packs
TypeInfer.recursive_function_that_invokes_itself_with_a_refinement_of_its_parameter
@ -352,7 +327,6 @@ TypeInferAnyError.any_type_propagates
TypeInferAnyError.assign_prop_to_table_by_calling_any_yields_any
TypeInferAnyError.call_to_any_yields_any
TypeInferAnyError.can_subscript_any
TypeInferAnyError.intersection_of_any_can_have_props
TypeInferAnyError.metatable_of_any_can_be_a_table
TypeInferAnyError.quantify_any_does_not_bind_to_itself
TypeInferAnyError.replace_every_free_type_when_unifying_a_complex_function_with_any
@ -361,7 +335,6 @@ TypeInferClasses.cannot_unify_class_instance_with_primitive
TypeInferClasses.class_type_mismatch_with_name_conflict
TypeInferClasses.class_unification_type_mismatch_is_correct_order
TypeInferClasses.detailed_class_unification_error
TypeInferClasses.index_instance_property
TypeInferClasses.indexable_classes
TypeInferClasses.intersections_of_unions_of_classes
TypeInferClasses.optional_class_field_access_error
@ -471,7 +444,6 @@ TypeInferOperators.equality_operations_succeed_if_any_union_branch_succeeds
TypeInferOperators.error_on_invalid_operand_types_to_relational_operators2
TypeInferOperators.luau_polyfill_is_array
TypeInferOperators.mm_comparisons_must_return_a_boolean
TypeInferOperators.operator_eq_verifies_types_do_intersect
TypeInferOperators.refine_and_or
TypeInferOperators.reworked_and
TypeInferOperators.reworked_or

View File

@ -1,7 +1,7 @@
<?xml version="1.0" encoding="utf-8"?>
<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
<Type Name="::lua_TValue">
<Type Name="lua_TValue">
<DisplayString Condition="tt == lua_Type::LUA_TNIL">nil</DisplayString>
<DisplayString Condition="tt == lua_Type::LUA_TBOOLEAN">{(bool)value.b}</DisplayString>
<DisplayString Condition="tt == lua_Type::LUA_TLIGHTUSERDATA">lightuserdata {(uintptr_t)value.p,h} tag: {extra[0]}</DisplayString>
@ -39,7 +39,7 @@
</Expand>
</Type>
<Type Name="::TKey">
<Type Name="TKey">
<DisplayString Condition="tt == lua_Type::LUA_TNIL">nil</DisplayString>
<DisplayString Condition="tt == lua_Type::LUA_TBOOLEAN">{(bool)value.b}</DisplayString>
<DisplayString Condition="tt == lua_Type::LUA_TLIGHTUSERDATA">lightuserdata {(uintptr_t)value.p,h} tag: {extra[0]}</DisplayString>
@ -72,12 +72,12 @@
</Expand>
</Type>
<Type Name="::LuaNode">
<Type Name="LuaNode">
<DisplayString Condition="key.tt != lua_Type::LUA_TNIL || val.tt != lua_Type::LUA_TNIL">{key,na} = {val}</DisplayString>
<DisplayString Condition="key.tt == lua_Type::LUA_TNIL &amp;&amp; val.tt == 0">---</DisplayString>
</Type>
<Type Name="::Table">
<Type Name="Table">
<DisplayString>table</DisplayString>
<Expand>
<Item Name="metatable" Condition="metatable">metatable</Item>
@ -103,7 +103,7 @@
</Expand>
</Type>
<Type Name="::Udata">
<Type Name="Udata">
<Expand>
<CustomListItems>
<Variable Name="count" InitialValue="1&lt;&lt;metatable->lsizenode" />
@ -131,7 +131,7 @@
</Expand>
</Type>
<Type Name="::Closure">
<Type Name="Closure">
<DisplayString Condition="isC == 1" IncludeView="short">{c.f,na}</DisplayString>
<DisplayString Condition="isC == 0" IncludeView="short">{l.p,na}</DisplayString>
<DisplayString Condition="isC == 1" ExcludeView="short">{c}</DisplayString>
@ -139,11 +139,11 @@
<DisplayString>invalid</DisplayString>
</Type>
<Type Name="::TString">
<Type Name="TString">
<DisplayString>{data,s}</DisplayString>
</Type>
<Type Name="::CallInfo">
<Type Name="CallInfo">
<Intrinsic Name="cl" Category="Property" Expression="func->value.gc->cl"/>
<Intrinsic Name="isC" Category="Property" Expression="cl().isC"/>
<Intrinsic Name="proto" Category="Property" Expression="cl().l.p"/>
@ -163,7 +163,7 @@
<DisplayString Condition="isC()">=[C] {cl().c.f,na}</DisplayString>
</Type>
<Type Name ="::lua_State">
<Type Name ="lua_State">
<DisplayString Condition="ci">{ci,na}</DisplayString>
<DisplayString>thread</DisplayString>
<Expand>
@ -211,7 +211,7 @@
</Expand>
</Type>
<Type Name="::Proto">
<Type Name="Proto">
<DisplayString Condition="debugname">{source->data,sb}:{linedefined} function {debugname->data,sb} [{(int)numparams} arg, {(int)nups} upval]</DisplayString>
<DisplayString Condition="!debugname">{source->data,sb}:{linedefined} [{(int)numparams} arg, {(int)nups} upval]</DisplayString>
<Expand>
@ -266,7 +266,7 @@
</Expand>
</Type>
<Type Name="::GCheader">
<Type Name="GCheader">
<Expand>
<Synthetic Name="[type]">
<DisplayString>{(lua_Type)tt}</DisplayString>

View File

@ -6,9 +6,17 @@ import os.path
import subprocess as sp
import sys
import xml.sax as x
import colorama as c
c.init()
try:
import colorama as c
except ImportError:
class c:
class Fore:
RED=''
RESET=''
GREEN=''
else:
c.init()
SCRIPT_PATH = os.path.split(sys.argv[0])[0]
FAIL_LIST_PATH = os.path.join(SCRIPT_PATH, "faillist.txt")