mirror of https://github.com/luau-lang/luau.git (synced 2024-11-15 06:15:44 +08:00)
Sync to upstream/release/615 (#1175)
# What's changed?

* The Luau allocation scheme now handles allocations in the 513-1024 byte range internally, without falling back to the global allocator (see the sketch after this commit message)
* Coroutine/thread creation no longer requires any global allocations, making it up to 15% faster (vs libc malloc)
* Table construction for 17-32 keys or 33-64 array elements is up to 30% faster (vs libc malloc)

### New Type Solver

* Cyclic unary negation type families are reduced to `number` when possible
* Class types are skipped when searching for free types in the unifier to improve performance
* Fixed issues with table type inference when metatables are present
* Improved inference of iteration loop types
* Fixed an issue with bidirectional inference of method calls
* Type simplification now preserves error suppression markers

### Native Code Generation

* Fixed the TAG_VECTOR skip optimization so it does not break instruction use counts (the broken optimization was not included in 614)
* Fixed a missing side effect when optimizing the generic loop preparation instruction

---

### Internal Contributors

Co-authored-by: Aaron Weiss <aaronweiss@roblox.com>
Co-authored-by: Andy Friesen <afriesen@roblox.com>
Co-authored-by: Lily Brown <lbrown@roblox.com>
Co-authored-by: Vyacheslav Egorov <vegorov@roblox.com>

---------

Co-authored-by: Aaron Weiss <aaronweiss@roblox.com>
Co-authored-by: Alexander McCord <amccord@roblox.com>
Co-authored-by: Andy Friesen <afriesen@roblox.com>
Co-authored-by: Vighnesh <vvijay@roblox.com>
Co-authored-by: Aviral Goel <agoel@roblox.com>
Co-authored-by: David Cope <dcope@roblox.com>
Co-authored-by: Lily Brown <lbrown@roblox.com>
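The allocator change in the first bullet corresponds to the extended size-class table visible in the lmem diff further down this page. As a rough, standalone illustration only (not part of the commit), the size-class progression described there can be reproduced as follows; the function name and printout are my own, and the class counts are my own arithmetic from the loops shown in the diff:

```cpp
#include <cstdio>
#include <vector>

// Rebuild the size-class table the way the lmem diff describes it:
// multiples of 8 up to 64, of 16 up to 256, of 32 up to 512, and (new in
// this release) multiples of 64 up to 1024.
std::vector<int> buildSizeClasses()
{
    std::vector<int> classes;
    for (int size = 8; size < 64; size += 8)
        classes.push_back(size);
    for (int size = 64; size < 256; size += 16)
        classes.push_back(size);
    for (int size = 256; size < 512; size += 32)
        classes.push_back(size);
    for (int size = 512; size <= 1024; size += 64) // previously the table stopped at 512
        classes.push_back(size);
    return classes;
}

int main()
{
    std::vector<int> classes = buildSizeClasses();
    for (int size : classes)
        std::printf("%d ", size);
    // 36 classes in total (up from 28), which fits the raised LUA_SIZECLASSES limit of 40.
    std::printf("\ncount = %zu\n", classes.size());
}
```

Per the same diff, allocations above the 512-byte threshold are now served from the larger 32 KB pages rather than handed to the global allocator.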
This commit is contained in:
parent cc51e616ce
commit 443903aa00
@@ -764,16 +764,17 @@ TypeId makeStringMetatable(NotNull<BuiltinTypes> builtinTypes)
const TypeId numberType = builtinTypes->numberType;
const TypeId booleanType = builtinTypes->booleanType;
const TypeId stringType = builtinTypes->stringType;
const TypeId anyType = builtinTypes->anyType;

const TypeId optionalNumber = arena->addType(UnionType{{nilType, numberType}});
const TypeId optionalString = arena->addType(UnionType{{nilType, stringType}});
const TypeId optionalBoolean = arena->addType(UnionType{{nilType, booleanType}});

const TypePackId oneStringPack = arena->addTypePack({stringType});
const TypePackId anyTypePack = arena->addTypePack(TypePackVar{VariadicTypePack{anyType}, true});
const TypePackId anyTypePack = builtinTypes->anyTypePack;

FunctionType formatFTV{arena->addTypePack(TypePack{{stringType}, anyTypePack}), oneStringPack};
const TypePackId variadicTailPack = FFlag::DebugLuauDeferredConstraintResolution ? builtinTypes->unknownTypePack : anyTypePack;

FunctionType formatFTV{arena->addTypePack(TypePack{{stringType}, variadicTailPack}), oneStringPack};
formatFTV.magicFunction = &magicFunctionFormat;
const TypeId formatFn = arena->addType(formatFTV);
attachDcrMagicFunction(formatFn, dcrMagicFunctionFormat);
@@ -820,13 +821,13 @@ TypeId makeStringMetatable(NotNull<BuiltinTypes> builtinTypes)
{"split", {makeFunction(*arena, stringType, {}, {}, {optionalString}, {},
{arena->addType(TableType{{}, TableIndexer{numberType, stringType}, TypeLevel{}, TableState::Sealed})})}},
{"pack", {arena->addType(FunctionType{
arena->addTypePack(TypePack{{stringType}, anyTypePack}),
arena->addTypePack(TypePack{{stringType}, variadicTailPack}),
oneStringPack,
})}},
{"packsize", {makeFunction(*arena, stringType, {}, {}, {}, {}, {numberType})}},
{"unpack", {arena->addType(FunctionType{
arena->addTypePack(TypePack{{stringType, stringType, optionalNumber}}),
anyTypePack,
variadicTailPack,
})}},
};
@@ -2270,10 +2270,6 @@ std::tuple<TypeId, TypeId, RefinementId> ConstraintGenerator::checkBinary(
if (!key)
return {leftType, rightType, nullptr};

auto augmentForErrorSupression = [&](TypeId ty) -> TypeId {
return arena->addType(UnionType{{ty, builtinTypes->errorType}});
};

TypeId discriminantTy = builtinTypes->neverType;
if (typeguard->type == "nil")
discriminantTy = builtinTypes->nilType;
@@ -2288,9 +2284,9 @@ std::tuple<TypeId, TypeId, RefinementId> ConstraintGenerator::checkBinary(
else if (typeguard->type == "buffer")
discriminantTy = builtinTypes->bufferType;
else if (typeguard->type == "table")
discriminantTy = augmentForErrorSupression(builtinTypes->tableType);
discriminantTy = builtinTypes->tableType;
else if (typeguard->type == "function")
discriminantTy = augmentForErrorSupression(builtinTypes->functionType);
discriminantTy = builtinTypes->functionType;
else if (typeguard->type == "userdata")
{
// For now, we don't really care about being accurate with userdata if the typeguard was using typeof.
@@ -472,6 +472,11 @@ struct FreeTypeSearcher : TypeOnceVisitor
result->push_back({ty, location});
return false;
}

bool visit(TypeId, const ClassType&) override
{
return false;
}
};

} // namespace
@@ -672,13 +677,13 @@ bool ConstraintSolver::tryDispatch(const IterableConstraint& c, NotNull<const Co
return false;
};

auto [iteratorTypes, iteratorTail] = flatten(c.iterator);
if (iteratorTail && isBlocked(*iteratorTail))
return block_(*iteratorTail);
TypePack iterator = extendTypePack(*arena, builtinTypes, c.iterator, 3);
if (iterator.head.size() < 3 && iterator.tail && isBlocked(*iterator.tail))
return block_(*iterator.tail);

{
bool blocked = false;
for (TypeId t : iteratorTypes)
for (TypeId t : iterator.head)
{
if (isBlocked(t))
{
@@ -691,35 +696,32 @@ bool ConstraintSolver::tryDispatch(const IterableConstraint& c, NotNull<const Co
return false;
}

if (0 == iteratorTypes.size())
if (0 == iterator.head.size())
{
Anyification anyify{arena, constraint->scope, builtinTypes, &iceReporter, errorRecoveryType(), errorRecoveryTypePack()};
std::optional<TypePackId> anyified = anyify.substitute(c.variables);
LUAU_ASSERT(anyified);
unify(constraint, *anyified, c.variables);
unify(constraint, builtinTypes->anyTypePack, c.variables);

return true;
}

TypeId nextTy = follow(iteratorTypes[0]);
TypeId nextTy = follow(iterator.head[0]);
if (get<FreeType>(nextTy))
return block_(nextTy);

if (get<FunctionType>(nextTy))
{
TypeId tableTy = builtinTypes->nilType;
if (iteratorTypes.size() >= 2)
tableTy = iteratorTypes[1];
if (iterator.head.size() >= 2)
tableTy = iterator.head[1];

TypeId firstIndexTy = builtinTypes->nilType;
if (iteratorTypes.size() >= 3)
firstIndexTy = iteratorTypes[2];
if (iterator.head.size() >= 3)
firstIndexTy = iterator.head[2];

return tryDispatchIterableFunction(nextTy, tableTy, firstIndexTy, c, constraint, force);
}

else
return tryDispatchIterableTable(iteratorTypes[0], c, constraint, force);
return tryDispatchIterableTable(iterator.head[0], c, constraint, force);

return true;
}
@@ -1174,10 +1176,14 @@ bool ConstraintSolver::tryDispatch(const FunctionCheckConstraint& c, NotNull<con
const std::vector<TypeId> expectedArgs = flatten(ftv->argTypes).first;
const std::vector<TypeId> argPackHead = flatten(argsPack).first;

for (size_t i = 0; i < c.callSite->args.size && i < expectedArgs.size() && i < argPackHead.size(); ++i)
// If this is a self call, the types will have more elements than the AST call.
// We don't attempt to perform bidirectional inference on the self type.
const size_t typeOffset = c.callSite->self ? 1 : 0;

for (size_t i = 0; i < c.callSite->args.size && i + typeOffset < expectedArgs.size() && i + typeOffset < argPackHead.size(); ++i)
{
const TypeId expectedArgTy = follow(expectedArgs[i]);
const TypeId actualArgTy = follow(argPackHead[i]);
const TypeId expectedArgTy = follow(expectedArgs[i + typeOffset]);
const TypeId actualArgTy = follow(argPackHead[i + typeOffset]);
const AstExpr* expr = c.callSite->args.data[i];

(*c.astExpectedTypes)[expr] = expectedArgTy;
@@ -1375,7 +1381,7 @@ bool ConstraintSolver::tryDispatch(const SetPropConstraint& c, NotNull<const Con
}

auto bind = [&](TypeId a, TypeId b) {
bindBlockedType(a, b, c.subjectType, constraint->location);
bindBlockedType(a, b, subjectType, constraint->location);
};

if (existingPropType)
@@ -1387,6 +1393,8 @@ bool ConstraintSolver::tryDispatch(const SetPropConstraint& c, NotNull<const Con
return true;
}

const TypeId originalSubjectType = subjectType;

if (auto mt = get<MetatableType>(subjectType))
subjectType = follow(mt->table);

@@ -1419,7 +1427,7 @@ bool ConstraintSolver::tryDispatch(const SetPropConstraint& c, NotNull<const Con
}
}

bind(c.resultType, subjectType);
bind(c.resultType, originalSubjectType);
unblock(c.resultType, constraint->location);
return true;
}
@@ -1802,21 +1810,15 @@ bool ConstraintSolver::tryDispatchIterableTable(TypeId iteratorTy, const Iterabl
}

TypeId nextFn = iterRets.head[0];
TypeId table = iterRets.head.size() == 2 ? iterRets.head[1] : freshType(arena, builtinTypes, constraint->scope);

if (std::optional<TypeId> instantiatedNextFn = instantiate(builtinTypes, arena, NotNull{&limits}, constraint->scope, nextFn))
{
const TypeId firstIndex = freshType(arena, builtinTypes, constraint->scope);

// nextTy : (iteratorTy, indexTy?) -> (indexTy, valueTailTy...)
const TypePackId nextArgPack = arena->addTypePack({table, arena->addType(UnionType{{firstIndex, builtinTypes->nilType}})});
const TypePackId valueTailTy = arena->addTypePack(FreeTypePack{constraint->scope});
const TypePackId nextRetPack = arena->addTypePack(TypePack{{firstIndex}, valueTailTy});

const TypeId expectedNextTy = arena->addType(FunctionType{nextArgPack, nextRetPack});
unify(constraint, *instantiatedNextFn, expectedNextTy);
const FunctionType* nextFn = get<FunctionType>(*instantiatedNextFn);
LUAU_ASSERT(nextFn);
const TypePackId nextRetPack = nextFn->retTypes;

pushConstraint(constraint->scope, constraint->location, UnpackConstraint{c.variables, nextRetPack});
return true;
}
else
{
@@ -1864,31 +1866,13 @@ bool ConstraintSolver::tryDispatchIterableFunction(
return false;
}

TypeId firstIndex;
TypeId retIndex;
if (isNil(firstIndexTy) || isOptional(firstIndexTy))
{
// FIXME freshType is suspect here
firstIndex = arena->addType(UnionType{{freshType(arena, builtinTypes, constraint->scope), builtinTypes->nilType}});
retIndex = firstIndex;
}
else
{
firstIndex = firstIndexTy;
retIndex = arena->addType(UnionType{{firstIndexTy, builtinTypes->nilType}});
}
const FunctionType* nextFn = get<FunctionType>(nextTy);
// If this does not hold, we should've never called `tryDispatchIterableFunction` in the first place.
LUAU_ASSERT(nextFn);
const TypePackId nextRetPack = nextFn->retTypes;

// nextTy : (tableTy, indexTy?) -> (indexTy?, valueTailTy...)
const TypePackId nextArgPack = arena->addTypePack({tableTy, firstIndex});
const TypePackId valueTailTy = arena->addTypePack(FreeTypePack{constraint->scope});
const TypePackId nextRetPack = arena->addTypePack(TypePack{{retIndex}, valueTailTy});

const TypeId expectedNextTy = arena->addType(FunctionType{TypeLevel{}, constraint->scope, nextArgPack, nextRetPack});
bool ok = unify(constraint, nextTy, expectedNextTy);

// if there are no errors from unifying the two, we can pass forward the expected type as our selected resolution.
if (ok)
(*c.astForInNextTypes)[c.nextAstFragment] = expectedNextTy;
// the type of the `nextAstFragment` is the `nextTy`.
(*c.astForInNextTypes)[c.nextAstFragment] = nextTy;

auto it = begin(nextRetPack);
std::vector<TypeId> modifiedNextRetHead;
@@ -1988,7 +1972,7 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
return {{}, result};
}
}
else if (auto mt = get<MetatableType>(subjectType))
else if (auto mt = get<MetatableType>(subjectType); mt && context == ValueContext::RValue)
{
auto [blocked, result] = lookupTableProp(mt->table, propName, context, suppressSimplification, seen);
if (!blocked.empty() || result)
@@ -2023,6 +2007,8 @@ std::pair<std::vector<TypeId>, std::optional<TypeId>> ConstraintSolver::lookupTa
else
return lookupTableProp(indexType, propName, context, suppressSimplification, seen);
}
else if (get<MetatableType>(mtt))
return lookupTableProp(mtt, propName, context, suppressSimplification, seen);
}
else if (auto ct = get<ClassType>(subjectType))
{
@@ -1169,6 +1169,8 @@ ModulePtr check(const SourceModule& sourceModule, Mode mode, const std::vector<R
result->name = sourceModule.name;
result->humanReadableName = sourceModule.humanReadableName;

result->mode = sourceModule.mode.value_or(Mode::NoCheck);

result->internalTypes.owningModule = result.get();
result->interfaceTypes.owningModule = result.get();

@@ -1199,7 +1201,7 @@ ModulePtr check(const SourceModule& sourceModule, Mode mode, const std::vector<R
cg.visitModuleRoot(sourceModule.root);
result->errors = std::move(cg.errors);

ConstraintSolver cs{NotNull{&normalizer}, NotNull(cg.rootScope), borrowConstraints(cg.constraints), result->humanReadableName, moduleResolver,
ConstraintSolver cs{NotNull{&normalizer}, NotNull(cg.rootScope), borrowConstraints(cg.constraints), result->name, moduleResolver,
requireCycles, logger.get(), limits};

if (options.randomizeConstraintResolutionSeed)
@@ -1294,8 +1296,8 @@ ModulePtr Frontend::check(const SourceModule& sourceModule, Mode mode, std::vect
catch (const InternalCompilerError& err)
{
InternalCompilerError augmented = err.location.has_value()
? InternalCompilerError{err.message, sourceModule.humanReadableName, *err.location}
: InternalCompilerError{err.message, sourceModule.humanReadableName};
? InternalCompilerError{err.message, sourceModule.name, *err.location}
: InternalCompilerError{err.message, sourceModule.name};
throw augmented;
}
}
@@ -236,6 +236,8 @@ std::pair<OverloadResolver::Analysis, ErrorVec> OverloadResolver::checkOverload_
*/

Location argLocation;
if (reason.superPath.components.size() <= 1)
break;

if (const Luau::TypePath::Index* pathIndexComponent = get_if<Luau::TypePath::Index>(&reason.superPath.components.at(1)))
{
@@ -1033,9 +1033,17 @@ TypeId TypeSimplifier::intersectIntersectionWithType(TypeId left, TypeId right)

std::optional<TypeId> TypeSimplifier::basicIntersect(TypeId left, TypeId right)
{
if (get<AnyType>(left))
if (get<AnyType>(left) && get<ErrorType>(right))
return right;
if (get<AnyType>(right) && get<ErrorType>(left))
return left;
if (get<AnyType>(left))
return arena->addType(UnionType{{right, builtinTypes->errorType}});
if (get<AnyType>(right))
return arena->addType(UnionType{{left, builtinTypes->errorType}});
if (get<UnknownType>(left))
return right;
if (get<UnknownType>(right))
return left;
if (get<NeverType>(left))
return left;
@@ -1120,9 +1128,17 @@ TypeId TypeSimplifier::intersect(TypeId left, TypeId right)
left = simplify(left);
right = simplify(right);

if (get<AnyType>(left))
if (get<AnyType>(left) && get<ErrorType>(right))
return right;
if (get<AnyType>(right) && get<ErrorType>(left))
return left;
if (get<AnyType>(left))
return arena->addType(UnionType{{right, builtinTypes->errorType}});
if (get<AnyType>(right))
return arena->addType(UnionType{{left, builtinTypes->errorType}});
if (get<UnknownType>(left))
return right;
if (get<UnknownType>(right))
return left;
if (get<NeverType>(left))
return left;
@@ -1278,9 +1294,11 @@ TypeId TypeSimplifier::simplify(TypeId ty, DenseHashSet<TypeId>& seen)
{
TypeId negatedTy = follow(nt->ty);
if (get<AnyType>(negatedTy))
return arena->addType(UnionType{{builtinTypes->neverType, builtinTypes->errorType}});
else if (get<UnknownType>(negatedTy))
return builtinTypes->neverType;
else if (get<NeverType>(negatedTy))
return builtinTypes->anyType;
return builtinTypes->unknownType;
if (auto nnt = get<NegationType>(negatedTy))
return simplify(nnt->ty, seen);
}
@@ -726,7 +726,7 @@ SubtypingResult Subtyping::isCovariantWith(SubtypingEnvironment& env, TypePackId

if (TypePackId* other = env.mappedGenericPacks.find(*superTail))
// TODO: TypePath can't express "slice of a pack + its tail".
results.push_back(isCovariantWith(env, *other, subTailPack).withSuperComponent(TypePath::PackField::Tail));
results.push_back(isContravariantWith(env, subTailPack, *other).withSuperComponent(TypePath::PackField::Tail));
else
env.mappedGenericPacks.try_insert(*superTail, subTailPack);

@@ -1269,7 +1269,16 @@ struct TypeChecker2
return;
else if (isOptional(fnTy))
{
reportError(OptionalValueAccess{fnTy}, call->func->location);
switch (shouldSuppressErrors(NotNull{&normalizer}, fnTy))
{
case ErrorSuppression::Suppress:
break;
case ErrorSuppression::NormalizationFailed:
reportError(NormalizationTooComplex{}, call->func->location);
// fallthrough intentional
case ErrorSuppression::DoNotSuppress:
reportError(OptionalValueAccess{fnTy}, call->func->location);
}
return;
}

@@ -15,6 +15,7 @@
#include "Luau/TxnLog.h"
#include "Luau/Type.h"
#include "Luau/TypeCheckLimits.h"
#include "Luau/TypeFwd.h"
#include "Luau/TypeUtils.h"
#include "Luau/Unifier2.h"
#include "Luau/VecDeque.h"
@@ -861,20 +862,33 @@ static TypeFamilyReductionResult<TypeId> comparisonFamilyFn(TypeId instance, con
// lt< 'a, t> -> 'a is t - we'll solve the constraint, return and solve lt<t, t> -> bool
// lt< t, 'a> -> same as above
bool canSubmitConstraint = ctx->solver && ctx->constraint;
bool lhsFree = get<FreeType>(lhsTy) != nullptr;
bool rhsFree = get<FreeType>(rhsTy) != nullptr;
if (canSubmitConstraint)
{
if (get<FreeType>(lhsTy) && get<NeverType>(rhsTy) == nullptr)
// Implement injective type families for comparison type families
// lt <number, t> implies t is number
// lt <t, number> implies t is number
if (lhsFree && isNumber(rhsTy))
asMutable(lhsTy)->ty.emplace<BoundType>(ctx->builtins->numberType);
else if (rhsFree && isNumber(lhsTy))
asMutable(rhsTy)->ty.emplace<BoundType>(ctx->builtins->numberType);
else if (lhsFree && get<NeverType>(rhsTy) == nullptr)
{
auto c1 = ctx->solver->pushConstraint(ctx->scope, {}, EqualityConstraint{lhsTy, rhsTy});
const_cast<Constraint*>(ctx->constraint)->dependencies.emplace_back(c1);
}
else if (get<FreeType>(rhsTy) && get<NeverType>(lhsTy) == nullptr)
else if (rhsFree && get<NeverType>(lhsTy) == nullptr)
{
auto c1 = ctx->solver->pushConstraint(ctx->scope, {}, EqualityConstraint{rhsTy, lhsTy});
const_cast<Constraint*>(ctx->constraint)->dependencies.emplace_back(c1);
}
}

// The above might have caused the operand types to be rebound, we need to follow them again
lhsTy = follow(lhsTy);
rhsTy = follow(rhsTy);

// check to see if both operand types are resolved enough, and wait to reduce if not
if (isPending(lhsTy, ctx->solver))
return {std::nullopt, false, {lhsTy}, {}};
@@ -432,6 +432,13 @@ struct TraversalState

if (auto tt = get<TableType>(current); tt && tt->indexer)
indexer = &(*tt->indexer);
else if (auto mt = get<MetatableType>(current))
{
if (auto mtTab = get<TableType>(follow(mt->table)); mtTab && mtTab->indexer)
indexer = &(*mtTab->indexer);
else if (auto mtMt = get<TableType>(follow(mt->metatable)); mtMt && mtMt->indexer)
indexer = &(*mtMt->indexer);
}
// Note: we don't appear to walk the class hierarchy for indexers
else if (auto ct = get<ClassType>(current); ct && ct->indexer)
indexer = &(*ct->indexer);
@@ -401,6 +401,9 @@ Unifier::Unifier(NotNull<Normalizer> normalizer, NotNull<Scope> scope, const Loc
, sharedState(*normalizer->sharedState)
{
LUAU_ASSERT(sharedState.iceHandler);

// Unifier is not usable when this flag is enabled! Please consider using Subtyping instead.
LUAU_ASSERT(!FFlag::DebugLuauDeferredConstraintResolution);
}

void Unifier::tryUnify(TypeId subTy, TypeId superTy, bool isFunctionCall, bool isIntersection, const LiteralProperties* literalProperties)
@@ -580,6 +580,11 @@ struct FreeTypeSearcher : TypeVisitor

return false;
}

bool visit(TypeId, const ClassType&) override
{
return false;
}
};

struct MutatingGeneralizer : TypeOnceVisitor
@@ -6,8 +6,6 @@
#include <stdarg.h>
#include <stdio.h>

LUAU_FASTFLAGVARIABLE(LuauCache32BitAsmConsts, false)

namespace Luau
{
namespace CodeGen
@@ -1041,33 +1039,24 @@ OperandX64 AssemblyBuilderX64::i64(int64_t value)

OperandX64 AssemblyBuilderX64::f32(float value)
{
if (FFlag::LuauCache32BitAsmConsts)
uint32_t as32BitKey;
static_assert(sizeof(as32BitKey) == sizeof(value), "Expecting float to be 32-bit");
memcpy(&as32BitKey, &value, sizeof(value));

if (as32BitKey != ~0u)
{
uint32_t as32BitKey;
static_assert(sizeof(as32BitKey) == sizeof(value), "Expecting float to be 32-bit");
memcpy(&as32BitKey, &value, sizeof(value));

if (as32BitKey != ~0u)
{
if (int32_t* prev = constCache32.find(as32BitKey))
return OperandX64(SizeX64::dword, noreg, 1, rip, *prev);
}

size_t pos = allocateData(4, 4);
writef32(&data[pos], value);
int32_t offset = int32_t(pos - data.size());

if (as32BitKey != ~0u)
constCache32[as32BitKey] = offset;

return OperandX64(SizeX64::dword, noreg, 1, rip, offset);
}
else
{
size_t pos = allocateData(4, 4);
writef32(&data[pos], value);
return OperandX64(SizeX64::dword, noreg, 1, rip, int32_t(pos - data.size()));
if (int32_t* prev = constCache32.find(as32BitKey))
return OperandX64(SizeX64::dword, noreg, 1, rip, *prev);
}

size_t pos = allocateData(4, 4);
writef32(&data[pos], value);
int32_t offset = int32_t(pos - data.size());

if (as32BitKey != ~0u)
constCache32[as32BitKey] = offset;

return OperandX64(SizeX64::dword, noreg, 1, rip, offset);
}

OperandX64 AssemblyBuilderX64::f64(double value)
@@ -11,10 +11,9 @@
#include "lstate.h"
#include "lgc.h"

LUAU_DYNAMIC_FASTFLAGVARIABLE(LuauCodeGenFixBufferLenCheckA64, false)
LUAU_FASTFLAGVARIABLE(LuauCodeGenVectorA64, false)

LUAU_FASTFLAG(LuauCodegenVectorTag)
LUAU_FASTFLAG(LuauCodegenVectorTag2)

namespace Luau
{
@@ -680,7 +679,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
{
build.fadd(inst.regA64, regOp(inst.a), regOp(inst.b));

if (!FFlag::LuauCodegenVectorTag)
if (!FFlag::LuauCodegenVectorTag2)
{
RegisterA64 tempw = regs.allocTemp(KindA64::w);
build.mov(tempw, LUA_TVECTOR);
@@ -710,7 +709,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
{
build.fsub(inst.regA64, regOp(inst.a), regOp(inst.b));

if (!FFlag::LuauCodegenVectorTag)
if (!FFlag::LuauCodegenVectorTag2)
{
RegisterA64 tempw = regs.allocTemp(KindA64::w);
build.mov(tempw, LUA_TVECTOR);
@@ -740,7 +739,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
{
build.fmul(inst.regA64, regOp(inst.a), regOp(inst.b));

if (!FFlag::LuauCodegenVectorTag)
if (!FFlag::LuauCodegenVectorTag2)
{
RegisterA64 tempw = regs.allocTemp(KindA64::w);
build.mov(tempw, LUA_TVECTOR);
@@ -770,7 +769,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
{
build.fdiv(inst.regA64, regOp(inst.a), regOp(inst.b));

if (!FFlag::LuauCodegenVectorTag)
if (!FFlag::LuauCodegenVectorTag2)
{
RegisterA64 tempw = regs.allocTemp(KindA64::w);
build.mov(tempw, LUA_TVECTOR);
@@ -800,7 +799,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
{
build.fneg(inst.regA64, regOp(inst.a));

if (!FFlag::LuauCodegenVectorTag)
if (!FFlag::LuauCodegenVectorTag2)
{
RegisterA64 tempw = regs.allocTemp(KindA64::w);
build.mov(tempw, LUA_TVECTOR);
@@ -1184,7 +1183,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.fcvt(temps, tempd);
build.dup_4s(inst.regA64, castReg(KindA64::q, temps), 0);

if (!FFlag::LuauCodegenVectorTag)
if (!FFlag::LuauCodegenVectorTag2)
{
build.mov(tempw, LUA_TVECTOR);
build.ins_4s(inst.regA64, tempw, 3);
@@ -1629,11 +1628,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
RegisterA64 tempx = castReg(KindA64::x, temp);
build.sub(tempx, tempx, regOp(inst.b)); // implicit uxtw
build.cmp(tempx, uint16_t(accessSize));

if (DFFlag::LuauCodeGenFixBufferLenCheckA64)
build.b(ConditionA64::Less, target); // note: this is a signed 64-bit comparison so that out of bounds offset fails
else
build.b(ConditionA64::LessEqual, target); // note: this is a signed 64-bit comparison so that out of bounds offset fails
build.b(ConditionA64::Less, target); // note: this is a signed 64-bit comparison so that out of bounds offset fails
}
}
else if (inst.b.kind == IrOpKind::Constant)
@@ -15,7 +15,7 @@
#include "lstate.h"
#include "lgc.h"

LUAU_FASTFLAG(LuauCodegenVectorTag)
LUAU_FASTFLAG(LuauCodegenVectorTag2)
LUAU_FASTFLAGVARIABLE(LuauCodegenVectorOptAnd, false)

namespace Luau
@@ -612,7 +612,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)

build.vaddps(inst.regX64, tmpa, tmpb);

if (!FFlag::LuauCodegenVectorTag)
if (!FFlag::LuauCodegenVectorTag2)
build.vorps(inst.regX64, inst.regX64, vectorOrMaskOp());
break;
}
@@ -627,7 +627,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
RegisterX64 tmpb = (inst.a == inst.b) ? tmpa : vecOp(inst.b, tmp2);

build.vsubps(inst.regX64, tmpa, tmpb);
if (!FFlag::LuauCodegenVectorTag)
if (!FFlag::LuauCodegenVectorTag2)
build.vorps(inst.regX64, inst.regX64, vectorOrMaskOp());
break;
}
@@ -642,7 +642,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
RegisterX64 tmpb = (inst.a == inst.b) ? tmpa : vecOp(inst.b, tmp2);

build.vmulps(inst.regX64, tmpa, tmpb);
if (!FFlag::LuauCodegenVectorTag)
if (!FFlag::LuauCodegenVectorTag2)
build.vorps(inst.regX64, inst.regX64, vectorOrMaskOp());
break;
}
@@ -657,7 +657,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
RegisterX64 tmpb = (inst.a == inst.b) ? tmpa : vecOp(inst.b, tmp2);

build.vdivps(inst.regX64, tmpa, tmpb);
if (!FFlag::LuauCodegenVectorTag)
if (!FFlag::LuauCodegenVectorTag2)
build.vpinsrd(inst.regX64, inst.regX64, build.i32(LUA_TVECTOR), 3);
break;
}
@@ -677,7 +677,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.vxorpd(inst.regX64, inst.regX64, build.f32x4(-0.0, -0.0, -0.0, -0.0));
}

if (!FFlag::LuauCodegenVectorTag)
if (!FFlag::LuauCodegenVectorTag2)
build.vpinsrd(inst.regX64, inst.regX64, build.i32(LUA_TVECTOR), 3);
break;
}
@@ -983,7 +983,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
static_assert(sizeof(asU32) == sizeof(value), "Expecting float to be 32-bit");
memcpy(&asU32, &value, sizeof(value));

if (FFlag::LuauCodegenVectorTag)
if (FFlag::LuauCodegenVectorTag2)
build.vmovaps(inst.regX64, build.u32x4(asU32, asU32, asU32, 0));
else
build.vmovaps(inst.regX64, build.u32x4(asU32, asU32, asU32, LUA_TVECTOR));
@@ -993,7 +993,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
build.vcvtsd2ss(inst.regX64, inst.regX64, memRegDoubleOp(inst.a));
build.vpshufps(inst.regX64, inst.regX64, inst.regX64, 0b00'00'00'00);

if (!FFlag::LuauCodegenVectorTag)
if (!FFlag::LuauCodegenVectorTag2)
build.vpinsrd(inst.regX64, inst.regX64, build.i32(LUA_TVECTOR), 3);
}
break;
@@ -2237,7 +2237,7 @@ OperandX64 IrLoweringX64::bufferAddrOp(IrOp bufferOp, IrOp indexOp)

RegisterX64 IrLoweringX64::vecOp(IrOp op, ScopedRegX64& tmp)
{
if (FFlag::LuauCodegenVectorOptAnd && FFlag::LuauCodegenVectorTag)
if (FFlag::LuauCodegenVectorOptAnd && FFlag::LuauCodegenVectorTag2)
{
IrInst source = function.instOp(op);
CODEGEN_ASSERT(source.cmd != IrCmd::SUBSTITUTE); // we don't process substitutions
@@ -2298,7 +2298,7 @@ OperandX64 IrLoweringX64::vectorAndMaskOp()

OperandX64 IrLoweringX64::vectorOrMaskOp()
{
CODEGEN_ASSERT(!FFlag::LuauCodegenVectorTag);
CODEGEN_ASSERT(!FFlag::LuauCodegenVectorTag2);
if (vectorOrMask.base == noreg)
vectorOrMask = build.u32x4(0, 0, 0, LUA_TVECTOR);

@@ -12,8 +12,7 @@
#include "lstate.h"
#include "ltm.h"

LUAU_FASTFLAGVARIABLE(LuauCodegenLuData, false)
LUAU_FASTFLAGVARIABLE(LuauCodegenVector, false)
LUAU_FASTFLAGVARIABLE(LuauCodegenVectorTag2, false)
LUAU_FASTFLAGVARIABLE(LuauCodegenVectorTag, false)

namespace Luau
@@ -354,100 +353,97 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc,
{
BytecodeTypes bcTypes = build.function.getBytecodeTypesAt(pcpos);

if (FFlag::LuauCodegenVector)
// Special fast-paths for vectors, matching the cases we have in VM
if (bcTypes.a == LBC_TYPE_VECTOR && bcTypes.b == LBC_TYPE_VECTOR && (tm == TM_ADD || tm == TM_SUB || tm == TM_MUL || tm == TM_DIV))
{
// Special fast-paths for vectors, matching the cases we have in VM
if (bcTypes.a == LBC_TYPE_VECTOR && bcTypes.b == LBC_TYPE_VECTOR && (tm == TM_ADD || tm == TM_SUB || tm == TM_MUL || tm == TM_DIV))
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rc)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));

IrOp vb = build.inst(IrCmd::LOAD_TVALUE, opb);
IrOp vc = build.inst(IrCmd::LOAD_TVALUE, opc);
IrOp result;

switch (tm)
{
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rc)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));

IrOp vb = build.inst(IrCmd::LOAD_TVALUE, opb);
IrOp vc = build.inst(IrCmd::LOAD_TVALUE, opc);
IrOp result;

switch (tm)
{
case TM_ADD:
result = build.inst(IrCmd::ADD_VEC, vb, vc);
break;
case TM_SUB:
result = build.inst(IrCmd::SUB_VEC, vb, vc);
break;
case TM_MUL:
result = build.inst(IrCmd::MUL_VEC, vb, vc);
break;
case TM_DIV:
result = build.inst(IrCmd::DIV_VEC, vb, vc);
break;
default:
CODEGEN_ASSERT(!"Unknown TM op");
}

if (FFlag::LuauCodegenVectorTag)
result = build.inst(IrCmd::TAG_VECTOR, result);

build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), result);
return;
case TM_ADD:
result = build.inst(IrCmd::ADD_VEC, vb, vc);
break;
case TM_SUB:
result = build.inst(IrCmd::SUB_VEC, vb, vc);
break;
case TM_MUL:
result = build.inst(IrCmd::MUL_VEC, vb, vc);
break;
case TM_DIV:
result = build.inst(IrCmd::DIV_VEC, vb, vc);
break;
default:
CODEGEN_ASSERT(!"Unknown TM op");
}
else if (bcTypes.a == LBC_TYPE_NUMBER && bcTypes.b == LBC_TYPE_VECTOR && (tm == TM_MUL || tm == TM_DIV))

if (FFlag::LuauCodegenVectorTag2)
result = build.inst(IrCmd::TAG_VECTOR, result);

build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), result);
return;
}
else if (bcTypes.a == LBC_TYPE_NUMBER && bcTypes.b == LBC_TYPE_VECTOR && (tm == TM_MUL || tm == TM_DIV))
{
if (rb != -1)
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TNUMBER), build.vmExit(pcpos));

build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rc)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));

IrOp vb = build.inst(IrCmd::NUM_TO_VEC, loadDoubleOrConstant(build, opb));
IrOp vc = build.inst(IrCmd::LOAD_TVALUE, opc);
IrOp result;

switch (tm)
{
if (rb != -1)
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TNUMBER), build.vmExit(pcpos));

build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rc)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));

IrOp vb = build.inst(IrCmd::NUM_TO_VEC, loadDoubleOrConstant(build, opb));
IrOp vc = build.inst(IrCmd::LOAD_TVALUE, opc);
IrOp result;

switch (tm)
{
case TM_MUL:
result = build.inst(IrCmd::MUL_VEC, vb, vc);
break;
case TM_DIV:
result = build.inst(IrCmd::DIV_VEC, vb, vc);
break;
default:
CODEGEN_ASSERT(!"Unknown TM op");
}

if (FFlag::LuauCodegenVectorTag)
result = build.inst(IrCmd::TAG_VECTOR, result);

build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), result);
return;
case TM_MUL:
result = build.inst(IrCmd::MUL_VEC, vb, vc);
break;
case TM_DIV:
result = build.inst(IrCmd::DIV_VEC, vb, vc);
break;
default:
CODEGEN_ASSERT(!"Unknown TM op");
}
else if (bcTypes.a == LBC_TYPE_VECTOR && bcTypes.b == LBC_TYPE_NUMBER && (tm == TM_MUL || tm == TM_DIV))

if (FFlag::LuauCodegenVectorTag2)
result = build.inst(IrCmd::TAG_VECTOR, result);

build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), result);
return;
}
else if (bcTypes.a == LBC_TYPE_VECTOR && bcTypes.b == LBC_TYPE_NUMBER && (tm == TM_MUL || tm == TM_DIV))
{
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));

if (rc != -1)
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rc)), build.constTag(LUA_TNUMBER), build.vmExit(pcpos));

IrOp vb = build.inst(IrCmd::LOAD_TVALUE, opb);
IrOp vc = build.inst(IrCmd::NUM_TO_VEC, loadDoubleOrConstant(build, opc));
IrOp result;

switch (tm)
{
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));

if (rc != -1)
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rc)), build.constTag(LUA_TNUMBER), build.vmExit(pcpos));

IrOp vb = build.inst(IrCmd::LOAD_TVALUE, opb);
IrOp vc = build.inst(IrCmd::NUM_TO_VEC, loadDoubleOrConstant(build, opc));
IrOp result;

switch (tm)
{
case TM_MUL:
result = build.inst(IrCmd::MUL_VEC, vb, vc);
break;
case TM_DIV:
result = build.inst(IrCmd::DIV_VEC, vb, vc);
break;
default:
CODEGEN_ASSERT(!"Unknown TM op");
}

if (FFlag::LuauCodegenVectorTag)
result = build.inst(IrCmd::TAG_VECTOR, result);

build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), result);
return;
case TM_MUL:
result = build.inst(IrCmd::MUL_VEC, vb, vc);
break;
case TM_DIV:
result = build.inst(IrCmd::DIV_VEC, vb, vc);
break;
default:
CODEGEN_ASSERT(!"Unknown TM op");
}

if (FFlag::LuauCodegenVectorTag2)
result = build.inst(IrCmd::TAG_VECTOR, result);

build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), result);
return;
}

IrOp fallback;
@@ -467,30 +463,10 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc,
bcTypes.b == LBC_TYPE_NUMBER ? build.vmExit(pcpos) : getInitializedFallback(build, fallback));
}

IrOp vb, vc;
IrOp vb = loadDoubleOrConstant(build, opb);
IrOp vc;
IrOp result;

if (FFlag::LuauCodegenVector)
{
vb = loadDoubleOrConstant(build, opb);
}
else
{
if (opb.kind == IrOpKind::VmConst)
{
CODEGEN_ASSERT(build.function.proto);
TValue protok = build.function.proto->k[vmConstOp(opb)];

CODEGEN_ASSERT(protok.tt == LUA_TNUMBER);

vb = build.constDouble(protok.value.n);
}
else
{
vb = build.inst(IrCmd::LOAD_DOUBLE, opb);
}
}

if (opc.kind == IrOpKind::VmConst)
{
CODEGEN_ASSERT(build.function.proto);
@@ -600,13 +576,13 @@ void translateInstMinus(IrBuilder& build, const Instruction* pc, int pcpos)
int ra = LUAU_INSN_A(*pc);
int rb = LUAU_INSN_B(*pc);

if (FFlag::LuauCodegenVector && bcTypes.a == LBC_TYPE_VECTOR)
if (bcTypes.a == LBC_TYPE_VECTOR)
{
build.inst(IrCmd::CHECK_TAG, build.inst(IrCmd::LOAD_TAG, build.vmReg(rb)), build.constTag(LUA_TVECTOR), build.vmExit(pcpos));

IrOp vb = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(rb));
IrOp va = build.inst(IrCmd::UNM_VEC, vb);
if (FFlag::LuauCodegenVectorTag)
if (FFlag::LuauCodegenVectorTag2)
va = build.inst(IrCmd::TAG_VECTOR, va);
build.inst(IrCmd::STORE_TVALUE, build.vmReg(ra), va);
return;
@@ -940,10 +916,7 @@ void translateInstForGPrepNext(IrBuilder& build, const Instruction* pc, int pcpo

// setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)), LU_TAG_ITERATOR);
build.inst(IrCmd::STORE_POINTER, build.vmReg(ra + 2), build.constInt(0));

if (FFlag::LuauCodegenLuData)
build.inst(IrCmd::STORE_EXTRA, build.vmReg(ra + 2), build.constInt(LU_TAG_ITERATOR));

build.inst(IrCmd::STORE_EXTRA, build.vmReg(ra + 2), build.constInt(LU_TAG_ITERATOR));
build.inst(IrCmd::STORE_TAG, build.vmReg(ra + 2), build.constTag(LUA_TLIGHTUSERDATA));

build.inst(IrCmd::JUMP, target);
@@ -976,10 +949,7 @@ void translateInstForGPrepInext(IrBuilder& build, const Instruction* pc, int pcp

// setpvalue(ra + 2, reinterpret_cast<void*>(uintptr_t(0)), LU_TAG_ITERATOR);
build.inst(IrCmd::STORE_POINTER, build.vmReg(ra + 2), build.constInt(0));

if (FFlag::LuauCodegenLuData)
build.inst(IrCmd::STORE_EXTRA, build.vmReg(ra + 2), build.constInt(LU_TAG_ITERATOR));

build.inst(IrCmd::STORE_EXTRA, build.vmReg(ra + 2), build.constInt(LU_TAG_ITERATOR));
build.inst(IrCmd::STORE_TAG, build.vmReg(ra + 2), build.constTag(LUA_TLIGHTUSERDATA));

build.inst(IrCmd::JUMP, target);
@@ -1225,7 +1195,7 @@ void translateInstGetTableKS(IrBuilder& build, const Instruction* pc, int pcpos)

IrOp tb = build.inst(IrCmd::LOAD_TAG, build.vmReg(rb));

if (FFlag::LuauCodegenVector && bcTypes.a == LBC_TYPE_VECTOR)
if (bcTypes.a == LBC_TYPE_VECTOR)
{
build.inst(IrCmd::CHECK_TAG, tb, build.constTag(LUA_TVECTOR), build.vmExit(pcpos));

@@ -17,9 +17,8 @@
LUAU_FASTINTVARIABLE(LuauCodeGenMinLinearBlockPath, 3)
LUAU_FASTINTVARIABLE(LuauCodeGenReuseSlotLimit, 64)
LUAU_FASTFLAGVARIABLE(DebugLuauAbortingChecks, false)
LUAU_FASTFLAG(LuauCodegenVector)
LUAU_FASTFLAG(LuauCodegenVectorTag)
LUAU_DYNAMIC_FASTFLAGVARIABLE(LuauCodeGenCheckGcEffectFix, false)
LUAU_FASTFLAG(LuauCodegenVectorTag2)
LUAU_DYNAMIC_FASTFLAGVARIABLE(LuauCodeGenCoverForgprepEffect, false)

namespace Luau
{
@@ -712,11 +711,11 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
uint8_t tag = state.tryGetTag(inst.b);

// We know the tag of some instructions that result in TValue
if (FFlag::LuauCodegenVector && tag == 0xff)
if (tag == 0xff)
{
if (IrInst* arg = function.asInstOp(inst.b))
{
if (FFlag::LuauCodegenVectorTag)
if (FFlag::LuauCodegenVectorTag2)
{
if (arg->cmd == IrCmd::TAG_VECTOR)
tag = LUA_TVECTOR;
@@ -1050,11 +1049,8 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
{
state.checkedGc = true;

if (DFFlag::LuauCodeGenCheckGcEffectFix)
{
// GC assist might modify table data (hash part)
state.invalidateHeapTableData();
}
// GC assist might modify table data (hash part)
state.invalidateHeapTableData();
}
break;
case IrCmd::BARRIER_OBJ:
@@ -1264,20 +1260,21 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
case IrCmd::SUB_VEC:
case IrCmd::MUL_VEC:
case IrCmd::DIV_VEC:
if (FFlag::LuauCodegenVectorTag)
if (FFlag::LuauCodegenVectorTag2)
{
if (IrInst* a = function.asInstOp(inst.a); a && a->cmd == IrCmd::TAG_VECTOR)
inst.a = a->a;
replace(function, inst.a, a->a);

if (IrInst* b = function.asInstOp(inst.b); b && b->cmd == IrCmd::TAG_VECTOR)
inst.b = b->a;
replace(function, inst.b, b->a);
}
break;

case IrCmd::UNM_VEC:
if (FFlag::LuauCodegenVectorTag)
if (FFlag::LuauCodegenVectorTag2)
{
if (IrInst* a = function.asInstOp(inst.a); a && a->cmd == IrCmd::TAG_VECTOR)
inst.a = a->a;
replace(function, inst.a, a->a);
}
break;

@@ -1409,6 +1406,9 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
state.invalidate(IrOp{inst.b.kind, vmRegOp(inst.b) + 0u});
state.invalidate(IrOp{inst.b.kind, vmRegOp(inst.b) + 1u});
state.invalidate(IrOp{inst.b.kind, vmRegOp(inst.b) + 2u});

if (DFFlag::LuauCodeGenCoverForgprepEffect)
state.invalidateUserCall();
break;
}
}
@@ -5,8 +5,6 @@

#include <utility>

LUAU_FASTFLAGVARIABLE(LuauCodegenMathMemArgs, false)

namespace Luau
{
namespace CodeGen
@@ -116,7 +114,7 @@ static void optimizeMemoryOperandsX64(IrFunction& function, IrBlock& block)
case IrCmd::SQRT_NUM:
case IrCmd::ABS_NUM:
{
if (FFlag::LuauCodegenMathMemArgs && inst.a.kind == IrOpKind::Inst)
if (inst.a.kind == IrOpKind::Inst)
{
IrInst& arg = function.instOp(inst.a);

@@ -108,7 +108,7 @@

// upper bound for number of size classes used by page allocator
#ifndef LUA_SIZECLASSES
#define LUA_SIZECLASSES 32
#define LUA_SIZECLASSES 40
#endif

// available number of separate memory categories
@@ -120,9 +120,19 @@ static_assert(offsetof(Udata, data) == ABISWITCH(16, 16, 12), "size mismatch for
static_assert(sizeof(Table) == ABISWITCH(48, 32, 32), "size mismatch for table header");
static_assert(offsetof(Buffer, data) == ABISWITCH(8, 8, 8), "size mismatch for buffer header");

LUAU_FASTFLAGVARIABLE(LuauExtendedSizeClasses, false)

const size_t kSizeClasses = LUA_SIZECLASSES;
const size_t kMaxSmallSize = 512;
const size_t kPageSize = 16 * 1024 - 24; // slightly under 16KB since that results in less fragmentation due to heap metadata
const size_t kMaxSmallSize_DEPRECATED = 512; // TODO: remove with FFlagLuauExtendedSizeClasses
const size_t kMaxSmallSize = 1024;
const size_t kLargePageThreshold = 512; // larger pages are used for objects larger than this size to fit more of them into a page

// constant factor to reduce our page sizes by, to increase the chances that pages we allocate will
// allow external allocators to allocate them without wasting space due to rounding introduced by their heap meta data
const size_t kExternalAllocatorMetaDataReduction = 24;

const size_t kSmallPageSize = 16 * 1024 - kExternalAllocatorMetaDataReduction;
const size_t kLargePageSize = 32 * 1024 - kExternalAllocatorMetaDataReduction;

const size_t kBlockHeader = sizeof(double) > sizeof(void*) ? sizeof(double) : sizeof(void*); // suitable for aligning double & void* on all platforms
const size_t kGCOLinkOffset = (sizeof(GCheader) + sizeof(void*) - 1) & ~(sizeof(void*) - 1); // GCO pages contain freelist links after the GC header
@@ -143,6 +153,7 @@ struct SizeClassConfig
// - we first allocate sizes classes in multiples of 8
// - after the first cutoff we allocate size classes in multiples of 16
// - after the second cutoff we allocate size classes in multiples of 32
// - after the third cutoff we allocate size classes in multiples of 64
// this balances internal fragmentation vs external fragmentation
for (int size = 8; size < 64; size += 8)
sizeOfClass[classCount++] = size;
@@ -150,7 +161,10 @@ struct SizeClassConfig
for (int size = 64; size < 256; size += 16)
sizeOfClass[classCount++] = size;

for (int size = 256; size <= 512; size += 32)
for (int size = 256; size < 512; size += 32)
sizeOfClass[classCount++] = size;

for (int size = 512; size <= 1024; size += 64)
sizeOfClass[classCount++] = size;

LUAU_ASSERT(size_t(classCount) <= kSizeClasses);
@@ -169,7 +183,8 @@ struct SizeClassConfig
const SizeClassConfig kSizeClassConfig;

// size class for a block of size sz; returns -1 for size=0 because empty allocations take no space
#define sizeclass(sz) (size_t((sz)-1) < kMaxSmallSize ? kSizeClassConfig.classForSize[sz] : -1)
#define sizeclass(sz) \
(size_t((sz)-1) < (FFlag::LuauExtendedSizeClasses ? kMaxSmallSize : kMaxSmallSize_DEPRECATED) ? kSizeClassConfig.classForSize[sz] : -1)

// metadata for a block is stored in the first pointer of the block
#define metadata(block) (*(void**)(block))
@@ -247,16 +262,34 @@ static lua_Page* newpage(lua_State* L, lua_Page** gcopageset, int pageSize, int

static lua_Page* newclasspage(lua_State* L, lua_Page** freepageset, lua_Page** gcopageset, uint8_t sizeClass, bool storeMetadata)
{
int blockSize = kSizeClassConfig.sizeOfClass[sizeClass] + (storeMetadata ? kBlockHeader : 0);
int blockCount = (kPageSize - offsetof(lua_Page, data)) / blockSize;
if (FFlag::LuauExtendedSizeClasses)
{
int sizeOfClass = kSizeClassConfig.sizeOfClass[sizeClass];
int pageSize = sizeOfClass > int(kLargePageThreshold) ? kLargePageSize : kSmallPageSize;
int blockSize = sizeOfClass + (storeMetadata ? kBlockHeader : 0);
int blockCount = (pageSize - offsetof(lua_Page, data)) / blockSize;

lua_Page* page = newpage(L, gcopageset, kPageSize, blockSize, blockCount);
lua_Page* page = newpage(L, gcopageset, pageSize, blockSize, blockCount);

// prepend a page to page freelist (which is empty because we only ever allocate a new page when it is!)
LUAU_ASSERT(!freepageset[sizeClass]);
freepageset[sizeClass] = page;
// prepend a page to page freelist (which is empty because we only ever allocate a new page when it is!)
LUAU_ASSERT(!freepageset[sizeClass]);
freepageset[sizeClass] = page;

return page;
return page;
}
else
{
int blockSize = kSizeClassConfig.sizeOfClass[sizeClass] + (storeMetadata ? kBlockHeader : 0);
int blockCount = (kSmallPageSize - offsetof(lua_Page, data)) / blockSize;

lua_Page* page = newpage(L, gcopageset, kSmallPageSize, blockSize, blockCount);

// prepend a page to page freelist (which is empty because we only ever allocate a new page when it is!)
LUAU_ASSERT(!freepageset[sizeClass]);
freepageset[sizeClass] = page;

return page;
}
}

static void freepage(lua_State* L, lua_Page** gcopageset, lua_Page* page)
@@ -11,8 +11,6 @@
#include <intrin.h>
#endif

LUAU_FASTFLAGVARIABLE(LuauSciNumberSkipTrailDot, false)

// This work is based on:
// Raffaello Giulietti. The Schubfach way to render doubles. 2021
// https://drive.google.com/file/d/1IEeATSVnEE6TkrHlCYNY2GjaraBjOT4f/edit
@@ -363,7 +361,7 @@ char* luai_num2str(char* buf, double n)

char* exp = trimzero(buf + declen + 1);

if (FFlag::LuauSciNumberSkipTrailDot && exp[-1] == '.')
if (exp[-1] == '.')
exp--;

return printexp(exp, dot - 1);
@@ -48,7 +48,7 @@ int luaO_rawequalObj(const TValue* t1, const TValue* t2)
case LUA_TBOOLEAN:
return bvalue(t1) == bvalue(t2); // boolean true must be 1 !!
case LUA_TLIGHTUSERDATA:
return pvalue(t1) == pvalue(t2) && (!FFlag::LuauTaggedLuData || lightuserdatatag(t1) == lightuserdatatag(t2));
return pvalue(t1) == pvalue(t2) && lightuserdatatag(t1) == lightuserdatatag(t2);
default:
LUAU_ASSERT(iscollectable(t1));
return gcvalue(t1) == gcvalue(t2);
@@ -71,7 +71,7 @@ int luaO_rawequalKey(const TKey* t1, const TValue* t2)
case LUA_TBOOLEAN:
return bvalue(t1) == bvalue(t2); // boolean true must be 1 !!
case LUA_TLIGHTUSERDATA:
return pvalue(t1) == pvalue(t2) && (!FFlag::LuauTaggedLuData || lightuserdatatag(t1) == lightuserdatatag(t2));
return pvalue(t1) == pvalue(t2) && lightuserdatatag(t1) == lightuserdatatag(t2);
default:
LUAU_ASSERT(iscollectable(t1));
return gcvalue(t1) == gcvalue(t2);
@@ -5,8 +5,6 @@
#include "lua.h"
#include "lcommon.h"

LUAU_FASTFLAG(LuauTaggedLuData)

/*
** Union of all collectible objects
*/
@@ -8,8 +8,6 @@
#include <string.h>
#include <stdio.h>

LUAU_DYNAMIC_FASTFLAGVARIABLE(LuauInterruptablePatternMatch, false)

// macro to `unsign' a character
#define uchar(c) ((unsigned char)(c))

@@ -432,18 +430,15 @@ static const char* match(MatchState* ms, const char* s, const char* p)
if (ms->matchdepth-- == 0)
luaL_error(ms->L, "pattern too complex");

if (DFFlag::LuauInterruptablePatternMatch)
{
lua_State* L = ms->L;
void (*interrupt)(lua_State*, int) = L->global->cb.interrupt;
lua_State* L = ms->L;
void (*interrupt)(lua_State*, int) = L->global->cb.interrupt;

if (LUAU_UNLIKELY(!!interrupt))
{
// this interrupt is not yieldable
L->nCcalls++;
interrupt(L, -1);
L->nCcalls--;
}
if (LUAU_UNLIKELY(!!interrupt))
{
// this interrupt is not yieldable
L->nCcalls++;
interrupt(L, -1);
L->nCcalls--;
}

init: // using goto's to optimize tail recursion
@@ -129,7 +129,7 @@ const TString* luaT_objtypenamestr(lua_State* L, const TValue* o)
if (ttisstring(type))
return tsvalue(type);
}
else if (FFlag::LuauTaggedLuData && ttislightuserdata(o))
else if (ttislightuserdata(o))
{
int tag = lightuserdatatag(o);

@@ -133,8 +133,6 @@
// Does VM support native execution via ExecutionCallbacks? We mostly assume it does but keep the define to make it easy to quantify the cost.
#define VM_HAS_NATIVE 1

LUAU_FASTFLAGVARIABLE(LuauTaggedLuData, false)

LUAU_NOINLINE void luau_callhook(lua_State* L, lua_Hook hook, void* userdata)
{
ptrdiff_t base = savestack(L, L->base);
@@ -1110,9 +1108,7 @@ reentry:
VM_NEXT();

case LUA_TLIGHTUSERDATA:
pc += (pvalue(ra) == pvalue(rb) && (!FFlag::LuauTaggedLuData || lightuserdatatag(ra) == lightuserdatatag(rb)))
? LUAU_INSN_D(insn)
: 1;
pc += (pvalue(ra) == pvalue(rb) && lightuserdatatag(ra) == lightuserdatatag(rb)) ? LUAU_INSN_D(insn) : 1;
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
VM_NEXT();

@@ -1227,9 +1223,7 @@ reentry:
VM_NEXT();

case LUA_TLIGHTUSERDATA:
pc += (pvalue(ra) != pvalue(rb) || (FFlag::LuauTaggedLuData && lightuserdatatag(ra) != lightuserdatatag(rb)))
? LUAU_INSN_D(insn)
: 1;
pc += (pvalue(ra) != pvalue(rb) || lightuserdatatag(ra) != lightuserdatatag(rb)) ? LUAU_INSN_D(insn) : 1;
LUAU_ASSERT(unsigned(pc - cl->l.p->code) < unsigned(cl->l.p->sizecode));
VM_NEXT();

@@ -288,7 +288,7 @@ int luaV_equalval(lua_State* L, const TValue* t1, const TValue* t2)
case LUA_TBOOLEAN:
return bvalue(t1) == bvalue(t2); // true must be 1 !!
case LUA_TLIGHTUSERDATA:
return pvalue(t1) == pvalue(t2) && (!FFlag::LuauTaggedLuData || lightuserdatatag(t1) == lightuserdatatag(t2));
return pvalue(t1) == pvalue(t2) && lightuserdatatag(t1) == lightuserdatatag(t2);
case LUA_TUSERDATA:
{
tm = get_compTM(L, uvalue(t1)->metatable, uvalue(t2)->metatable, TM_EQ);
bench/other/boatbomber-HashLib/Base64.lua (new file, 130 lines)
@ -0,0 +1,130 @@
-- @original: https://gist.github.com/Reselim/40d62b17d138cc74335a1b0709e19ce2
local Alphabet = {}
local Indexes = {}

-- A-Z
for Index = 65, 90 do
table.insert(Alphabet, Index)
end

-- a-z
for Index = 97, 122 do
table.insert(Alphabet, Index)
end

-- 0-9
for Index = 48, 57 do
table.insert(Alphabet, Index)
end

table.insert(Alphabet, 43) -- +
table.insert(Alphabet, 47) -- /

for Index, Character in ipairs(Alphabet) do
Indexes[Character] = Index
end

local Base64 = {}

local bit32_rshift = bit32.rshift
local bit32_lshift = bit32.lshift
local bit32_band = bit32.band

--[[**
Encodes a string in Base64.
@param [t:string] Input The input string to encode.
@returns [t:string] The string encoded in Base64.
**--]]
function Base64.Encode(Input)
local Output = {}
local Length = 0

for Index = 1, #Input, 3 do
local C1, C2, C3 = string.byte(Input, Index, Index + 2)

local A = bit32_rshift(C1, 2)
local B = bit32_lshift(bit32_band(C1, 3), 4) + bit32_rshift(C2 or 0, 4)
local C = bit32_lshift(bit32_band(C2 or 0, 15), 2) + bit32_rshift(C3 or 0, 6)
local D = bit32_band(C3 or 0, 63)

Length = Length + 1
Output[Length] = Alphabet[A + 1]

Length = Length + 1
Output[Length] = Alphabet[B + 1]

Length = Length + 1
Output[Length] = C2 and Alphabet[C + 1] or 61

Length = Length + 1
Output[Length] = C3 and Alphabet[D + 1] or 61
end

local NewOutput = {}
local NewLength = 0
local IndexAdd4096Sub1

for Index = 1, Length, 4096 do
NewLength = NewLength + 1
IndexAdd4096Sub1 = Index + 4096 - 1

NewOutput[NewLength] = string.char(
table.unpack(Output, Index, IndexAdd4096Sub1 > Length and Length or IndexAdd4096Sub1)
)
end

return table.concat(NewOutput)
end

--[[**
Decodes a string from Base64.
@param [t:string] Input The input string to decode.
@returns [t:string] The newly decoded string.
**--]]
function Base64.Decode(Input)
local Output = {}
local Length = 0

for Index = 1, #Input, 4 do
local C1, C2, C3, C4 = string.byte(Input, Index, Index + 3)

local I1 = Indexes[C1] - 1
local I2 = Indexes[C2] - 1
local I3 = (Indexes[C3] or 1) - 1
local I4 = (Indexes[C4] or 1) - 1

local A = bit32_lshift(I1, 2) + bit32_rshift(I2, 4)
local B = bit32_lshift(bit32_band(I2, 15), 4) + bit32_rshift(I3, 2)
local C = bit32_lshift(bit32_band(I3, 3), 6) + I4

Length = Length + 1
Output[Length] = A

if C3 ~= 61 then
Length = Length + 1
Output[Length] = B
end

if C4 ~= 61 then
Length = Length + 1
Output[Length] = C
end
end

local NewOutput = {}
local NewLength = 0
local IndexAdd4096Sub1

for Index = 1, Length, 4096 do
NewLength = NewLength + 1
IndexAdd4096Sub1 = Index + 4096 - 1

NewOutput[NewLength] = string.char(
table.unpack(Output, Index, IndexAdd4096Sub1 > Length and Length or IndexAdd4096Sub1)
)
end

return table.concat(NewOutput)
end

return Base64
bench/other/boatbomber-HashLib/HashLib.spec.lua (new file, 39 lines)
@ -0,0 +1,39 @@
local function describe(phrase, callback) end
local function it(phrase, callback) end
local function expect(value) end

return function()
local HashLib = require(script.Parent)
local sha256 = HashLib.sha256

describe("HashLib.sha256", function()
it("should properly encode strings", function()
expect(sha256("abc").to.equal("ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad"))
expect(
sha256("The quick brown fox jumps over the lazy dog").to.equal(
"d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592"
)
)
expect(sha256("123456").to.equal("8d969eef6ecad3c29a3a629280e686cf0c3f5d5a86aff3ca12020c923adc6c92"))
end)

it("should create a private closure that works", function()
local AppendNextChunk = sha256()
AppendNextChunk("The quick brown fox")
AppendNextChunk(" jumps ")
AppendNextChunk("") -- chunk may be an empty string
AppendNextChunk("over the lazy dog")
expect(AppendNextChunk()).to.equal("d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592")
end)

it("should allow the private closure to work if called twice", function()
local AppendNextChunk = sha256()
AppendNextChunk("The quick brown fox")
AppendNextChunk(" jumps ")
AppendNextChunk("") -- chunk may be an empty string
AppendNextChunk("over the lazy dog")
AppendNextChunk()
expect(AppendNextChunk()).to.equal("d7a8fbb307d7809469ca9abcb0082e4f8d5651e46d3cdb762d02d0bf37c9e592")
end)
end)
end
bench/other/boatbomber-HashLib/LICENSE (new file, 21 lines)
@ -0,0 +1,21 @@
MIT License

Copyright (c) 2021 boatbomber

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
bench/other/boatbomber-HashLib/init.lua (new file, 1555 lines; diff suppressed because it is too large)
@ -7,8 +7,6 @@

#include <string.h>

LUAU_FASTFLAG(LuauCache32BitAsmConsts)

using namespace Luau::CodeGen;
using namespace Luau::CodeGen::X64;

@ -748,7 +746,6 @@ TEST_CASE("ConstantStorage")

TEST_CASE("ConstantStorageDedup")
{
ScopedFastFlag luauCache32BitAsmConsts{FFlag::LuauCache32BitAsmConsts, true};
AssemblyBuilderX64 build(/* logText= */ false);

for (int i = 0; i <= 3000; i++)
@ -18,10 +18,18 @@ ClassFixture::ClassFixture()

unfreeze(arena);

TypeId connectionType = arena.addType(ClassType{"Connection", {}, nullopt, nullopt, {}, {}, "Connection"});

TypeId baseClassInstanceType = arena.addType(ClassType{"BaseClass", {}, nullopt, nullopt, {}, {}, "Test"});
getMutable<ClassType>(baseClassInstanceType)->props = {
{"BaseMethod", {makeFunction(arena, baseClassInstanceType, {numberType}, {})}},
{"BaseField", {numberType}},

{"Touched", {connectionType}},
};

getMutable<ClassType>(connectionType)->props = {
{"Connect", {makeFunction(arena, connectionType, {makeFunction(arena, nullopt, {baseClassInstanceType}, {})}, {})}}
};

TypeId baseClassType = arena.addType(ClassType{"BaseClass", {}, nullopt, nullopt, {}, {}, "Test"});
@ -26,11 +26,7 @@ extern bool verbose;
extern bool codegen;
extern int optimizationLevel;

LUAU_FASTFLAG(LuauTaggedLuData)
LUAU_FASTFLAG(LuauSciNumberSkipTrailDot)
LUAU_DYNAMIC_FASTFLAG(LuauInterruptablePatternMatch)
LUAU_FASTINT(CodegenHeuristicsInstructionLimit)
LUAU_DYNAMIC_FASTFLAG(LuauCodeGenFixBufferLenCheckA64)
LUAU_DYNAMIC_FASTFLAG(LuauCodegenTrackingMultilocationFix)

static lua_CompileOptions defaultOptions()

@ -1459,8 +1455,6 @@ TEST_CASE("Coverage")

TEST_CASE("StringConversion")
{
ScopedFastFlag luauSciNumberSkipTrailDot{FFlag::LuauSciNumberSkipTrailDot, true};

runConformance("strconv.lua");
}

@ -1654,8 +1648,6 @@ TEST_CASE("Interrupt")
}
};

ScopedFastFlag luauInterruptablePatternMatch{DFFlag::LuauInterruptablePatternMatch, true};

for (int test = 1; test <= 5; ++test)
{
lua_State* T = lua_newthread(L);

@ -1764,8 +1756,6 @@ TEST_CASE("UserdataApi")

TEST_CASE("LightuserdataApi")
{
ScopedFastFlag luauTaggedLuData{FFlag::LuauTaggedLuData, true};

StateRef globalState(luaL_newstate(), lua_close);
lua_State* L = globalState.get();

@ -2040,7 +2030,6 @@ TEST_CASE("SafeEnv")

TEST_CASE("Native")
{
ScopedFastFlag luauCodeGenFixBufferLenCheckA64{DFFlag::LuauCodeGenFixBufferLenCheckA64, true};
ScopedFastFlag luauCodegenTrackingMultilocationFix{DFFlag::LuauCodegenTrackingMultilocationFix, true};

// This tests requires code to run natively, otherwise all 'is_native' checks will fail
@ -117,7 +117,17 @@ std::optional<ModuleInfo> TestFileResolver::resolveModule(const ModuleInfo* cont

std::string TestFileResolver::getHumanReadableModuleName(const ModuleName& name) const
{
return name;
// We have a handful of tests that need to distinguish between a canonical
// ModuleName and the human-readable version so we apply a simple transform
// here: We replace all slashes with dots.
std::string result = name;
for (size_t i = 0; i < result.size(); ++i)
{
if (result[i] == '/')
result[i] = '.';
}

return result;
}

std::optional<std::string> TestFileResolver::getEnvironmentForModule(const ModuleName& name) const
@ -14,6 +14,7 @@ using namespace Luau;

LUAU_FASTFLAG(DebugLuauDeferredConstraintResolution)
LUAU_FASTFLAG(DebugLuauFreezeArena);
LUAU_FASTFLAG(DebugLuauMagicTypes);

namespace
{

@ -1273,4 +1274,63 @@ TEST_CASE_FIXTURE(FrontendFixture, "markdirty_early_return")
}
}

TEST_CASE_FIXTURE(FrontendFixture, "attribute_ices_to_the_correct_module")
{
ScopedFastFlag sff{FFlag::DebugLuauMagicTypes, true};

fileResolver.source["game/one"] = R"(
require(game.two)
)";

fileResolver.source["game/two"] = R"(
local a: _luau_ice
)";

try
{
frontend.check("game/one");
}
catch (InternalCompilerError& err)
{
CHECK("game/two" == err.moduleName);
return;
}

FAIL("Expected an InternalCompilerError!");
}

TEST_CASE_FIXTURE(FrontendFixture, "checked_modules_have_the_correct_mode")
{
fileResolver.source["game/A"] = R"(
--!nocheck
local a: number = "five"
)";

fileResolver.source["game/B"] = R"(
--!nonstrict
local a = math.abs("five")
)";

fileResolver.source["game/C"] = R"(
--!strict
local a = 10
)";

frontend.check("game/A");
frontend.check("game/B");
frontend.check("game/C");

ModulePtr moduleA = frontend.moduleResolver.getModule("game/A");
REQUIRE(moduleA);
CHECK(moduleA->mode == Mode::NoCheck);

ModulePtr moduleB = frontend.moduleResolver.getModule("game/B");
REQUIRE(moduleB);
CHECK(moduleB->mode == Mode::Nonstrict);

ModulePtr moduleC = frontend.moduleResolver.getModule("game/C");
REQUIRE(moduleC);
CHECK(moduleC->mode == Mode::Strict);
}

TEST_SUITE_END();
@ -11,9 +11,9 @@

#include <limits.h>

using namespace Luau::CodeGen;
LUAU_FASTFLAG(LuauCodegenVectorTag2)

LUAU_DYNAMIC_FASTFLAG(LuauCodeGenCheckGcEffectFix)
using namespace Luau::CodeGen;

class IrBuilderFixture
{

@ -2060,8 +2060,6 @@ bb_fallback_1:

TEST_CASE_FIXTURE(IrBuilderFixture, "DuplicateHashSlotChecksInvalidation")
{
ScopedFastFlag luauCodeGenCheckGcEffectFix{DFFlag::LuauCodeGenCheckGcEffectFix, true};

IrOp block = build.block(IrBlockKind::Internal);
IrOp fallback = build.block(IrBlockKind::Fallback);

@ -2498,6 +2496,85 @@ bb_fallback_1:
)");
}

TEST_CASE_FIXTURE(IrBuilderFixture, "TagVectorSkipErrorFix")
{
ScopedFastFlag luauCodegenVectorTag2{FFlag::LuauCodegenVectorTag2, true};

IrOp block = build.block(IrBlockKind::Internal);

build.beginBlock(block);

IrOp a = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(0));
IrOp b = build.inst(IrCmd::LOAD_TVALUE, build.vmReg(1));

IrOp mul = build.inst(IrCmd::TAG_VECTOR, build.inst(IrCmd::MUL_VEC, a, b));

IrOp t1 = build.inst(IrCmd::TAG_VECTOR, build.inst(IrCmd::ADD_VEC, mul, mul));
IrOp t2 = build.inst(IrCmd::TAG_VECTOR, build.inst(IrCmd::SUB_VEC, mul, mul));

IrOp t3 = build.inst(IrCmd::TAG_VECTOR, build.inst(IrCmd::DIV_VEC, t1, build.inst(IrCmd::UNM_VEC, t2)));

build.inst(IrCmd::STORE_TVALUE, build.vmReg(0), t3);
build.inst(IrCmd::RETURN, build.vmReg(0), build.constUint(1));

updateUseCounts(build.function);
constPropInBlockChains(build, true);

CHECK("\n" + toString(build.function, IncludeUseInfo::Yes) == R"(
bb_0: ; useCount: 0
%0 = LOAD_TVALUE R0 ; useCount: 1, lastUse: %0
%1 = LOAD_TVALUE R1 ; useCount: 1, lastUse: %0
%2 = MUL_VEC %0, %1 ; useCount: 4, lastUse: %0
%4 = ADD_VEC %2, %2 ; useCount: 1, lastUse: %0
%6 = SUB_VEC %2, %2 ; useCount: 1, lastUse: %0
%8 = UNM_VEC %6 ; useCount: 1, lastUse: %0
%9 = DIV_VEC %4, %8 ; useCount: 1, lastUse: %0
%10 = TAG_VECTOR %9 ; useCount: 1, lastUse: %0
STORE_TVALUE R0, %10 ; %11
RETURN R0, 1u ; %12

)");
}

TEST_CASE_FIXTURE(IrBuilderFixture, "ForgprepInvalidation")
{
IrOp block = build.block(IrBlockKind::Internal);
IrOp followup = build.block(IrBlockKind::Internal);

build.beginBlock(block);

IrOp tbl = build.inst(IrCmd::LOAD_POINTER, build.vmReg(0));
build.inst(IrCmd::CHECK_READONLY, tbl, build.vmExit(1));

build.inst(IrCmd::FALLBACK_FORGPREP, build.constUint(2), build.vmReg(1), followup);

build.beginBlock(followup);
build.inst(IrCmd::CHECK_READONLY, tbl, build.vmExit(2));

build.inst(IrCmd::RETURN, build.vmReg(1), build.constInt(3));

updateUseCounts(build.function);
computeCfgInfo(build.function);
constPropInBlockChains(build, true);

CHECK("\n" + toString(build.function, IncludeUseInfo::No) == R"(
bb_0:
; successors: bb_1
; in regs: R0, R1
; out regs: R1, R2, R3
%0 = LOAD_POINTER R0
CHECK_READONLY %0, exit(1)
FALLBACK_FORGPREP 2u, R1, bb_1

bb_1:
; predecessors: bb_0
; in regs: R1, R2, R3
CHECK_READONLY %0, exit(2)
RETURN R1, 3i

)");
}

TEST_SUITE_END();

TEST_SUITE_BEGIN("Analysis");
@ -12,9 +12,7 @@

#include <memory>

LUAU_FASTFLAG(LuauCodegenVector)
LUAU_FASTFLAG(LuauCodegenVectorTag)
LUAU_FASTFLAG(LuauCodegenMathMemArgs)
LUAU_FASTFLAG(LuauCodegenVectorTag2)

static std::string getCodegenAssembly(const char* source)
{

@ -65,8 +63,7 @@ TEST_SUITE_BEGIN("IrLowering");

TEST_CASE("VectorReciprocal")
{
ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};
ScopedFastFlag luauCodegenVectorTag{FFlag::LuauCodegenVectorTag, true};
ScopedFastFlag luauCodegenVectorTag2{FFlag::LuauCodegenVectorTag2, true};

CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vecrcp(a: vector)

@ -93,8 +90,6 @@ bb_bytecode_1:

TEST_CASE("VectorComponentRead")
{
ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};

CHECK_EQ("\n" + getCodegenAssembly(R"(
local function compsum(a: vector)
return a.X + a.Y + a.Z

@ -129,8 +124,7 @@ bb_bytecode_1:

TEST_CASE("VectorAdd")
{
ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};
ScopedFastFlag luauCodegenVectorTag{FFlag::LuauCodegenVectorTag, true};
ScopedFastFlag luauCodegenVectorTag2{FFlag::LuauCodegenVectorTag2, true};

CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vec3add(a: vector, b: vector)

@ -158,8 +152,7 @@ bb_bytecode_1:

TEST_CASE("VectorMinus")
{
ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};
ScopedFastFlag luauCodegenVectorTag{FFlag::LuauCodegenVectorTag, true};
ScopedFastFlag luauCodegenVectorTag2{FFlag::LuauCodegenVectorTag2, true};

CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vec3minus(a: vector)

@ -185,8 +178,7 @@ bb_bytecode_1:

TEST_CASE("VectorSubMulDiv")
{
ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};
ScopedFastFlag luauCodegenVectorTag{FFlag::LuauCodegenVectorTag, true};
ScopedFastFlag luauCodegenVectorTag2{FFlag::LuauCodegenVectorTag2, true};

CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vec3combo(a: vector, b: vector, c: vector, d: vector)

@ -222,10 +214,45 @@ bb_bytecode_1:
)");
}

TEST_CASE("VectorSubMulDiv2")
{
ScopedFastFlag luauCodegenVectorTag2{FFlag::LuauCodegenVectorTag2, true};

CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vec3combo(a: vector)
local tmp = a * a
return (tmp - tmp) / (tmp + tmp)
end
)"),
R"(
; function vec3combo($arg0) line 2
bb_0:
CHECK_TAG R0, tvector, exit(entry)
JUMP bb_2
bb_2:
JUMP bb_bytecode_1
bb_bytecode_1:
%8 = LOAD_TVALUE R0
%10 = MUL_VEC %8, %8
%11 = TAG_VECTOR %10
STORE_TVALUE R1, %11
%19 = SUB_VEC %10, %10
%20 = TAG_VECTOR %19
STORE_TVALUE R3, %20
%28 = ADD_VEC %10, %10
%29 = TAG_VECTOR %28
STORE_TVALUE R4, %29
%37 = DIV_VEC %19, %28
%38 = TAG_VECTOR %37
STORE_TVALUE R2, %38
INTERRUPT 4u
RETURN R2, 1i
)");
}

TEST_CASE("VectorMulDivMixed")
{
ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};
ScopedFastFlag luauCodegenVectorTag{FFlag::LuauCodegenVectorTag, true};
ScopedFastFlag luauCodegenVectorTag2{FFlag::LuauCodegenVectorTag2, true};

CHECK_EQ("\n" + getCodegenAssembly(R"(
local function vec3combo(a: vector, b: vector, c: vector, d: vector)

@ -281,8 +308,6 @@ bb_bytecode_1:

TEST_CASE("ExtraMathMemoryOperands")
{
ScopedFastFlag luauCodegenMathMemArgs{FFlag::LuauCodegenMathMemArgs, true};

CHECK_EQ("\n" + getCodegenAssembly(R"(
local function foo(a: number, b: number, c: number, d: number, e: number)
return math.floor(a) + math.ceil(b) + math.round(c) + math.sqrt(d) + math.abs(e)
@ -130,16 +130,17 @@ TEST_CASE_FIXTURE(SimplifyFixture, "overload_negation_refinement_is_never")

TEST_CASE_FIXTURE(SimplifyFixture, "unknown_and_other_tops_and_bottom_types")
{
CHECK(unknownTy == intersect(unknownTy, unknownTy));

CHECK(unknownTy == intersect(unknownTy, anyTy));
CHECK(unknownTy == intersect(anyTy, unknownTy));
CHECK("*error-type* | unknown" == intersectStr(unknownTy, anyTy));
CHECK("*error-type* | unknown" == intersectStr(anyTy, unknownTy));

CHECK(neverTy == intersect(unknownTy, neverTy));
CHECK(neverTy == intersect(neverTy, unknownTy));

CHECK(neverTy == intersect(unknownTy, errorTy));
CHECK(neverTy == intersect(errorTy, unknownTy));
CHECK(errorTy == intersect(unknownTy, errorTy));
CHECK(errorTy == intersect(errorTy, unknownTy));
}

TEST_CASE_FIXTURE(SimplifyFixture, "nil")

@ -179,17 +180,37 @@ TEST_CASE_FIXTURE(SimplifyFixture, "boolean_and_truthy_and_falsy")

TEST_CASE_FIXTURE(SimplifyFixture, "any_and_indeterminate_types")
{
CHECK("'a" == intersectStr(anyTy, freeTy));
CHECK("'a" == intersectStr(freeTy, anyTy));
CHECK("'a | *error-type*" == intersectStr(anyTy, freeTy));
CHECK("'a | *error-type*" == intersectStr(freeTy, anyTy));

CHECK("b" == intersectStr(anyTy, genericTy));
CHECK("b" == intersectStr(genericTy, anyTy));
CHECK("*error-type* | b" == intersectStr(anyTy, genericTy));
CHECK("*error-type* | b" == intersectStr(genericTy, anyTy));

CHECK(blockedTy == intersect(anyTy, blockedTy));
CHECK(blockedTy == intersect(blockedTy, anyTy));
auto anyRhsBlocked = get<UnionType>(intersect(anyTy, blockedTy));
auto anyLhsBlocked = get<UnionType>(intersect(blockedTy, anyTy));

CHECK(pendingTy == intersect(anyTy, pendingTy));
CHECK(pendingTy == intersect(pendingTy, anyTy));
REQUIRE(anyRhsBlocked);
REQUIRE(anyRhsBlocked->options.size() == 2);
CHECK(blockedTy == anyRhsBlocked->options[0]);
CHECK(errorTy == anyRhsBlocked->options[1]);

REQUIRE(anyLhsBlocked);
REQUIRE(anyLhsBlocked->options.size() == 2);
CHECK(blockedTy == anyLhsBlocked->options[0]);
CHECK(errorTy == anyLhsBlocked->options[1]);

auto anyRhsPending = get<UnionType>(intersect(anyTy, pendingTy));
auto anyLhsPending = get<UnionType>(intersect(pendingTy, anyTy));

REQUIRE(anyRhsPending);
REQUIRE(anyRhsPending->options.size() == 2);
CHECK(pendingTy == anyRhsPending->options[0]);
CHECK(errorTy == anyRhsPending->options[1]);

REQUIRE(anyLhsPending);
REQUIRE(anyLhsPending->options.size() == 2);
CHECK(pendingTy == anyLhsPending->options[0]);
CHECK(errorTy == anyLhsPending->options[1]);
}

TEST_CASE_FIXTURE(SimplifyFixture, "unknown_and_indeterminate_types")

@ -197,22 +218,14 @@ TEST_CASE_FIXTURE(SimplifyFixture, "unknown_and_indeterminate_types")
CHECK(freeTy == intersect(unknownTy, freeTy));
CHECK(freeTy == intersect(freeTy, unknownTy));

TypeId t = nullptr;
CHECK(genericTy == intersect(unknownTy, genericTy));
CHECK(genericTy == intersect(genericTy, unknownTy));

t = intersect(unknownTy, genericTy);
CHECK_MESSAGE(isIntersection(t), "Should be an intersection but got " << t);
t = intersect(genericTy, unknownTy);
CHECK_MESSAGE(isIntersection(t), "Should be an intersection but got " << t);
CHECK(blockedTy == intersect(unknownTy, blockedTy));
CHECK(blockedTy == intersect(unknownTy, blockedTy));

t = intersect(unknownTy, blockedTy);
CHECK_MESSAGE(isIntersection(t), "Should be an intersection but got " << t);
t = intersect(blockedTy, unknownTy);
CHECK_MESSAGE(isIntersection(t), "Should be an intersection but got " << t);

t = intersect(unknownTy, pendingTy);
CHECK_MESSAGE(isIntersection(t), "Should be an intersection but got " << t);
t = intersect(pendingTy, unknownTy);
CHECK_MESSAGE(isIntersection(t), "Should be an intersection but got " << t);
CHECK(pendingTy == intersect(unknownTy, pendingTy));
CHECK(pendingTy == intersect(unknownTy, pendingTy));
}

TEST_CASE_FIXTURE(SimplifyFixture, "unknown_and_concrete")

@ -274,8 +287,8 @@ TEST_CASE_FIXTURE(SimplifyFixture, "primitives")
CHECK(neverTy == intersect(neverTy, tableTy));
CHECK(neverTy == intersect(tableTy, neverTy));

CHECK(numberTy == intersect(anyTy, numberTy));
CHECK(numberTy == intersect(numberTy, anyTy));
CHECK("*error-type* | number" == intersectStr(anyTy, numberTy));
CHECK("*error-type* | number" == intersectStr(numberTy, anyTy));

CHECK(neverTy == intersect(stringTy, nilTy));
CHECK(neverTy == intersect(nilTy, stringTy));

@ -504,7 +517,15 @@ TEST_CASE_FIXTURE(SimplifyFixture, "some_tables_are_really_never")

CHECK(neverTy == intersect(t1, numberTy));
CHECK(neverTy == intersect(numberTy, t1));
CHECK(neverTy == intersect(t1, t1));
CHECK(t1 == intersect(t1, t1));

TypeId notUnknownTy = mkNegation(unknownTy);

TypeId t2 = mkTable({{"someKey", notUnknownTy}});

CHECK(neverTy == intersect(t2, numberTy));
CHECK(neverTy == intersect(numberTy, t2));
CHECK(neverTy == intersect(t2, t2));
}

TEST_CASE_FIXTURE(SimplifyFixture, "simplify_stops_at_cycles")

@ -520,20 +541,26 @@ TEST_CASE_FIXTURE(SimplifyFixture, "simplify_stops_at_cycles")
tt->props["cyclic"] = Property{t2};
t2t->props["cyclic"] = Property{t};

CHECK(t == intersect(t, anyTy));
CHECK(t == intersect(anyTy, t));
CHECK(t == intersect(t, unknownTy));
CHECK(t == intersect(unknownTy, t));

CHECK(t2 == intersect(t2, anyTy));
CHECK(t2 == intersect(anyTy, t2));
CHECK(t2 == intersect(t2, unknownTy));
CHECK(t2 == intersect(unknownTy, t2));

CHECK("*error-type* | t1 where t1 = { cyclic: { cyclic: t1 } }" == intersectStr(t, anyTy));
CHECK("*error-type* | t1 where t1 = { cyclic: { cyclic: t1 } }" == intersectStr(anyTy, t));

CHECK("*error-type* | t1 where t1 = { cyclic: { cyclic: t1 } }" == intersectStr(t2, anyTy));
CHECK("*error-type* | t1 where t1 = { cyclic: { cyclic: t1 } }" == intersectStr(anyTy, t2));
}

TEST_CASE_FIXTURE(SimplifyFixture, "free_type_bound_by_any_with_any")
{
CHECK(freeTy == intersect(freeTy, anyTy));
CHECK(freeTy == intersect(anyTy, freeTy));
CHECK("'a | *error-type*" == intersectStr(freeTy, anyTy));
CHECK("'a | *error-type*" == intersectStr(anyTy, freeTy));

CHECK(freeTy == intersect(freeTy, anyTy));
CHECK(freeTy == intersect(anyTy, freeTy));
CHECK("'a | *error-type*" == intersectStr(freeTy, anyTy));
CHECK("'a | *error-type*" == intersectStr(anyTy, freeTy));
}

TEST_SUITE_END();
@ -509,7 +509,7 @@ TEST_CASE_FIXTURE(ClassFixture, "keyof_type_family_works_on_classes")
CheckResult result = check(R"(
type KeysOfMyObject = keyof<BaseClass>

local function ok(idx: KeysOfMyObject): "BaseMethod" | "BaseField" return idx end
local function ok(idx: KeysOfMyObject): "BaseMethod" | "BaseField" | "Touched" return idx end
local function err(idx: KeysOfMyObject): "BaseMethod" return idx end
)");

@ -518,7 +518,7 @@ TEST_CASE_FIXTURE(ClassFixture, "keyof_type_family_works_on_classes")
TypePackMismatch* tpm = get<TypePackMismatch>(result.errors[0]);
REQUIRE(tpm);
CHECK_EQ("\"BaseMethod\"", toString(tpm->wantedTp));
CHECK_EQ("\"BaseField\" | \"BaseMethod\"", toString(tpm->givenTp));
CHECK_EQ("\"BaseField\" | \"BaseMethod\" | \"Touched\"", toString(tpm->givenTp));
}

TEST_CASE_FIXTURE(ClassFixture, "keyof_type_family_errors_if_it_has_nonclass_part")
@ -32,7 +32,15 @@ TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_returns_any")

LUAU_REQUIRE_NO_ERRORS(result);

CHECK_EQ(builtinTypes->anyType, requireType("a"));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
// Bug: We do not simplify at the right time
CHECK_EQ("any?", toString(requireType("a")));
}
else
{
CHECK_EQ(builtinTypes->anyType, requireType("a"));
}
}

TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_returns_any2")

@ -64,7 +72,7 @@ TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_returns_any2")

TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_is_any")
{
CheckResult result = check(R"(
local bar: any
local bar = nil :: any

local a
for b in bar do

@ -74,13 +82,21 @@ TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_is_any")

LUAU_REQUIRE_NO_ERRORS(result);

CHECK_EQ("any", toString(requireType("a")));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
// Bug: We do not simplify at the right time
CHECK_EQ("any?", toString(requireType("a")));
}
else
{
CHECK_EQ("any", toString(requireType("a")));
}
}

TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_is_any2")
{
CheckResult result = check(R"(
local bar: any
local bar = nil :: any

local a
for b in bar() do

@ -90,7 +106,39 @@ TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_is_any2")

LUAU_REQUIRE_NO_ERRORS(result);

CHECK_EQ("any", toString(requireType("a")));
if (FFlag::DebugLuauDeferredConstraintResolution)
{
// Bug: We do not simplify at the right time
CHECK_EQ("any?", toString(requireType("a")));
}
else
{
CHECK_EQ("any", toString(requireType("a")));
}
}

TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_is_any_pack")
{
CheckResult result = check(R"(
function bar(): ...any end

local a
for b in bar() do
a = b
end
)");

LUAU_REQUIRE_NO_ERRORS(result);

if (FFlag::DebugLuauDeferredConstraintResolution)
{
// Bug: We do not simplify at the right time
CHECK_EQ("any?", toString(requireType("a")));
}
else
{
CHECK_EQ("any", toString(requireType("a")));
}
}

TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_is_error")

@ -104,7 +152,16 @@ TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_is_error")

LUAU_REQUIRE_ERROR_COUNT(1, result);

CHECK_EQ("*error-type*", toString(requireType("a")));

if (FFlag::DebugLuauDeferredConstraintResolution)
{
// Bug: We do not simplify at the right time
CHECK_EQ("*error-type*?", toString(requireType("a")));
}
else
{
CHECK_EQ("*error-type*", toString(requireType("a")));
}
}

TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_is_error2")

@ -118,9 +175,21 @@ TEST_CASE_FIXTURE(Fixture, "for_in_loop_iterator_is_error2")
end
)");

LUAU_REQUIRE_ERROR_COUNT(1, result);
if (FFlag::DebugLuauDeferredConstraintResolution)
{
// CLI-97375(awe): `bar()` is returning `nil` here, which isn't wrong necessarily,
// but then we're signaling an additional error for the access on `nil`.
LUAU_REQUIRE_ERROR_COUNT(2, result);

CHECK_EQ("*error-type*", toString(requireType("a")));
// Bug: We do not simplify at the right time
CHECK_EQ("*error-type*?", toString(requireType("a")));
}
else
{
LUAU_REQUIRE_ERROR_COUNT(1, result);

CHECK_EQ("*error-type*", toString(requireType("a")));
}
}

TEST_CASE_FIXTURE(Fixture, "length_of_error_type_does_not_produce_an_error")
@ -967,7 +967,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "type_comparison_ifelse_expression")

CHECK_EQ("number", toString(requireTypeAtPosition({10, 49})));
if (FFlag::DebugLuauDeferredConstraintResolution)
CHECK_EQ("unknown & ~number", toString(requireTypeAtPosition({10, 66})));
CHECK_EQ("~number", toString(requireTypeAtPosition({10, 66})));
else
CHECK_EQ("unknown", toString(requireTypeAtPosition({10, 66})));
}

@ -1497,7 +1497,7 @@ TEST_CASE_FIXTURE(BuiltinsFixture, "refine_unknowns")
if (FFlag::DebugLuauDeferredConstraintResolution)
{
CHECK_EQ("string", toString(requireTypeAtPosition({3, 28})));
CHECK_EQ("unknown & ~string", toString(requireTypeAtPosition({5, 28})));
CHECK_EQ("~string", toString(requireTypeAtPosition({5, 28})));
}
else
{
@ -4022,8 +4022,7 @@ TEST_CASE_FIXTURE(Fixture, "infer_write_property")

LUAU_REQUIRE_NO_ERRORS(result);

// CHECK("({ y: number }) -> ()" == toString(requireType("f")));
CHECK("({ y: number & unknown }) -> ()" == toString(requireType("f")));
CHECK("({ y: number }) -> ()" == toString(requireType("f")));
}

TEST_CASE_FIXTURE(Fixture, "table_subtyping_error_suppression")
@ -9,6 +9,7 @@
#include "Luau/VisitType.h"

#include "Fixture.h"
#include "ClassFixture.h"
#include "ScopedFlags.h"

#include "doctest.h"

@ -1219,6 +1220,26 @@ TEST_CASE_FIXTURE(Fixture, "bidirectional_checking_of_callback_property")
CHECK(location.end.line == 7);
}

TEST_CASE_FIXTURE(ClassFixture, "bidirectional_inference_of_class_methods")
{
CheckResult result = check(R"(
local c = ChildClass.New()

-- Instead of reporting that the lambda is the wrong type, report that we are using its argument improperly.
c.Touched:Connect(function(other)
print(other.ThisDoesNotExist)
end)
)");

LUAU_REQUIRE_ERROR_COUNT(1, result);

UnknownProperty* err = get<UnknownProperty>(result.errors[0]);
REQUIRE(err);

CHECK("ThisDoesNotExist" == err->key);
CHECK("BaseClass" == toString(err->table));
}

TEST_CASE_FIXTURE(BuiltinsFixture, "it_is_ok_to_have_inconsistent_number_of_return_values_in_nonstrict")
{
CheckResult result = check(R"(
@ -182,4 +182,17 @@ TEST_CASE_FIXTURE(Unifier2Fixture, "generalize_a_type_that_is_bounded_by_another
CHECK(builtinTypes.unknownType == follow(t2));
}

TEST_CASE_FIXTURE(Unifier2Fixture, "dont_traverse_into_class_types_when_generalizing")
{
auto [propTy, _] = freshType();

TypeId cursedClass = arena.addType(ClassType{"Cursed", {{"oh_no", Property::readonly(propTy)}}, std::nullopt, std::nullopt, {}, {}, ""});

auto genClass = u2.generalize(cursedClass);
REQUIRE(genClass);

auto genPropTy = get<ClassType>(*genClass)->props.at("oh_no").readTy;
CHECK(is<FreeType>(*genPropTy));
}

TEST_SUITE_END();
@ -4,7 +4,6 @@ AutocompleteTest.anonymous_autofilled_generic_type_pack_vararg
AutocompleteTest.autocomplete_response_perf1
AutocompleteTest.autocomplete_string_singleton_equality
AutocompleteTest.do_wrong_compatible_nonself_calls
AutocompleteTest.type_correct_expected_argument_type_suggestion_self
AutocompleteTest.type_correct_suggestion_for_overloads
BuiltinTests.aliased_string_format
BuiltinTests.assert_removes_falsy_types

@ -92,7 +91,6 @@ GenericsTests.factories_of_generics
GenericsTests.generic_argument_count_too_few
GenericsTests.generic_argument_count_too_many
GenericsTests.generic_factories
GenericsTests.generic_functions_dont_cache_type_parameters
GenericsTests.generic_functions_in_types
GenericsTests.generic_type_families_work_in_subtyping
GenericsTests.generic_type_pack_parentheses

@ -244,7 +242,6 @@ TableTests.generic_table_instantiation_potential_regression
TableTests.indexer_mismatch
TableTests.indexers_get_quantified_too
TableTests.indexing_from_a_table_should_prefer_properties_when_possible
TableTests.inequality_operators_imply_exactly_matching_types
TableTests.infer_indexer_from_its_variable_type_and_unifiable
TableTests.inferred_return_type_of_free_table
TableTests.instantiate_table_cloning_3

@ -264,7 +261,6 @@ TableTests.ok_to_set_nil_even_on_non_lvalue_base_expr
TableTests.okay_to_add_property_to_unsealed_tables_by_assignment
TableTests.okay_to_add_property_to_unsealed_tables_by_function_call
TableTests.only_ascribe_synthetic_names_at_module_scope
TableTests.oop_polymorphic
TableTests.open_table_unification_2
TableTests.pass_a_union_of_tables_to_a_function_that_requires_a_table
TableTests.pass_a_union_of_tables_to_a_function_that_requires_a_table_2

@ -367,11 +363,6 @@ TypeInferAnyError.any_type_propagates
TypeInferAnyError.assign_prop_to_table_by_calling_any_yields_any
TypeInferAnyError.call_to_any_yields_any
TypeInferAnyError.can_subscript_any
TypeInferAnyError.for_in_loop_iterator_is_any
TypeInferAnyError.for_in_loop_iterator_is_any2
TypeInferAnyError.for_in_loop_iterator_is_error
TypeInferAnyError.for_in_loop_iterator_is_error2
TypeInferAnyError.for_in_loop_iterator_returns_any
TypeInferAnyError.intersection_of_any_can_have_props
TypeInferAnyError.metatable_of_any_can_be_a_table
TypeInferAnyError.quantify_any_does_not_bind_to_itself

@ -442,6 +433,7 @@ TypeInferFunctions.too_many_return_values_in_parentheses
TypeInferFunctions.too_many_return_values_no_function
TypeInferLoops.cli_68448_iterators_need_not_accept_nil
TypeInferLoops.dcr_iteration_explore_raycast_minimization
TypeInferLoops.dcr_iteration_fragmented_keys
TypeInferLoops.dcr_iteration_on_never_gives_never
TypeInferLoops.dcr_xpath_candidates
TypeInferLoops.for_in_loop

@ -449,10 +441,9 @@ TypeInferLoops.for_in_loop_error_on_factory_not_returning_the_right_amount_of_va
TypeInferLoops.for_in_loop_error_on_iterator_requiring_args_but_none_given
TypeInferLoops.for_in_loop_on_error
TypeInferLoops.for_in_loop_on_non_function
TypeInferLoops.for_in_loop_with_custom_iterator
TypeInferLoops.for_in_loop_with_incompatible_args_to_iterator
TypeInferLoops.for_in_loop_with_next
TypeInferLoops.for_in_with_an_iterator_of_type_any
TypeInferLoops.for_in_with_generic_next
TypeInferLoops.for_loop
TypeInferLoops.ipairs_produces_integral_indices
TypeInferLoops.iterate_over_free_table

@ -483,7 +474,6 @@ TypeInferOOP.inferring_hundreds_of_self_calls_should_not_suffocate_memory
TypeInferOOP.methods_are_topologically_sorted
TypeInferOOP.object_constructor_can_refer_to_method_of_self
TypeInferOOP.promise_type_error_too_complex
TypeInferOOP.react_style_oo
TypeInferOperators.add_type_family_works
TypeInferOperators.cli_38355_recursive_union
TypeInferOperators.compound_assign_mismatch_metatable