From ea14e65ea04a19882df99cfb8ec7d5b73ff50e82 Mon Sep 17 00:00:00 2001
From: vegorov-rbx <75688451+vegorov-rbx@users.noreply.github.com>
Date: Thu, 15 Feb 2024 18:04:39 -0800
Subject: [PATCH] Sync to upstream/release/613 (#1167)

# What's changed?

* The compiler now targets bytecode version 5 by default; this includes support for vector type literals and for sub/div opcodes with a constant on the left-hand side

### New Type Solver

* The normalizer's type inhabitance check has been optimized
* Added the ability to reduce cyclic `and`/`or` type families (a sketch of the kind of program this affects appears after the diff)

### Native Code Generation

* `CodeGen::compile` now returns more specific causes of a code generation failure (see the `--!native` note after the diff)
* Fixed linking issues on platforms that don't support unwind frame data registration

---

### Internal Contributors

Co-authored-by: Andy Friesen
Co-authored-by: Aviral Goel
Co-authored-by: Vyacheslav Egorov

---------

Co-authored-by: Aaron Weiss
Co-authored-by: Alexander McCord
Co-authored-by: Andy Friesen
Co-authored-by: Vighnesh
Co-authored-by: Aviral Goel
Co-authored-by: David Cope
Co-authored-by: Lily Brown
---
 Analysis/include/Luau/Constraint.h | 4 +-
 Analysis/include/Luau/ConstraintGenerator.h | 13 +-
 Analysis/include/Luau/Normalize.h | 5 +-
 Analysis/include/Luau/Set.h | 4 +-
 Analysis/include/Luau/Subtyping.h | 27 ++-
 Analysis/include/Luau/Type.h | 9 +-
 Analysis/include/Luau/TypeFamily.h | 8 +-
 Analysis/include/Luau/TypePath.h | 4 +-
 Analysis/include/Luau/TypeUtils.h | 11 +-
 Analysis/src/Autocomplete.cpp | 1 -
 Analysis/src/ConstraintGenerator.cpp | 79 +++----
 Analysis/src/ConstraintSolver.cpp | 3 +-
 Analysis/src/Frontend.cpp | 3 +-
 Analysis/src/Normalize.cpp | 14 +-
 Analysis/src/Subtyping.cpp | 17 +-
 Analysis/src/ToString.cpp | 4 +-
 Analysis/src/TypeFamily.cpp | 153 +++++++++----
 Analysis/src/TypePack.cpp | 3 +-
 Analysis/src/Unifier2.cpp | 7 +-
 Ast/src/Ast.cpp | 4 +-
 Ast/src/Parser.cpp | 6 +-
 CodeGen/include/Luau/AddressA64.h | 8 +-
 CodeGen/include/Luau/AssemblyBuilderA64.h | 2 +-
 CodeGen/include/Luau/AssemblyBuilderX64.h | 2 +-
 CodeGen/include/Luau/BytecodeSummary.h | 12 +-
 CodeGen/include/Luau/CodeGen.h | 18 +-
 CodeGen/include/Luau/CodeGenCommon.h | 12 +
 CodeGen/include/Luau/ConditionX64.h | 4 +-
 CodeGen/include/Luau/IrAnalysis.h | 4 +-
 CodeGen/include/Luau/IrData.h | 30 +--
 CodeGen/include/Luau/IrVisitUseDef.h | 14 +-
 CodeGen/include/Luau/OperandX64.h | 36 +--
 CodeGen/include/Luau/RegisterA64.h | 8 +-
 CodeGen/include/Luau/RegisterX64.h | 2 +-
 CodeGen/src/AssemblyBuilderA64.cpp | 238 ++++++++++----------
 CodeGen/src/AssemblyBuilderX64.cpp | 146 ++++++------
 CodeGen/src/BytecodeAnalysis.cpp | 40 ++--
 CodeGen/src/BytecodeSummary.cpp | 2 +-
 CodeGen/src/CodeAllocator.cpp | 42 ++--
 CodeGen/src/CodeBlockUnwind.cpp | 21 +-
 CodeGen/src/CodeGen.cpp | 53 +++--
 CodeGen/src/CodeGenA64.cpp | 14 +-
 CodeGen/src/CodeGenAssembly.cpp | 8 +-
 CodeGen/src/CodeGenLower.h | 30 ++-
 CodeGen/src/CodeGenX64.cpp | 4 +-
 CodeGen/src/EmitBuiltinsX64.cpp | 8 +-
 CodeGen/src/EmitCommonX64.cpp | 10 +-
 CodeGen/src/EmitCommonX64.h | 4 +-
 CodeGen/src/EmitInstructionX64.cpp | 6 +-
 CodeGen/src/IrAnalysis.cpp | 48 ++--
 CodeGen/src/IrBuilder.cpp | 20 +-
 CodeGen/src/IrCallWrapperX64.cpp | 18 +-
 CodeGen/src/IrDump.cpp | 8 +-
 CodeGen/src/IrLoweringA64.cpp | 136 +++++------
 CodeGen/src/IrLoweringX64.cpp | 114 +++++----
 CodeGen/src/IrRegAllocA64.cpp | 64 +++---
 CodeGen/src/IrRegAllocX64.cpp | 50 ++--
 CodeGen/src/IrTranslateBuiltins.cpp | 12 +-
 CodeGen/src/IrTranslation.cpp | 48 ++--
 CodeGen/src/IrUtils.cpp | 28 +--
 CodeGen/src/IrValueLocationTracking.cpp | 14 +-
 CodeGen/src/OptimizeConstProp.cpp | 44 ++--
 CodeGen/src/OptimizeFinalX64.cpp | 4 +-
 CodeGen/src/UnwindBuilderDwarf2.cpp | 28 +--
 CodeGen/src/UnwindBuilderWin.cpp | 34 +--
 Common/include/Luau/Bytecode.h | 2 +-
 Common/include/Luau/ExperimentalFlags.h | 6 +-
 Common/include/Luau/VecDeque.h | 16 +-
 Compiler/src/BuiltinFolding.cpp | 5 +-
 Compiler/src/BytecodeBuilder.cpp | 5 +-
 Compiler/src/Compiler.cpp | 4 +-
 Sources.cmake | 1 +
 tests/AstJsonEncoder.test.cpp | 2 +-
 tests/Compiler.test.cpp | 10 -
 tests/Conformance.test.cpp | 2 +-
 tests/DiffAsserts.h | 6 +-
 tests/IrBuilder.test.cpp | 5 -
 tests/IrLowering.test.cpp | 6 -
 tests/Linter.test.cpp | 13 +-
 tests/TypeFamily.test.cpp | 3 +-
 tests/TypeInfer.annotations.test.cpp | 8 +-
 tests/TypeInfer.intersectionTypes.test.cpp | 24 +-
 tests/TypeInfer.provisional.test.cpp | 1 -
 tests/TypeInfer.refinements.test.cpp | 4 +-
 tests/TypeInfer.singletons.test.cpp | 3 +-
 tests/TypeInfer.tables.test.cpp | 2 -
 tests/TypeInfer.test.cpp | 10 +-
 tests/TypeInfer.typestates.test.cpp | 2 +-
 tests/TypeInfer.unionTypes.test.cpp | 8 +-
 tests/main.cpp | 23 +-
 tools/faillist.txt | 6 +-
 91 files changed, 1059 insertions(+), 945 deletions(-)
 create mode 100644 CodeGen/include/Luau/CodeGenCommon.h

diff --git a/Analysis/include/Luau/Constraint.h b/Analysis/include/Luau/Constraint.h index 08302c52..38506f0a 100644 --- a/Analysis/include/Luau/Constraint.h +++ b/Analysis/include/Luau/Constraint.h @@ -255,8 +255,8 @@ struct ReducePackConstraint }; using ConstraintV = Variant; + NameConstraint, TypeAliasExpansionConstraint, FunctionCallConstraint, FunctionCheckConstraint, PrimitiveTypeConstraint, HasPropConstraint, + SetPropConstraint, SetIndexerConstraint, SingletonOrTopTypeConstraint, UnpackConstraint, SetOpConstraint, ReduceConstraint, ReducePackConstraint>; struct Constraint { diff --git a/Analysis/include/Luau/ConstraintGenerator.h b/Analysis/include/Luau/ConstraintGenerator.h index 044d4aa6..65318a31 100644 --- a/Analysis/include/Luau/ConstraintGenerator.h +++ b/Analysis/include/Luau/ConstraintGenerator.h @@ -118,10 +118,9 @@ struct ConstraintGenerator DcrLogger* logger; - ConstraintGenerator(ModulePtr module, NotNull normalizer, NotNull moduleResolver, - NotNull builtinTypes, NotNull ice, const ScopePtr& globalScope, - std::function prepareModuleScope, DcrLogger* logger, NotNull dfg, - std::vector requireCycles); + ConstraintGenerator(ModulePtr module, NotNull normalizer, NotNull moduleResolver, NotNull builtinTypes, + NotNull ice, const ScopePtr& globalScope, std::function prepareModuleScope, + DcrLogger* logger, NotNull dfg, std::vector requireCycles); /** * The entry point to the ConstraintGenerator.
This will construct a set @@ -190,8 +189,10 @@ private: }; using RefinementContext = InsertionOrderedMap; - void unionRefinements(const ScopePtr& scope, Location location, const RefinementContext& lhs, const RefinementContext& rhs, RefinementContext& dest, std::vector* constraints); - void computeRefinement(const ScopePtr& scope, Location location, RefinementId refinement, RefinementContext* refis, bool sense, bool eq, std::vector* constraints); + void unionRefinements(const ScopePtr& scope, Location location, const RefinementContext& lhs, const RefinementContext& rhs, + RefinementContext& dest, std::vector* constraints); + void computeRefinement(const ScopePtr& scope, Location location, RefinementId refinement, RefinementContext* refis, bool sense, bool eq, + std::vector* constraints); void applyRefinements(const ScopePtr& scope, Location location, RefinementId refinement); ControlFlow visitBlockWithoutChildScope(const ScopePtr& scope, AstStatBlock* block); diff --git a/Analysis/include/Luau/Normalize.h b/Analysis/include/Luau/Normalize.h index 4508d4a4..9458462d 100644 --- a/Analysis/include/Luau/Normalize.h +++ b/Analysis/include/Luau/Normalize.h @@ -393,8 +393,9 @@ public: // Check for inhabitance bool isInhabited(TypeId ty); - bool isInhabited(TypeId ty, Set seen); - bool isInhabited(const NormalizedType* norm, Set seen = {nullptr}); + bool isInhabited(TypeId ty, Set& seen); + bool isInhabited(const NormalizedType* norm); + bool isInhabited(const NormalizedType* norm, Set& seen); // Check for intersections being inhabited bool isIntersectionInhabited(TypeId left, TypeId right); diff --git a/Analysis/include/Luau/Set.h b/Analysis/include/Luau/Set.h index 1cfdf5c6..aeb15aaa 100644 --- a/Analysis/include/Luau/Set.h +++ b/Analysis/include/Luau/Set.h @@ -127,7 +127,8 @@ public: const_iterator(typename Impl::const_iterator impl, typename Impl::const_iterator end) : impl(impl) , end(end) - {} + { + } const T& operator*() const { @@ -168,6 +169,7 @@ public: ++*this; return res; } + private: typename Impl::const_iterator impl; typename Impl::const_iterator end; diff --git a/Analysis/include/Luau/Subtyping.h b/Analysis/include/Luau/Subtyping.h index ddd12732..0ca2380e 100644 --- a/Analysis/include/Luau/Subtyping.h +++ b/Analysis/include/Luau/Subtyping.h @@ -65,7 +65,7 @@ struct SubtypingResult bool isSubtype = false; bool normalizationTooComplex = false; bool isCacheable = true; - + ErrorVec errors; /// The reason for isSubtype to be false. May not be present even if /// isSubtype is false, depending on the input types. SubtypingReasonings reasoning{kEmptyReasoning}; @@ -78,6 +78,7 @@ struct SubtypingResult SubtypingResult& withBothPath(TypePath::Path path); SubtypingResult& withSubPath(TypePath::Path path); SubtypingResult& withSuperPath(TypePath::Path path); + SubtypingResult& withErrors(ErrorVec& err); // Only negates the `isSubtype`. 
static SubtypingResult negate(const SubtypingResult& result); @@ -211,18 +212,22 @@ private: template TypeId makeAggregateType(const Container& container, TypeId orElse); - template - T handleTypeFamilyReductionResult(const TypeFamilyInstanceType* tf) + + std::pair handleTypeFamilyReductionResult(const TypeFamilyInstanceType* familyInstance) { TypeFamilyContext context{arena, builtinTypes, scope, normalizer, iceReporter, NotNull{&limits}}; - TypeFamilyReductionResult result = tf->family->reducer(tf->typeArguments, tf->packArguments, NotNull{&context}); - if (!result.blockedTypes.empty()) - unexpected(result.blockedTypes[0]); - else if (!result.blockedPacks.empty()) - unexpected(result.blockedPacks[0]); - else if (result.uninhabited || result.result == std::nullopt) - return builtinTypes->neverType; - return *result.result; + TypeId family = arena->addType(*familyInstance); + std::string familyString = toString(family); + FamilyGraphReductionResult result = reduceFamilies(family, {}, context, true); + ErrorVec errors; + if (result.blockedTypes.size() != 0 || result.blockedPacks.size() != 0) + { + errors.push_back(TypeError{{}, UninhabitedTypeFamily{family}}); + return {builtinTypes->neverType, errors}; + } + if (result.reducedTypes.contains(family)) + return {family, errors}; + return {builtinTypes->neverType, errors}; } [[noreturn]] void unexpected(TypeId ty); diff --git a/Analysis/include/Luau/Type.h b/Analysis/include/Luau/Type.h index b368bb20..c72864ce 100644 --- a/Analysis/include/Luau/Type.h +++ b/Analysis/include/Luau/Type.h @@ -406,7 +406,8 @@ struct Property // TODO: Kill all constructors in favor of `Property::rw(TypeId read, TypeId write)` and friends. Property(); Property(TypeId readTy, bool deprecated = false, const std::string& deprecatedSuggestion = "", std::optional location = std::nullopt, - const Tags& tags = {}, const std::optional& documentationSymbol = std::nullopt, std::optional typeLocation = std::nullopt); + const Tags& tags = {}, const std::optional& documentationSymbol = std::nullopt, + std::optional typeLocation = std::nullopt); // DEPRECATED: Should only be called in non-RWP! We assert that the `readTy` is not nullopt. // TODO: Kill once we don't have non-RWP. @@ -639,9 +640,9 @@ struct NegationType using ErrorType = Unifiable::Error; -using TypeVariant = - Unifiable::Variant; +using TypeVariant = Unifiable::Variant; struct Type final { diff --git a/Analysis/include/Luau/TypeFamily.h b/Analysis/include/Luau/TypeFamily.h index 77fd6e8a..79dc435c 100644 --- a/Analysis/include/Luau/TypeFamily.h +++ b/Analysis/include/Luau/TypeFamily.h @@ -76,6 +76,10 @@ struct TypeFamilyReductionResult std::vector blockedPacks; }; +template +using ReducerFunction = + std::function(T, const std::vector&, const std::vector&, NotNull)>; + /// Represents a type function that may be applied to map a series of types and /// type packs to a single output type. struct TypeFamily @@ -85,7 +89,7 @@ struct TypeFamily std::string name; /// The reducer function for the type family. - std::function(const std::vector&, const std::vector&, NotNull)> reducer; + ReducerFunction reducer; }; /// Represents a type function that may be applied to map a series of types and @@ -97,7 +101,7 @@ struct TypePackFamily std::string name; /// The reducer function for the type pack family. 
- std::function(const std::vector&, const std::vector&, NotNull)> reducer; + ReducerFunction reducer; }; struct FamilyGraphReductionResult diff --git a/Analysis/include/Luau/TypePath.h b/Analysis/include/Luau/TypePath.h index bdca95a4..fe7b1a25 100644 --- a/Analysis/include/Luau/TypePath.h +++ b/Analysis/include/Luau/TypePath.h @@ -102,9 +102,7 @@ struct Path std::vector components; /// Creates a new empty Path. - Path() - { - } + Path() {} /// Creates a new Path from a list of components. explicit Path(std::vector components) diff --git a/Analysis/include/Luau/TypeUtils.h b/Analysis/include/Luau/TypeUtils.h index 8803d924..6ac4e7c6 100644 --- a/Analysis/include/Luau/TypeUtils.h +++ b/Analysis/include/Luau/TypeUtils.h @@ -66,9 +66,15 @@ struct ErrorSuppression }; ErrorSuppression() = default; - constexpr ErrorSuppression(Value enumValue) : value(enumValue) { } + constexpr ErrorSuppression(Value enumValue) + : value(enumValue) + { + } - constexpr operator Value() const { return value; } + constexpr operator Value() const + { + return value; + } explicit operator bool() const = delete; ErrorSuppression orElse(const ErrorSuppression& other) const @@ -81,6 +87,7 @@ struct ErrorSuppression return *this; } } + private: Value value; }; diff --git a/Analysis/src/Autocomplete.cpp b/Analysis/src/Autocomplete.cpp index 87a593bd..1c8620b8 100644 --- a/Analysis/src/Autocomplete.cpp +++ b/Analysis/src/Autocomplete.cpp @@ -161,7 +161,6 @@ static bool checkTypeMatch(TypeId subTy, TypeId superTy, NotNull scope, T return unifier.canUnify(subTy, superTy).empty(); } - } static TypeCorrectKind checkTypeCorrectKind( diff --git a/Analysis/src/ConstraintGenerator.cpp b/Analysis/src/ConstraintGenerator.cpp index f3fb163e..6f744951 100644 --- a/Analysis/src/ConstraintGenerator.cpp +++ b/Analysis/src/ConstraintGenerator.cpp @@ -142,9 +142,7 @@ struct HasFreeType : TypeOnceVisitor { bool result = false; - HasFreeType() - { - } + HasFreeType() {} bool visit(TypeId ty) override { @@ -288,7 +286,7 @@ std::optional ConstraintGenerator::lookup(const ScopePtr& scope, DefId d // `scope->lookup(operand)` may return nothing because we only bind a type to that operand // once we've seen that particular `DefId`. In this case, we need to prototype those types // and use those at a later time. 
- std::optional ty = lookup(scope, operand, /*prototype*/false); + std::optional ty = lookup(scope, operand, /*prototype*/ false); if (!ty) { ty = arena->addType(BlockedType{}); @@ -315,7 +313,8 @@ NotNull ConstraintGenerator::addConstraint(const ScopePtr& scope, st return NotNull{constraints.emplace_back(std::move(c)).get()}; } -void ConstraintGenerator::unionRefinements(const ScopePtr& scope, Location location, const RefinementContext& lhs, const RefinementContext& rhs, RefinementContext& dest, std::vector* constraints) +void ConstraintGenerator::unionRefinements(const ScopePtr& scope, Location location, const RefinementContext& lhs, const RefinementContext& rhs, + RefinementContext& dest, std::vector* constraints) { const auto intersect = [&](const std::vector& types) { if (1 == types.size()) @@ -346,7 +345,8 @@ void ConstraintGenerator::unionRefinements(const ScopePtr& scope, Location locat } } -void ConstraintGenerator::computeRefinement(const ScopePtr& scope, Location location, RefinementId refinement, RefinementContext* refis, bool sense, bool eq, std::vector* constraints) +void ConstraintGenerator::computeRefinement(const ScopePtr& scope, Location location, RefinementId refinement, RefinementContext* refis, bool sense, + bool eq, std::vector* constraints) { if (!refinement) return; @@ -907,19 +907,17 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatLocalFuncti std::make_unique(constraintScope, function->name->location, GeneralizationConstraint{functionType, sig.signature}); Constraint* previous = nullptr; - forEachConstraint(start, end, this, - [&c, &previous](const ConstraintPtr& constraint) + forEachConstraint(start, end, this, [&c, &previous](const ConstraintPtr& constraint) { + c->dependencies.push_back(NotNull{constraint.get()}); + + if (auto psc = get(*constraint); psc && psc->returns) { - c->dependencies.push_back(NotNull{constraint.get()}); + if (previous) + constraint->dependencies.push_back(NotNull{previous}); - if (auto psc = get(*constraint); psc && psc->returns) - { - if (previous) - constraint->dependencies.push_back(NotNull{previous}); - - previous = constraint.get(); - } - }); + previous = constraint.get(); + } + }); addConstraint(scope, std::move(c)); module->astTypes[function->func] = functionType; @@ -1018,20 +1016,18 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatFunction* f std::make_unique(constraintScope, function->name->location, GeneralizationConstraint{generalizedType, sig.signature}); Constraint* previous = nullptr; - forEachConstraint(start, end, this, - [&c, &excludeList, &previous](const ConstraintPtr& constraint) + forEachConstraint(start, end, this, [&c, &excludeList, &previous](const ConstraintPtr& constraint) { + if (!excludeList.contains(constraint.get())) + c->dependencies.push_back(NotNull{constraint.get()}); + + if (auto psc = get(*constraint); psc && psc->returns) { - if (!excludeList.contains(constraint.get())) - c->dependencies.push_back(NotNull{constraint.get()}); + if (previous) + constraint->dependencies.push_back(NotNull{previous}); - if (auto psc = get(*constraint); psc && psc->returns) - { - if (previous) - constraint->dependencies.push_back(NotNull{previous}); - - previous = constraint.get(); - } - }); + previous = constraint.get(); + } + }); addConstraint(scope, std::move(c)); } @@ -1470,8 +1466,7 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatError* erro return ControlFlow::None; } -InferencePack ConstraintGenerator::checkPack( - const ScopePtr& scope, AstArray 
exprs, const std::vector>& expectedTypes) +InferencePack ConstraintGenerator::checkPack(const ScopePtr& scope, AstArray exprs, const std::vector>& expectedTypes) { std::vector head; std::optional tail; @@ -1708,14 +1703,8 @@ InferencePack ConstraintGenerator::checkPack(const ScopePtr& scope, AstExprCall* * 4. Solve the call */ - NotNull checkConstraint = addConstraint(scope, call->func->location, - FunctionCheckConstraint{ - fnType, - argPack, - call, - NotNull{&module->astExpectedTypes} - } - ); + NotNull checkConstraint = + addConstraint(scope, call->func->location, FunctionCheckConstraint{fnType, argPack, call, NotNull{&module->astExpectedTypes}}); forEachConstraint(funcBeginCheckpoint, funcEndCheckpoint, this, [checkConstraint](const ConstraintPtr& constraint) { checkConstraint->dependencies.emplace_back(constraint.get()); @@ -1743,8 +1732,7 @@ InferencePack ConstraintGenerator::checkPack(const ScopePtr& scope, AstExprCall* } } -Inference ConstraintGenerator::check( - const ScopePtr& scope, AstExpr* expr, std::optional expectedType, bool forceSingleton, bool generalize) +Inference ConstraintGenerator::check(const ScopePtr& scope, AstExpr* expr, std::optional expectedType, bool forceSingleton, bool generalize) { RecursionCounter counter{&recursionCount}; @@ -2403,11 +2391,9 @@ std::optional ConstraintGenerator::checkLValue(const ScopePtr& scope, As if (transform) { - addConstraint(scope, local->location, UnpackConstraint{ - arena->addTypePack({*ty}), - arena->addTypePack({assignedTy}), - /*resultIsLValue*/ true - }); + addConstraint(scope, local->location, + UnpackConstraint{arena->addTypePack({*ty}), arena->addTypePack({assignedTy}), + /*resultIsLValue*/ true}); recordInferredBinding(local->local, *ty); } @@ -3395,7 +3381,6 @@ void ConstraintGenerator::fillInInferredBindings(const ScopePtr& globalScope, As scope->bindings[symbol] = Binding{ty, location}; } - } } diff --git a/Analysis/src/ConstraintSolver.cpp b/Analysis/src/ConstraintSolver.cpp index 304f710f..109fb2fb 100644 --- a/Analysis/src/ConstraintSolver.cpp +++ b/Analysis/src/ConstraintSolver.cpp @@ -1184,7 +1184,8 @@ bool ConstraintSolver::tryDispatch(const FunctionCheckConstraint& c, NotNullis() || expr->is() || expr->is() || expr->is() || expr->is()) + else if (expr->is() || expr->is() || expr->is() || + expr->is() || expr->is()) { Unifier2 u2{arena, builtinTypes, constraint->scope, NotNull{&iceReporter}}; u2.unify(actualArgTy, expectedArgTy); diff --git a/Analysis/src/Frontend.cpp b/Analysis/src/Frontend.cpp index 61df4ac5..b0dadd0b 100644 --- a/Analysis/src/Frontend.cpp +++ b/Analysis/src/Frontend.cpp @@ -1289,7 +1289,8 @@ ModulePtr Frontend::check(const SourceModule& sourceModule, Mode mode, std::vect { return Luau::check(sourceModule, mode, requireCycles, builtinTypes, NotNull{&iceHandler}, NotNull{forAutocomplete ? &moduleResolverForAutocomplete : &moduleResolver}, NotNull{fileResolver}, - environmentScope ? *environmentScope : globals.globalScope, prepareModuleScopeWrap, options, typeCheckLimits, recordJsonLog, writeJsonLog); + environmentScope ? 
*environmentScope : globals.globalScope, prepareModuleScopeWrap, options, typeCheckLimits, recordJsonLog, + writeJsonLog); } catch (const InternalCompilerError& err) { diff --git a/Analysis/src/Normalize.cpp b/Analysis/src/Normalize.cpp index 985ad1e9..ee80cc99 100644 --- a/Analysis/src/Normalize.cpp +++ b/Analysis/src/Normalize.cpp @@ -402,7 +402,14 @@ static bool isShallowInhabited(const NormalizedType& norm) !get(norm.buffers) || !norm.functions.isNever() || !norm.tables.empty() || !norm.tyvars.empty(); } -bool Normalizer::isInhabited(const NormalizedType* norm, Set seen) +bool Normalizer::isInhabited(const NormalizedType* norm) +{ + Set seen{nullptr}; + + return isInhabited(norm, seen); +} + +bool Normalizer::isInhabited(const NormalizedType* norm, Set& seen) { // If normalization failed, the type is complex, and so is more likely than not to be inhabited. if (!norm) @@ -436,7 +443,8 @@ bool Normalizer::isInhabited(TypeId ty) return *result; } - bool result = isInhabited(ty, {nullptr}); + Set seen{nullptr}; + bool result = isInhabited(ty, seen); if (cacheInhabitance) cachedIsInhabited[ty] = result; @@ -444,7 +452,7 @@ bool Normalizer::isInhabited(TypeId ty) return result; } -bool Normalizer::isInhabited(TypeId ty, Set seen) +bool Normalizer::isInhabited(TypeId ty, Set& seen) { // TODO: use log.follow(ty), CLI-64291 ty = follow(ty); diff --git a/Analysis/src/Subtyping.cpp b/Analysis/src/Subtyping.cpp index 8d399123..3c7c32f1 100644 --- a/Analysis/src/Subtyping.cpp +++ b/Analysis/src/Subtyping.cpp @@ -126,6 +126,7 @@ SubtypingResult& SubtypingResult::andAlso(const SubtypingResult& other) isSubtype &= other.isSubtype; normalizationTooComplex |= other.normalizationTooComplex; isCacheable &= other.isCacheable; + errors.insert(errors.end(), other.errors.begin(), other.errors.end()); return *this; } @@ -147,6 +148,7 @@ SubtypingResult& SubtypingResult::orElse(const SubtypingResult& other) isSubtype |= other.isSubtype; normalizationTooComplex |= other.normalizationTooComplex; isCacheable &= other.isCacheable; + errors.insert(errors.end(), other.errors.begin(), other.errors.end()); return *this; } @@ -213,6 +215,12 @@ SubtypingResult& SubtypingResult::withSuperPath(TypePath::Path path) return *this; } +SubtypingResult& SubtypingResult::withErrors(ErrorVec& err) +{ + errors = std::move(err); + return *this; +} + SubtypingResult SubtypingResult::negate(const SubtypingResult& result) { return SubtypingResult{ @@ -1421,15 +1429,16 @@ bool Subtyping::bindGeneric(SubtypingEnvironment& env, TypeId subTy, TypeId supe SubtypingResult Subtyping::isCovariantWith(SubtypingEnvironment& env, const TypeFamilyInstanceType* subFamilyInstance, const TypeId superTy) { // Reduce the typefamily instance - TypeId reduced = handleTypeFamilyReductionResult(subFamilyInstance); - return isCovariantWith(env, reduced, superTy); + auto [ty, errors] = handleTypeFamilyReductionResult(subFamilyInstance); + // If we return optional, that means the type family was irreducible - we can reduce that to never + return isCovariantWith(env, ty, superTy).withErrors(errors); } SubtypingResult Subtyping::isCovariantWith(SubtypingEnvironment& env, const TypeId subTy, const TypeFamilyInstanceType* superFamilyInstance) { // Reduce the typefamily instance - TypeId reduced = handleTypeFamilyReductionResult(superFamilyInstance); - return isCovariantWith(env, subTy, reduced); + auto [ty, errors] = handleTypeFamilyReductionResult(superFamilyInstance); + return isCovariantWith(env, subTy, ty).withErrors(errors); } /* diff --git 
a/Analysis/src/ToString.cpp b/Analysis/src/ToString.cpp index 0f520116..2e204926 100644 --- a/Analysis/src/ToString.cpp +++ b/Analysis/src/ToString.cpp @@ -1284,8 +1284,8 @@ void TypeStringifier::stringify(TypePackId tpid, const std::vector& cycles, const std::set& cycleTPs, - DenseHashMap& cycleNames, DenseHashMap& cycleTpNames, bool exhaustive) +static void assignCycleNames(const std::set& cycles, const std::set& cycleTPs, DenseHashMap& cycleNames, + DenseHashMap& cycleTpNames, bool exhaustive) { int nextIndex = 1; diff --git a/Analysis/src/TypeFamily.cpp b/Analysis/src/TypeFamily.cpp index 4903865f..9d28e7da 100644 --- a/Analysis/src/TypeFamily.cpp +++ b/Analysis/src/TypeFamily.cpp @@ -2,6 +2,7 @@ #include "Luau/TypeFamily.h" +#include "Luau/Common.h" #include "Luau/ConstraintSolver.h" #include "Luau/DenseHash.h" #include "Luau/Instantiation.h" @@ -11,10 +12,12 @@ #include "Luau/Subtyping.h" #include "Luau/ToString.h" #include "Luau/TxnLog.h" +#include "Luau/Type.h" #include "Luau/TypeCheckLimits.h" #include "Luau/TypeUtils.h" #include "Luau/Unifier2.h" #include "Luau/VecDeque.h" +#include "Luau/Set.h" #include "Luau/VisitType.h" LUAU_DYNAMIC_FASTINTVARIABLE(LuauTypeFamilyGraphReductionMaximumSteps, 1'000'000); @@ -26,6 +29,7 @@ struct InstanceCollector : TypeOnceVisitor { VecDeque tys; VecDeque tps; + std::vector cyclicInstance; bool visit(TypeId ty, const TypeFamilyInstanceType&) override { @@ -39,6 +43,14 @@ struct InstanceCollector : TypeOnceVisitor return true; } + void cycle(TypeId ty) override + { + // Detected a cyclic type family instance + TypeId t = follow(ty); + if (get(t)) + cyclicInstance.push_back(t); + } + bool visit(TypeId ty, const ClassType&) override { return false; @@ -63,6 +75,7 @@ struct FamilyReducer VecDeque queuedTys; VecDeque queuedTps; + std::vector cyclicTypeFamilies; DenseHashSet irreducible{nullptr}; FamilyGraphReductionResult result; bool force = false; @@ -70,10 +83,12 @@ struct FamilyReducer // Local to the constraint being reduced.
Location location; - FamilyReducer(VecDeque queuedTys, VecDeque queuedTps, Location location, TypeFamilyContext ctx, bool force = false) + FamilyReducer(VecDeque queuedTys, VecDeque queuedTps, std::vector cyclicTypes, Location location, + TypeFamilyContext ctx, bool force = false) : ctx(ctx) , queuedTys(std::move(queuedTys)) , queuedTps(std::move(queuedTps)) + , cyclicTypeFamilies(std::move(cyclicTypes)) , force(force) , location(location) { @@ -81,6 +96,7 @@ struct FamilyReducer enum class SkipTestResult { + CyclicTypeFamily, Irreducible, Defer, Okay, @@ -92,10 +108,16 @@ struct FamilyReducer if (is(ty)) { + for (auto t : cyclicTypeFamilies) + { + if (ty == t) + return SkipTestResult::CyclicTypeFamily; + } + if (!irreducible.contains(ty)) return SkipTestResult::Defer; - else - return SkipTestResult::Irreducible; + + return SkipTestResult::Irreducible; } else if (is(ty)) { @@ -223,10 +245,12 @@ struct FamilyReducer if (const TypeFamilyInstanceType* tfit = get(subject)) { - if (!testParameters(subject, tfit)) + SkipTestResult testCyclic = testForSkippability(subject); + + if (!testParameters(subject, tfit) && testCyclic != SkipTestResult::CyclicTypeFamily) return; - TypeFamilyReductionResult result = tfit->family->reducer(tfit->typeArguments, tfit->packArguments, NotNull{&ctx}); + TypeFamilyReductionResult result = tfit->family->reducer(subject, tfit->typeArguments, tfit->packArguments, NotNull{&ctx}); handleFamilyReduction(subject, result); } } @@ -244,7 +268,7 @@ struct FamilyReducer if (!testParameters(subject, tfit)) return; - TypeFamilyReductionResult result = tfit->family->reducer(tfit->typeArguments, tfit->packArguments, NotNull{&ctx}); + TypeFamilyReductionResult result = tfit->family->reducer(subject, tfit->typeArguments, tfit->packArguments, NotNull{&ctx}); handleFamilyReduction(subject, result); } } @@ -259,9 +283,9 @@ struct FamilyReducer }; static FamilyGraphReductionResult reduceFamiliesInternal( - VecDeque queuedTys, VecDeque queuedTps, Location location, TypeFamilyContext ctx, bool force) + VecDeque queuedTys, VecDeque queuedTps, std::vector cyclics, Location location, TypeFamilyContext ctx, bool force) { - FamilyReducer reducer{std::move(queuedTys), std::move(queuedTps), location, ctx, force}; + FamilyReducer reducer{std::move(queuedTys), std::move(queuedTps), std::move(cyclics), location, ctx, force}; int iterationCount = 0; while (!reducer.done()) @@ -295,7 +319,7 @@ FamilyGraphReductionResult reduceFamilies(TypeId entrypoint, Location location, if (collector.tys.empty() && collector.tps.empty()) return {}; - return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), location, ctx, force); + return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), std::move(collector.cyclicInstance), location, ctx, force); } FamilyGraphReductionResult reduceFamilies(TypePackId entrypoint, Location location, TypeFamilyContext ctx, bool force) @@ -314,7 +338,7 @@ FamilyGraphReductionResult reduceFamilies(TypePackId entrypoint, Location locati if (collector.tys.empty() && collector.tps.empty()) return {}; - return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), location, ctx, force); + return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), {}, location, ctx, force); } bool isPending(TypeId ty, ConstraintSolver* solver) @@ -322,7 +346,8 @@ bool isPending(TypeId ty, ConstraintSolver* solver) return is(ty) || is(ty) || is(ty) || (solver && solver->hasUnresolvedConstraints(ty)); } 
-TypeFamilyReductionResult notFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult notFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 1 || !packParams.empty()) { @@ -339,7 +364,8 @@ TypeFamilyReductionResult notFamilyFn(const std::vector& typePar return {ctx->builtins->booleanType, false, {}, {}}; } -TypeFamilyReductionResult lenFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult lenFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 1 || !packParams.empty()) { @@ -415,7 +441,7 @@ TypeFamilyReductionResult lenFamilyFn(const std::vector& typePar } TypeFamilyReductionResult unmFamilyFn( - const std::vector& typeParams, const std::vector& packParams, NotNull ctx) + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 1 || !packParams.empty()) { @@ -486,8 +512,8 @@ TypeFamilyReductionResult unmFamilyFn( return {std::nullopt, true, {}, {}}; } -TypeFamilyReductionResult numericBinopFamilyFn( - const std::vector& typeParams, const std::vector& packParams, NotNull ctx, const std::string metamethod) +TypeFamilyReductionResult numericBinopFamilyFn(TypeId instance, const std::vector& typeParams, + const std::vector& packParams, NotNull ctx, const std::string metamethod) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -571,7 +597,8 @@ TypeFamilyReductionResult numericBinopFamilyFn( return {std::nullopt, true, {}, {}}; } -TypeFamilyReductionResult addFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult addFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -579,10 +606,11 @@ TypeFamilyReductionResult addFamilyFn(const std::vector& typePar LUAU_ASSERT(false); } - return numericBinopFamilyFn(typeParams, packParams, ctx, "__add"); + return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__add"); } -TypeFamilyReductionResult subFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult subFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -590,10 +618,11 @@ TypeFamilyReductionResult subFamilyFn(const std::vector& typePar LUAU_ASSERT(false); } - return numericBinopFamilyFn(typeParams, packParams, ctx, "__sub"); + return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__sub"); } -TypeFamilyReductionResult mulFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult mulFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -601,10 +630,11 @@ TypeFamilyReductionResult mulFamilyFn(const std::vector& typePar LUAU_ASSERT(false); } - return numericBinopFamilyFn(typeParams, packParams, ctx, "__mul"); + return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__mul"); } -TypeFamilyReductionResult divFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult divFamilyFn( + TypeId instance, const 
std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -612,10 +642,11 @@ TypeFamilyReductionResult divFamilyFn(const std::vector& typePar LUAU_ASSERT(false); } - return numericBinopFamilyFn(typeParams, packParams, ctx, "__div"); + return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__div"); } -TypeFamilyReductionResult idivFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult idivFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -623,10 +654,11 @@ TypeFamilyReductionResult idivFamilyFn(const std::vector& typePa LUAU_ASSERT(false); } - return numericBinopFamilyFn(typeParams, packParams, ctx, "__idiv"); + return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__idiv"); } -TypeFamilyReductionResult powFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult powFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -634,10 +666,11 @@ TypeFamilyReductionResult powFamilyFn(const std::vector& typePar LUAU_ASSERT(false); } - return numericBinopFamilyFn(typeParams, packParams, ctx, "__pow"); + return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__pow"); } -TypeFamilyReductionResult modFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult modFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -645,10 +678,11 @@ TypeFamilyReductionResult modFamilyFn(const std::vector& typePar LUAU_ASSERT(false); } - return numericBinopFamilyFn(typeParams, packParams, ctx, "__mod"); + return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__mod"); } -TypeFamilyReductionResult concatFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult concatFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -733,7 +767,8 @@ TypeFamilyReductionResult concatFamilyFn(const std::vector& type return {ctx->builtins->stringType, false, {}, {}}; } -TypeFamilyReductionResult andFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult andFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -744,6 +779,14 @@ TypeFamilyReductionResult andFamilyFn(const std::vector& typePar TypeId lhsTy = follow(typeParams.at(0)); TypeId rhsTy = follow(typeParams.at(1)); + // t1 = and ~> lhs + if (follow(rhsTy) == instance && lhsTy != rhsTy) + return {lhsTy, false, {}, {}}; + // t1 = and ~> rhs + if (follow(lhsTy) == instance && lhsTy != rhsTy) + return {rhsTy, false, {}, {}}; + + // check to see if both operand types are resolved enough, and wait to reduce if not if (isPending(lhsTy, ctx->solver)) return {std::nullopt, false, {lhsTy}, {}}; @@ -761,7 +804,8 @@ TypeFamilyReductionResult andFamilyFn(const std::vector& typePar return {overallResult.result, false, std::move(blockedTypes), {}}; } -TypeFamilyReductionResult 
orFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult orFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -772,6 +816,13 @@ TypeFamilyReductionResult orFamilyFn(const std::vector& typePara TypeId lhsTy = follow(typeParams.at(0)); TypeId rhsTy = follow(typeParams.at(1)); + // t1 = or ~> lhs + if (follow(rhsTy) == instance && lhsTy != rhsTy) + return {lhsTy, false, {}, {}}; + // t1 = or ~> rhs + if (follow(lhsTy) == instance && lhsTy != rhsTy) + return {rhsTy, false, {}, {}}; + // check to see if both operand types are resolved enough, and wait to reduce if not if (isPending(lhsTy, ctx->solver)) return {std::nullopt, false, {lhsTy}, {}}; @@ -789,8 +840,8 @@ TypeFamilyReductionResult orFamilyFn(const std::vector& typePara return {overallResult.result, false, std::move(blockedTypes), {}}; } -static TypeFamilyReductionResult comparisonFamilyFn( - const std::vector& typeParams, const std::vector& packParams, NotNull ctx, const std::string metamethod) +static TypeFamilyReductionResult comparisonFamilyFn(TypeId instance, const std::vector& typeParams, + const std::vector& packParams, NotNull ctx, const std::string metamethod) { if (typeParams.size() != 2 || !packParams.empty()) @@ -870,7 +921,8 @@ static TypeFamilyReductionResult comparisonFamilyFn( return {ctx->builtins->booleanType, false, {}, {}}; } -TypeFamilyReductionResult ltFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult ltFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -878,10 +930,11 @@ TypeFamilyReductionResult ltFamilyFn(const std::vector& typePara LUAU_ASSERT(false); } - return comparisonFamilyFn(typeParams, packParams, ctx, "__lt"); + return comparisonFamilyFn(instance, typeParams, packParams, ctx, "__lt"); } -TypeFamilyReductionResult leFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult leFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -889,10 +942,11 @@ TypeFamilyReductionResult leFamilyFn(const std::vector& typePara LUAU_ASSERT(false); } - return comparisonFamilyFn(typeParams, packParams, ctx, "__le"); + return comparisonFamilyFn(instance, typeParams, packParams, ctx, "__le"); } -TypeFamilyReductionResult eqFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult eqFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -995,7 +1049,8 @@ struct FindRefinementBlockers : TypeOnceVisitor }; -TypeFamilyReductionResult refineFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult refineFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -1052,7 +1107,8 @@ TypeFamilyReductionResult refineFamilyFn(const std::vector& type return {resultTy, false, {}, {}}; } -TypeFamilyReductionResult unionFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult 
unionFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -1082,7 +1138,8 @@ TypeFamilyReductionResult unionFamilyFn(const std::vector& typeP } -TypeFamilyReductionResult intersectFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult intersectFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 2 || !packParams.empty()) { @@ -1174,7 +1231,8 @@ bool computeKeysOf(TypeId ty, Set& result, DenseHashSet& se return false; } -TypeFamilyReductionResult keyofFamilyImpl(const std::vector& typeParams, const std::vector& packParams, NotNull ctx, bool isRaw) +TypeFamilyReductionResult keyofFamilyImpl( + const std::vector& typeParams, const std::vector& packParams, NotNull ctx, bool isRaw) { if (typeParams.size() != 1 || !packParams.empty()) { @@ -1190,7 +1248,8 @@ TypeFamilyReductionResult keyofFamilyImpl(const std::vector& typ if (!normTy) return {std::nullopt, false, {}, {}}; - // if we don't have either just tables or just classes, we've got nothing to get keys of (at least until a future version perhaps adds classes as well) + // if we don't have either just tables or just classes, we've got nothing to get keys of (at least until a future version perhaps adds classes as + // well) if (normTy->hasTables() == normTy->hasClasses()) return {std::nullopt, true, {}, {}}; @@ -1289,7 +1348,8 @@ TypeFamilyReductionResult keyofFamilyImpl(const std::vector& typ return {ctx->arena->addType(UnionType{singletons}), false, {}, {}}; } -TypeFamilyReductionResult keyofFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult keyofFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 1 || !packParams.empty()) { @@ -1300,7 +1360,8 @@ TypeFamilyReductionResult keyofFamilyFn(const std::vector& typeP return keyofFamilyImpl(typeParams, packParams, ctx, /* isRaw */ false); } -TypeFamilyReductionResult rawkeyofFamilyFn(const std::vector& typeParams, const std::vector& packParams, NotNull ctx) +TypeFamilyReductionResult rawkeyofFamilyFn( + TypeId instance, const std::vector& typeParams, const std::vector& packParams, NotNull ctx) { if (typeParams.size() != 1 || !packParams.empty()) { diff --git a/Analysis/src/TypePack.cpp b/Analysis/src/TypePack.cpp index 1c86f841..0d86bead 100644 --- a/Analysis/src/TypePack.cpp +++ b/Analysis/src/TypePack.cpp @@ -271,7 +271,8 @@ TypePackId follow(TypePackId tp, const void* context, TypePackId (*mapper)(const if (const Unifiable::Bound* btv = get>(mapped)) return btv->boundTo; - else if (const TypePack* tp = get(mapped); (FFlag::DebugLuauDeferredConstraintResolution || FFlag::LuauFollowEmptyTypePacks) && tp && tp->head.empty()) + else if (const TypePack* tp = get(mapped); + (FFlag::DebugLuauDeferredConstraintResolution || FFlag::LuauFollowEmptyTypePacks) && tp && tp->head.empty()) return tp->tail; else return std::nullopt; diff --git a/Analysis/src/Unifier2.cpp b/Analysis/src/Unifier2.cpp index cba2f4bb..5d3dc864 100644 --- a/Analysis/src/Unifier2.cpp +++ b/Analysis/src/Unifier2.cpp @@ -150,7 +150,8 @@ bool Unifier2::unify(TypeId subTy, TypeId superTy) return true; } -bool Unifier2::unify(TypeId subTy, const FunctionType* superFn) { +bool Unifier2::unify(TypeId subTy, const FunctionType* superFn) +{ const FunctionType* subFn 
= get(subTy); bool shouldInstantiate = @@ -465,8 +466,8 @@ struct MutatingGeneralizer : TypeOnceVisitor bool isWithinFunction = false; - MutatingGeneralizer( - NotNull builtinTypes, NotNull scope, DenseHashMap positiveTypes, DenseHashMap negativeTypes) + MutatingGeneralizer(NotNull builtinTypes, NotNull scope, DenseHashMap positiveTypes, + DenseHashMap negativeTypes) : TypeOnceVisitor(/* skipBoundTypes */ true) , builtinTypes(builtinTypes) , scope(scope) diff --git a/Ast/src/Ast.cpp b/Ast/src/Ast.cpp index 0409a622..bb82e0be 100644 --- a/Ast/src/Ast.cpp +++ b/Ast/src/Ast.cpp @@ -522,8 +522,8 @@ void AstStatLocal::visit(AstVisitor* visitor) } } -AstStatFor::AstStatFor(const Location& location, AstLocal* var, AstExpr* from, AstExpr* to, AstExpr* step, AstStatBlock* body, bool hasDo, - const Location& doLocation) +AstStatFor::AstStatFor( + const Location& location, AstLocal* var, AstExpr* from, AstExpr* to, AstExpr* step, AstStatBlock* body, bool hasDo, const Location& doLocation) : AstStat(ClassIndex(), location) , var(var) , from(from) diff --git a/Ast/src/Parser.cpp b/Ast/src/Parser.cpp index c4d1c65d..fa1ab61e 100644 --- a/Ast/src/Parser.cpp +++ b/Ast/src/Parser.cpp @@ -599,8 +599,7 @@ AstStat* Parser::parseFor() bool hasEnd = expectMatchEndAndConsume(Lexeme::ReservedEnd, matchDo); body->hasEnd = hasEnd; - return allocator.alloc( - Location(start, end), copy(vars), copy(values), body, hasIn, inLocation, hasDo, matchDo.location); + return allocator.alloc(Location(start, end), copy(vars), copy(values), body, hasIn, inLocation, hasDo, matchDo.location); } } @@ -907,8 +906,7 @@ AstStat* Parser::parseDeclaration(const Location& start) { props.push_back(parseDeclaredClassMethod()); } - else if (lexer.current().type == '[' && (lexer.lookahead().type == Lexeme::RawString || - lexer.lookahead().type == Lexeme::QuotedString)) + else if (lexer.current().type == '[' && (lexer.lookahead().type == Lexeme::RawString || lexer.lookahead().type == Lexeme::QuotedString)) { const Lexeme begin = lexer.current(); nextLexeme(); // [ diff --git a/CodeGen/include/Luau/AddressA64.h b/CodeGen/include/Luau/AddressA64.h index 097cc136..fbac3ec3 100644 --- a/CodeGen/include/Luau/AddressA64.h +++ b/CodeGen/include/Luau/AddressA64.h @@ -32,8 +32,8 @@ struct AddressA64 , offset(xzr) , data(off) { - LUAU_ASSERT(base.kind == KindA64::x || base == sp); - LUAU_ASSERT(kind != AddressKindA64::reg); + CODEGEN_ASSERT(base.kind == KindA64::x || base == sp); + CODEGEN_ASSERT(kind != AddressKindA64::reg); } constexpr AddressA64(RegisterA64 base, RegisterA64 offset) @@ -42,8 +42,8 @@ struct AddressA64 , offset(offset) , data(0) { - LUAU_ASSERT(base.kind == KindA64::x); - LUAU_ASSERT(offset.kind == KindA64::x); + CODEGEN_ASSERT(base.kind == KindA64::x); + CODEGEN_ASSERT(offset.kind == KindA64::x); } AddressKindA64 kind; diff --git a/CodeGen/include/Luau/AssemblyBuilderA64.h b/CodeGen/include/Luau/AssemblyBuilderA64.h index 78251012..a86403d4 100644 --- a/CodeGen/include/Luau/AssemblyBuilderA64.h +++ b/CodeGen/include/Luau/AssemblyBuilderA64.h @@ -176,7 +176,7 @@ public: // Extracts code offset (in bytes) from label uint32_t getLabelOffset(const Label& label) { - LUAU_ASSERT(label.location != ~0u); + CODEGEN_ASSERT(label.location != ~0u); return label.location * 4; } diff --git a/CodeGen/include/Luau/AssemblyBuilderX64.h b/CodeGen/include/Luau/AssemblyBuilderX64.h index 0be59fb1..f3f85ed5 100644 --- a/CodeGen/include/Luau/AssemblyBuilderX64.h +++ b/CodeGen/include/Luau/AssemblyBuilderX64.h @@ -179,7 +179,7 @@ public: // Extracts code 
offset (in bytes) from label uint32_t getLabelOffset(const Label& label) { - LUAU_ASSERT(label.location != ~0u); + CODEGEN_ASSERT(label.location != ~0u); return label.location; } diff --git a/CodeGen/include/Luau/BytecodeSummary.h b/CodeGen/include/Luau/BytecodeSummary.h index 648c0957..f421daef 100644 --- a/CodeGen/include/Luau/BytecodeSummary.h +++ b/CodeGen/include/Luau/BytecodeSummary.h @@ -1,7 +1,7 @@ // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details #pragma once -#include "Luau/Common.h" +#include "Luau/CodeGenCommon.h" #include "Luau/Bytecode.h" #include @@ -49,21 +49,21 @@ public: void incCount(unsigned nesting, uint8_t op) { - LUAU_ASSERT(nesting <= getNestingLimit()); - LUAU_ASSERT(op < getOpLimit()); + CODEGEN_ASSERT(nesting <= getNestingLimit()); + CODEGEN_ASSERT(op < getOpLimit()); ++counts[nesting][op]; } unsigned getCount(unsigned nesting, uint8_t op) const { - LUAU_ASSERT(nesting <= getNestingLimit()); - LUAU_ASSERT(op < getOpLimit()); + CODEGEN_ASSERT(nesting <= getNestingLimit()); + CODEGEN_ASSERT(op < getOpLimit()); return counts[nesting][op]; } const std::vector& getCounts(unsigned nesting) const { - LUAU_ASSERT(nesting <= getNestingLimit()); + CODEGEN_ASSERT(nesting <= getNestingLimit()); return counts[nesting]; } diff --git a/CodeGen/include/Luau/CodeGen.h b/CodeGen/include/Luau/CodeGen.h index d84c7236..965aa7c2 100644 --- a/CodeGen/include/Luau/CodeGen.h +++ b/CodeGen/include/Luau/CodeGen.h @@ -23,14 +23,21 @@ enum CodeGenFlags CodeGen_ColdFunctions = 1 << 1, }; +// These enum values can be reported through telemetry. +// To ensure consistency, changes should be additive. enum class CodeGenCompilationResult { - Success, // Successfully generated code for at least one function - NothingToCompile, // There were no new functions to compile + Success = 0, // Successfully generated code for at least one function + NothingToCompile = 1, // There were no new functions to compile + NotNativeModule = 2, // Module does not have `--!native` comment - CodeGenNotInitialized, // Native codegen system is not initialized - CodeGenFailed, // Native codegen failed due to an internal compiler error - AllocationFailed, // Native codegen failed due to an allocation error + CodeGenNotInitialized = 3, // Native codegen system is not initialized + CodeGenOverflowInstructionLimit = 4, // Instruction limit overflow + CodeGenOverflowBlockLimit = 5, // Block limit overflow + CodeGenOverflowBlockInstructionLimit = 6, // Block instruction limit overflow + CodeGenAssemblerFinalizationFailure = 7, // Failure during assembler finalization + CodeGenLoweringFailure = 8, // Lowering failed + AllocationFailed = 9, // Native codegen failed due to an allocation error }; struct CompilationStats @@ -40,6 +47,7 @@ struct CompilationStats size_t nativeDataSizeBytes = 0; size_t nativeMetadataSizeBytes = 0; + uint32_t functionsTotal = 0; uint32_t functionsCompiled = 0; }; diff --git a/CodeGen/include/Luau/CodeGenCommon.h b/CodeGen/include/Luau/CodeGenCommon.h new file mode 100644 index 00000000..84090423 --- /dev/null +++ b/CodeGen/include/Luau/CodeGenCommon.h @@ -0,0 +1,12 @@ +// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details +#pragma once + +#include "Luau/Common.h" + +#if defined(LUAU_ASSERTENABLED) +#define CODEGEN_ASSERT(expr) ((void)(!!(expr) || (Luau::assertCallHandler(#expr, __FILE__, __LINE__, __FUNCTION__) && (LUAU_DEBUGBREAK(), 0)))) +#elif 
defined(CODEGEN_ENABLE_ASSERT_HANDLER) +#define CODEGEN_ASSERT(expr) ((void)(!!(expr) || Luau::assertCallHandler(#expr, __FILE__, __LINE__, __FUNCTION__))) +#else +#define CODEGEN_ASSERT(expr) (void)sizeof(!!(expr)) +#endif diff --git a/CodeGen/include/Luau/ConditionX64.h b/CodeGen/include/Luau/ConditionX64.h index 4432641a..39ee3f02 100644 --- a/CodeGen/include/Luau/ConditionX64.h +++ b/CodeGen/include/Luau/ConditionX64.h @@ -1,7 +1,7 @@ // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details #pragma once -#include "Luau/Common.h" +#include "Luau/CodeGenCommon.h" namespace Luau { @@ -102,7 +102,7 @@ inline ConditionX64 getReverseCondition(ConditionX64 cond) case ConditionX64::NotParity: return ConditionX64::Parity; case ConditionX64::Count: - LUAU_ASSERT(!"invalid ConditionX64 value"); + CODEGEN_ASSERT(!"invalid ConditionX64 value"); } return ConditionX64::Count; diff --git a/CodeGen/include/Luau/IrAnalysis.h b/CodeGen/include/Luau/IrAnalysis.h index 0f9ce795..c9362c7a 100644 --- a/CodeGen/include/Luau/IrAnalysis.h +++ b/CodeGen/include/Luau/IrAnalysis.h @@ -1,7 +1,7 @@ // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details #pragma once -#include "Luau/Common.h" +#include "Luau/CodeGenCommon.h" #include #include @@ -167,7 +167,7 @@ struct BlockIteratorWrapper uint32_t operator[](size_t pos) const { - LUAU_ASSERT(pos < size_t(itEnd - itBegin)); + CODEGEN_ASSERT(pos < size_t(itEnd - itBegin)); return itBegin[pos]; } }; diff --git a/CodeGen/include/Luau/IrData.h b/CodeGen/include/Luau/IrData.h index de79f6f2..129536d1 100644 --- a/CodeGen/include/Luau/IrData.h +++ b/CodeGen/include/Luau/IrData.h @@ -995,13 +995,13 @@ struct IrFunction IrBlock& blockOp(IrOp op) { - LUAU_ASSERT(op.kind == IrOpKind::Block); + CODEGEN_ASSERT(op.kind == IrOpKind::Block); return blocks[op.index]; } IrInst& instOp(IrOp op) { - LUAU_ASSERT(op.kind == IrOpKind::Inst); + CODEGEN_ASSERT(op.kind == IrOpKind::Inst); return instructions[op.index]; } @@ -1015,7 +1015,7 @@ struct IrFunction IrConst& constOp(IrOp op) { - LUAU_ASSERT(op.kind == IrOpKind::Constant); + CODEGEN_ASSERT(op.kind == IrOpKind::Constant); return constants[op.index]; } @@ -1023,7 +1023,7 @@ struct IrFunction { IrConst& value = constOp(op); - LUAU_ASSERT(value.kind == IrConstKind::Tag); + CODEGEN_ASSERT(value.kind == IrConstKind::Tag); return value.valueTag; } @@ -1044,7 +1044,7 @@ struct IrFunction { IrConst& value = constOp(op); - LUAU_ASSERT(value.kind == IrConstKind::Int); + CODEGEN_ASSERT(value.kind == IrConstKind::Int); return value.valueInt; } @@ -1065,7 +1065,7 @@ struct IrFunction { IrConst& value = constOp(op); - LUAU_ASSERT(value.kind == IrConstKind::Uint); + CODEGEN_ASSERT(value.kind == IrConstKind::Uint); return value.valueUint; } @@ -1086,7 +1086,7 @@ struct IrFunction { IrConst& value = constOp(op); - LUAU_ASSERT(value.kind == IrConstKind::Double); + CODEGEN_ASSERT(value.kind == IrConstKind::Double); return value.valueDouble; } @@ -1106,14 +1106,14 @@ struct IrFunction uint32_t getBlockIndex(const IrBlock& block) const { // Can only be called with blocks from our vector - LUAU_ASSERT(&block >= blocks.data() && &block <= blocks.data() + blocks.size()); + CODEGEN_ASSERT(&block >= blocks.data() && &block <= blocks.data() + blocks.size()); return uint32_t(&block - blocks.data()); } uint32_t getInstIndex(const IrInst& inst) const { // Can only be called with instructions from our vector - LUAU_ASSERT(&inst >= 
instructions.data() && &inst <= instructions.data() + instructions.size()); + CODEGEN_ASSERT(&inst >= instructions.data() && &inst <= instructions.data() + instructions.size()); return uint32_t(&inst - instructions.data()); } @@ -1154,7 +1154,7 @@ struct IrFunction BytecodeTypes getBytecodeTypesAt(int pcpos) const { - LUAU_ASSERT(pcpos >= 0); + CODEGEN_ASSERT(pcpos >= 0); if (size_t(pcpos) < bcTypes.size()) return bcTypes[pcpos]; @@ -1165,31 +1165,31 @@ struct IrFunction inline IrCondition conditionOp(IrOp op) { - LUAU_ASSERT(op.kind == IrOpKind::Condition); + CODEGEN_ASSERT(op.kind == IrOpKind::Condition); return IrCondition(op.index); } inline int vmRegOp(IrOp op) { - LUAU_ASSERT(op.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(op.kind == IrOpKind::VmReg); return op.index; } inline int vmConstOp(IrOp op) { - LUAU_ASSERT(op.kind == IrOpKind::VmConst); + CODEGEN_ASSERT(op.kind == IrOpKind::VmConst); return op.index; } inline int vmUpvalueOp(IrOp op) { - LUAU_ASSERT(op.kind == IrOpKind::VmUpvalue); + CODEGEN_ASSERT(op.kind == IrOpKind::VmUpvalue); return op.index; } inline uint32_t vmExitOp(IrOp op) { - LUAU_ASSERT(op.kind == IrOpKind::VmExit); + CODEGEN_ASSERT(op.kind == IrOpKind::VmExit); return op.index; } diff --git a/CodeGen/include/Luau/IrVisitUseDef.h b/CodeGen/include/Luau/IrVisitUseDef.h index 6ae31440..acff0d76 100644 --- a/CodeGen/include/Luau/IrVisitUseDef.h +++ b/CodeGen/include/Luau/IrVisitUseDef.h @@ -117,7 +117,7 @@ static void visitVmRegDefsUses(T& visitor, IrFunction& function, const IrInst& i { if (count >= 3) { - LUAU_ASSERT(inst.d.kind == IrOpKind::VmReg && vmRegOp(inst.d) == vmRegOp(inst.c) + 1); + CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmReg && vmRegOp(inst.d) == vmRegOp(inst.c) + 1); visitor.useRange(vmRegOp(inst.c), count); } @@ -206,12 +206,12 @@ static void visitVmRegDefsUses(T& visitor, IrFunction& function, const IrInst& i default: // All instructions which reference registers have to be handled explicitly - LUAU_ASSERT(inst.a.kind != IrOpKind::VmReg); - LUAU_ASSERT(inst.b.kind != IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind != IrOpKind::VmReg); - LUAU_ASSERT(inst.d.kind != IrOpKind::VmReg); - LUAU_ASSERT(inst.e.kind != IrOpKind::VmReg); - LUAU_ASSERT(inst.f.kind != IrOpKind::VmReg); + CODEGEN_ASSERT(inst.a.kind != IrOpKind::VmReg); + CODEGEN_ASSERT(inst.b.kind != IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind != IrOpKind::VmReg); + CODEGEN_ASSERT(inst.d.kind != IrOpKind::VmReg); + CODEGEN_ASSERT(inst.e.kind != IrOpKind::VmReg); + CODEGEN_ASSERT(inst.f.kind != IrOpKind::VmReg); break; } } diff --git a/CodeGen/include/Luau/OperandX64.h b/CodeGen/include/Luau/OperandX64.h index b9aa8f54..2be72ea8 100644 --- a/CodeGen/include/Luau/OperandX64.h +++ b/CodeGen/include/Luau/OperandX64.h @@ -1,7 +1,7 @@ // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details #pragma once -#include "Luau/Common.h" +#include "Luau/CodeGenCommon.h" #include "Luau/RegisterX64.h" #include @@ -62,9 +62,9 @@ struct OperandX64 constexpr OperandX64 operator[](OperandX64&& addr) const { - LUAU_ASSERT(cat == CategoryX64::mem); - LUAU_ASSERT(index == noreg && scale == 1 && base == noreg && imm == 0); - LUAU_ASSERT(addr.memSize == SizeX64::none); + CODEGEN_ASSERT(cat == CategoryX64::mem); + CODEGEN_ASSERT(index == noreg && scale == 1 && base == noreg && imm == 0); + CODEGEN_ASSERT(addr.memSize == SizeX64::none); addr.cat = CategoryX64::mem; addr.memSize = memSize; @@ -85,8 +85,8 @@ constexpr OperandX64 operator*(RegisterX64 reg, uint8_t 
scale) if (scale == 1) return OperandX64(reg); - LUAU_ASSERT(scale == 1 || scale == 2 || scale == 4 || scale == 8); - LUAU_ASSERT(reg.index != 0b100 && "can't scale SP"); + CODEGEN_ASSERT(scale == 1 || scale == 2 || scale == 4 || scale == 8); + CODEGEN_ASSERT(reg.index != 0b100 && "can't scale SP"); return OperandX64(SizeX64::none, reg, scale, noreg, 0); } @@ -103,16 +103,16 @@ constexpr OperandX64 operator-(RegisterX64 reg, int32_t disp) constexpr OperandX64 operator+(RegisterX64 base, RegisterX64 index) { - LUAU_ASSERT(index.index != 4 && "sp cannot be used as index"); - LUAU_ASSERT(base.size == index.size); + CODEGEN_ASSERT(index.index != 4 && "sp cannot be used as index"); + CODEGEN_ASSERT(base.size == index.size); return OperandX64(SizeX64::none, index, 1, base, 0); } constexpr OperandX64 operator+(OperandX64 op, int32_t disp) { - LUAU_ASSERT(op.cat == CategoryX64::mem); - LUAU_ASSERT(op.memSize == SizeX64::none); + CODEGEN_ASSERT(op.cat == CategoryX64::mem); + CODEGEN_ASSERT(op.memSize == SizeX64::none); op.imm += disp; return op; @@ -120,10 +120,10 @@ constexpr OperandX64 operator+(OperandX64 op, int32_t disp) constexpr OperandX64 operator+(OperandX64 op, RegisterX64 base) { - LUAU_ASSERT(op.cat == CategoryX64::mem); - LUAU_ASSERT(op.memSize == SizeX64::none); - LUAU_ASSERT(op.base == noreg); - LUAU_ASSERT(op.index == noreg || op.index.size == base.size); + CODEGEN_ASSERT(op.cat == CategoryX64::mem); + CODEGEN_ASSERT(op.memSize == SizeX64::none); + CODEGEN_ASSERT(op.base == noreg); + CODEGEN_ASSERT(op.index == noreg || op.index.size == base.size); op.base = base; return op; @@ -131,10 +131,10 @@ constexpr OperandX64 operator+(OperandX64 op, RegisterX64 base) constexpr OperandX64 operator+(RegisterX64 base, OperandX64 op) { - LUAU_ASSERT(op.cat == CategoryX64::mem); - LUAU_ASSERT(op.memSize == SizeX64::none); - LUAU_ASSERT(op.base == noreg); - LUAU_ASSERT(op.index == noreg || op.index.size == base.size); + CODEGEN_ASSERT(op.cat == CategoryX64::mem); + CODEGEN_ASSERT(op.memSize == SizeX64::none); + CODEGEN_ASSERT(op.base == noreg); + CODEGEN_ASSERT(op.index == noreg || op.index.size == base.size); op.base = base; return op; diff --git a/CodeGen/include/Luau/RegisterA64.h b/CodeGen/include/Luau/RegisterA64.h index beb34ca7..f661cdfc 100644 --- a/CodeGen/include/Luau/RegisterA64.h +++ b/CodeGen/include/Luau/RegisterA64.h @@ -1,7 +1,7 @@ // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details #pragma once -#include "Luau/Common.h" +#include "Luau/CodeGenCommon.h" #include @@ -40,9 +40,9 @@ struct RegisterA64 constexpr RegisterA64 castReg(KindA64 kind, RegisterA64 reg) { - LUAU_ASSERT(kind != reg.kind); - LUAU_ASSERT(kind != KindA64::none && reg.kind != KindA64::none); - LUAU_ASSERT((kind == KindA64::w || kind == KindA64::x) == (reg.kind == KindA64::w || reg.kind == KindA64::x)); + CODEGEN_ASSERT(kind != reg.kind); + CODEGEN_ASSERT(kind != KindA64::none && reg.kind != KindA64::none); + CODEGEN_ASSERT((kind == KindA64::w || kind == KindA64::x) == (reg.kind == KindA64::w || reg.kind == KindA64::x)); return RegisterA64{kind, reg.index}; } diff --git a/CodeGen/include/Luau/RegisterX64.h b/CodeGen/include/Luau/RegisterX64.h index 7fa97607..74e0ab46 100644 --- a/CodeGen/include/Luau/RegisterX64.h +++ b/CodeGen/include/Luau/RegisterX64.h @@ -1,7 +1,7 @@ // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details #pragma once -#include "Luau/Common.h" +#include 
"Luau/CodeGenCommon.h" #include diff --git a/CodeGen/src/AssemblyBuilderA64.cpp b/CodeGen/src/AssemblyBuilderA64.cpp index de0eb0cd..96d17192 100644 --- a/CodeGen/src/AssemblyBuilderA64.cpp +++ b/CodeGen/src/AssemblyBuilderA64.cpp @@ -58,13 +58,13 @@ AssemblyBuilderA64::AssemblyBuilderA64(bool logText, unsigned int features) AssemblyBuilderA64::~AssemblyBuilderA64() { - LUAU_ASSERT(finalized); + CODEGEN_ASSERT(finalized); } void AssemblyBuilderA64::mov(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst == sp); - LUAU_ASSERT(dst.kind == src.kind || (dst.kind == KindA64::x && src == sp) || (dst == sp && src.kind == KindA64::x)); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst == sp); + CODEGEN_ASSERT(dst.kind == src.kind || (dst.kind == KindA64::x && src == sp) || (dst == sp && src.kind == KindA64::x)); if (dst == sp || src == sp) placeR1("mov", dst, src, 0b00'100010'0'000000000000); @@ -150,14 +150,14 @@ void AssemblyBuilderA64::cmp(RegisterA64 src1, uint16_t src2) void AssemblyBuilderA64::csel(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, ConditionA64 cond) { - LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w); + CODEGEN_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w); placeCS("csel", dst, src1, src2, cond, 0b11010'10'0, 0b00); } void AssemblyBuilderA64::cset(RegisterA64 dst, ConditionA64 cond) { - LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w); + CODEGEN_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w); RegisterA64 src = dst.kind == KindA64::x ? xzr : wzr; @@ -240,24 +240,24 @@ void AssemblyBuilderA64::ror(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2 void AssemblyBuilderA64::clz(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); - LUAU_ASSERT(dst.kind == src.kind); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); + CODEGEN_ASSERT(dst.kind == src.kind); placeR1("clz", dst, src, 0b10'11010110'00000'00010'0); } void AssemblyBuilderA64::rbit(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); - LUAU_ASSERT(dst.kind == src.kind); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); + CODEGEN_ASSERT(dst.kind == src.kind); placeR1("rbit", dst, src, 0b10'11010110'00000'0000'00); } void AssemblyBuilderA64::rev(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); - LUAU_ASSERT(dst.kind == src.kind); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); + CODEGEN_ASSERT(dst.kind == src.kind); placeR1("rev", dst, src, 0b10'11010110'00000'0000'10 | int(dst.kind == KindA64::x)); } @@ -265,7 +265,7 @@ void AssemblyBuilderA64::rev(RegisterA64 dst, RegisterA64 src) void AssemblyBuilderA64::lsl(RegisterA64 dst, RegisterA64 src1, uint8_t src2) { int size = dst.kind == KindA64::x ? 64 : 32; - LUAU_ASSERT(src2 < size); + CODEGEN_ASSERT(src2 < size); placeBFM("lsl", dst, src1, src2, 0b10'100110, (-src2) & (size - 1), size - 1 - src2); } @@ -273,7 +273,7 @@ void AssemblyBuilderA64::lsl(RegisterA64 dst, RegisterA64 src1, uint8_t src2) void AssemblyBuilderA64::lsr(RegisterA64 dst, RegisterA64 src1, uint8_t src2) { int size = dst.kind == KindA64::x ? 
64 : 32; - LUAU_ASSERT(src2 < size); + CODEGEN_ASSERT(src2 < size); placeBFM("lsr", dst, src1, src2, 0b10'100110, src2, size - 1); } @@ -281,7 +281,7 @@ void AssemblyBuilderA64::lsr(RegisterA64 dst, RegisterA64 src1, uint8_t src2) void AssemblyBuilderA64::asr(RegisterA64 dst, RegisterA64 src1, uint8_t src2) { int size = dst.kind == KindA64::x ? 64 : 32; - LUAU_ASSERT(src2 < size); + CODEGEN_ASSERT(src2 < size); placeBFM("asr", dst, src1, src2, 0b00'100110, src2, size - 1); } @@ -289,7 +289,7 @@ void AssemblyBuilderA64::asr(RegisterA64 dst, RegisterA64 src1, uint8_t src2) void AssemblyBuilderA64::ror(RegisterA64 dst, RegisterA64 src1, uint8_t src2) { int size = dst.kind == KindA64::x ? 64 : 32; - LUAU_ASSERT(src2 < size); + CODEGEN_ASSERT(src2 < size); // note: this is encoding src1 via immr which is a hack but the bit layout matches and a special archetype feels excessive placeBFM("ror", dst, src1, src2, 0b00'100111, src1.index, src2); @@ -298,7 +298,7 @@ void AssemblyBuilderA64::ror(RegisterA64 dst, RegisterA64 src1, uint8_t src2) void AssemblyBuilderA64::ubfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w) { int size = dst.kind == KindA64::x ? 64 : 32; - LUAU_ASSERT(w > 0 && f + w <= size); + CODEGEN_ASSERT(w > 0 && f + w <= size); // f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability placeBFM("ubfiz", dst, src, f * 100 + w, 0b10'100110, (-f) & (size - 1), w - 1); @@ -307,7 +307,7 @@ void AssemblyBuilderA64::ubfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint void AssemblyBuilderA64::ubfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w) { int size = dst.kind == KindA64::x ? 64 : 32; - LUAU_ASSERT(w > 0 && f + w <= size); + CODEGEN_ASSERT(w > 0 && f + w <= size); // f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability placeBFM("ubfx", dst, src, f * 100 + w, 0b10'100110, f, f + w - 1); @@ -316,7 +316,7 @@ void AssemblyBuilderA64::ubfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8 void AssemblyBuilderA64::sbfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w) { int size = dst.kind == KindA64::x ? 64 : 32; - LUAU_ASSERT(w > 0 && f + w <= size); + CODEGEN_ASSERT(w > 0 && f + w <= size); // f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability placeBFM("sbfiz", dst, src, f * 100 + w, 0b00'100110, (-f) & (size - 1), w - 1); @@ -325,7 +325,7 @@ void AssemblyBuilderA64::sbfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint void AssemblyBuilderA64::sbfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w) { int size = dst.kind == KindA64::x ? 
64 : 32; - LUAU_ASSERT(w > 0 && f + w <= size); + CODEGEN_ASSERT(w > 0 && f + w <= size); // f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability placeBFM("sbfx", dst, src, f * 100 + w, 0b00'100110, f, f + w - 1); @@ -333,7 +333,7 @@ void AssemblyBuilderA64::sbfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8 void AssemblyBuilderA64::ldr(RegisterA64 dst, AddressA64 src) { - LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w || dst.kind == KindA64::s || dst.kind == KindA64::d || dst.kind == KindA64::q); + CODEGEN_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w || dst.kind == KindA64::s || dst.kind == KindA64::d || dst.kind == KindA64::q); switch (dst.kind) { @@ -353,56 +353,56 @@ void AssemblyBuilderA64::ldr(RegisterA64 dst, AddressA64 src) placeA("ldr", dst, src, 0b00'11110011, /* sizelog= */ 4); break; case KindA64::none: - LUAU_ASSERT(!"Unexpected register kind"); + CODEGEN_ASSERT(!"Unexpected register kind"); } } void AssemblyBuilderA64::ldrb(RegisterA64 dst, AddressA64 src) { - LUAU_ASSERT(dst.kind == KindA64::w); + CODEGEN_ASSERT(dst.kind == KindA64::w); placeA("ldrb", dst, src, 0b00'11100001, /* sizelog= */ 0); } void AssemblyBuilderA64::ldrh(RegisterA64 dst, AddressA64 src) { - LUAU_ASSERT(dst.kind == KindA64::w); + CODEGEN_ASSERT(dst.kind == KindA64::w); placeA("ldrh", dst, src, 0b01'11100001, /* sizelog= */ 1); } void AssemblyBuilderA64::ldrsb(RegisterA64 dst, AddressA64 src) { - LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w); + CODEGEN_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w); placeA("ldrsb", dst, src, 0b00'11100010 | uint8_t(dst.kind == KindA64::w), /* sizelog= */ 0); } void AssemblyBuilderA64::ldrsh(RegisterA64 dst, AddressA64 src) { - LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w); + CODEGEN_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w); placeA("ldrsh", dst, src, 0b01'11100010 | uint8_t(dst.kind == KindA64::w), /* sizelog= */ 1); } void AssemblyBuilderA64::ldrsw(RegisterA64 dst, AddressA64 src) { - LUAU_ASSERT(dst.kind == KindA64::x); + CODEGEN_ASSERT(dst.kind == KindA64::x); placeA("ldrsw", dst, src, 0b10'11100010, /* sizelog= */ 2); } void AssemblyBuilderA64::ldp(RegisterA64 dst1, RegisterA64 dst2, AddressA64 src) { - LUAU_ASSERT(dst1.kind == KindA64::x || dst1.kind == KindA64::w); - LUAU_ASSERT(dst1.kind == dst2.kind); + CODEGEN_ASSERT(dst1.kind == KindA64::x || dst1.kind == KindA64::w); + CODEGEN_ASSERT(dst1.kind == dst2.kind); placeP("ldp", dst1, dst2, src, 0b101'0'010'1, uint8_t(dst1.kind == KindA64::x) << 1, /* sizelog= */ dst1.kind == KindA64::x ? 
3 : 2); } void AssemblyBuilderA64::str(RegisterA64 src, AddressA64 dst) { - LUAU_ASSERT(src.kind == KindA64::x || src.kind == KindA64::w || src.kind == KindA64::s || src.kind == KindA64::d || src.kind == KindA64::q); + CODEGEN_ASSERT(src.kind == KindA64::x || src.kind == KindA64::w || src.kind == KindA64::s || src.kind == KindA64::d || src.kind == KindA64::q); switch (src.kind) { @@ -422,28 +422,28 @@ void AssemblyBuilderA64::str(RegisterA64 src, AddressA64 dst) placeA("str", src, dst, 0b00'11110010, /* sizelog= */ 4); break; case KindA64::none: - LUAU_ASSERT(!"Unexpected register kind"); + CODEGEN_ASSERT(!"Unexpected register kind"); } } void AssemblyBuilderA64::strb(RegisterA64 src, AddressA64 dst) { - LUAU_ASSERT(src.kind == KindA64::w); + CODEGEN_ASSERT(src.kind == KindA64::w); placeA("strb", src, dst, 0b00'11100000, /* sizelog= */ 0); } void AssemblyBuilderA64::strh(RegisterA64 src, AddressA64 dst) { - LUAU_ASSERT(src.kind == KindA64::w); + CODEGEN_ASSERT(src.kind == KindA64::w); placeA("strh", src, dst, 0b01'11100000, /* sizelog= */ 1); } void AssemblyBuilderA64::stp(RegisterA64 src1, RegisterA64 src2, AddressA64 dst) { - LUAU_ASSERT(src1.kind == KindA64::x || src1.kind == KindA64::w); - LUAU_ASSERT(src1.kind == src2.kind); + CODEGEN_ASSERT(src1.kind == KindA64::x || src1.kind == KindA64::w); + CODEGEN_ASSERT(src1.kind == src2.kind); placeP("stp", src1, src2, dst, 0b101'0'010'0, uint8_t(src1.kind == KindA64::x) << 1, /* sizelog= */ src1.kind == KindA64::x ? 3 : 2); } @@ -538,7 +538,7 @@ void AssemblyBuilderA64::adr(RegisterA64 dst, Label& label) void AssemblyBuilderA64::fmov(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::d && (src.kind == KindA64::d || src.kind == KindA64::x)); + CODEGEN_ASSERT(dst.kind == KindA64::d && (src.kind == KindA64::d || src.kind == KindA64::x)); if (src.kind == KindA64::d) placeR1("fmov", dst, src, 0b000'11110'01'1'0000'00'10000); @@ -548,10 +548,10 @@ void AssemblyBuilderA64::fmov(RegisterA64 dst, RegisterA64 src) void AssemblyBuilderA64::fmov(RegisterA64 dst, double src) { - LUAU_ASSERT(dst.kind == KindA64::d); + CODEGEN_ASSERT(dst.kind == KindA64::d); int imm = getFmovImm(src); - LUAU_ASSERT(imm >= 0 && imm <= 256); + CODEGEN_ASSERT(imm >= 0 && imm <= 256); // fmov can't encode 0, but movi can; movi is otherwise not useful for 64-bit fp immediates because it encodes repeating patterns if (imm == 256) @@ -562,7 +562,7 @@ void AssemblyBuilderA64::fmov(RegisterA64 dst, double src) void AssemblyBuilderA64::fabs(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d); + CODEGEN_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d); placeR1("fabs", dst, src, 0b000'11110'01'1'0000'01'10000); } @@ -571,13 +571,13 @@ void AssemblyBuilderA64::fadd(RegisterA64 dst, RegisterA64 src1, RegisterA64 src { if (dst.kind == KindA64::d) { - LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d); + CODEGEN_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d); placeR3("fadd", dst, src1, src2, 0b11110'01'1, 0b0010'10); } else { - LUAU_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s); + CODEGEN_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s); placeR3("fadd", dst, src1, src2, 0b11110'00'1, 0b0010'10); } @@ -587,13 +587,13 @@ void AssemblyBuilderA64::fdiv(RegisterA64 dst, RegisterA64 src1, RegisterA64 src { if (dst.kind == KindA64::d) { - LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d); + 
CODEGEN_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d); placeR3("fdiv", dst, src1, src2, 0b11110'01'1, 0b0001'10); } else { - LUAU_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s); + CODEGEN_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s); placeR3("fdiv", dst, src1, src2, 0b11110'00'1, 0b0001'10); } @@ -603,13 +603,13 @@ void AssemblyBuilderA64::fmul(RegisterA64 dst, RegisterA64 src1, RegisterA64 src { if (dst.kind == KindA64::d) { - LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d); + CODEGEN_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d); placeR3("fmul", dst, src1, src2, 0b11110'01'1, 0b0000'10); } else { - LUAU_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s); + CODEGEN_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s); placeR3("fmul", dst, src1, src2, 0b11110'00'1, 0b0000'10); } @@ -619,13 +619,13 @@ void AssemblyBuilderA64::fneg(RegisterA64 dst, RegisterA64 src) { if (dst.kind == KindA64::d) { - LUAU_ASSERT(src.kind == KindA64::d); + CODEGEN_ASSERT(src.kind == KindA64::d); placeR1("fneg", dst, src, 0b000'11110'01'1'0000'10'10000); } else { - LUAU_ASSERT(dst.kind == KindA64::s && src.kind == KindA64::s); + CODEGEN_ASSERT(dst.kind == KindA64::s && src.kind == KindA64::s); placeR1("fneg", dst, src, 0b000'11110'00'1'0000'10'10000); } @@ -633,7 +633,7 @@ void AssemblyBuilderA64::fneg(RegisterA64 dst, RegisterA64 src) void AssemblyBuilderA64::fsqrt(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d); + CODEGEN_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d); placeR1("fsqrt", dst, src, 0b000'11110'01'1'0000'11'10000); } @@ -642,13 +642,13 @@ void AssemblyBuilderA64::fsub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src { if (dst.kind == KindA64::d) { - LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d); + CODEGEN_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d); placeR3("fsub", dst, src1, src2, 0b11110'01'1, 0b0011'10); } else { - LUAU_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s); + CODEGEN_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s); placeR3("fsub", dst, src1, src2, 0b11110'00'1, 0b0011'10); } @@ -656,8 +656,8 @@ void AssemblyBuilderA64::fsub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src void AssemblyBuilderA64::ins_4s(RegisterA64 dst, RegisterA64 src, uint8_t index) { - LUAU_ASSERT(dst.kind == KindA64::q && src.kind == KindA64::w); - LUAU_ASSERT(index < 4); + CODEGEN_ASSERT(dst.kind == KindA64::q && src.kind == KindA64::w); + CODEGEN_ASSERT(index < 4); if (logText) logAppend(" %-12sv%d.s[%d],w%d\n", "ins", dst.index, index, src.index); @@ -670,9 +670,9 @@ void AssemblyBuilderA64::ins_4s(RegisterA64 dst, RegisterA64 src, uint8_t index) void AssemblyBuilderA64::ins_4s(RegisterA64 dst, uint8_t dstIndex, RegisterA64 src, uint8_t srcIndex) { - LUAU_ASSERT(dst.kind == KindA64::q && src.kind == KindA64::q); - LUAU_ASSERT(dstIndex < 4); - LUAU_ASSERT(srcIndex < 4); + CODEGEN_ASSERT(dst.kind == KindA64::q && src.kind == KindA64::q); + CODEGEN_ASSERT(dstIndex < 4); + CODEGEN_ASSERT(srcIndex < 4); if (logText) logAppend(" %-12sv%d.s[%d],v%d.s[%d]\n", "ins", dst.index, dstIndex, src.index, srcIndex); @@ -687,8 +687,8 @@ void AssemblyBuilderA64::dup_4s(RegisterA64 dst, RegisterA64 src, uint8_t index) { if (dst.kind == KindA64::s) { - LUAU_ASSERT(src.kind == 
KindA64::q); - LUAU_ASSERT(index < 4); + CODEGEN_ASSERT(src.kind == KindA64::q); + CODEGEN_ASSERT(index < 4); if (logText) logAppend(" %-12ss%d,v%d.s[%d]\n", "dup", dst.index, src.index, index); @@ -699,8 +699,8 @@ void AssemblyBuilderA64::dup_4s(RegisterA64 dst, RegisterA64 src, uint8_t index) } else { - LUAU_ASSERT(src.kind == KindA64::q); - LUAU_ASSERT(index < 4); + CODEGEN_ASSERT(src.kind == KindA64::q); + CODEGEN_ASSERT(index < 4); if (logText) logAppend(" %-12sv%d.4s,v%d.s[%d]\n", "dup", dst.index, src.index, index); @@ -715,21 +715,21 @@ void AssemblyBuilderA64::dup_4s(RegisterA64 dst, RegisterA64 src, uint8_t index) void AssemblyBuilderA64::frinta(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d); + CODEGEN_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d); placeR1("frinta", dst, src, 0b000'11110'01'1'001'100'10000); } void AssemblyBuilderA64::frintm(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d); + CODEGEN_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d); placeR1("frintm", dst, src, 0b000'11110'01'1'001'010'10000); } void AssemblyBuilderA64::frintp(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d); + CODEGEN_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d); placeR1("frintp", dst, src, 0b000'11110'01'1'001'001'10000); } @@ -741,67 +741,67 @@ void AssemblyBuilderA64::fcvt(RegisterA64 dst, RegisterA64 src) else if (dst.kind == KindA64::d && src.kind == KindA64::s) placeR1("fcvt", dst, src, 0b11110'00'1'0001'01'10000); else - LUAU_ASSERT(!"Unexpected register kind"); + CODEGEN_ASSERT(!"Unexpected register kind"); } void AssemblyBuilderA64::fcvtzs(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); - LUAU_ASSERT(src.kind == KindA64::d); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); + CODEGEN_ASSERT(src.kind == KindA64::d); placeR1("fcvtzs", dst, src, 0b000'11110'01'1'11'000'000000); } void AssemblyBuilderA64::fcvtzu(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); - LUAU_ASSERT(src.kind == KindA64::d); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); + CODEGEN_ASSERT(src.kind == KindA64::d); placeR1("fcvtzu", dst, src, 0b000'11110'01'1'11'001'000000); } void AssemblyBuilderA64::scvtf(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::d); - LUAU_ASSERT(src.kind == KindA64::w || src.kind == KindA64::x); + CODEGEN_ASSERT(dst.kind == KindA64::d); + CODEGEN_ASSERT(src.kind == KindA64::w || src.kind == KindA64::x); placeR1("scvtf", dst, src, 0b000'11110'01'1'00'010'000000); } void AssemblyBuilderA64::ucvtf(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::d); - LUAU_ASSERT(src.kind == KindA64::w || src.kind == KindA64::x); + CODEGEN_ASSERT(dst.kind == KindA64::d); + CODEGEN_ASSERT(src.kind == KindA64::w || src.kind == KindA64::x); placeR1("ucvtf", dst, src, 0b000'11110'01'1'00'011'000000); } void AssemblyBuilderA64::fjcvtzs(RegisterA64 dst, RegisterA64 src) { - LUAU_ASSERT(dst.kind == KindA64::w); - LUAU_ASSERT(src.kind == KindA64::d); - LUAU_ASSERT(features & Feature_JSCVT); + CODEGEN_ASSERT(dst.kind == KindA64::w); + CODEGEN_ASSERT(src.kind == KindA64::d); + CODEGEN_ASSERT(features & Feature_JSCVT); placeR1("fjcvtzs", dst, src, 0b000'11110'01'1'11'110'000000); } void AssemblyBuilderA64::fcmp(RegisterA64 src1, RegisterA64 src2) { - 
LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d); + CODEGEN_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d); placeFCMP("fcmp", src1, src2, 0b11110'01'1, 0b00); } void AssemblyBuilderA64::fcmpz(RegisterA64 src) { - LUAU_ASSERT(src.kind == KindA64::d); + CODEGEN_ASSERT(src.kind == KindA64::d); placeFCMP("fcmp", src, RegisterA64{src.kind, 0}, 0b11110'01'1, 0b01); } void AssemblyBuilderA64::fcsel(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, ConditionA64 cond) { - LUAU_ASSERT(dst.kind == KindA64::d); + CODEGEN_ASSERT(dst.kind == KindA64::d); placeCS("fcsel", dst, src1, src2, cond, 0b11110'01'1, 0b11); } @@ -820,7 +820,7 @@ bool AssemblyBuilderA64::finalize() { // If this assertion fires, a label was used in jmp without calling setLabel uint32_t label = fixup.label; - LUAU_ASSERT(labelLocations[label - 1] != ~0u); + CODEGEN_ASSERT(labelLocations[label - 1] != ~0u); int value = int(labelLocations[label - 1]) - int(fixup.location); patchOffset(fixup.location, value, fixup.kind); @@ -913,9 +913,9 @@ void AssemblyBuilderA64::placeSR3(const char* name, RegisterA64 dst, RegisterA64 if (logText) log(name, dst, src1, src2, shift); - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); - LUAU_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind); - LUAU_ASSERT(shift >= -63 && shift <= 63); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); + CODEGEN_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind); + CODEGEN_ASSERT(shift >= -63 && shift <= 63); uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0; @@ -929,8 +929,8 @@ void AssemblyBuilderA64::placeSR2(const char* name, RegisterA64 dst, RegisterA64 if (logText) log(name, dst, src); - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); - LUAU_ASSERT(dst.kind == src.kind); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); + CODEGEN_ASSERT(dst.kind == src.kind); uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0; @@ -943,8 +943,8 @@ void AssemblyBuilderA64::placeR3(const char* name, RegisterA64 dst, RegisterA64 if (logText) log(name, dst, src1, src2); - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst.kind == KindA64::d || dst.kind == KindA64::s); - LUAU_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst.kind == KindA64::d || dst.kind == KindA64::s); + CODEGEN_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind); uint32_t sf = (dst.kind == KindA64::x) ? 
0x80000000 : 0; @@ -957,8 +957,8 @@ void AssemblyBuilderA64::placeR3(const char* name, RegisterA64 dst, RegisterA64 if (logText) log(name, dst, src1, src2); - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst.kind == KindA64::d || dst.kind == KindA64::q); - LUAU_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst.kind == KindA64::d || dst.kind == KindA64::q); + CODEGEN_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind); place(dst.index | (src1.index << 5) | (op2 << 10) | (src2.index << 16) | (op << 21) | (sizes << 29)); commit(); @@ -980,9 +980,9 @@ void AssemblyBuilderA64::placeI12(const char* name, RegisterA64 dst, RegisterA64 if (logText) log(name, dst, src1, src2); - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst == sp); - LUAU_ASSERT(dst.kind == src1.kind || (dst.kind == KindA64::x && src1 == sp) || (dst == sp && src1.kind == KindA64::x)); - LUAU_ASSERT(src2 >= 0 && src2 < (1 << 12)); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst == sp); + CODEGEN_ASSERT(dst.kind == src1.kind || (dst.kind == KindA64::x && src1 == sp) || (dst == sp && src1.kind == KindA64::x)); + CODEGEN_ASSERT(src2 >= 0 && src2 < (1 << 12)); uint32_t sf = (dst.kind != KindA64::w) ? 0x80000000 : 0; @@ -995,9 +995,9 @@ void AssemblyBuilderA64::placeI16(const char* name, RegisterA64 dst, int src, ui if (logText) log(name, dst, src, shift); - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); - LUAU_ASSERT(src >= 0 && src <= 0xffff); - LUAU_ASSERT(shift == 0 || shift == 16 || shift == 32 || shift == 48); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); + CODEGEN_ASSERT(src >= 0 && src <= 0xffff); + CODEGEN_ASSERT(shift == 0 || shift == 16 || shift == 32 || shift == 48); uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0; @@ -1021,14 +1021,14 @@ void AssemblyBuilderA64::placeA(const char* name, RegisterA64 dst, AddressA64 sr else if (src.data >= -256 && src.data <= 255) place(dst.index | (src.base.index << 5) | ((src.data & ((1 << 9) - 1)) << 12) | (opsize << 22)); else - LUAU_ASSERT(!"Unable to encode large immediate offset"); + CODEGEN_ASSERT(!"Unable to encode large immediate offset"); break; case AddressKindA64::pre: - LUAU_ASSERT(src.data >= -256 && src.data <= 255); + CODEGEN_ASSERT(src.data >= -256 && src.data <= 255); place(dst.index | (src.base.index << 5) | (0b11 << 10) | ((src.data & ((1 << 9) - 1)) << 12) | (opsize << 22)); break; case AddressKindA64::post: - LUAU_ASSERT(src.data >= -256 && src.data <= 255); + CODEGEN_ASSERT(src.data >= -256 && src.data <= 255); place(dst.index | (src.base.index << 5) | (0b01 << 10) | ((src.data & ((1 << 9) - 1)) << 12) | (opsize << 22)); break; } @@ -1060,7 +1060,7 @@ void AssemblyBuilderA64::placeBC(const char* name, Label& label, uint8_t op, uin void AssemblyBuilderA64::placeBCR(const char* name, Label& label, uint8_t op, RegisterA64 cond) { - LUAU_ASSERT(cond.kind == KindA64::w || cond.kind == KindA64::x); + CODEGEN_ASSERT(cond.kind == KindA64::w || cond.kind == KindA64::x); uint32_t sf = (cond.kind == KindA64::x) ? 
0x80000000 : 0; @@ -1078,7 +1078,7 @@ void AssemblyBuilderA64::placeBR(const char* name, RegisterA64 src, uint32_t op) if (logText) log(name, src); - LUAU_ASSERT(src.kind == KindA64::x); + CODEGEN_ASSERT(src.kind == KindA64::x); place((src.index << 5) | (op << 10)); commit(); @@ -1086,8 +1086,8 @@ void AssemblyBuilderA64::placeBR(const char* name, RegisterA64 src, uint32_t op) void AssemblyBuilderA64::placeBTR(const char* name, Label& label, uint8_t op, RegisterA64 cond, uint8_t bit) { - LUAU_ASSERT(cond.kind == KindA64::x || cond.kind == KindA64::w); - LUAU_ASSERT(bit < (cond.kind == KindA64::x ? 64 : 32)); + CODEGEN_ASSERT(cond.kind == KindA64::x || cond.kind == KindA64::w); + CODEGEN_ASSERT(bit < (cond.kind == KindA64::x ? 64 : 32)); place(cond.index | ((bit & 0x1f) << 19) | (op << 24) | ((bit >> 5) << 31)); commit(); @@ -1103,7 +1103,7 @@ void AssemblyBuilderA64::placeADR(const char* name, RegisterA64 dst, uint8_t op) if (logText) log(name, dst); - LUAU_ASSERT(dst.kind == KindA64::x); + CODEGEN_ASSERT(dst.kind == KindA64::x); place(dst.index | (op << 24)); commit(); @@ -1111,7 +1111,7 @@ void AssemblyBuilderA64::placeADR(const char* name, RegisterA64 dst, uint8_t op) void AssemblyBuilderA64::placeADR(const char* name, RegisterA64 dst, uint8_t op, Label& label) { - LUAU_ASSERT(dst.kind == KindA64::x); + CODEGEN_ASSERT(dst.kind == KindA64::x); place(dst.index | (op << 24)); commit(); @@ -1127,9 +1127,9 @@ void AssemblyBuilderA64::placeP(const char* name, RegisterA64 src1, RegisterA64 if (logText) log(name, src1, src2, dst); - LUAU_ASSERT(dst.kind == AddressKindA64::imm); - LUAU_ASSERT(dst.data >= -128 * (1 << sizelog) && dst.data <= 127 * (1 << sizelog)); - LUAU_ASSERT(dst.data % (1 << sizelog) == 0); + CODEGEN_ASSERT(dst.kind == AddressKindA64::imm); + CODEGEN_ASSERT(dst.data >= -128 * (1 << sizelog) && dst.data <= 127 * (1 << sizelog)); + CODEGEN_ASSERT(dst.data % (1 << sizelog) == 0); place(src1.index | (dst.base.index << 5) | (src2.index << 10) | (((dst.data >> sizelog) & 127) << 15) | (op << 22) | (opc << 30)); commit(); @@ -1141,7 +1141,7 @@ void AssemblyBuilderA64::placeCS( if (logText) log(name, dst, src1, src2, cond); - LUAU_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind); + CODEGEN_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind); uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0; @@ -1159,7 +1159,7 @@ void AssemblyBuilderA64::placeFCMP(const char* name, RegisterA64 src1, RegisterA log(name, src1, src2); } - LUAU_ASSERT(src1.kind == src2.kind); + CODEGEN_ASSERT(src1.kind == src2.kind); place((opc << 3) | (src1.index << 5) | (0b1000 << 10) | (src2.index << 16) | (op << 21)); commit(); @@ -1179,9 +1179,9 @@ void AssemblyBuilderA64::placeBM(const char* name, RegisterA64 dst, RegisterA64 if (logText) log(name, dst, src1, src2); - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); - LUAU_ASSERT(dst.kind == src1.kind); - LUAU_ASSERT(isMaskSupported(src2)); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); + CODEGEN_ASSERT(dst.kind == src1.kind); + CODEGEN_ASSERT(isMaskSupported(src2)); uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0; @@ -1200,8 +1200,8 @@ void AssemblyBuilderA64::placeBFM(const char* name, RegisterA64 dst, RegisterA64 if (logText) log(name, dst, src1, src2); - LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); - LUAU_ASSERT(dst.kind == src1.kind); + CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x); + CODEGEN_ASSERT(dst.kind == src1.kind); uint32_t sf = (dst.kind == KindA64::x) ? 
0x80000000 : 0; uint32_t n = (dst.kind == KindA64::x) ? 1 << 22 : 0; @@ -1215,9 +1215,9 @@ void AssemblyBuilderA64::placeER(const char* name, RegisterA64 dst, RegisterA64 if (logText) log(name, dst, src1, src2, shift); - LUAU_ASSERT(dst.kind == KindA64::x && src1.kind == KindA64::x); - LUAU_ASSERT(src2.kind == KindA64::w); - LUAU_ASSERT(shift >= 0 && shift <= 4); + CODEGEN_ASSERT(dst.kind == KindA64::x && src1.kind == KindA64::x); + CODEGEN_ASSERT(src2.kind == KindA64::w); + CODEGEN_ASSERT(shift >= 0 && shift <= 4); uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0; // could be useful in the future for byte->word extends int option = 0b010; // UXTW @@ -1228,7 +1228,7 @@ void AssemblyBuilderA64::placeER(const char* name, RegisterA64 dst, RegisterA64 void AssemblyBuilderA64::place(uint32_t word) { - LUAU_ASSERT(codePos < codeEnd); + CODEGEN_ASSERT(codePos < codeEnd); *codePos++ = word; } @@ -1259,7 +1259,7 @@ void AssemblyBuilderA64::patchOffset(uint32_t location, int value, Patch::Kind k int offset = (kind == Patch::Imm26) ? 0 : 5; int range = (kind == Patch::Imm19) ? (1 << 19) : (kind == Patch::Imm26) ? (1 << 26) : (1 << 14); - LUAU_ASSERT((code[location] & ((range - 1) << offset)) == 0); + CODEGEN_ASSERT((code[location] & ((range - 1) << offset)) == 0); if (value > -(range >> 1) && value < (range >> 1)) code[location] |= (value & (range - 1)) << offset; @@ -1269,7 +1269,7 @@ void AssemblyBuilderA64::patchOffset(uint32_t location, int value, Patch::Kind k void AssemblyBuilderA64::commit() { - LUAU_ASSERT(codePos <= codeEnd); + CODEGEN_ASSERT(codePos <= codeEnd); if (codeEnd == codePos) extend(); @@ -1286,7 +1286,7 @@ void AssemblyBuilderA64::extend() size_t AssemblyBuilderA64::allocateData(size_t size, size_t align) { - LUAU_ASSERT(align > 0 && align <= kMaxAlign && (align & (align - 1)) == 0); + CODEGEN_ASSERT(align > 0 && align <= kMaxAlign && (align & (align - 1)) == 0); if (dataPos < size) { @@ -1467,7 +1467,7 @@ void AssemblyBuilderA64::log(RegisterA64 reg) if (reg.index == 31) text.append("sp"); else - LUAU_ASSERT(!"Unexpected register kind"); + CODEGEN_ASSERT(!"Unexpected register kind"); break; } } diff --git a/CodeGen/src/AssemblyBuilderX64.cpp b/CodeGen/src/AssemblyBuilderX64.cpp index 6e5b2d70..be0f7198 100644 --- a/CodeGen/src/AssemblyBuilderX64.cpp +++ b/CodeGen/src/AssemblyBuilderX64.cpp @@ -99,7 +99,7 @@ AssemblyBuilderX64::AssemblyBuilderX64(bool logText) AssemblyBuilderX64::~AssemblyBuilderX64() { - LUAU_ASSERT(finalized); + CODEGEN_ASSERT(finalized); } void AssemblyBuilderX64::add(OperandX64 lhs, OperandX64 rhs) @@ -191,7 +191,7 @@ void AssemblyBuilderX64::mov(OperandX64 lhs, OperandX64 rhs) } else { - LUAU_ASSERT(size == SizeX64::qword); + CODEGEN_ASSERT(size == SizeX64::qword); place(OP_PLUS_REG(0xb8, lhs.base.index)); placeImm64(rhs.imm); @@ -218,7 +218,7 @@ void AssemblyBuilderX64::mov(OperandX64 lhs, OperandX64 rhs) } else { - LUAU_ASSERT(size == SizeX64::dword || size == SizeX64::qword); + CODEGEN_ASSERT(size == SizeX64::dword || size == SizeX64::qword); place(0xc7); placeModRegMem(lhs, 0, /*extraCodeBytes=*/4); @@ -235,7 +235,7 @@ void AssemblyBuilderX64::mov(OperandX64 lhs, OperandX64 rhs) } else { - LUAU_ASSERT(!"No encoding for this operand combination"); + CODEGEN_ASSERT(!"No encoding for this operand combination"); } commit(); @@ -250,7 +250,7 @@ void AssemblyBuilderX64::mov64(RegisterX64 lhs, int64_t imm) logAppend(",%llXh\n", (unsigned long long)imm); } - LUAU_ASSERT(lhs.size == SizeX64::qword); + CODEGEN_ASSERT(lhs.size == SizeX64::qword); 
placeRex(lhs); place(OP_PLUS_REG(0xb8, lhs.index)); @@ -263,7 +263,7 @@ void AssemblyBuilderX64::movsx(RegisterX64 lhs, OperandX64 rhs) if (logText) log("movsx", lhs, rhs); - LUAU_ASSERT(rhs.memSize == SizeX64::byte || rhs.memSize == SizeX64::word); + CODEGEN_ASSERT(rhs.memSize == SizeX64::byte || rhs.memSize == SizeX64::word); placeRex(lhs, rhs); place(0x0f); @@ -277,7 +277,7 @@ void AssemblyBuilderX64::movzx(RegisterX64 lhs, OperandX64 rhs) if (logText) log("movzx", lhs, rhs); - LUAU_ASSERT(rhs.memSize == SizeX64::byte || rhs.memSize == SizeX64::word); + CODEGEN_ASSERT(rhs.memSize == SizeX64::byte || rhs.memSize == SizeX64::word); placeRex(lhs, rhs); place(0x0f); @@ -372,9 +372,9 @@ void AssemblyBuilderX64::lea(OperandX64 lhs, OperandX64 rhs) if (logText) log("lea", lhs, rhs); - LUAU_ASSERT(lhs.cat == CategoryX64::reg && rhs.cat == CategoryX64::mem && rhs.memSize == SizeX64::none); - LUAU_ASSERT(rhs.base == rip || rhs.base.size == lhs.base.size); - LUAU_ASSERT(rhs.index == noreg || rhs.index.size == lhs.base.size); + CODEGEN_ASSERT(lhs.cat == CategoryX64::reg && rhs.cat == CategoryX64::mem && rhs.memSize == SizeX64::none); + CODEGEN_ASSERT(rhs.base == rip || rhs.base.size == lhs.base.size); + CODEGEN_ASSERT(rhs.index == noreg || rhs.index.size == lhs.base.size); rhs.memSize = lhs.base.size; placeBinaryRegAndRegMem(lhs, rhs, 0x8d, 0x8d); } @@ -384,7 +384,7 @@ void AssemblyBuilderX64::push(OperandX64 op) if (logText) log("push", op); - LUAU_ASSERT(op.cat == CategoryX64::reg && op.base.size == SizeX64::qword); + CODEGEN_ASSERT(op.cat == CategoryX64::reg && op.base.size == SizeX64::qword); placeRex(op.base); place(OP_PLUS_REG(0x50, op.base.index)); commit(); @@ -395,7 +395,7 @@ void AssemblyBuilderX64::pop(OperandX64 op) if (logText) log("pop", op); - LUAU_ASSERT(op.cat == CategoryX64::reg && op.base.size == SizeX64::qword); + CODEGEN_ASSERT(op.cat == CategoryX64::reg && op.base.size == SizeX64::qword); placeRex(op.base); place(OP_PLUS_REG(0x58, op.base.index)); commit(); @@ -413,7 +413,7 @@ void AssemblyBuilderX64::ret() void AssemblyBuilderX64::setcc(ConditionX64 cond, OperandX64 op) { SizeX64 size = op.cat == CategoryX64::reg ? op.base.size : op.memSize; - LUAU_ASSERT(size == SizeX64::byte); + CODEGEN_ASSERT(size == SizeX64::byte); if (logText) log(setccTextForCondition[size_t(cond)], op); @@ -428,7 +428,7 @@ void AssemblyBuilderX64::setcc(ConditionX64 cond, OperandX64 op) void AssemblyBuilderX64::cmov(ConditionX64 cond, RegisterX64 lhs, OperandX64 rhs) { SizeX64 size = rhs.cat == CategoryX64::reg ? rhs.base.size : rhs.memSize; - LUAU_ASSERT(size != SizeX64::byte && size == lhs.size); + CODEGEN_ASSERT(size != SizeX64::byte && size == lhs.size); if (logText) log(cmovTextForCondition[size_t(cond)], lhs, rhs); @@ -457,7 +457,7 @@ void AssemblyBuilderX64::jmp(Label& label) void AssemblyBuilderX64::jmp(OperandX64 op) { - LUAU_ASSERT((op.cat == CategoryX64::reg ? op.base.size : op.memSize) == SizeX64::qword); + CODEGEN_ASSERT((op.cat == CategoryX64::reg ? op.base.size : op.memSize) == SizeX64::qword); if (logText) log("jmp", op); @@ -484,7 +484,7 @@ void AssemblyBuilderX64::call(Label& label) void AssemblyBuilderX64::call(OperandX64 op) { - LUAU_ASSERT((op.cat == CategoryX64::reg ? op.base.size : op.memSize) == SizeX64::qword); + CODEGEN_ASSERT((op.cat == CategoryX64::reg ? 
op.base.size : op.memSize) == SizeX64::qword); if (logText) log("call", op); @@ -499,7 +499,7 @@ void AssemblyBuilderX64::call(OperandX64 op) void AssemblyBuilderX64::lea(RegisterX64 lhs, Label& label) { - LUAU_ASSERT(lhs.size == SizeX64::qword); + CODEGEN_ASSERT(lhs.size == SizeX64::qword); placeBinaryRegAndRegMem(lhs, OperandX64(SizeX64::qword, noreg, 1, rip, 0), 0x8d, 0x8d); @@ -534,7 +534,7 @@ void AssemblyBuilderX64::bsr(RegisterX64 dst, OperandX64 src) if (logText) log("bsr", dst, src); - LUAU_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword); + CODEGEN_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword); placeRex(dst, src); place(0x0f); @@ -548,7 +548,7 @@ void AssemblyBuilderX64::bsf(RegisterX64 dst, OperandX64 src) if (logText) log("bsf", dst, src); - LUAU_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword); + CODEGEN_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword); placeRex(dst, src); place(0x0f); @@ -562,7 +562,7 @@ void AssemblyBuilderX64::bswap(RegisterX64 dst) if (logText) log("bswap", dst); - LUAU_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword); + CODEGEN_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword); placeRex(dst); place(0x0f); @@ -668,7 +668,7 @@ void AssemblyBuilderX64::nop(uint32_t length) void AssemblyBuilderX64::align(uint32_t alignment, AlignmentDataX64 data) { - LUAU_ASSERT((alignment & (alignment - 1)) == 0); + CODEGEN_ASSERT((alignment & (alignment - 1)) == 0); uint32_t size = getCodeSize(); uint32_t pad = ((size + alignment - 1) & ~(alignment - 1)) - size; @@ -814,9 +814,9 @@ void AssemblyBuilderX64::vcvtsi2sd(OperandX64 dst, OperandX64 src1, OperandX64 s void AssemblyBuilderX64::vcvtsd2ss(OperandX64 dst, OperandX64 src1, OperandX64 src2) { if (src2.cat == CategoryX64::reg) - LUAU_ASSERT(src2.base.size == SizeX64::xmmword); + CODEGEN_ASSERT(src2.base.size == SizeX64::xmmword); else - LUAU_ASSERT(src2.memSize == SizeX64::qword); + CODEGEN_ASSERT(src2.memSize == SizeX64::qword); placeAvx("vcvtsd2ss", dst, src1, src2, 0x5a, (src2.cat == CategoryX64::reg ? 
src2.base.size : src2.memSize) == SizeX64::qword, AVX_0F, AVX_F2); } @@ -824,9 +824,9 @@ void AssemblyBuilderX64::vcvtsd2ss(OperandX64 dst, OperandX64 src1, OperandX64 s void AssemblyBuilderX64::vcvtss2sd(OperandX64 dst, OperandX64 src1, OperandX64 src2) { if (src2.cat == CategoryX64::reg) - LUAU_ASSERT(src2.base.size == SizeX64::xmmword); + CODEGEN_ASSERT(src2.base.size == SizeX64::xmmword); else - LUAU_ASSERT(src2.memSize == SizeX64::dword); + CODEGEN_ASSERT(src2.memSize == SizeX64::dword); placeAvx("vcvtsd2ss", dst, src1, src2, 0x5a, false, AVX_0F, AVX_F3); } @@ -900,19 +900,19 @@ void AssemblyBuilderX64::vmovq(OperandX64 dst, OperandX64 src) { if (dst.base.size == SizeX64::xmmword) { - LUAU_ASSERT(dst.cat == CategoryX64::reg); - LUAU_ASSERT(src.base.size == SizeX64::qword); + CODEGEN_ASSERT(dst.cat == CategoryX64::reg); + CODEGEN_ASSERT(src.base.size == SizeX64::qword); placeAvx("vmovq", dst, src, 0x6e, true, AVX_0F, AVX_66); } else if (dst.base.size == SizeX64::qword) { - LUAU_ASSERT(src.cat == CategoryX64::reg); - LUAU_ASSERT(src.base.size == SizeX64::xmmword); + CODEGEN_ASSERT(src.cat == CategoryX64::reg); + CODEGEN_ASSERT(src.base.size == SizeX64::xmmword); placeAvx("vmovq", src, dst, 0x7e, true, AVX_0F, AVX_66); } else { - LUAU_ASSERT(!"No encoding for left operand of this category"); + CODEGEN_ASSERT(!"No encoding for left operand of this category"); } } @@ -955,7 +955,7 @@ bool AssemblyBuilderX64::finalize() for (Label fixup : pendingLabels) { // If this assertion fires, a label was used in jmp without calling setLabel - LUAU_ASSERT(labelLocations[fixup.id - 1] != ~0u); + CODEGEN_ASSERT(labelLocations[fixup.id - 1] != ~0u); uint32_t value = labelLocations[fixup.id - 1] - (fixup.location + 4); writeu32(&code[fixup.location], value); } @@ -1160,16 +1160,16 @@ void AssemblyBuilderX64::placeBinary(const char* name, OperandX64 lhs, OperandX6 else if (lhs.cat == CategoryX64::mem && rhs.cat == CategoryX64::reg) placeBinaryRegMemAndReg(lhs, rhs, code8rev, coderev); else - LUAU_ASSERT(!"No encoding for this operand combination"); + CODEGEN_ASSERT(!"No encoding for this operand combination"); } void AssemblyBuilderX64::placeBinaryRegMemAndImm(OperandX64 lhs, OperandX64 rhs, uint8_t code8, uint8_t code, uint8_t codeImm8, uint8_t opreg) { - LUAU_ASSERT(lhs.cat == CategoryX64::reg || lhs.cat == CategoryX64::mem); - LUAU_ASSERT(rhs.cat == CategoryX64::imm); + CODEGEN_ASSERT(lhs.cat == CategoryX64::reg || lhs.cat == CategoryX64::mem); + CODEGEN_ASSERT(rhs.cat == CategoryX64::imm); SizeX64 size = lhs.cat == CategoryX64::reg ? lhs.base.size : lhs.memSize; - LUAU_ASSERT(size == SizeX64::byte || size == SizeX64::dword || size == SizeX64::qword); + CODEGEN_ASSERT(size == SizeX64::byte || size == SizeX64::dword || size == SizeX64::qword); placeRex(lhs); @@ -1181,7 +1181,7 @@ void AssemblyBuilderX64::placeBinaryRegMemAndImm(OperandX64 lhs, OperandX64 rhs, } else { - LUAU_ASSERT(size == SizeX64::dword || size == SizeX64::qword); + CODEGEN_ASSERT(size == SizeX64::dword || size == SizeX64::qword); if (int8_t(rhs.imm) == rhs.imm && code != codeImm8) { @@ -1202,11 +1202,11 @@ void AssemblyBuilderX64::placeBinaryRegMemAndImm(OperandX64 lhs, OperandX64 rhs, void AssemblyBuilderX64::placeBinaryRegAndRegMem(OperandX64 lhs, OperandX64 rhs, uint8_t code8, uint8_t code) { - LUAU_ASSERT(lhs.cat == CategoryX64::reg && (rhs.cat == CategoryX64::reg || rhs.cat == CategoryX64::mem)); - LUAU_ASSERT(lhs.base.size == (rhs.cat == CategoryX64::reg ? 
rhs.base.size : rhs.memSize)); + CODEGEN_ASSERT(lhs.cat == CategoryX64::reg && (rhs.cat == CategoryX64::reg || rhs.cat == CategoryX64::mem)); + CODEGEN_ASSERT(lhs.base.size == (rhs.cat == CategoryX64::reg ? rhs.base.size : rhs.memSize)); SizeX64 size = lhs.base.size; - LUAU_ASSERT(size == SizeX64::byte || size == SizeX64::word || size == SizeX64::dword || size == SizeX64::qword); + CODEGEN_ASSERT(size == SizeX64::byte || size == SizeX64::word || size == SizeX64::dword || size == SizeX64::qword); if (size == SizeX64::word) place(0x66); @@ -1229,10 +1229,10 @@ void AssemblyBuilderX64::placeUnaryModRegMem(const char* name, OperandX64 op, ui if (logText) log(name, op); - LUAU_ASSERT(op.cat == CategoryX64::reg || op.cat == CategoryX64::mem); + CODEGEN_ASSERT(op.cat == CategoryX64::reg || op.cat == CategoryX64::mem); SizeX64 size = op.cat == CategoryX64::reg ? op.base.size : op.memSize; - LUAU_ASSERT(size == SizeX64::byte || size == SizeX64::dword || size == SizeX64::qword); + CODEGEN_ASSERT(size == SizeX64::byte || size == SizeX64::dword || size == SizeX64::qword); placeRex(op); place(size == SizeX64::byte ? code8 : code); @@ -1246,8 +1246,8 @@ void AssemblyBuilderX64::placeShift(const char* name, OperandX64 lhs, OperandX64 if (logText) log(name, lhs, rhs); - LUAU_ASSERT(lhs.cat == CategoryX64::reg || lhs.cat == CategoryX64::mem); - LUAU_ASSERT(rhs.cat == CategoryX64::imm || (rhs.cat == CategoryX64::reg && rhs.base == cl)); + CODEGEN_ASSERT(lhs.cat == CategoryX64::reg || lhs.cat == CategoryX64::mem); + CODEGEN_ASSERT(rhs.cat == CategoryX64::imm || (rhs.cat == CategoryX64::reg && rhs.base == cl)); SizeX64 size = lhs.base.size; @@ -1260,7 +1260,7 @@ void AssemblyBuilderX64::placeShift(const char* name, OperandX64 lhs, OperandX64 } else if (rhs.cat == CategoryX64::imm) { - LUAU_ASSERT(int8_t(rhs.imm) == rhs.imm); + CODEGEN_ASSERT(int8_t(rhs.imm) == rhs.imm); place(size == SizeX64::byte ? 
0xc0 : 0xc1); placeModRegMem(lhs, opreg, /*extraCodeBytes=*/1); @@ -1289,8 +1289,8 @@ void AssemblyBuilderX64::placeJcc(const char* name, Label& label, uint8_t cc) void AssemblyBuilderX64::placeAvx(const char* name, OperandX64 dst, OperandX64 src, uint8_t code, bool setW, uint8_t mode, uint8_t prefix) { - LUAU_ASSERT(dst.cat == CategoryX64::reg); - LUAU_ASSERT(src.cat == CategoryX64::reg || src.cat == CategoryX64::mem); + CODEGEN_ASSERT(dst.cat == CategoryX64::reg); + CODEGEN_ASSERT(src.cat == CategoryX64::reg || src.cat == CategoryX64::mem); if (logText) log(name, dst, src); @@ -1305,7 +1305,7 @@ void AssemblyBuilderX64::placeAvx(const char* name, OperandX64 dst, OperandX64 s void AssemblyBuilderX64::placeAvx( const char* name, OperandX64 dst, OperandX64 src, uint8_t code, uint8_t coderev, bool setW, uint8_t mode, uint8_t prefix) { - LUAU_ASSERT((dst.cat == CategoryX64::mem && src.cat == CategoryX64::reg) || (dst.cat == CategoryX64::reg && src.cat == CategoryX64::mem)); + CODEGEN_ASSERT((dst.cat == CategoryX64::mem && src.cat == CategoryX64::reg) || (dst.cat == CategoryX64::reg && src.cat == CategoryX64::mem)); if (logText) log(name, dst, src); @@ -1329,9 +1329,9 @@ void AssemblyBuilderX64::placeAvx( void AssemblyBuilderX64::placeAvx( const char* name, OperandX64 dst, OperandX64 src1, OperandX64 src2, uint8_t code, bool setW, uint8_t mode, uint8_t prefix) { - LUAU_ASSERT(dst.cat == CategoryX64::reg); - LUAU_ASSERT(src1.cat == CategoryX64::reg); - LUAU_ASSERT(src2.cat == CategoryX64::reg || src2.cat == CategoryX64::mem); + CODEGEN_ASSERT(dst.cat == CategoryX64::reg); + CODEGEN_ASSERT(src1.cat == CategoryX64::reg); + CODEGEN_ASSERT(src2.cat == CategoryX64::reg || src2.cat == CategoryX64::mem); if (logText) log(name, dst, src1, src2); @@ -1346,9 +1346,9 @@ void AssemblyBuilderX64::placeAvx( void AssemblyBuilderX64::placeAvx( const char* name, OperandX64 dst, OperandX64 src1, OperandX64 src2, uint8_t imm8, uint8_t code, bool setW, uint8_t mode, uint8_t prefix) { - LUAU_ASSERT(dst.cat == CategoryX64::reg); - LUAU_ASSERT(src1.cat == CategoryX64::reg); - LUAU_ASSERT(src2.cat == CategoryX64::reg || src2.cat == CategoryX64::mem); + CODEGEN_ASSERT(dst.cat == CategoryX64::reg); + CODEGEN_ASSERT(src1.cat == CategoryX64::reg); + CODEGEN_ASSERT(src2.cat == CategoryX64::reg || src2.cat == CategoryX64::mem); if (logText) log(name, dst, src1, src2, imm8); @@ -1378,7 +1378,7 @@ void AssemblyBuilderX64::placeRex(OperandX64 op) else if (op.cat == CategoryX64::mem) code = REX_W_BIT(op.memSize == SizeX64::qword) | REX_X(op.index) | REX_B(op.base); else - LUAU_ASSERT(!"No encoding for left operand of this category"); + CODEGEN_ASSERT(!"No encoding for left operand of this category"); if (code != 0) place(code | 0x40); @@ -1393,7 +1393,7 @@ void AssemblyBuilderX64::placeRexNoW(OperandX64 op) else if (op.cat == CategoryX64::mem) code = REX_X(op.index) | REX_B(op.base); else - LUAU_ASSERT(!"No encoding for left operand of this category"); + CODEGEN_ASSERT(!"No encoding for left operand of this category"); if (code != 0) place(code | 0x40); @@ -1414,9 +1414,9 @@ void AssemblyBuilderX64::placeRex(RegisterX64 lhs, OperandX64 rhs) void AssemblyBuilderX64::placeVex(OperandX64 dst, OperandX64 src1, OperandX64 src2, bool setW, uint8_t mode, uint8_t prefix) { - LUAU_ASSERT(dst.cat == CategoryX64::reg); - LUAU_ASSERT(src1.cat == CategoryX64::reg); - LUAU_ASSERT(src2.cat == CategoryX64::reg || src2.cat == CategoryX64::mem); + CODEGEN_ASSERT(dst.cat == CategoryX64::reg); + CODEGEN_ASSERT(src1.cat == CategoryX64::reg); + 
CODEGEN_ASSERT(src2.cat == CategoryX64::reg || src2.cat == CategoryX64::mem); place(AVX_3_1()); place(AVX_3_2(dst.base, src2.index, src2.base, mode)); @@ -1427,13 +1427,13 @@ static uint8_t getScaleEncoding(uint8_t scale) { static const uint8_t scales[9] = {0xff, 0, 1, 0xff, 2, 0xff, 0xff, 0xff, 3}; - LUAU_ASSERT(scale < 9 && scales[scale] != 0xff); + CODEGEN_ASSERT(scale < 9 && scales[scale] != 0xff); return scales[scale]; } void AssemblyBuilderX64::placeRegAndModRegMem(OperandX64 lhs, OperandX64 rhs, int32_t extraCodeBytes) { - LUAU_ASSERT(lhs.cat == CategoryX64::reg); + CODEGEN_ASSERT(lhs.cat == CategoryX64::reg); placeModRegMem(rhs, lhs.base.index, extraCodeBytes); } @@ -1481,8 +1481,8 @@ void AssemblyBuilderX64::placeModRegMem(OperandX64 rhs, uint8_t regop, int32_t e } else if ((base.index & 0x7) == 0b100) // r12/sp-based addressing requires SIB { - LUAU_ASSERT(rhs.scale == 1); - LUAU_ASSERT(index == noreg); + CODEGEN_ASSERT(rhs.scale == 1); + CODEGEN_ASSERT(index == noreg); place(MOD_RM(mod, regop, 0b100)); place(SIB(rhs.scale, 0b100, base.index)); @@ -1516,7 +1516,7 @@ void AssemblyBuilderX64::placeModRegMem(OperandX64 rhs, uint8_t regop, int32_t e } else { - LUAU_ASSERT(!"No encoding for right operand of this category"); + CODEGEN_ASSERT(!"No encoding for right operand of this category"); } } @@ -1540,21 +1540,21 @@ void AssemblyBuilderX64::placeImm8(int32_t imm) void AssemblyBuilderX64::placeImm16(int16_t imm) { uint8_t* pos = codePos; - LUAU_ASSERT(pos + sizeof(imm) < codeEnd); + CODEGEN_ASSERT(pos + sizeof(imm) < codeEnd); codePos = writeu16(pos, imm); } void AssemblyBuilderX64::placeImm32(int32_t imm) { uint8_t* pos = codePos; - LUAU_ASSERT(pos + sizeof(imm) < codeEnd); + CODEGEN_ASSERT(pos + sizeof(imm) < codeEnd); codePos = writeu32(pos, imm); } void AssemblyBuilderX64::placeImm64(int64_t imm) { uint8_t* pos = codePos; - LUAU_ASSERT(pos + sizeof(imm) < codeEnd); + CODEGEN_ASSERT(pos + sizeof(imm) < codeEnd); codePos = writeu64(pos, imm); } @@ -1579,13 +1579,13 @@ void AssemblyBuilderX64::placeLabel(Label& label) void AssemblyBuilderX64::place(uint8_t byte) { - LUAU_ASSERT(codePos < codeEnd); + CODEGEN_ASSERT(codePos < codeEnd); *codePos++ = byte; } void AssemblyBuilderX64::commit() { - LUAU_ASSERT(codePos <= codeEnd); + CODEGEN_ASSERT(codePos <= codeEnd); ++instructionCount; @@ -1604,7 +1604,7 @@ void AssemblyBuilderX64::extend() size_t AssemblyBuilderX64::allocateData(size_t size, size_t align) { - LUAU_ASSERT(align > 0 && align <= kMaxAlign && (align & (align - 1)) == 0); + CODEGEN_ASSERT(align > 0 && align <= kMaxAlign && (align & (align - 1)) == 0); if (dataPos < size) { @@ -1732,7 +1732,7 @@ void AssemblyBuilderX64::log(OperandX64 op) logAppend("%Xh", op.imm); break; default: - LUAU_ASSERT(!"Unknown operand category"); + CODEGEN_ASSERT(!"Unknown operand category"); } } @@ -1740,7 +1740,7 @@ const char* AssemblyBuilderX64::getSizeName(SizeX64 size) const { static const char* sizeNames[] = {"none", "byte", "word", "dword", "qword", "xmmword", "ymmword"}; - LUAU_ASSERT(unsigned(size) < sizeof(sizeNames) / sizeof(sizeNames[0])); + CODEGEN_ASSERT(unsigned(size) < sizeof(sizeNames) / sizeof(sizeNames[0])); return sizeNames[unsigned(size)]; } @@ -1754,8 +1754,8 @@ const char* AssemblyBuilderX64::getRegisterName(RegisterX64 reg) const {"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"}, {"ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", 
"ymm13", "ymm14", "ymm15"}}; - LUAU_ASSERT(reg.index < 16); - LUAU_ASSERT(reg.size <= SizeX64::ymmword); + CODEGEN_ASSERT(reg.index < 16); + CODEGEN_ASSERT(reg.size <= SizeX64::ymmword); return names[size_t(reg.size)][reg.index]; } diff --git a/CodeGen/src/BytecodeAnalysis.cpp b/CodeGen/src/BytecodeAnalysis.cpp index 40c5d9cc..557e2d7e 100644 --- a/CodeGen/src/BytecodeAnalysis.cpp +++ b/CodeGen/src/BytecodeAnalysis.cpp @@ -7,8 +7,6 @@ #include "lobject.h" -LUAU_FASTFLAGVARIABLE(LuauFixDivrkInference, false) - namespace Luau { namespace CodeGen @@ -338,7 +336,7 @@ static void applyBuiltinCall(int bfid, BytecodeTypes& types) void buildBytecodeBlocks(IrFunction& function, const std::vector& jumpTargets) { Proto* proto = function.proto; - LUAU_ASSERT(proto); + CODEGEN_ASSERT(proto); std::vector& bcBlocks = function.bcBlocks; @@ -380,14 +378,14 @@ void buildBytecodeBlocks(IrFunction& function, const std::vector& jumpT previ = i; i = nexti; - LUAU_ASSERT(i <= proto->sizecode); + CODEGEN_ASSERT(i <= proto->sizecode); } } void analyzeBytecodeTypes(IrFunction& function) { Proto* proto = function.proto; - LUAU_ASSERT(proto); + CODEGEN_ASSERT(proto); // Setup our current knowledge of type tags based on arguments uint8_t regTags[256]; @@ -398,8 +396,8 @@ void analyzeBytecodeTypes(IrFunction& function) // Now that we have VM basic blocks, we can attempt to track register type tags locally for (const BytecodeBlock& block : function.bcBlocks) { - LUAU_ASSERT(block.startpc != -1); - LUAU_ASSERT(block.finishpc != -1); + CODEGEN_ASSERT(block.startpc != -1); + CODEGEN_ASSERT(block.finishpc != -1); // At the block start, reset or knowledge to the starting state // In the future we might be able to propagate some info between the blocks as well @@ -682,23 +680,11 @@ void analyzeBytecodeTypes(IrFunction& function) case LOP_DIVRK: { int ra = LUAU_INSN_A(*pc); + int kb = LUAU_INSN_B(*pc); + int rc = LUAU_INSN_C(*pc); - if (FFlag::LuauFixDivrkInference) - { - int kb = LUAU_INSN_B(*pc); - int rc = LUAU_INSN_C(*pc); - - bcType.a = getBytecodeConstantTag(proto, kb); - bcType.b = regTags[rc]; - } - else - { - int rb = LUAU_INSN_B(*pc); - int kc = LUAU_INSN_C(*pc); - - bcType.a = regTags[rb]; - bcType.b = getBytecodeConstantTag(proto, kc); - } + bcType.a = getBytecodeConstantTag(proto, kb); + bcType.b = regTags[rc]; regTags[ra] = LBC_TYPE_ANY; @@ -771,7 +757,7 @@ void analyzeBytecodeTypes(IrFunction& function) int skip = LUAU_INSN_C(*pc); Instruction call = pc[skip + 1]; - LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL); + CODEGEN_ASSERT(LUAU_INSN_OP(call) == LOP_CALL); int ra = LUAU_INSN_A(call); applyBuiltinCall(bfid, bcType); @@ -788,7 +774,7 @@ void analyzeBytecodeTypes(IrFunction& function) int skip = LUAU_INSN_C(*pc); Instruction call = pc[skip + 1]; - LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL); + CODEGEN_ASSERT(LUAU_INSN_OP(call) == LOP_CALL); int ra = LUAU_INSN_A(call); applyBuiltinCall(bfid, bcType); @@ -803,7 +789,7 @@ void analyzeBytecodeTypes(IrFunction& function) int skip = LUAU_INSN_C(*pc); Instruction call = pc[skip + 1]; - LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL); + CODEGEN_ASSERT(LUAU_INSN_OP(call) == LOP_CALL); int ra = LUAU_INSN_A(call); applyBuiltinCall(bfid, bcType); @@ -886,7 +872,7 @@ void analyzeBytecodeTypes(IrFunction& function) case LOP_FORGPREP: break; default: - LUAU_ASSERT(!"Unknown instruction"); + CODEGEN_ASSERT(!"Unknown instruction"); } i += getOpLength(op); diff --git a/CodeGen/src/BytecodeSummary.cpp b/CodeGen/src/BytecodeSummary.cpp index 32029b11..0089f592 100644 --- 
a/CodeGen/src/BytecodeSummary.cpp +++ b/CodeGen/src/BytecodeSummary.cpp @@ -50,7 +50,7 @@ FunctionBytecodeSummary FunctionBytecodeSummary::fromProto(Proto* proto, unsigne std::vector summarizeBytecode(lua_State* L, int idx, unsigned nestingLimit) { - LUAU_ASSERT(lua_isLfunction(L, idx)); + CODEGEN_ASSERT(lua_isLfunction(L, idx)); const TValue* func = luaA_toobject(L, idx); Proto* root = clvalue(func)->l.p; diff --git a/CodeGen/src/CodeAllocator.cpp b/CodeGen/src/CodeAllocator.cpp index 50c60fac..ab623b42 100644 --- a/CodeGen/src/CodeAllocator.cpp +++ b/CodeGen/src/CodeAllocator.cpp @@ -1,7 +1,7 @@ // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details #include "Luau/CodeAllocator.h" -#include "Luau/Common.h" +#include "Luau/CodeGenCommon.h" #include @@ -35,40 +35,40 @@ static size_t alignToPageSize(size_t size) #if defined(_WIN32) static uint8_t* allocatePagesImpl(size_t size) { - LUAU_ASSERT(size == alignToPageSize(size)); + CODEGEN_ASSERT(size == alignToPageSize(size)); return (uint8_t*)VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); } static void freePagesImpl(uint8_t* mem, size_t size) { - LUAU_ASSERT(size == alignToPageSize(size)); + CODEGEN_ASSERT(size == alignToPageSize(size)); if (VirtualFree(mem, 0, MEM_RELEASE) == 0) - LUAU_ASSERT(!"failed to deallocate block memory"); + CODEGEN_ASSERT(!"failed to deallocate block memory"); } static void makePagesExecutable(uint8_t* mem, size_t size) { - LUAU_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0); - LUAU_ASSERT(size == alignToPageSize(size)); + CODEGEN_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0); + CODEGEN_ASSERT(size == alignToPageSize(size)); DWORD oldProtect; if (VirtualProtect(mem, size, PAGE_EXECUTE_READ, &oldProtect) == 0) - LUAU_ASSERT(!"Failed to change page protection"); + CODEGEN_ASSERT(!"Failed to change page protection"); } static void flushInstructionCache(uint8_t* mem, size_t size) { #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP | WINAPI_PARTITION_SYSTEM) if (FlushInstructionCache(GetCurrentProcess(), mem, size) == 0) - LUAU_ASSERT(!"Failed to flush instruction cache"); + CODEGEN_ASSERT(!"Failed to flush instruction cache"); #endif } #else static uint8_t* allocatePagesImpl(size_t size) { - LUAU_ASSERT(size == alignToPageSize(size)); + CODEGEN_ASSERT(size == alignToPageSize(size)); #ifdef __APPLE__ void* result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0); @@ -81,19 +81,19 @@ static uint8_t* allocatePagesImpl(size_t size) static void freePagesImpl(uint8_t* mem, size_t size) { - LUAU_ASSERT(size == alignToPageSize(size)); + CODEGEN_ASSERT(size == alignToPageSize(size)); if (munmap(mem, size) != 0) - LUAU_ASSERT(!"Failed to deallocate block memory"); + CODEGEN_ASSERT(!"Failed to deallocate block memory"); } static void makePagesExecutable(uint8_t* mem, size_t size) { - LUAU_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0); - LUAU_ASSERT(size == alignToPageSize(size)); + CODEGEN_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0); + CODEGEN_ASSERT(size == alignToPageSize(size)); if (mprotect(mem, size, PROT_READ | PROT_EXEC) != 0) - LUAU_ASSERT(!"Failed to change page protection"); + CODEGEN_ASSERT(!"Failed to change page protection"); } static void flushInstructionCache(uint8_t* mem, size_t size) @@ -118,8 +118,8 @@ CodeAllocator::CodeAllocator(size_t blockSize, size_t maxTotalSize, AllocationCa , allocationCallback{allocationCallback} , allocationCallbackContext{allocationCallbackContext} { - 
LUAU_ASSERT(blockSize > kMaxReservedDataSize); - LUAU_ASSERT(maxTotalSize >= blockSize); + CODEGEN_ASSERT(blockSize > kMaxReservedDataSize); + CODEGEN_ASSERT(maxTotalSize >= blockSize); } CodeAllocator::~CodeAllocator() @@ -154,10 +154,10 @@ bool CodeAllocator::allocate( if (!allocateNewBlock(startOffset)) return false; - LUAU_ASSERT(totalSize <= size_t(blockEnd - blockPos)); + CODEGEN_ASSERT(totalSize <= size_t(blockEnd - blockPos)); } - LUAU_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0); // Allocation starts on page boundary + CODEGEN_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0); // Allocation starts on page boundary size_t dataOffset = startOffset + alignedDataSize - dataSize; size_t codeOffset = startOffset + alignedDataSize; @@ -182,8 +182,8 @@ bool CodeAllocator::allocate( if (pageAlignedSize <= size_t(blockEnd - blockPos)) { blockPos += pageAlignedSize; - LUAU_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0); - LUAU_ASSERT(blockPos <= blockEnd); + CODEGEN_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0); + CODEGEN_ASSERT(blockPos <= blockEnd); } else { @@ -217,7 +217,7 @@ bool CodeAllocator::allocateNewBlock(size_t& unwindInfoSize) // 'Round up' to preserve alignment of the following data and code unwindInfoSize = (unwindInfoSize + (kCodeAlignment - 1)) & ~(kCodeAlignment - 1); - LUAU_ASSERT(unwindInfoSize <= kMaxReservedDataSize); + CODEGEN_ASSERT(unwindInfoSize <= kMaxReservedDataSize); if (!unwindInfo) return false; diff --git a/CodeGen/src/CodeBlockUnwind.cpp b/CodeGen/src/CodeBlockUnwind.cpp index f883d6e3..486aee2f 100644 --- a/CodeGen/src/CodeBlockUnwind.cpp +++ b/CodeGen/src/CodeBlockUnwind.cpp @@ -20,8 +20,8 @@ #elif defined(__linux__) || defined(__APPLE__) // Defined in unwind.h which may not be easily discoverable on various platforms -extern "C" void __register_frame(const void*); -extern "C" void __deregister_frame(const void*); +extern "C" void __register_frame(const void*) __attribute__((weak)); +extern "C" void __deregister_frame(const void*) __attribute__((weak)); extern "C" void __unw_add_dynamic_fde() __attribute__((weak)); #endif @@ -104,7 +104,7 @@ void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, siz // All unwinding related data is placed together at the start of the block size_t unwindSize = unwind->getSize(); unwindSize = (unwindSize + (kCodeAlignment - 1)) & ~(kCodeAlignment - 1); // Match code allocator alignment - LUAU_ASSERT(blockSize >= unwindSize); + CODEGEN_ASSERT(blockSize >= unwindSize); char* unwindData = (char*)block; unwind->finalize(unwindData, unwindSize, block, blockSize); @@ -112,10 +112,13 @@ void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, siz #if defined(_WIN32) && defined(_M_X64) if (!RtlAddFunctionTable((RUNTIME_FUNCTION*)block, uint32_t(unwind->getFunctionCount()), uintptr_t(block))) { - LUAU_ASSERT(!"Failed to allocate function table"); + CODEGEN_ASSERT(!"Failed to allocate function table"); return nullptr; } #elif defined(__linux__) || defined(__APPLE__) + if (!__register_frame) + return nullptr; + visitFdeEntries(unwindData, __register_frame); #endif @@ -125,7 +128,7 @@ void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, siz static unw_add_find_dynamic_unwind_sections_t unw_add_find_dynamic_unwind_sections = unw_add_find_dynamic_unwind_sections_t(dlsym(RTLD_DEFAULT, "__unw_add_find_dynamic_unwind_sections")); static int regonce = unw_add_find_dynamic_unwind_sections ? 
unw_add_find_dynamic_unwind_sections(findDynamicUnwindSections) : 0; - LUAU_ASSERT(regonce == 0); + CODEGEN_ASSERT(regonce == 0); #endif beginOffset = unwindSize + unwind->getBeginOffset(); @@ -136,8 +139,14 @@ void destroyBlockUnwindInfo(void* context, void* unwindData) { #if defined(_WIN32) && defined(_M_X64) if (!RtlDeleteFunctionTable((RUNTIME_FUNCTION*)unwindData)) - LUAU_ASSERT(!"Failed to deallocate function table"); + CODEGEN_ASSERT(!"Failed to deallocate function table"); #elif defined(__linux__) || defined(__APPLE__) + if (!__deregister_frame) + { + CODEGEN_ASSERT(!"Cannot deregister unwind information"); + return; + } + visitFdeEntries((char*)unwindData, __deregister_frame); #endif } diff --git a/CodeGen/src/CodeGen.cpp b/CodeGen/src/CodeGen.cpp index ab28daa2..bada61cd 100644 --- a/CodeGen/src/CodeGen.cpp +++ b/CodeGen/src/CodeGen.cpp @@ -45,13 +45,13 @@ LUAU_FASTFLAGVARIABLE(DebugCodegenOptSize, false) LUAU_FASTFLAGVARIABLE(DebugCodegenSkipNumbering, false) // Per-module IR instruction count limit -LUAU_FASTINTVARIABLE(CodegenHeuristicsInstructionLimit, 1'048'576) // 1 M +LUAU_FASTINTVARIABLE(CodegenHeuristicsInstructionLimit, 1'048'576) // 1 M // Per-function IR block limit // Current value is based on some member variables being limited to 16 bits // Because block check is made before optimization passes and optimization can generate new blocks, limit is lowered 2x // The limit will probably be adjusted in the future to avoid performance issues with analysis that's more complex than O(n) -LUAU_FASTINTVARIABLE(CodegenHeuristicsBlockLimit, 32'768) // 32 K +LUAU_FASTINTVARIABLE(CodegenHeuristicsBlockLimit, 32'768) // 32 K // Per-function IR instruction limit // Current value is based on some member variables being limited to 16 bits @@ -85,7 +85,7 @@ static NativeProto createNativeProto(Proto* proto, const IrBuilder& ir) for (int i = 0; i < sizecode; i++) { - LUAU_ASSERT(ir.function.bcMapping[i].asmLocation >= instTarget); + CODEGEN_ASSERT(ir.function.bcMapping[i].asmLocation >= instTarget); instOffsets[i] = ir.function.bcMapping[i].asmLocation - instTarget; } @@ -104,7 +104,7 @@ static void destroyExecData(void* execdata) static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size) { - LUAU_ASSERT(p->source); + CODEGEN_ASSERT(p->source); const char* source = getstr(p->source); source = (source[0] == '=' || source[0] == '@') ? 
source + 1 : "[string]"; @@ -117,7 +117,8 @@ static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size) } template -static std::optional createNativeFunction(AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto, uint32_t& totalIrInstCount) +static std::optional createNativeFunction( + AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto, uint32_t& totalIrInstCount, CodeGenCompilationResult& result) { IrBuilder ir; ir.buildFunctionIr(proto); @@ -125,11 +126,13 @@ static std::optional createNativeFunction(AssemblyBuilder& build, M unsigned instCount = unsigned(ir.function.instructions.size()); if (totalIrInstCount + instCount >= unsigned(FInt::CodegenHeuristicsInstructionLimit.value)) + { + result = CodeGenCompilationResult::CodeGenOverflowInstructionLimit; return std::nullopt; - + } totalIrInstCount += instCount; - if (!lowerFunction(ir, build, helpers, proto, {}, /* stats */ nullptr)) + if (!lowerFunction(ir, build, helpers, proto, {}, /* stats */ nullptr, result)) return std::nullopt; return createNativeProto(proto, ir); @@ -158,8 +161,8 @@ static int onEnter(lua_State* L, Proto* proto) { NativeState* data = getNativeState(L); - LUAU_ASSERT(proto->execdata); - LUAU_ASSERT(L->ci->savedpc >= proto->code && L->ci->savedpc < proto->code + proto->sizecode); + CODEGEN_ASSERT(proto->execdata); + CODEGEN_ASSERT(L->ci->savedpc >= proto->code && L->ci->savedpc < proto->code + proto->sizecode); uintptr_t target = proto->exectarget + static_cast(proto->execdata)[L->ci->savedpc - proto->code]; @@ -266,7 +269,7 @@ bool isSupported() void create(lua_State* L, AllocationCallback* allocationCallback, void* allocationCallbackContext) { - LUAU_ASSERT(isSupported()); + CODEGEN_ASSERT(isSupported()); std::unique_ptr data = std::make_unique(allocationCallback, allocationCallbackContext); @@ -309,12 +312,13 @@ void create(lua_State* L) CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, CompilationStats* stats) { - LUAU_ASSERT(lua_isLfunction(L, idx)); + CODEGEN_ASSERT(lua_isLfunction(L, idx)); const TValue* func = luaA_toobject(L, idx); Proto* root = clvalue(func)->l.p; + if ((flags & CodeGen_OnlyNativeModules) != 0 && (root->flags & LPF_NATIVE_MODULE) == 0) - return CodeGenCompilationResult::NothingToCompile; + return CodeGenCompilationResult::NotNativeModule; // If initialization has failed, do not compile any functions NativeState* data = getNativeState(L); @@ -334,6 +338,9 @@ CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, Comp if (protos.empty()) return CodeGenCompilationResult::NothingToCompile; + if (stats != nullptr) + stats->functionsTotal = uint32_t(protos.size()); + #if defined(__aarch64__) static unsigned int cpuFeatures = getCpuFeaturesA64(); A64::AssemblyBuilderA64 build(/* logText= */ false, cpuFeatures); @@ -353,10 +360,19 @@ CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, Comp uint32_t totalIrInstCount = 0; + CodeGenCompilationResult codeGenCompilationResult = CodeGenCompilationResult::Success; + for (Proto* p : protos) { - if (std::optional np = createNativeFunction(build, helpers, p, totalIrInstCount)) + // If compiling a proto fails, we want to propagate the failure via codeGenCompilationResult + // If multiple compilations fail, we only use the failure from the first unsuccessful compilation. 
+ CodeGenCompilationResult temp = CodeGenCompilationResult::Success; + + if (std::optional<NativeProto> np = createNativeFunction(build, helpers, p, totalIrInstCount, temp)) results.push_back(*np); + // From the second compilation failure onwards, this condition is false, so codeGenCompilationResult keeps the first failure. + else if (codeGenCompilationResult == CodeGenCompilationResult::Success) codeGenCompilationResult = temp; } // Very large modules might result in overflowing a jump offset; in this case we currently abandon the entire module @@ -365,12 +381,15 @@ for (NativeProto result : results) destroyExecData(result.execdata); - return CodeGenCompilationResult::CodeGenFailed; + return CodeGenCompilationResult::CodeGenAssemblerFinalizationFailure; } // If no functions were assembled, we don't need to allocate/copy executable pages for helpers if (results.empty()) - return CodeGenCompilationResult::CodeGenFailed; + { + CODEGEN_ASSERT(codeGenCompilationResult != CodeGenCompilationResult::Success); + return codeGenCompilationResult; + } uint8_t* nativeData = nullptr; size_t sizeNativeData = 0; @@ -392,7 +411,7 @@ CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, Comp { uint32_t begin = uint32_t(results[i].exectarget); uint32_t end = i + 1 < results.size() ? uint32_t(results[i + 1].exectarget) : uint32_t(build.code.size() * sizeof(build.code[0])); - LUAU_ASSERT(begin < end); + CODEGEN_ASSERT(begin < end); logPerfFunction(results[i].p, uintptr_t(codeStart) + begin, end - begin); } @@ -421,7 +440,7 @@ CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, Comp stats->nativeDataSizeBytes += build.data.size(); } - return CodeGenCompilationResult::Success; + return codeGenCompilationResult; } void setPerfLog(void* context, PerfLogFn logFn) diff --git a/CodeGen/src/CodeGenA64.cpp b/CodeGen/src/CodeGenA64.cpp index 37ee462b..c0cf7e04 100644 --- a/CodeGen/src/CodeGenA64.cpp +++ b/CodeGen/src/CodeGenA64.cpp @@ -103,7 +103,7 @@ static void emitContinueCall(AssemblyBuilderA64& build, ModuleHelpers& helpers) // If the fallback yielded, we need to do this right away // note: it's slightly cheaper to check x0 LSB; a valid Closure pointer must be aligned to 8 bytes - LUAU_ASSERT(CALL_FALLBACK_YIELD == 1); + CODEGEN_ASSERT(CALL_FALLBACK_YIELD == 1); build.tbnz(x0, 0, helpers.exitNoContinueVm); // Need to update state of the current function before we jump away @@ -114,7 +114,7 @@ static void emitContinueCall(AssemblyBuilderA64& build, ModuleHelpers& helpers) build.mov(rClosure, x0); - LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8); + CODEGEN_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8); build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code build.br(x2); @@ -178,7 +178,7 @@ void emitReturn(AssemblyBuilderA64& build, ModuleHelpers& helpers) build.ldr(x1, mem(rClosure, offsetof(Closure, l.p))); // cl->l.p aka proto - LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8); + CODEGEN_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8); build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code // Get instruction index from instruction pointer @@ -188,7 +188,7 @@ void emitReturn(AssemblyBuilderA64& build, ModuleHelpers& helpers) build.sub(x2, x2, rCode); // Get new instruction location and jump to it - LUAU_ASSERT(offsetof(Proto, exectarget) ==
offsetof(Proto, execdata) + 8); build.ldp(x3, x4, mem(x1, offsetof(Proto, execdata))); build.ldr(w2, mem(x3, x2)); build.add(x4, x4, x2); @@ -226,7 +226,7 @@ static EntryLocations buildEntryFunction(AssemblyBuilderA64& build, UnwindBuilde build.ldr(rBase, mem(x0, offsetof(lua_State, base))); // L->base - LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8); + CODEGEN_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8); build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code build.ldr(x9, mem(x0, offsetof(lua_State, ci))); // L->ci @@ -270,13 +270,13 @@ bool initHeaderFunctions(NativeState& data) unwind.finishInfo(); - LUAU_ASSERT(build.data.empty()); + CODEGEN_ASSERT(build.data.empty()); uint8_t* codeStart = nullptr; if (!data.codeAllocator.allocate(build.data.data(), int(build.data.size()), reinterpret_cast(build.code.data()), int(build.code.size() * sizeof(build.code[0])), data.gateData, data.gateDataSize, codeStart)) { - LUAU_ASSERT(!"Failed to create entry function"); + CODEGEN_ASSERT(!"Failed to create entry function"); return false; } diff --git a/CodeGen/src/CodeGenAssembly.cpp b/CodeGen/src/CodeGenAssembly.cpp index 7246f0e7..cce9eac6 100644 --- a/CodeGen/src/CodeGenAssembly.cpp +++ b/CodeGen/src/CodeGenAssembly.cpp @@ -100,7 +100,9 @@ static std::string getAssemblyImpl(AssemblyBuilder& build, const TValue* func, A if (options.includeAssembly || options.includeIr) logFunctionHeader(build, p); - if (!lowerFunction(ir, build, helpers, p, options, stats)) + CodeGenCompilationResult result = CodeGenCompilationResult::Success; + + if (!lowerFunction(ir, build, helpers, p, options, stats, result)) { if (build.logText) build.logAppend("; skipping (can't lower)\n"); @@ -154,7 +156,7 @@ unsigned int getCpuFeaturesA64(); std::string getAssembly(lua_State* L, int idx, AssemblyOptions options, LoweringStats* stats) { - LUAU_ASSERT(lua_isLfunction(L, idx)); + CODEGEN_ASSERT(lua_isLfunction(L, idx)); const TValue* func = luaA_toobject(L, idx); switch (options.target) @@ -200,7 +202,7 @@ std::string getAssembly(lua_State* L, int idx, AssemblyOptions options, Lowering } default: - LUAU_ASSERT(!"Unknown target"); + CODEGEN_ASSERT(!"Unknown target"); return std::string(); } } diff --git a/CodeGen/src/CodeGenLower.h b/CodeGen/src/CodeGenLower.h index 3165cc43..c011981b 100644 --- a/CodeGen/src/CodeGenLower.h +++ b/CodeGen/src/CodeGenLower.h @@ -85,7 +85,7 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction& dummy.start = ~0u; // Make sure entry block is first - LUAU_ASSERT(sortedBlocks[0] == 0); + CODEGEN_ASSERT(sortedBlocks[0] == 0); for (size_t i = 0; i < sortedBlocks.size(); ++i) { @@ -95,8 +95,8 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction& if (block.kind == IrBlockKind::Dead) continue; - LUAU_ASSERT(block.start != ~0u); - LUAU_ASSERT(block.finish != ~0u); + CODEGEN_ASSERT(block.start != ~0u); + CODEGEN_ASSERT(block.finish != ~0u); // If we want to skip fallback code IR/asm, we'll record when those blocks start once we see them if (block.kind == IrBlockKind::Fallback && !seenFallback) @@ -129,11 +129,11 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction& // Optimizations often propagate information between blocks // To make sure the register and spill state is correct when blocks are lowered, we check that sorted block order matches the expected one if (block.expectedNextBlock != ~0u) - LUAU_ASSERT(function.getBlockIndex(nextBlock) == block.expectedNextBlock); 
+ CODEGEN_ASSERT(function.getBlockIndex(nextBlock) == block.expectedNextBlock); for (uint32_t index = block.start; index <= block.finish; index++) { - LUAU_ASSERT(index < function.instructions.size()); + CODEGEN_ASSERT(index < function.instructions.size()); uint32_t bcLocation = bcLocations[index]; @@ -165,12 +165,12 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction& // This also prevents them from getting into text output when that's enabled if (isPseudo(inst.cmd)) { - LUAU_ASSERT(inst.useCount == 0); + CODEGEN_ASSERT(inst.useCount == 0); continue; } // Either instruction result value is not referenced or the use count is not zero - LUAU_ASSERT(inst.lastUse == 0 || inst.useCount != 0); + CODEGEN_ASSERT(inst.lastUse == 0 || inst.useCount != 0); if (options.includeIr) { @@ -246,7 +246,8 @@ inline bool lowerIr(A64::AssemblyBuilderA64& build, IrBuilder& ir, const std::ve } template -inline bool lowerFunction(IrBuilder& ir, AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options, LoweringStats* stats) +inline bool lowerFunction(IrBuilder& ir, AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options, LoweringStats* stats, + CodeGenCompilationResult& codeGenCompilationResult) { killUnusedBlocks(ir.function); @@ -269,10 +270,16 @@ inline bool lowerFunction(IrBuilder& ir, AssemblyBuilder& build, ModuleHelpers& } if (preOptBlockCount >= unsigned(FInt::CodegenHeuristicsBlockLimit.value)) + { + codeGenCompilationResult = CodeGenCompilationResult::CodeGenOverflowBlockLimit; return false; + } if (maxBlockInstructions >= unsigned(FInt::CodegenHeuristicsBlockInstructionLimit.value)) + { + codeGenCompilationResult = CodeGenCompilationResult::CodeGenOverflowBlockInstructionLimit; return false; + } computeCfgInfo(ir.function); @@ -318,7 +325,12 @@ inline bool lowerFunction(IrBuilder& ir, AssemblyBuilder& build, ModuleHelpers& } } - return lowerIr(build, ir, sortedBlocks, helpers, proto, options, stats); + bool result = lowerIr(build, ir, sortedBlocks, helpers, proto, options, stats); + + if (!result) + codeGenCompilationResult = CodeGenCompilationResult::CodeGenLoweringFailure; + + return result; } } // namespace CodeGen diff --git a/CodeGen/src/CodeGenX64.cpp b/CodeGen/src/CodeGenX64.cpp index b7f70258..d992f0f1 100644 --- a/CodeGen/src/CodeGenX64.cpp +++ b/CodeGen/src/CodeGenX64.cpp @@ -198,13 +198,13 @@ bool initHeaderFunctions(NativeState& data) unwind.finishInfo(); - LUAU_ASSERT(build.data.empty()); + CODEGEN_ASSERT(build.data.empty()); uint8_t* codeStart = nullptr; if (!data.codeAllocator.allocate( build.data.data(), int(build.data.size()), build.code.data(), int(build.code.size()), data.gateData, data.gateDataSize, codeStart)) { - LUAU_ASSERT(!"Failed to create entry function"); + CODEGEN_ASSERT(!"Failed to create entry function"); return false; } diff --git a/CodeGen/src/EmitBuiltinsX64.cpp b/CodeGen/src/EmitBuiltinsX64.cpp index 87e4e795..d20de431 100644 --- a/CodeGen/src/EmitBuiltinsX64.cpp +++ b/CodeGen/src/EmitBuiltinsX64.cpp @@ -81,16 +81,16 @@ void emitBuiltin(IrRegAllocX64& regs, AssemblyBuilderX64& build, int bfid, int r switch (bfid) { case LBF_MATH_FREXP: - LUAU_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2)); + CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2)); return emitBuiltinMathFrexp(regs, build, ra, arg, nresults); case LBF_MATH_MODF: - LUAU_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2)); + CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2)); 
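// [Editor's note] The asserts above (the case handling resumes right after
// this note) allow nresults of 1 or 2 for math.frexp and math.modf because
// both builtins produce a second value through a libm out-parameter. A plain
// C++ reminder of that underlying contract; this snippet is illustrative and
// independent of the emitter code.
#include <cmath>
#include <cstdio>

int main()
{
    int e = 0;
    double m = std::frexp(8.0, &e); // m == 0.5, e == 4, since 8 == 0.5 * 2^4

    double ip = 0.0;
    double fp = std::modf(3.25, &ip); // ip == 3.0, fp == 0.25

    std::printf("%g %d %g %g\n", m, e, ip, fp);
    return 0;
}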
return emitBuiltinMathModf(regs, build, ra, arg, nresults); case LBF_MATH_SIGN: - LUAU_ASSERT(nparams == 1 && nresults == 1); + CODEGEN_ASSERT(nparams == 1 && nresults == 1); return emitBuiltinMathSign(regs, build, ra, arg); default: - LUAU_ASSERT(!"Missing x64 lowering"); + CODEGEN_ASSERT(!"Missing x64 lowering"); } } diff --git a/CodeGen/src/EmitCommonX64.cpp b/CodeGen/src/EmitCommonX64.cpp index 014f5a46..c8d1e75a 100644 --- a/CodeGen/src/EmitCommonX64.cpp +++ b/CodeGen/src/EmitCommonX64.cpp @@ -73,7 +73,7 @@ void jumpOnNumberCmp(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64 lhs, build.jcc(ConditionX64::Parity, label); break; default: - LUAU_ASSERT(!"Unsupported condition"); + CODEGEN_ASSERT(!"Unsupported condition"); } } @@ -110,15 +110,15 @@ ConditionX64 getConditionInt(IrCondition cond) case IrCondition::UnsignedGreaterEqual: return ConditionX64::AboveEqual; default: - LUAU_ASSERT(!"Unsupported condition"); + CODEGEN_ASSERT(!"Unsupported condition"); return ConditionX64::Zero; } } void getTableNodeAtCachedSlot(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 node, RegisterX64 table, int pcpos) { - LUAU_ASSERT(tmp != node); - LUAU_ASSERT(table != node); + CODEGEN_ASSERT(tmp != node); + CODEGEN_ASSERT(table != node); build.mov(node, qword[table + offsetof(Table, node)]); @@ -134,7 +134,7 @@ void getTableNodeAtCachedSlot(AssemblyBuilderX64& build, RegisterX64 tmp, Regist void convertNumberToIndexOrJump(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 numd, RegisterX64 numi, Label& label) { - LUAU_ASSERT(numi.size == SizeX64::dword); + CODEGEN_ASSERT(numi.size == SizeX64::dword); // Convert to integer, NaN is converted into 0x80000000 build.vcvttsd2si(numi, numd); diff --git a/CodeGen/src/EmitCommonX64.h b/CodeGen/src/EmitCommonX64.h index 4ae1c3b6..c29479e1 100644 --- a/CodeGen/src/EmitCommonX64.h +++ b/CodeGen/src/EmitCommonX64.h @@ -73,7 +73,7 @@ inline unsigned getNonVolXmmStorageSize(ABIX64 abi, uint8_t xmmRegCount) if (xmmRegCount <= kWindowsFirstNonVolXmmReg) return 0; - LUAU_ASSERT(xmmRegCount <= 16); + CODEGEN_ASSERT(xmmRegCount <= 16); return (xmmRegCount - kWindowsFirstNonVolXmmReg) * 16; } @@ -160,7 +160,7 @@ inline OperandX64 luauNodeKeyTag(RegisterX64 node) inline void setLuauReg(AssemblyBuilderX64& build, RegisterX64 tmp, int ri, OperandX64 op) { - LUAU_ASSERT(op.cat == CategoryX64::mem); + CODEGEN_ASSERT(op.cat == CategoryX64::mem); build.vmovups(tmp, op); build.vmovups(luauReg(ri), tmp); diff --git a/CodeGen/src/EmitInstructionX64.cpp b/CodeGen/src/EmitInstructionX64.cpp index f478d6b5..ae3d1308 100644 --- a/CodeGen/src/EmitInstructionX64.cpp +++ b/CodeGen/src/EmitInstructionX64.cpp @@ -296,7 +296,7 @@ void emitInstSetList(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int build.jcc(ConditionX64::NotBelow, skipResize); // Argument setup reordered to avoid conflicts - LUAU_ASSERT(rArg3 != table); + CODEGEN_ASSERT(rArg3 != table); build.mov(dwordReg(rArg3), last); build.mov(rArg2, table); build.mov(rArg1, rState); @@ -324,7 +324,7 @@ void emitInstSetList(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int } else { - LUAU_ASSERT(count != 0); + CODEGEN_ASSERT(count != 0); build.xor_(offset, offset); if (index != 1) @@ -359,7 +359,7 @@ void emitInstSetList(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int void emitInstForGLoop(AssemblyBuilderX64& build, int ra, int aux, Label& loopRepeat) { // ipairs-style traversal is handled in IR - LUAU_ASSERT(aux >= 0); + CODEGEN_ASSERT(aux >= 0); // TODO: This should use IrCallWrapperX64 
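// [Editor's note] The first line after this sketch selects the first integer
// argument register by ABI: Windows x64 passes the first four integer
// arguments in rcx/rdx/r8/r9, while the SysV ABI used on Linux/macOS passes
// the first six in rdi/rsi/rdx/rcx/r8/r9. A small standalone sketch of that
// dispatch; the Abi enum and gprForArgument() are illustrative, not Luau's API.
#include <array>
#include <cstddef>
#include <string_view>

enum class Abi
{
    Windows,
    SystemV,
};

// Integer argument registers in call order for each ABI
constexpr std::array<std::string_view, 4> kWindowsGprOrder = {"rcx", "rdx", "r8", "r9"};
constexpr std::array<std::string_view, 6> kSystemvGprOrder = {"rdi", "rsi", "rdx", "rcx", "r8", "r9"};

constexpr std::string_view gprForArgument(Abi abi, size_t index)
{
    return abi == Abi::Windows ? kWindowsGprOrder.at(index) : kSystemvGprOrder.at(index);
}

static_assert(gprForArgument(Abi::Windows, 0) == "rcx");
static_assert(gprForArgument(Abi::SystemV, 0) == "rdi");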
RegisterX64 rArg1 = (build.abi == ABIX64::Windows) ? rcx : rdi; diff --git a/CodeGen/src/IrAnalysis.cpp b/CodeGen/src/IrAnalysis.cpp index e848970a..30ed42a0 100644 --- a/CodeGen/src/IrAnalysis.cpp +++ b/CodeGen/src/IrAnalysis.cpp @@ -33,13 +33,13 @@ void updateUseCounts(IrFunction& function) if (op.kind == IrOpKind::Inst) { IrInst& target = instructions[op.index]; - LUAU_ASSERT(target.useCount < 0xffff); + CODEGEN_ASSERT(target.useCount < 0xffff); target.useCount++; } else if (op.kind == IrOpKind::Block) { IrBlock& target = blocks[op.index]; - LUAU_ASSERT(target.useCount < 0xffff); + CODEGEN_ASSERT(target.useCount < 0xffff); target.useCount++; } }; @@ -59,10 +59,10 @@ void updateLastUseLocations(IrFunction& function, const std::vector& s { std::vector<IrInst>& instructions = function.instructions; -#if defined(LUAU_ASSERTENABLED) +#if defined(CODEGEN_ASSERTENABLED) // Last use assignments should be done only once for (IrInst& inst : instructions) - LUAU_ASSERT(inst.lastUse == 0); + CODEGEN_ASSERT(inst.lastUse == 0); #endif for (size_t i = 0; i < sortedBlocks.size(); ++i) @@ -73,12 +73,12 @@ void updateLastUseLocations(IrFunction& function, const std::vector& s if (block.kind == IrBlockKind::Dead) continue; - LUAU_ASSERT(block.start != ~0u); - LUAU_ASSERT(block.finish != ~0u); + CODEGEN_ASSERT(block.start != ~0u); + CODEGEN_ASSERT(block.finish != ~0u); for (uint32_t instIdx = block.start; instIdx <= block.finish; instIdx++) { - LUAU_ASSERT(instIdx < function.instructions.size()); + CODEGEN_ASSERT(instIdx < function.instructions.size()); IrInst& inst = instructions[instIdx]; auto checkOp = [&](IrOp op) { @@ -101,7 +101,7 @@ void updateLastUseLocations(IrFunction& function, const std::vector& s uint32_t getNextInstUse(IrFunction& function, uint32_t targetInstIdx, uint32_t startInstIdx) { - LUAU_ASSERT(startInstIdx < function.instructions.size()); + CODEGEN_ASSERT(startInstIdx < function.instructions.size()); IrInst& targetInst = function.instructions[targetInstIdx]; for (uint32_t i = startInstIdx; i <= targetInst.lastUse; i++) @@ -131,7 +131,7 @@ uint32_t getNextInstUse(IrFunction& function, uint32_t targetInstIdx, uint32_t s } // There must be a next use since there is the last use location - LUAU_ASSERT(!"Failed to find next use"); + CODEGEN_ASSERT(!"Failed to find next use"); return targetInst.lastUse; } @@ -188,7 +188,7 @@ void requireVariadicSequence(RegisterSet& sourceRs, const RegisterSet& defRs, ui while (defRs.regs.test(varargStart)) varargStart++; - LUAU_ASSERT(!sourceRs.varargSeq || sourceRs.varargStart == varargStart); + CODEGEN_ASSERT(!sourceRs.varargSeq || sourceRs.varargStart == varargStart); sourceRs.varargSeq = true; sourceRs.varargStart = varargStart; @@ -381,7 +381,7 @@ static void computeCfgLiveInOutRegSets(IrFunction& function) if (curr.kind != IrBlockKind::Fallback && succ.kind == IrBlockKind::Fallback) { // If this is the only successor, this skip will not be valid - LUAU_ASSERT(successorsIt.size() != 1); + CODEGEN_ASSERT(successorsIt.size() != 1); continue; } @@ -391,7 +391,7 @@ static void computeCfgLiveInOutRegSets(IrFunction& function) if (succRs.varargSeq) { - LUAU_ASSERT(!outRs.varargSeq || outRs.varargStart == succRs.varargStart); + CODEGEN_ASSERT(!outRs.varargSeq || outRs.varargStart == succRs.varargStart); outRs.varargSeq = true; outRs.varargStart = succRs.varargStart; @@ -426,10 +426,10 @@ static void computeCfgLiveInOutRegSets(IrFunction& function) { RegisterSet& entryIn = info.in[0]; - LUAU_ASSERT(!entryIn.varargSeq); + CODEGEN_ASSERT(!entryIn.varargSeq); for
(size_t i = 0; i < entryIn.regs.size(); i++) - LUAU_ASSERT(!entryIn.regs.test(i) || i < function.proto->numparams); + CODEGEN_ASSERT(!entryIn.regs.test(i) || i < function.proto->numparams); } } @@ -509,7 +509,7 @@ void computeBlockOrdering( { CfgInfo& info = function.cfg; - LUAU_ASSERT(info.idoms.size() == function.blocks.size()); + CODEGEN_ASSERT(info.idoms.size() == function.blocks.size()); ordering.clear(); ordering.resize(function.blocks.size()); @@ -582,13 +582,13 @@ static uint32_t findCommonDominator(const std::vector& idoms, const st while (data[a].postOrder < data[b].postOrder) { a = idoms[a]; - LUAU_ASSERT(a != ~0u); + CODEGEN_ASSERT(a != ~0u); } while (data[b].postOrder < data[a].postOrder) { b = idoms[b]; - LUAU_ASSERT(b != ~0u); + CODEGEN_ASSERT(b != ~0u); } } @@ -707,10 +707,10 @@ void computeCfgDominanceTreeChildren(IrFunction& function) void computeIteratedDominanceFrontierForDefs( IdfContext& ctx, const IrFunction& function, const std::vector& defBlocks, const std::vector& liveInBlocks) { - LUAU_ASSERT(!function.cfg.domOrdering.empty()); + CODEGEN_ASSERT(!function.cfg.domOrdering.empty()); - LUAU_ASSERT(ctx.queue.empty()); - LUAU_ASSERT(ctx.worklist.empty()); + CODEGEN_ASSERT(ctx.queue.empty()); + CODEGEN_ASSERT(ctx.worklist.empty()); ctx.idf.clear(); @@ -728,7 +728,7 @@ void computeIteratedDominanceFrontierForDefs( IdfContext::BlockAndOrdering root = ctx.queue.top(); ctx.queue.pop(); - LUAU_ASSERT(ctx.worklist.empty()); + CODEGEN_ASSERT(ctx.worklist.empty()); ctx.worklist.push_back(root.blockIdx); ctx.visits[root.blockIdx].seenInWorklist = true; @@ -785,7 +785,7 @@ void computeCfgInfo(IrFunction& function) BlockIteratorWrapper predecessors(const CfgInfo& cfg, uint32_t blockIdx) { - LUAU_ASSERT(blockIdx < cfg.predecessorsOffsets.size()); + CODEGEN_ASSERT(blockIdx < cfg.predecessorsOffsets.size()); uint32_t start = cfg.predecessorsOffsets[blockIdx]; uint32_t end = blockIdx + 1 < cfg.predecessorsOffsets.size() ? cfg.predecessorsOffsets[blockIdx + 1] : uint32_t(cfg.predecessors.size()); @@ -795,7 +795,7 @@ BlockIteratorWrapper predecessors(const CfgInfo& cfg, uint32_t blockIdx) BlockIteratorWrapper successors(const CfgInfo& cfg, uint32_t blockIdx) { - LUAU_ASSERT(blockIdx < cfg.successorsOffsets.size()); + CODEGEN_ASSERT(blockIdx < cfg.successorsOffsets.size()); uint32_t start = cfg.successorsOffsets[blockIdx]; uint32_t end = blockIdx + 1 < cfg.successorsOffsets.size() ? cfg.successorsOffsets[blockIdx + 1] : uint32_t(cfg.successors.size()); @@ -805,7 +805,7 @@ BlockIteratorWrapper successors(const CfgInfo& cfg, uint32_t blockIdx) BlockIteratorWrapper domChildren(const CfgInfo& cfg, uint32_t blockIdx) { - LUAU_ASSERT(blockIdx < cfg.domChildrenOffsets.size()); + CODEGEN_ASSERT(blockIdx < cfg.domChildrenOffsets.size()); uint32_t start = cfg.domChildrenOffsets[blockIdx]; uint32_t end = blockIdx + 1 < cfg.domChildrenOffsets.size() ? 
cfg.domChildrenOffsets[blockIdx + 1] : uint32_t(cfg.domChildren.size()); diff --git a/CodeGen/src/IrBuilder.cpp b/CodeGen/src/IrBuilder.cpp index 54605a7b..4647b902 100644 --- a/CodeGen/src/IrBuilder.cpp +++ b/CodeGen/src/IrBuilder.cpp @@ -32,7 +32,7 @@ static bool hasTypedParameters(Proto* proto) static void buildArgumentTypeChecks(IrBuilder& build, Proto* proto) { - LUAU_ASSERT(hasTypedParameters(proto)); + CODEGEN_ASSERT(hasTypedParameters(proto)); for (int i = 0; i < proto->numparams; ++i) { @@ -145,7 +145,7 @@ void IrBuilder::buildFunctionIr(Proto* proto) LuauOpcode op = LuauOpcode(LUAU_INSN_OP(*pc)); int nexti = i + getOpLength(op); - LUAU_ASSERT(nexti <= proto->sizecode); + CODEGEN_ASSERT(nexti <= proto->sizecode); function.bcMapping[i] = {uint32_t(function.instructions.size()), ~0u}; @@ -181,7 +181,7 @@ void IrBuilder::buildFunctionIr(Proto* proto) afterInstForNLoop(*this, pc); i = nexti; - LUAU_ASSERT(i <= proto->sizecode); + CODEGEN_ASSERT(i <= proto->sizecode); // If we are going into a new block at the next instruction and it's a fallthrough, jump has to be placed to mark block termination if (i < int(instIndexToBlock.size()) && instIndexToBlock[i] != kNoAssociatedBlockIndex) @@ -213,7 +213,7 @@ void IrBuilder::rebuildBytecodeBasicBlocks(Proto* proto) jumpTargets[target] = true; i += getOpLength(op); - LUAU_ASSERT(i <= proto->sizecode); + CODEGEN_ASSERT(i <= proto->sizecode); } // Bytecode blocks are created at bytecode jump targets and the start of a function @@ -521,7 +521,7 @@ void IrBuilder::translateInst(LuauOpcode op, const Instruction* pc, int i) break; } default: - LUAU_ASSERT(!"Unknown instruction"); + CODEGEN_ASSERT(!"Unknown instruction"); } } @@ -556,7 +556,7 @@ void IrBuilder::beginBlock(IrOp block) IrBlock& target = function.blocks[block.index]; activeBlockIdx = block.index; - LUAU_ASSERT(target.start == ~0u || target.start == uint32_t(function.instructions.size())); + CODEGEN_ASSERT(target.start == ~0u || target.start == uint32_t(function.instructions.size())); target.start = uint32_t(function.instructions.size()); target.sortkey = target.start; @@ -579,7 +579,7 @@ void IrBuilder::clone(const IrBlock& source, bool removeCurrentTerminator) if (const uint32_t* newIndex = instRedir.find(op.index)) op.index = *newIndex; else - LUAU_ASSERT(!"Values can only be used if they are defined in the same block"); + CODEGEN_ASSERT(!"Values can only be used if they are defined in the same block"); } }; @@ -594,13 +594,13 @@ void IrBuilder::clone(const IrBlock& source, bool removeCurrentTerminator) for (uint32_t index = source.start; index <= source.finish; index++) { - LUAU_ASSERT(index < function.instructions.size()); + CODEGEN_ASSERT(index < function.instructions.size()); IrInst clone = function.instructions[index]; // Skip pseudo instructions to make clone more compact, but validate that they have no users if (isPseudo(clone.cmd)) { - LUAU_ASSERT(clone.useCount == 0); + CODEGEN_ASSERT(clone.useCount == 0); continue; } @@ -723,7 +723,7 @@ IrOp IrBuilder::inst(IrCmd cmd, IrOp a, IrOp b, IrOp c, IrOp d, IrOp e, IrOp f) uint32_t index = uint32_t(function.instructions.size()); function.instructions.push_back({cmd, a, b, c, d, e, f}); - LUAU_ASSERT(!inTerminatedBlock); + CODEGEN_ASSERT(!inTerminatedBlock); if (isBlockTerminator(cmd)) { diff --git a/CodeGen/src/IrCallWrapperX64.cpp b/CodeGen/src/IrCallWrapperX64.cpp index 15fabf09..569292eb 100644 --- a/CodeGen/src/IrCallWrapperX64.cpp +++ b/CodeGen/src/IrCallWrapperX64.cpp @@ -38,9 +38,9 @@ 
IrCallWrapperX64::IrCallWrapperX64(IrRegAllocX64& regs, AssemblyBuilderX64& buil void IrCallWrapperX64::addArgument(SizeX64 targetSize, OperandX64 source, IrOp sourceOp) { // Instruction operands rely on current instruction index for lifetime tracking - LUAU_ASSERT(instIdx != kInvalidInstIdx || sourceOp.kind == IrOpKind::None); + CODEGEN_ASSERT(instIdx != kInvalidInstIdx || sourceOp.kind == IrOpKind::None); - LUAU_ASSERT(argCount < kMaxCallArguments); + CODEGEN_ASSERT(argCount < kMaxCallArguments); CallArgument& arg = args[argCount++]; arg = {targetSize, source, sourceOp}; @@ -142,11 +142,11 @@ void IrCallWrapperX64::call(const OperandX64& func) if (CallArgument* candidate = findNonInterferingArgument()) { // This section is only for handling register targets - LUAU_ASSERT(candidate->target.cat == CategoryX64::reg); + CODEGEN_ASSERT(candidate->target.cat == CategoryX64::reg); freeSourceRegisters(*candidate); - LUAU_ASSERT(getRegisterUses(candidate->target.base) == 0); + CODEGEN_ASSERT(getRegisterUses(candidate->target.base) == 0); regs.takeReg(candidate->target.base, kInvalidInstIdx); moveToTarget(*candidate); @@ -161,7 +161,7 @@ void IrCallWrapperX64::call(const OperandX64& func) else { for (int i = 0; i < argCount; ++i) - LUAU_ASSERT(!args[i].candidate); + CODEGEN_ASSERT(!args[i].candidate); break; } } @@ -225,13 +225,13 @@ OperandX64 IrCallWrapperX64::getNextArgumentTarget(SizeX64 size) const { if (size == SizeX64::xmmword) { - LUAU_ASSERT(size_t(xmmPos) < kXmmOrder.size()); + CODEGEN_ASSERT(size_t(xmmPos) < kXmmOrder.size()); return kXmmOrder[xmmPos]; } const std::array& gprOrder = build.abi == ABIX64::Windows ? kWindowsGprOrder : kSystemvGprOrder; - LUAU_ASSERT(size_t(gprPos) < gprOrder.size()); + CODEGEN_ASSERT(size_t(gprPos) < gprOrder.size()); OperandX64 target = gprOrder[gprPos]; // Keep requested argument size @@ -416,7 +416,7 @@ void IrCallWrapperX64::removeRegisterUse(RegisterX64 reg) { if (reg.size == SizeX64::xmmword) { - LUAU_ASSERT(xmmUses[reg.index] != 0); + CODEGEN_ASSERT(xmmUses[reg.index] != 0); xmmUses[reg.index]--; if (xmmUses[reg.index] == 0) // we don't use persistent xmm regs so no need to call shouldFreeRegister @@ -424,7 +424,7 @@ void IrCallWrapperX64::removeRegisterUse(RegisterX64 reg) } else if (reg.size != SizeX64::none) { - LUAU_ASSERT(gprUses[reg.index] != 0); + CODEGEN_ASSERT(gprUses[reg.index] != 0); gprUses[reg.index]--; if (gprUses[reg.index] == 0 && regs.shouldFreeGpr(reg)) diff --git a/CodeGen/src/IrDump.cpp b/CodeGen/src/IrDump.cpp index 36c5c3d0..de7a7fa4 100644 --- a/CodeGen/src/IrDump.cpp +++ b/CodeGen/src/IrDump.cpp @@ -70,7 +70,7 @@ static const char* getTagName(uint8_t tag) case LUA_TDEADKEY: return "tdeadkey"; default: - LUAU_ASSERT(!"Unknown type tag"); + CODEGEN_ASSERT(!"Unknown type tag"); LUAU_UNREACHABLE(); } } @@ -429,7 +429,7 @@ void toString(IrToStringContext& ctx, IrOp op) toString(ctx.result, ctx.constants[op.index]); break; case IrOpKind::Condition: - LUAU_ASSERT(op.index < uint32_t(IrCondition::Count)); + CODEGEN_ASSERT(op.index < uint32_t(IrCondition::Count)); ctx.result.append(textForCondition[op.index]); break; case IrOpKind::Inst: @@ -506,7 +506,7 @@ const char* getBytecodeTypeName(uint8_t type) return "any"; } - LUAU_ASSERT(!"Unhandled type in getBytecodeTypeName"); + CODEGEN_ASSERT(!"Unhandled type in getBytecodeTypeName"); return nullptr; } @@ -568,7 +568,7 @@ static RegisterSet getJumpTargetExtraLiveIn(IrToStringContext& ctx, const IrBloc const RegisterSet& defRs = ctx.cfg.in[blockIdx]; // Find first block argument, for 
guard instructions (isNonTerminatingJump), that's the first and only one - LUAU_ASSERT(isNonTerminatingJump(inst.cmd)); + CODEGEN_ASSERT(isNonTerminatingJump(inst.cmd)); IrOp op = inst.a; if (inst.b.kind == IrOpKind::Block) diff --git a/CodeGen/src/IrLoweringA64.cpp b/CodeGen/src/IrLoweringA64.cpp index 04804e67..681c56ec 100644 --- a/CodeGen/src/IrLoweringA64.cpp +++ b/CodeGen/src/IrLoweringA64.cpp @@ -55,7 +55,7 @@ inline ConditionA64 getConditionFP(IrCondition cond) return ConditionA64::Less; default: - LUAU_ASSERT(!"Unexpected condition code"); + CODEGEN_ASSERT(!"Unexpected condition code"); return ConditionA64::Always; } } @@ -107,15 +107,15 @@ inline ConditionA64 getConditionInt(IrCondition cond) return ConditionA64::CarrySet; default: - LUAU_ASSERT(!"Unexpected condition code"); + CODEGEN_ASSERT(!"Unexpected condition code"); return ConditionA64::Always; } } static void emitAddOffset(AssemblyBuilderA64& build, RegisterA64 dst, RegisterA64 src, size_t offset) { - LUAU_ASSERT(dst != src); - LUAU_ASSERT(offset <= INT_MAX); + CODEGEN_ASSERT(dst != src); + CODEGEN_ASSERT(offset <= INT_MAX); if (offset <= AssemblyBuilderA64::kMaxImmediate) { @@ -186,7 +186,7 @@ static void emitFallback(AssemblyBuilderA64& build, int offset, int pcpos) static void emitInvokeLibm1P(AssemblyBuilderA64& build, size_t func, int arg) { - LUAU_ASSERT(kTempSlots >= 1); + CODEGEN_ASSERT(kTempSlots >= 1); build.ldr(d0, mem(rBase, arg * sizeof(TValue) + offsetof(TValue, value.n))); build.add(x0, sp, sTemporary.data); // sp-relative offset build.ldr(x1, mem(rNativeContext, uint32_t(func))); @@ -199,7 +199,7 @@ static bool emitBuiltin( switch (bfid) { case LBF_MATH_FREXP: - LUAU_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2)); + CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2)); emitInvokeLibm1P(build, offsetof(NativeContext, libm_frexp), arg); build.str(d0, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.n))); if (nresults == 2) @@ -210,7 +210,7 @@ static bool emitBuiltin( } return true; case LBF_MATH_MODF: - LUAU_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2)); + CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2)); emitInvokeLibm1P(build, offsetof(NativeContext, libm_modf), arg); build.ldr(d1, sTemporary); build.str(d1, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.n))); @@ -218,7 +218,7 @@ static bool emitBuiltin( build.str(d0, mem(rBase, (res + 1) * sizeof(TValue) + offsetof(TValue, value.n))); return true; case LBF_MATH_SIGN: - LUAU_ASSERT(nparams == 1 && nresults == 1); + CODEGEN_ASSERT(nparams == 1 && nresults == 1); build.ldr(d0, mem(rBase, arg * sizeof(TValue) + offsetof(TValue, value.n))); build.fcmpz(d0); build.fmov(d0, 0.0); @@ -230,7 +230,7 @@ static bool emitBuiltin( return true; default: - LUAU_ASSERT(!"Missing A64 lowering"); + CODEGEN_ASSERT(!"Missing A64 lowering"); return false; } } @@ -342,7 +342,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } } else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); break; } case IrCmd::GET_SLOT_NODE_ADDR: @@ -363,7 +363,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } // C field can be shifted as long as it's at the most significant byte of the instruction word - LUAU_ASSERT(kOffsetOfInstructionC == 3); + CODEGEN_ASSERT(kOffsetOfInstructionC == 3); build.ldrb(temp2, mem(regOp(inst.a), offsetof(Table, nodemask8))); build.and_(temp2, temp2, temp1w, -24); @@ -419,7 +419,7 @@ 
void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) AddressA64 addr = tempAddr(inst.a, offsetof(TValue, value)); if (inst.b.kind == IrOpKind::Constant) { - LUAU_ASSERT(intOp(inst.b) == 0); + CODEGEN_ASSERT(intOp(inst.b) == 0); build.str(xzr, addr); } else @@ -479,7 +479,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) RegisterA64 temp4 = regs.allocTemp(KindA64::s); AddressA64 addr = tempAddr(inst.a, offsetof(TValue, value)); - LUAU_ASSERT(addr.kind == AddressKindA64::imm && addr.data % 4 == 0 && unsigned(addr.data + 8) / 4 <= AddressA64::kMaxOffset); + CODEGEN_ASSERT(addr.kind == AddressKindA64::imm && addr.data % 4 == 0 && unsigned(addr.data + 8) / 4 <= AddressA64::kMaxOffset); build.fcvt(temp4, temp1); build.str(temp4, AddressA64(addr.base, addr.data + 0)); @@ -512,7 +512,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) if (inst.c.kind == IrOpKind::Constant) { // note: we reuse tag temp register as value for true booleans, and use built-in zero register for false values - LUAU_ASSERT(LUA_TBOOLEAN == 1); + CODEGEN_ASSERT(LUA_TBOOLEAN == 1); build.str(intOp(inst.c) ? tempt : wzr, addr); } else @@ -529,7 +529,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } break; } @@ -754,7 +754,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) if (inst.a.kind == IrOpKind::Constant) { // other cases should've been constant folded - LUAU_ASSERT(tagOp(inst.a) == LUA_TBOOLEAN); + CODEGEN_ASSERT(tagOp(inst.a) == LUA_TBOOLEAN); build.eor(inst.regA64, regOp(inst.b), 1); } else @@ -762,7 +762,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) Label notbool, exit; // use the fact that NIL is the only value less than BOOLEAN to do two tag comparisons at once - LUAU_ASSERT(LUA_TNIL == 0 && LUA_TBOOLEAN == 1); + CODEGEN_ASSERT(LUA_TNIL == 0 && LUA_TBOOLEAN == 1); build.cmp(regOp(inst.a), LUA_TBOOLEAN); build.b(ConditionA64::NotEqual, notbool); @@ -797,7 +797,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) else if (cond == IrCondition::Equal) build.ldr(x3, mem(rNativeContext, offsetof(NativeContext, luaV_equalval))); else - LUAU_ASSERT(!"Unsupported condition"); + CODEGEN_ASSERT(!"Unsupported condition"); build.blr(x3); @@ -823,7 +823,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) RegisterA64 temp = regs.allocTemp(KindA64::w); build.ldr(temp, mem(rBase, vmRegOp(inst.a) * sizeof(TValue) + offsetof(TValue, tt))); // nil => falsy - LUAU_ASSERT(LUA_TNIL == 0); + CODEGEN_ASSERT(LUA_TNIL == 0); build.cbz(temp, labelOp(inst.c)); // not boolean => truthy build.cmp(temp, LUA_TBOOLEAN); @@ -839,7 +839,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) RegisterA64 temp = regs.allocTemp(KindA64::w); build.ldr(temp, mem(rBase, vmRegOp(inst.a) * sizeof(TValue) + offsetof(TValue, tt))); // nil => falsy - LUAU_ASSERT(LUA_TNIL == 0); + CODEGEN_ASSERT(LUA_TNIL == 0); build.cbz(temp, labelOp(inst.b)); // not boolean => truthy build.cmp(temp, LUA_TBOOLEAN); @@ -865,7 +865,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) else if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Inst) build.cmp(regOp(inst.b), tagOp(inst.a)); else - LUAU_ASSERT(!"Unsupported 
instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); if (isFallthroughBlock(blockOp(inst.d), next)) { @@ -899,7 +899,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(unsigned(intOp(inst.b)) <= AssemblyBuilderA64::kMaxImmediate); + CODEGEN_ASSERT(unsigned(intOp(inst.b)) <= AssemblyBuilderA64::kMaxImmediate); build.cmp(regOp(inst.a), uint16_t(intOp(inst.b))); build.b(getConditionInt(cond), labelOp(inst.d)); } @@ -1131,7 +1131,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) build.str(temp, mem(rState, offsetof(lua_State, top))); } else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); break; } case IrCmd::ADJUST_STACK_TO_TOP: @@ -1159,7 +1159,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) else if (inst.d.kind == IrOpKind::VmConst) emitAddOffset(build, x4, rConstants, vmConstOp(inst.d) * sizeof(TValue)); else - LUAU_ASSERT(inst.d.kind == IrOpKind::Undef); + CODEGEN_ASSERT(inst.d.kind == IrOpKind::Undef); // nparams if (intOp(inst.e) == LUA_MULTRET) @@ -1228,7 +1228,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) build.adr(x2, &n, sizeof(n)); } else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); build.add(x3, rBase, uint16_t(vmRegOp(inst.a) * sizeof(TValue))); build.ldr(x4, mem(rNativeContext, offsetof(NativeContext, luaV_gettable))); @@ -1250,7 +1250,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) build.adr(x2, &n, sizeof(n)); } else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); build.add(x3, rBase, uint16_t(vmRegOp(inst.a) * sizeof(TValue))); build.ldr(x4, mem(rNativeContext, offsetof(NativeContext, luaV_settable))); @@ -1366,7 +1366,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) case IrCmd::CHECK_TRUTHY: { // Constant tags which don't require boolean value check should've been removed in constant folding - LUAU_ASSERT(inst.a.kind != IrOpKind::Constant || tagOp(inst.a) == LUA_TBOOLEAN); + CODEGEN_ASSERT(inst.a.kind != IrOpKind::Constant || tagOp(inst.a) == LUA_TBOOLEAN); Label fresh; // used when guard aborts execution or jumps to a VM exit Label& target = getTargetLabel(inst.c, fresh); @@ -1376,7 +1376,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) if (inst.a.kind != IrOpKind::Constant) { // fail to fallback on 'nil' (falsy) - LUAU_ASSERT(LUA_TNIL == 0); + CODEGEN_ASSERT(LUA_TNIL == 0); build.cbz(regOp(inst.a), target); // skip value test if it's not a boolean (truthy) @@ -1455,7 +1455,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } } else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); finalizeTargetLabel(inst.c, fresh); break; @@ -1471,7 +1471,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) RegisterA64 temp1w = castReg(KindA64::w, temp1); RegisterA64 temp2 = regs.allocTemp(KindA64::x); - LUAU_ASSERT(offsetof(LuaNode, key.value) == offsetof(LuaNode, key) && kOffsetOfTKeyTagNext >= 8 && kOffsetOfTKeyTagNext < 16); + CODEGEN_ASSERT(offsetof(LuaNode, key.value) == offsetof(LuaNode, key) && kOffsetOfTKeyTagNext >= 8 && kOffsetOfTKeyTagNext < 16); build.ldp(temp1, temp2, mem(regOp(inst.a), offsetof(LuaNode, key))); 
// load key.value into temp1 and key.tt (alongside other bits) into temp2 build.ubfx(temp2, temp2, (kOffsetOfTKeyTagNext - 8) * 8, kTKeyTagBits); // .tt is right before .next, and 8 bytes are skipped by ldp build.cmp(temp2, LUA_TSTRING); @@ -1483,7 +1483,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) build.b(ConditionA64::NotEqual, mismatch); build.ldr(temp1w, mem(regOp(inst.a), offsetof(LuaNode, val.tt))); - LUAU_ASSERT(LUA_TNIL == 0); + CODEGEN_ASSERT(LUA_TNIL == 0); build.cbz(temp1w, mismatch); if (inst.cmd == IrCmd::JUMP_SLOT_MATCH) @@ -1509,7 +1509,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) RegisterA64 temp = regs.allocTemp(KindA64::w); build.ldr(temp, mem(regOp(inst.a), offsetof(LuaNode, val.tt))); - LUAU_ASSERT(LUA_TNIL == 0); + CODEGEN_ASSERT(LUA_TNIL == 0); build.cbz(temp, getTargetLabel(inst.b, fresh)); finalizeTargetLabel(inst.b, fresh); break; @@ -1517,7 +1517,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) case IrCmd::CHECK_BUFFER_LEN: { int accessSize = intOp(inst.c); - LUAU_ASSERT(accessSize > 0 && accessSize <= int(AssemblyBuilderA64::kMaxImmediate)); + CODEGEN_ASSERT(accessSize > 0 && accessSize <= int(AssemblyBuilderA64::kMaxImmediate)); Label fresh; // used when guard aborts execution or jumps to a VM exit Label& target = getTargetLabel(inst.d, fresh); @@ -1570,7 +1570,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } finalizeTargetLabel(inst.d, fresh); break; @@ -1594,7 +1594,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) RegisterA64 temp1 = regs.allocTemp(KindA64::x); RegisterA64 temp2 = regs.allocTemp(KindA64::x); - LUAU_ASSERT(offsetof(global_State, totalbytes) == offsetof(global_State, GCthreshold) + 8); + CODEGEN_ASSERT(offsetof(global_State, totalbytes) == offsetof(global_State, GCthreshold) + 8); Label skip; build.ldp(temp1, temp2, mem(rGlobalState, offsetof(global_State, GCthreshold))); build.cmp(temp1, temp2); @@ -1818,7 +1818,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) // clear extra variables since we might have more than two if (intOp(inst.b) > 2) { - LUAU_ASSERT(LUA_TNIL == 0); + CODEGEN_ASSERT(LUA_TNIL == 0); for (int i = 2; i < intOp(inst.b); ++i) build.str(wzr, mem(rBase, (vmRegOp(inst.a) + 3 + i) * sizeof(TValue) + offsetof(TValue, tt))); } @@ -1875,52 +1875,52 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) // Full instruction fallbacks case IrCmd::FALLBACK_GETGLOBAL: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmConst); regs.spill(build, index); emitFallback(build, offsetof(NativeContext, executeGETGLOBAL), uintOp(inst.a)); break; case IrCmd::FALLBACK_SETGLOBAL: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmConst); regs.spill(build, index); emitFallback(build, offsetof(NativeContext, executeSETGLOBAL), uintOp(inst.a)); break; case IrCmd::FALLBACK_GETTABLEKS: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.d.kind == 
IrOpKind::VmConst); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmConst); regs.spill(build, index); emitFallback(build, offsetof(NativeContext, executeGETTABLEKS), uintOp(inst.a)); break; case IrCmd::FALLBACK_SETTABLEKS: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmConst); regs.spill(build, index); emitFallback(build, offsetof(NativeContext, executeSETTABLEKS), uintOp(inst.a)); break; case IrCmd::FALLBACK_NAMECALL: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmConst); regs.spill(build, index); emitFallback(build, offsetof(NativeContext, executeNAMECALL), uintOp(inst.a)); break; case IrCmd::FALLBACK_PREPVARARGS: - LUAU_ASSERT(inst.b.kind == IrOpKind::Constant); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::Constant); regs.spill(build, index); emitFallback(build, offsetof(NativeContext, executePREPVARARGS), uintOp(inst.a)); break; case IrCmd::FALLBACK_GETVARARGS: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::Constant); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::Constant); regs.spill(build, index); build.mov(x0, rState); @@ -1967,8 +1967,8 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) break; } case IrCmd::FALLBACK_DUPCLOSURE: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmConst); regs.spill(build, index); emitFallback(build, offsetof(NativeContext, executeDUPCLOSURE), uintOp(inst.a)); @@ -1982,7 +1982,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) // Pseudo instructions case IrCmd::NOP: case IrCmd::SUBSTITUTE: - LUAU_ASSERT(!"Pseudo instructions should not be lowered"); + CODEGEN_ASSERT(!"Pseudo instructions should not be lowered"); break; case IrCmd::BITAND_UINT: @@ -2167,14 +2167,14 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) { inst.regA64 = regs.allocReg(KindA64::x, index); - LUAU_ASSERT(sizeof(TString*) == 8); + CODEGEN_ASSERT(sizeof(TString*) == 8); if (inst.a.kind == IrOpKind::Inst) build.add(inst.regA64, rGlobalState, regOp(inst.a), 3); // implicit uxtw else if (inst.a.kind == IrOpKind::Constant) build.add(inst.regA64, rGlobalState, uint16_t(tagOp(inst.a)) * 8); else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); build.ldr(inst.regA64, mem(inst.regA64, offsetof(global_State, ttname))); break; @@ -2330,10 +2330,10 @@ void IrLoweringA64::finishBlock(const IrBlock& curr, const IrBlock& next) { // If we have spills remaining, we have to immediately lower the successor block for (uint32_t predIdx : predecessors(function.cfg, function.getBlockIndex(next))) - LUAU_ASSERT(predIdx == function.getBlockIndex(curr)); + CODEGEN_ASSERT(predIdx == function.getBlockIndex(curr)); // And the next block cannot be a join block in cfg - 
LUAU_ASSERT(next.useCount == 1); + CODEGEN_ASSERT(next.useCount == 1); } } @@ -2355,7 +2355,7 @@ void IrLoweringA64::finishFunction() for (ExitHandler& handler : exitHandlers) { - LUAU_ASSERT(handler.pcpos != kVmExitEntryGuardPc); + CODEGEN_ASSERT(handler.pcpos != kVmExitEntryGuardPc); build.setLabel(handler.self); @@ -2465,7 +2465,7 @@ RegisterA64 IrLoweringA64::tempDouble(IrOp op) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); return noreg; } } @@ -2482,7 +2482,7 @@ RegisterA64 IrLoweringA64::tempInt(IrOp op) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); return noreg; } } @@ -2499,7 +2499,7 @@ RegisterA64 IrLoweringA64::tempUint(IrOp op) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); return noreg; } } @@ -2507,9 +2507,9 @@ RegisterA64 IrLoweringA64::tempUint(IrOp op) AddressA64 IrLoweringA64::tempAddr(IrOp op, int offset) { // This is needed to tighten the bounds checks in the VmConst case below - LUAU_ASSERT(offset % 4 == 0); + CODEGEN_ASSERT(offset % 4 == 0); // Full encoded range is wider depending on the load size, but this assertion helps establish a smaller guaranteed working range [0..4096) - LUAU_ASSERT(offset >= 0 && unsigned(offset / 4) <= AssemblyBuilderA64::kMaxImmediate); + CODEGEN_ASSERT(offset >= 0 && unsigned(offset / 4) <= AssemblyBuilderA64::kMaxImmediate); if (op.kind == IrOpKind::VmReg) return mem(rBase, vmRegOp(op) * sizeof(TValue) + offset); @@ -2532,7 +2532,7 @@ AddressA64 IrLoweringA64::tempAddr(IrOp op, int offset) return mem(regOp(op), offset); else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); return noreg; } } @@ -2561,7 +2561,7 @@ AddressA64 IrLoweringA64::tempAddrBuffer(IrOp bufferOp, IrOp indexOp) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); return noreg; } } @@ -2573,7 +2573,7 @@ RegisterA64 IrLoweringA64::regOp(IrOp op) if (inst.spilled || inst.needsReload) regs.restoreReg(build, inst); - LUAU_ASSERT(inst.regA64 != noreg); + CODEGEN_ASSERT(inst.regA64 != noreg); return inst.regA64; } diff --git a/CodeGen/src/IrLoweringX64.cpp b/CodeGen/src/IrLoweringX64.cpp index 103bcacf..babfdf46 100644 --- a/CodeGen/src/IrLoweringX64.cpp +++ b/CodeGen/src/IrLoweringX64.cpp @@ -52,7 +52,7 @@ void IrLoweringX64::storeDoubleAsFloat(OperandX64 dst, IrOp src) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } build.vmovss(dst, tmp.reg); } @@ -77,7 +77,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) else if (inst.a.kind == IrOpKind::Inst) build.mov(inst.regX64, dword[regOp(inst.a) + offsetof(TValue, tt)]); else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); break; case IrCmd::LOAD_POINTER: inst.regX64 = regs.allocReg(SizeX64::qword, index); @@ -91,7 +91,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) else if (inst.a.kind == IrOpKind::Inst) build.mov(inst.regX64, qword[regOp(inst.a) + offsetof(TValue, value)]); else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); break; case IrCmd::LOAD_DOUBLE: inst.regX64 = regs.allocReg(SizeX64::xmmword, index); @@ -101,7 +101,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) 
else if (inst.a.kind == IrOpKind::VmConst) build.vmovsd(inst.regX64, luauConstantValue(vmConstOp(inst.a))); else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); break; case IrCmd::LOAD_INT: inst.regX64 = regs.allocReg(SizeX64::dword, index); @@ -117,7 +117,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) build.vcvtss2sd( inst.regX64, inst.regX64, dword[rConstants + vmConstOp(inst.a) * sizeof(TValue) + offsetof(TValue, value) + intOp(inst.b)]); else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); break; case IrCmd::LOAD_TVALUE: { @@ -132,7 +132,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) else if (inst.a.kind == IrOpKind::Inst) build.vmovups(inst.regX64, xmmword[regOp(inst.a) + addrOffset]); else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); break; } case IrCmd::LOAD_ENV: @@ -163,7 +163,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } break; case IrCmd::GET_SLOT_NODE_ADDR: @@ -222,7 +222,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } break; case IrCmd::STORE_POINTER: @@ -231,7 +231,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) if (inst.b.kind == IrOpKind::Constant) { - LUAU_ASSERT(intOp(inst.b) == 0); + CODEGEN_ASSERT(intOp(inst.b) == 0); build.mov(valueLhs, 0); } else if (inst.b.kind == IrOpKind::Inst) @@ -240,7 +240,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } break; } @@ -254,7 +254,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } break; case IrCmd::STORE_DOUBLE: @@ -274,7 +274,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } break; } @@ -284,7 +284,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) else if (inst.b.kind == IrOpKind::Inst) build.mov(luauRegValueInt(vmRegOp(inst.a)), regOp(inst.b)); else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); break; case IrCmd::STORE_VECTOR: storeDoubleAsFloat(luauRegValueVector(vmRegOp(inst.a), 0), inst.b); @@ -300,7 +300,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) else if (inst.a.kind == IrOpKind::Inst) build.vmovups(xmmword[regOp(inst.a) + addrOffset], regOp(inst.b)); else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); break; } case IrCmd::STORE_SPLIT_TVALUE: @@ -341,7 +341,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } break; } @@ -374,7 +374,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) 
} else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } break; } @@ -682,7 +682,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) if (inst.a.kind == IrOpKind::Constant) { // Other cases should've been constant folded - LUAU_ASSERT(tagOp(inst.a) == LUA_TBOOLEAN); + CODEGEN_ASSERT(tagOp(inst.a) == LUA_TBOOLEAN); } else { @@ -731,7 +731,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) else if (cond == IrCondition::Equal) callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_equalval)]); else - LUAU_ASSERT(!"Unsupported condition"); + CODEGEN_ASSERT(!"Unsupported condition"); emitUpdateBase(build); @@ -751,7 +751,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) break; case IrCmd::JUMP_EQ_TAG: { - LUAU_ASSERT(inst.b.kind == IrOpKind::Inst || inst.b.kind == IrOpKind::Constant); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::Inst || inst.b.kind == IrOpKind::Constant); OperandX64 opb = inst.b.kind == IrOpKind::Inst ? regOp(inst.b) : OperandX64(tagOp(inst.b)); if (inst.a.kind == IrOpKind::Constant) @@ -950,7 +950,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(source != IrCmd::SUBSTITUTE); // we don't process substitutions + CODEGEN_ASSERT(source != IrCmd::SUBSTITUTE); // we don't process substitutions build.vcvtsi2sd(inst.regX64, inst.regX64, qwordReg(regOp(inst.a))); } break; @@ -1001,7 +1001,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } break; } @@ -1032,7 +1032,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) else if (inst.d.kind == IrOpKind::VmConst) args = luauConstantAddress(vmConstOp(inst.d)); else - LUAU_ASSERT(inst.d.kind == IrOpKind::Undef); + CODEGEN_ASSERT(inst.d.kind == IrOpKind::Undef); int ra = vmRegOp(inst.b); int arg = vmRegOp(inst.c); @@ -1102,7 +1102,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } break; case IrCmd::SET_TABLE: @@ -1118,7 +1118,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } break; case IrCmd::GET_IMPORT: @@ -1202,7 +1202,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) case IrCmd::CHECK_TRUTHY: { // Constant tags which don't require boolean value check should've been removed in constant folding - LUAU_ASSERT(inst.a.kind != IrOpKind::Constant || tagOp(inst.a) == LUA_TBOOLEAN); + CODEGEN_ASSERT(inst.a.kind != IrOpKind::Constant || tagOp(inst.a) == LUA_TBOOLEAN); Label skip; @@ -1250,7 +1250,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) else if (inst.b.kind == IrOpKind::Constant) build.cmp(dword[regOp(inst.a) + offsetof(Table, sizearray)], intOp(inst.b)); else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); jumpOrAbortOnUndef(ConditionX64::BelowEqual, inst.c, next); break; @@ -1310,7 +1310,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) case IrCmd::CHECK_BUFFER_LEN: { int accessSize = intOp(inst.c); - 
LUAU_ASSERT(accessSize > 0); + CODEGEN_ASSERT(accessSize > 0); if (inst.b.kind == IrOpKind::Inst) { @@ -1361,7 +1361,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } break; } @@ -1537,46 +1537,46 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) // Full instruction fallbacks case IrCmd::FALLBACK_GETGLOBAL: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmConst); emitFallback(regs, build, offsetof(NativeContext, executeGETGLOBAL), uintOp(inst.a)); break; case IrCmd::FALLBACK_SETGLOBAL: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmConst); emitFallback(regs, build, offsetof(NativeContext, executeSETGLOBAL), uintOp(inst.a)); break; case IrCmd::FALLBACK_GETTABLEKS: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmConst); emitFallback(regs, build, offsetof(NativeContext, executeGETTABLEKS), uintOp(inst.a)); break; case IrCmd::FALLBACK_SETTABLEKS: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmConst); emitFallback(regs, build, offsetof(NativeContext, executeSETTABLEKS), uintOp(inst.a)); break; case IrCmd::FALLBACK_NAMECALL: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmConst); emitFallback(regs, build, offsetof(NativeContext, executeNAMECALL), uintOp(inst.a)); break; case IrCmd::FALLBACK_PREPVARARGS: - LUAU_ASSERT(inst.b.kind == IrOpKind::Constant); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::Constant); emitFallback(regs, build, offsetof(NativeContext, executePREPVARARGS), uintOp(inst.a)); break; case IrCmd::FALLBACK_GETVARARGS: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::Constant); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::Constant); if (intOp(inst.c) == LUA_MULTRET) { @@ -1623,8 +1623,8 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) break; } case IrCmd::FALLBACK_DUPCLOSURE: - LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst); + CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmConst); emitFallback(regs, build, offsetof(NativeContext, executeDUPCLOSURE), uintOp(inst.a)); break; @@ -1882,7 +1882,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) else if (inst.a.kind == IrOpKind::Constant) build.mov(inst.regX64, qword[inst.regX64 + tagOp(inst.a) * sizeof(TString*) + offsetof(global_State, 
ttname)]); else - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); break; } case IrCmd::GET_TYPEOF: @@ -1990,14 +1990,14 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next) } else { - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); } break; // Pseudo instructions case IrCmd::NOP: case IrCmd::SUBSTITUTE: - LUAU_ASSERT(!"Pseudo instructions should not be lowered"); + CODEGEN_ASSERT(!"Pseudo instructions should not be lowered"); break; } @@ -2012,10 +2012,10 @@ void IrLoweringX64::finishBlock(const IrBlock& curr, const IrBlock& next) { // If we have spills remaining, we have to immediately lower the successor block for (uint32_t predIdx : predecessors(function.cfg, function.getBlockIndex(next))) - LUAU_ASSERT(predIdx == function.getBlockIndex(curr) || function.blocks[predIdx].kind == IrBlockKind::Dead); + CODEGEN_ASSERT(predIdx == function.getBlockIndex(curr) || function.blocks[predIdx].kind == IrBlockKind::Dead); // And the next block cannot be a join block in cfg - LUAU_ASSERT(next.useCount == 1); + CODEGEN_ASSERT(next.useCount == 1); } } @@ -2037,7 +2037,7 @@ void IrLoweringX64::finishFunction() for (ExitHandler& handler : exitHandlers) { - LUAU_ASSERT(handler.pcpos != kVmExitEntryGuardPc); + CODEGEN_ASSERT(handler.pcpos != kVmExitEntryGuardPc); build.setLabel(handler.self); @@ -2154,7 +2154,7 @@ OperandX64 IrLoweringX64::memRegDoubleOp(IrOp op) case IrOpKind::VmConst: return luauConstantValue(vmConstOp(op)); default: - LUAU_ASSERT(!"Unsupported operand kind"); + CODEGEN_ASSERT(!"Unsupported operand kind"); } return noreg; @@ -2171,7 +2171,7 @@ OperandX64 IrLoweringX64::memRegUintOp(IrOp op) case IrOpKind::VmReg: return luauRegValueInt(vmRegOp(op)); default: - LUAU_ASSERT(!"Unsupported operand kind"); + CODEGEN_ASSERT(!"Unsupported operand kind"); } return noreg; @@ -2188,7 +2188,7 @@ OperandX64 IrLoweringX64::memRegTagOp(IrOp op) case IrOpKind::VmConst: return luauConstantTag(vmConstOp(op)); default: - LUAU_ASSERT(!"Unsupported operand kind"); + CODEGEN_ASSERT(!"Unsupported operand kind"); } return noreg; @@ -2201,7 +2201,7 @@ RegisterX64 IrLoweringX64::regOp(IrOp op) if (inst.spilled || inst.needsReload) regs.restore(inst, false); - LUAU_ASSERT(inst.regX64 != noreg); + CODEGEN_ASSERT(inst.regX64 != noreg); return inst.regX64; } @@ -2212,7 +2212,7 @@ OperandX64 IrLoweringX64::bufferAddrOp(IrOp bufferOp, IrOp indexOp) else if (indexOp.kind == IrOpKind::Constant) return regOp(bufferOp) + intOp(indexOp) + offsetof(Buffer, data); - LUAU_ASSERT(!"Unsupported instruction form"); + CODEGEN_ASSERT(!"Unsupported instruction form"); return noreg; } diff --git a/CodeGen/src/IrRegAllocA64.cpp b/CodeGen/src/IrRegAllocA64.cpp index 41f392c5..24b0b285 100644 --- a/CodeGen/src/IrRegAllocA64.cpp +++ b/CodeGen/src/IrRegAllocA64.cpp @@ -23,7 +23,7 @@ static const int8_t kInvalidSpill = 64; static int allocSpill(uint32_t& free, KindA64 kind) { - LUAU_ASSERT(kStackSize <= 256); // to support larger stack frames, we need to ensure qN is allocated at 16b boundary to fit in ldr/str encoding + CODEGEN_ASSERT(kStackSize <= 256); // to support larger stack frames, we need to ensure qN is allocated at 16b boundary to fit in ldr/str encoding // qN registers use two consecutive slots int slot = countrz(kind == KindA64::q ? free & (free >> 1) : free); @@ -32,7 +32,7 @@ static int allocSpill(uint32_t& free, KindA64 kind) uint32_t mask = (kind == KindA64::q ? 
3u : 1u) << slot; - LUAU_ASSERT((free & mask) == mask); + CODEGEN_ASSERT((free & mask) == mask); free &= ~mask; return slot; @@ -43,7 +43,7 @@ static void freeSpill(uint32_t& free, KindA64 kind, uint8_t slot) // qN registers use two consecutive slots uint32_t mask = (kind == KindA64::q ? 3u : 1u) << slot; - LUAU_ASSERT((free & mask) == 0); + CODEGEN_ASSERT((free & mask) == 0); free |= mask; } @@ -53,7 +53,7 @@ static int getReloadOffset(IrCmd cmd) { case IrValueKind::Unknown: case IrValueKind::None: - LUAU_ASSERT(!"Invalid operand restore value kind"); + CODEGEN_ASSERT(!"Invalid operand restore value kind"); break; case IrValueKind::Tag: return offsetof(TValue, tt); @@ -67,7 +67,7 @@ static int getReloadOffset(IrCmd cmd) return 0; } - LUAU_ASSERT(!"Invalid operand restore value kind"); + CODEGEN_ASSERT(!"Invalid operand restore value kind"); LUAU_UNREACHABLE(); } @@ -88,7 +88,7 @@ static AddressA64 getReloadAddress(const IrFunction& function, const IrInst& ins static void restoreInst(AssemblyBuilderA64& build, uint32_t& freeSpillSlots, IrFunction& function, const IrRegAllocA64::Spill& s, RegisterA64 reg) { IrInst& inst = function.instructions[s.inst]; - LUAU_ASSERT(inst.regA64 == noreg); + CODEGEN_ASSERT(inst.regA64 == noreg); if (s.slot >= 0) { @@ -99,9 +99,9 @@ static void restoreInst(AssemblyBuilderA64& build, uint32_t& freeSpillSlots, IrF } else { - LUAU_ASSERT(!inst.spilled && inst.needsReload); + CODEGEN_ASSERT(!inst.spilled && inst.needsReload); AddressA64 addr = getReloadAddress(function, function.instructions[s.inst], /*limitToCurrentBlock*/ false); - LUAU_ASSERT(addr.base != xzr); + CODEGEN_ASSERT(addr.base != xzr); build.ldr(reg, addr); } @@ -116,7 +116,7 @@ IrRegAllocA64::IrRegAllocA64(IrFunction& function, LoweringStats* stats, std::in { for (auto& p : regs) { - LUAU_ASSERT(p.first.kind == p.second.kind && p.first.index <= p.second.index); + CODEGEN_ASSERT(p.first.kind == p.second.kind && p.first.index <= p.second.index); Set& set = getSet(p.first.kind); @@ -130,7 +130,7 @@ IrRegAllocA64::IrRegAllocA64(IrFunction& function, LoweringStats* stats, std::in memset(gpr.defs, -1, sizeof(gpr.defs)); memset(simd.defs, -1, sizeof(simd.defs)); - LUAU_ASSERT(kSpillSlots <= 32); + CODEGEN_ASSERT(kSpillSlots <= 32); freeSpillSlots = (kSpillSlots == 32) ? 
~0u : (1u << kSpillSlots) - 1; } @@ -172,7 +172,7 @@ RegisterA64 IrRegAllocA64::allocTemp(KindA64 kind) set.free &= ~(1u << reg); set.temp |= 1u << reg; - LUAU_ASSERT(set.defs[reg] == kInvalidInstIdx); + CODEGEN_ASSERT(set.defs[reg] == kInvalidInstIdx); return RegisterA64{kind, uint8_t(reg)}; } @@ -188,11 +188,11 @@ RegisterA64 IrRegAllocA64::allocReuse(KindA64 kind, uint32_t index, std::initial if (source.lastUse == index && !source.reusedReg && source.regA64 != noreg) { - LUAU_ASSERT(!source.spilled && !source.needsReload); - LUAU_ASSERT(source.regA64.kind == kind); + CODEGEN_ASSERT(!source.spilled && !source.needsReload); + CODEGEN_ASSERT(source.regA64.kind == kind); Set& set = getSet(kind); - LUAU_ASSERT(set.defs[source.regA64.index] == op.index); + CODEGEN_ASSERT(set.defs[source.regA64.index] == op.index); set.defs[source.regA64.index] = index; source.reusedReg = true; @@ -207,8 +207,8 @@ RegisterA64 IrRegAllocA64::takeReg(RegisterA64 reg, uint32_t index) { Set& set = getSet(reg.kind); - LUAU_ASSERT(set.free & (1u << reg.index)); - LUAU_ASSERT(set.defs[reg.index] == kInvalidInstIdx); + CODEGEN_ASSERT(set.free & (1u << reg.index)); + CODEGEN_ASSERT(set.defs[reg.index] == kInvalidInstIdx); set.free &= ~(1u << reg.index); set.defs[reg.index] = index; @@ -220,9 +220,9 @@ void IrRegAllocA64::freeReg(RegisterA64 reg) { Set& set = getSet(reg.kind); - LUAU_ASSERT((set.base & (1u << reg.index)) != 0); - LUAU_ASSERT((set.free & (1u << reg.index)) == 0); - LUAU_ASSERT((set.temp & (1u << reg.index)) == 0); + CODEGEN_ASSERT((set.base & (1u << reg.index)) != 0); + CODEGEN_ASSERT((set.free & (1u << reg.index)) == 0); + CODEGEN_ASSERT((set.temp & (1u << reg.index)) == 0); set.free |= 1u << reg.index; set.defs[reg.index] = kInvalidInstIdx; @@ -232,7 +232,7 @@ void IrRegAllocA64::freeLastUseReg(IrInst& target, uint32_t index) { if (target.lastUse == index && !target.reusedReg) { - LUAU_ASSERT(!target.spilled && !target.needsReload); + CODEGEN_ASSERT(!target.spilled && !target.needsReload); // Register might have already been freed if it had multiple uses inside a single instruction if (target.regA64 == noreg) @@ -260,11 +260,11 @@ void IrRegAllocA64::freeLastUseRegs(const IrInst& inst, uint32_t index) void IrRegAllocA64::freeTempRegs() { - LUAU_ASSERT((gpr.free & gpr.temp) == 0); + CODEGEN_ASSERT((gpr.free & gpr.temp) == 0); gpr.free |= gpr.temp; gpr.temp = 0; - LUAU_ASSERT((simd.free & simd.temp) == 0); + CODEGEN_ASSERT((simd.free & simd.temp) == 0); simd.free |= simd.temp; simd.temp = 0; } @@ -299,7 +299,7 @@ size_t IrRegAllocA64::spill(AssemblyBuilderA64& build, uint32_t index, std::init continue; // free all temp registers - LUAU_ASSERT((set.free & set.temp) == 0); + CODEGEN_ASSERT((set.free & set.temp) == 0); set.free |= set.temp; set.temp = 0; @@ -311,13 +311,13 @@ size_t IrRegAllocA64::spill(AssemblyBuilderA64& build, uint32_t index, std::init int reg = 31 - countlz(regs); uint32_t inst = set.defs[reg]; - LUAU_ASSERT(inst != kInvalidInstIdx); + CODEGEN_ASSERT(inst != kInvalidInstIdx); IrInst& def = function.instructions[inst]; - LUAU_ASSERT(def.regA64.index == reg); - LUAU_ASSERT(!def.reusedReg); - LUAU_ASSERT(!def.spilled); - LUAU_ASSERT(!def.needsReload); + CODEGEN_ASSERT(def.regA64.index == reg); + CODEGEN_ASSERT(!def.reusedReg); + CODEGEN_ASSERT(!def.spilled); + CODEGEN_ASSERT(!def.needsReload); if (def.lastUse == index) { @@ -367,7 +367,7 @@ size_t IrRegAllocA64::spill(AssemblyBuilderA64& build, uint32_t index, std::init set.defs[reg] = kInvalidInstIdx; } - LUAU_ASSERT(set.free == 
set.base); + CODEGEN_ASSERT(set.free == set.base); } if (FFlag::DebugCodegenChaosA64) @@ -386,7 +386,7 @@ size_t IrRegAllocA64::spill(AssemblyBuilderA64& build, uint32_t index, std::init void IrRegAllocA64::restore(AssemblyBuilderA64& build, size_t start) { - LUAU_ASSERT(start <= spills.size()); + CODEGEN_ASSERT(start <= spills.size()); if (start < spills.size()) { @@ -421,7 +421,7 @@ void IrRegAllocA64::restoreReg(AssemblyBuilderA64& build, IrInst& inst) } } - LUAU_ASSERT(!"Expected to find a spill record"); + CODEGEN_ASSERT(!"Expected to find a spill record"); } IrRegAllocA64::Set& IrRegAllocA64::getSet(KindA64 kind) @@ -438,7 +438,7 @@ IrRegAllocA64::Set& IrRegAllocA64::getSet(KindA64 kind) return simd; default: - LUAU_ASSERT(!"Unexpected register kind"); + CODEGEN_ASSERT(!"Unexpected register kind"); LUAU_UNREACHABLE(); } } diff --git a/CodeGen/src/IrRegAllocX64.cpp b/CodeGen/src/IrRegAllocX64.cpp index d8278923..2b5da623 100644 --- a/CodeGen/src/IrRegAllocX64.cpp +++ b/CodeGen/src/IrRegAllocX64.cpp @@ -64,7 +64,7 @@ RegisterX64 IrRegAllocX64::allocReg(SizeX64 size, uint32_t instIdx) return takeReg(reg, instIdx); } - LUAU_ASSERT(!"Out of registers to allocate"); + CODEGEN_ASSERT(!"Out of registers to allocate"); return noreg; } @@ -83,7 +83,7 @@ RegisterX64 IrRegAllocX64::allocRegOrReuse(SizeX64 size, uint32_t instIdx, std:: if ((size == SizeX64::xmmword) != (source.regX64.size == SizeX64::xmmword)) continue; - LUAU_ASSERT(source.regX64 != noreg); + CODEGEN_ASSERT(source.regX64 != noreg); source.reusedReg = true; @@ -105,11 +105,11 @@ RegisterX64 IrRegAllocX64::takeReg(RegisterX64 reg, uint32_t instIdx) { if (!freeXmmMap[reg.index]) { - LUAU_ASSERT(xmmInstUsers[reg.index] != kInvalidInstIdx); + CODEGEN_ASSERT(xmmInstUsers[reg.index] != kInvalidInstIdx); preserve(function.instructions[xmmInstUsers[reg.index]]); } - LUAU_ASSERT(freeXmmMap[reg.index]); + CODEGEN_ASSERT(freeXmmMap[reg.index]); freeXmmMap[reg.index] = false; xmmInstUsers[reg.index] = instIdx; } @@ -117,11 +117,11 @@ RegisterX64 IrRegAllocX64::takeReg(RegisterX64 reg, uint32_t instIdx) { if (!freeGprMap[reg.index]) { - LUAU_ASSERT(gprInstUsers[reg.index] != kInvalidInstIdx); + CODEGEN_ASSERT(gprInstUsers[reg.index] != kInvalidInstIdx); preserve(function.instructions[gprInstUsers[reg.index]]); } - LUAU_ASSERT(freeGprMap[reg.index]); + CODEGEN_ASSERT(freeGprMap[reg.index]); freeGprMap[reg.index] = false; gprInstUsers[reg.index] = instIdx; } @@ -141,13 +141,13 @@ void IrRegAllocX64::freeReg(RegisterX64 reg) { if (reg.size == SizeX64::xmmword) { - LUAU_ASSERT(!freeXmmMap[reg.index]); + CODEGEN_ASSERT(!freeXmmMap[reg.index]); freeXmmMap[reg.index] = true; xmmInstUsers[reg.index] = kInvalidInstIdx; } else { - LUAU_ASSERT(!freeGprMap[reg.index]); + CODEGEN_ASSERT(!freeGprMap[reg.index]); freeGprMap[reg.index] = true; gprInstUsers[reg.index] = kInvalidInstIdx; } @@ -157,7 +157,7 @@ void IrRegAllocX64::freeLastUseReg(IrInst& target, uint32_t instIdx) { if (isLastUseReg(target, instIdx)) { - LUAU_ASSERT(!target.spilled && !target.needsReload); + CODEGEN_ASSERT(!target.spilled && !target.needsReload); // Register might have already been freed if it had multiple uses inside a single instruction if (target.regX64 == noreg) @@ -210,7 +210,7 @@ void IrRegAllocX64::preserve(IrInst& inst) else if (spill.valueKind == IrValueKind::Tag || spill.valueKind == IrValueKind::Int) build.mov(dword[sSpillArea + i * 8], inst.regX64); else - LUAU_ASSERT(!"Unsupported value kind"); + CODEGEN_ASSERT(!"Unsupported value kind"); usedSpillSlots.set(i); @@ 
-312,7 +312,7 @@ bool IrRegAllocX64::shouldFreeGpr(RegisterX64 reg) const if (reg == noreg) return false; - LUAU_ASSERT(reg.size != SizeX64::xmmword); + CODEGEN_ASSERT(reg.size != SizeX64::xmmword); for (RegisterX64 gpr : kGprAllocOrder) { @@ -340,7 +340,7 @@ unsigned IrRegAllocX64::findSpillStackSlot(IrValueKind valueKind) return i; } - LUAU_ASSERT(!"Nowhere to spill"); + CODEGEN_ASSERT(!"Nowhere to spill"); return ~0u; } @@ -364,18 +364,18 @@ bool IrRegAllocX64::hasRestoreOp(const IrInst& inst) const OperandX64 IrRegAllocX64::getRestoreAddress(const IrInst& inst, IrOp restoreOp) { - LUAU_ASSERT(restoreOp.kind != IrOpKind::None); + CODEGEN_ASSERT(restoreOp.kind != IrOpKind::None); switch (getCmdValueKind(inst.cmd)) { case IrValueKind::Unknown: case IrValueKind::None: - LUAU_ASSERT(!"Invalid operand restore value kind"); + CODEGEN_ASSERT(!"Invalid operand restore value kind"); break; case IrValueKind::Tag: return restoreOp.kind == IrOpKind::VmReg ? luauRegTag(vmRegOp(restoreOp)) : luauConstantTag(vmConstOp(restoreOp)); case IrValueKind::Int: - LUAU_ASSERT(restoreOp.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(restoreOp.kind == IrOpKind::VmReg); return luauRegValueInt(vmRegOp(restoreOp)); case IrValueKind::Pointer: return restoreOp.kind == IrOpKind::VmReg ? luauRegValue(vmRegOp(restoreOp)) : luauConstantValue(vmConstOp(restoreOp)); @@ -385,7 +385,7 @@ OperandX64 IrRegAllocX64::getRestoreAddress(const IrInst& inst, IrOp restoreOp) return restoreOp.kind == IrOpKind::VmReg ? luauReg(vmRegOp(restoreOp)) : luauConstant(vmConstOp(restoreOp)); } - LUAU_ASSERT(!"Failed to find restore operand location"); + CODEGEN_ASSERT(!"Failed to find restore operand location"); return noreg; } @@ -419,23 +419,23 @@ uint32_t IrRegAllocX64::findInstructionWithFurthestNextUse(const std::array void ScopedRegX64::take(RegisterX64 reg) { - LUAU_ASSERT(this->reg == noreg); + CODEGEN_ASSERT(this->reg == noreg); this->reg = owner.takeReg(reg, kInvalidInstIdx); } void ScopedRegX64::alloc(SizeX64 size) { - LUAU_ASSERT(reg == noreg); + CODEGEN_ASSERT(reg == noreg); reg = owner.allocReg(size, kInvalidInstIdx); } void ScopedRegX64::free() { - LUAU_ASSERT(reg != noreg); + CODEGEN_ASSERT(reg != noreg); owner.freeReg(reg); reg = noreg; } @@ -504,7 +504,7 @@ ScopedSpills::~ScopedSpills() IrSpillX64& spill = owner.spills[i]; // Restoring spills inside this scope cannot create new spills - LUAU_ASSERT(spill.spillId < endSpillId); + CODEGEN_ASSERT(spill.spillId < endSpillId); // If spill was created inside current scope, it has to be restored if (spill.spillId >= startSpillId) diff --git a/CodeGen/src/IrTranslateBuiltins.cpp b/CodeGen/src/IrTranslateBuiltins.cpp index 2f233b6c..b7630adf 100644 --- a/CodeGen/src/IrTranslateBuiltins.cpp +++ b/CodeGen/src/IrTranslateBuiltins.cpp @@ -21,7 +21,7 @@ namespace CodeGen static void builtinCheckDouble(IrBuilder& build, IrOp arg, int pcpos) { if (arg.kind == IrOpKind::Constant) - LUAU_ASSERT(build.function.constOp(arg).kind == IrConstKind::Double); + CODEGEN_ASSERT(build.function.constOp(arg).kind == IrConstKind::Double); else build.loadAndCheckTag(arg, LUA_TNUMBER, build.vmExit(pcpos)); } @@ -227,7 +227,7 @@ static BuiltinImplResult translateBuiltinMathClamp(IrBuilder& build, int nparams IrOp block = build.block(IrBlockKind::Internal); - LUAU_ASSERT(args.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(args.kind == IrOpKind::VmReg); builtinCheckDouble(build, build.vmReg(arg), pcpos); builtinCheckDouble(build, args, pcpos); @@ -463,7 +463,7 @@ static BuiltinImplResult translateBuiltinBit32Extract( if (vb.kind == IrOpKind::Constant) { int f = 
int(build.function.doubleOp(vb)); - LUAU_ASSERT(unsigned(f) < 32); // checked above + CODEGEN_ASSERT(unsigned(f) < 32); // checked above value = n; @@ -658,7 +658,7 @@ static BuiltinImplResult translateBuiltinVector(IrBuilder& build, int nparams, i if (nparams < 3 || nresults > 1) return {BuiltinImplType::None, -1}; - LUAU_ASSERT(LUA_VECTOR_SIZE == 3); + CODEGEN_ASSERT(LUA_VECTOR_SIZE == 3); builtinCheckDouble(build, build.vmReg(arg), pcpos); builtinCheckDouble(build, args, pcpos); @@ -690,7 +690,7 @@ static BuiltinImplResult translateBuiltinTableInsert(IrBuilder& build, int npara if (args.kind == IrOpKind::Constant) { - LUAU_ASSERT(build.function.constOp(args).kind == IrConstKind::Double); + CODEGEN_ASSERT(build.function.constOp(args).kind == IrConstKind::Double); // No barrier necessary since numbers aren't collectable build.inst(IrCmd::STORE_DOUBLE, setnum, args); @@ -702,7 +702,7 @@ static BuiltinImplResult translateBuiltinTableInsert(IrBuilder& build, int npara build.inst(IrCmd::STORE_TVALUE, setnum, va); // Compiler only generates FASTCALL*K for source-level constants, so dynamic imports are not affected - LUAU_ASSERT(build.function.proto); + CODEGEN_ASSERT(build.function.proto); IrOp argstag = args.kind == IrOpKind::VmConst ? build.constTag(build.function.proto->k[vmConstOp(args)].tt) : build.undef(); build.inst(IrCmd::BARRIER_TABLE_FORWARD, table, args, argstag); diff --git a/CodeGen/src/IrTranslation.cpp b/CodeGen/src/IrTranslation.cpp index 242d0947..44d0a264 100644 --- a/CodeGen/src/IrTranslation.cpp +++ b/CodeGen/src/IrTranslation.cpp @@ -27,8 +27,8 @@ struct FallbackStreamScope : build(build) , next(next) { - LUAU_ASSERT(fallback.kind == IrOpKind::Block); - LUAU_ASSERT(next.kind == IrOpKind::Block); + CODEGEN_ASSERT(fallback.kind == IrOpKind::Block); + CODEGEN_ASSERT(next.kind == IrOpKind::Block); build.inst(IrCmd::JUMP, next); build.beginBlock(fallback); @@ -55,10 +55,10 @@ static IrOp loadDoubleOrConstant(IrBuilder& build, IrOp arg) { if (arg.kind == IrOpKind::VmConst) { - LUAU_ASSERT(build.function.proto); + CODEGEN_ASSERT(build.function.proto); TValue protok = build.function.proto->k[vmConstOp(arg)]; - LUAU_ASSERT(protok.tt == LUA_TNUMBER); + CODEGEN_ASSERT(protok.tt == LUA_TNUMBER); return build.constDouble(protok.value.n); } @@ -312,10 +312,10 @@ void translateInstJumpxEqN(IrBuilder& build, const Instruction* pc, int pcpos) build.beginBlock(checkValue); IrOp va = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra)); - LUAU_ASSERT(build.function.proto); + CODEGEN_ASSERT(build.function.proto); TValue protok = build.function.proto->k[aux & 0xffffff]; - LUAU_ASSERT(protok.tt == LUA_TNUMBER); + CODEGEN_ASSERT(protok.tt == LUA_TNUMBER); IrOp vb = build.constDouble(protok.value.n); build.inst(IrCmd::JUMP_CMP_NUM, va, vb, build.cond(IrCondition::NotEqual), not_ ? target : next, not_ ? 
next : target); @@ -468,10 +468,10 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc, { if (opb.kind == IrOpKind::VmConst) { - LUAU_ASSERT(build.function.proto); + CODEGEN_ASSERT(build.function.proto); TValue protok = build.function.proto->k[vmConstOp(opb)]; - LUAU_ASSERT(protok.tt == LUA_TNUMBER); + CODEGEN_ASSERT(protok.tt == LUA_TNUMBER); vb = build.constDouble(protok.value.n); } @@ -483,10 +483,10 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc, if (opc.kind == IrOpKind::VmConst) { - LUAU_ASSERT(build.function.proto); + CODEGEN_ASSERT(build.function.proto); TValue protok = build.function.proto->k[vmConstOp(opc)]; - LUAU_ASSERT(protok.tt == LUA_TNUMBER); + CODEGEN_ASSERT(protok.tt == LUA_TNUMBER); // VM has special cases for exponentiation with constants if (tm == TM_POW && protok.value.n == 0.5) @@ -505,7 +505,7 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc, if (result.kind == IrOpKind::None) { - LUAU_ASSERT(vc.kind != IrOpKind::None); + CODEGEN_ASSERT(vc.kind != IrOpKind::None); switch (tm) { @@ -531,7 +531,7 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc, result = build.inst(IrCmd::INVOKE_LIBM, build.constUint(LBF_MATH_POW), vb, vc); break; default: - LUAU_ASSERT(!"Unsupported binary op"); + CODEGEN_ASSERT(!"Unsupported binary op"); } } @@ -717,7 +717,7 @@ IrOp translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool int skip = LUAU_INSN_C(*pc); Instruction call = pc[skip + 1]; - LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL); + CODEGEN_ASSERT(LUAU_INSN_OP(call) == LOP_CALL); int ra = LUAU_INSN_A(call); int nparams = customParams ? customParamCount : LUAU_INSN_B(call) - 1; @@ -729,7 +729,7 @@ IrOp translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool if (customArgs.kind == IrOpKind::VmConst) { - LUAU_ASSERT(build.function.proto); + CODEGEN_ASSERT(build.function.proto); TValue protok = build.function.proto->k[vmConstOp(customArgs)]; if (protok.tt == LUA_TNUMBER) @@ -746,7 +746,7 @@ IrOp translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool if (br.type != BuiltinImplType::None) { - LUAU_ASSERT(nparams != LUA_MULTRET && "builtins are not allowed to handle variadic arguments"); + CODEGEN_ASSERT(nparams != LUA_MULTRET && "builtins are not allowed to handle variadic arguments"); if (nresults == LUA_MULTRET) build.inst(IrCmd::ADJUST_STACK_TO_REG, build.vmReg(ra), build.constInt(br.actualResultCount)); @@ -808,7 +808,7 @@ void beforeInstForNPrep(IrBuilder& build, const Instruction* pc, int pcpos) void afterInstForNLoop(IrBuilder& build, const Instruction* pc) { - LUAU_ASSERT(!build.numericLoopStack.empty()); + CODEGEN_ASSERT(!build.numericLoopStack.empty()); build.numericLoopStack.pop_back(); } @@ -819,7 +819,7 @@ void translateInstForNPrep(IrBuilder& build, const Instruction* pc, int pcpos) IrOp loopStart = build.blockAtInst(pcpos + getOpLength(LuauOpcode(LUAU_INSN_OP(*pc)))); IrOp loopExit = build.blockAtInst(getJumpTarget(*pc, pcpos)); - LUAU_ASSERT(!build.numericLoopStack.empty()); + CODEGEN_ASSERT(!build.numericLoopStack.empty()); IrOp stepK = build.numericLoopStack.back().step; // When loop parameters are not numbers, VM tries to perform type coercion from string and raises an exception if that fails @@ -872,7 +872,7 @@ void translateInstForNLoop(IrBuilder& build, const Instruction* pc, int pcpos) IrOp loopRepeat = build.blockAtInst(repeatJumpTarget); IrOp loopExit = 
build.blockAtInst(pcpos + getOpLength(LuauOpcode(LUAU_INSN_OP(*pc)))); - LUAU_ASSERT(!build.numericLoopStack.empty()); + CODEGEN_ASSERT(!build.numericLoopStack.empty()); IrBuilder::LoopInfo loopInfo = build.numericLoopStack.back(); // normally, the interrupt is placed at the beginning of the loop body by FORNPREP translation @@ -979,7 +979,7 @@ void translateInstForGPrepInext(IrBuilder& build, const Instruction* pc, int pcp void translateInstForGLoopIpairs(IrBuilder& build, const Instruction* pc, int pcpos) { int ra = LUAU_INSN_A(*pc); - LUAU_ASSERT(int(pc[1]) < 0); + CODEGEN_ASSERT(int(pc[1]) < 0); IrOp loopRepeat = build.blockAtInst(getJumpTarget(*pc, pcpos)); IrOp loopExit = build.blockAtInst(pcpos + getOpLength(LuauOpcode(LUAU_INSN_OP(*pc)))); @@ -1376,7 +1376,7 @@ void translateInstCapture(IrBuilder& build, const Instruction* pc, int pcpos) build.inst(IrCmd::CAPTURE, build.vmUpvalue(index), build.constUint(0)); break; default: - LUAU_ASSERT(!"Unknown upvalue capture type"); + CODEGEN_ASSERT(!"Unknown upvalue capture type"); } } @@ -1394,7 +1394,7 @@ void translateInstNamecall(IrBuilder& build, const Instruction* pc, int pcpos) build.loadAndCheckTag(build.vmReg(rb), LUA_TTABLE, fallback); IrOp table = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb)); - LUAU_ASSERT(build.function.proto); + CODEGEN_ASSERT(build.function.proto); IrOp addrNodeEl = build.inst(IrCmd::GET_HASH_NODE_ADDR, table, build.constUint(tsvalue(&build.function.proto->k[aux])->hash)); // We use 'jump' version instead of 'check' guard because we are jumping away into a non-fallback block @@ -1506,7 +1506,7 @@ void translateInstOrX(IrBuilder& build, const Instruction* pc, int pcpos, IrOp c void translateInstNewClosure(IrBuilder& build, const Instruction* pc, int pcpos) { - LUAU_ASSERT(unsigned(LUAU_INSN_D(*pc)) < unsigned(build.function.proto->sizep)); + CODEGEN_ASSERT(unsigned(LUAU_INSN_D(*pc)) < unsigned(build.function.proto->sizep)); int ra = LUAU_INSN_A(*pc); Proto* pv = build.function.proto->p[LUAU_INSN_D(*pc)]; @@ -1522,7 +1522,7 @@ void translateInstNewClosure(IrBuilder& build, const Instruction* pc, int pcpos) for (int ui = 0; ui < pv->nups; ++ui) { Instruction uinsn = pc[ui + 1]; - LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE); + CODEGEN_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE); switch (LUAU_INSN_A(uinsn)) { @@ -1553,7 +1553,7 @@ void translateInstNewClosure(IrBuilder& build, const Instruction* pc, int pcpos) } default: - LUAU_ASSERT(!"Unknown upvalue capture type"); + CODEGEN_ASSERT(!"Unknown upvalue capture type"); LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks } } diff --git a/CodeGen/src/IrUtils.cpp b/CodeGen/src/IrUtils.cpp index c5abc37b..b49e974f 100644 --- a/CodeGen/src/IrUtils.cpp +++ b/CodeGen/src/IrUtils.cpp @@ -208,7 +208,7 @@ static void removeInstUse(IrFunction& function, uint32_t instIdx) { IrInst& inst = function.instructions[instIdx]; - LUAU_ASSERT(inst.useCount); + CODEGEN_ASSERT(inst.useCount); inst.useCount--; if (inst.useCount == 0) @@ -219,7 +219,7 @@ static void removeBlockUse(IrFunction& function, uint32_t blockIdx) { IrBlock& block = function.blocks[blockIdx]; - LUAU_ASSERT(block.useCount); + CODEGEN_ASSERT(block.useCount); block.useCount--; // Entry block is never removed because it has an implicit use @@ -245,7 +245,7 @@ void removeUse(IrFunction& function, IrOp op) bool isGCO(uint8_t tag) { - LUAU_ASSERT(tag < LUA_T_COUNT); + CODEGEN_ASSERT(tag < LUA_T_COUNT); // mirrors iscollectable(o) from VM/lobject.h return tag >= LUA_TSTRING; @@ -253,7 +253,7 
@@ bool isGCO(uint8_t tag) void kill(IrFunction& function, IrInst& inst) { - LUAU_ASSERT(inst.useCount == 0); + CODEGEN_ASSERT(inst.useCount == 0); inst.cmd = IrCmd::NOP; @@ -277,7 +277,7 @@ void kill(IrFunction& function, uint32_t start, uint32_t end) // Kill instructions in reverse order to avoid killing instructions that are still marked as used for (int i = int(end); i >= int(start); i--) { - LUAU_ASSERT(unsigned(i) < function.instructions.size()); + CODEGEN_ASSERT(unsigned(i) < function.instructions.size()); IrInst& curr = function.instructions[i]; if (curr.cmd == IrCmd::NOP) @@ -289,7 +289,7 @@ void kill(IrFunction& function, IrBlock& block) { - LUAU_ASSERT(block.useCount == 0); + CODEGEN_ASSERT(block.useCount == 0); block.kind = IrBlockKind::Dead; @@ -326,8 +326,8 @@ void replace(IrFunction& function, IrBlock& block, uint32_t instIdx, IrInst repl if (!isBlockTerminator(inst.cmd) && isBlockTerminator(replacement.cmd)) { // Block has to be fully constructed before replacement is performed - LUAU_ASSERT(block.finish != ~0u); - LUAU_ASSERT(instIdx + 1 <= block.finish); + CODEGEN_ASSERT(block.finish != ~0u); + CODEGEN_ASSERT(instIdx + 1 <= block.finish); kill(function, instIdx + 1, block.finish); @@ -353,7 +353,7 @@ void replace(IrFunction& function, IrBlock& block, uint32_t instIdx, IrInst repl void substitute(IrFunction& function, IrInst& inst, IrOp replacement) { - LUAU_ASSERT(!isBlockTerminator(inst.cmd)); + CODEGEN_ASSERT(!isBlockTerminator(inst.cmd)); inst.cmd = IrCmd::SUBSTITUTE; @@ -389,12 +389,12 @@ void applySubstitutions(IrFunction& function, IrOp& op) if (op.kind == IrOpKind::Inst) { IrInst& dst = function.instructions[op.index]; - LUAU_ASSERT(dst.cmd != IrCmd::SUBSTITUTE && "chained substitutions are not allowed"); + CODEGEN_ASSERT(dst.cmd != IrCmd::SUBSTITUTE && "chained substitutions are not allowed"); dst.useCount++; } - LUAU_ASSERT(src.useCount > 0); + CODEGEN_ASSERT(src.useCount > 0); src.useCount--; if (src.useCount == 0) @@ -443,7 +443,7 @@ bool compare(double a, double b, IrCondition cond) case IrCondition::NotGreaterEqual: return !bool(a >= b); default: - LUAU_ASSERT(!"Unsupported condition"); + CODEGEN_ASSERT(!"Unsupported condition"); } return false; @@ -482,7 +482,7 @@ bool compare(int a, int b, IrCondition cond) case IrCondition::UnsignedGreaterEqual: return unsigned(a) >= unsigned(b); default: - LUAU_ASSERT(!"Unsupported condition"); + CODEGEN_ASSERT(!"Unsupported condition"); } return false; @@ -871,7 +871,7 @@ uint32_t getNativeContextOffset(int bfid) case LBF_MATH_LDEXP: return offsetof(NativeContext, libm_ldexp); default: - LUAU_ASSERT(!"Unsupported bfid"); + CODEGEN_ASSERT(!"Unsupported bfid"); } return 0; diff --git a/CodeGen/src/IrValueLocationTracking.cpp b/CodeGen/src/IrValueLocationTracking.cpp index 8a2117d4..70fd7fc4 100644 --- a/CodeGen/src/IrValueLocationTracking.cpp +++ b/CodeGen/src/IrValueLocationTracking.cpp @@ -140,12 +140,12 @@ void IrValueLocationTracking::beforeInstLowering(IrInst& inst) default: // All instructions which reference registers have to be handled explicitly - LUAU_ASSERT(inst.a.kind != IrOpKind::VmReg); - LUAU_ASSERT(inst.b.kind != IrOpKind::VmReg); - LUAU_ASSERT(inst.c.kind != IrOpKind::VmReg); - LUAU_ASSERT(inst.d.kind != IrOpKind::VmReg); - LUAU_ASSERT(inst.e.kind != IrOpKind::VmReg); - LUAU_ASSERT(inst.f.kind != IrOpKind::VmReg); + CODEGEN_ASSERT(inst.a.kind != IrOpKind::VmReg); + CODEGEN_ASSERT(inst.b.kind != IrOpKind::VmReg); + CODEGEN_ASSERT(inst.c.kind 
!= IrOpKind::VmReg); + CODEGEN_ASSERT(inst.d.kind != IrOpKind::VmReg); + CODEGEN_ASSERT(inst.e.kind != IrOpKind::VmReg); + CODEGEN_ASSERT(inst.f.kind != IrOpKind::VmReg); break; } } @@ -232,7 +232,7 @@ void IrValueLocationTracking::invalidateRestoreOp(IrOp location, bool skipValueI } else if (location.kind == IrOpKind::VmConst) { - LUAU_ASSERT(!"VM constants are immutable"); + CODEGEN_ASSERT(!"VM constants are immutable"); } } diff --git a/CodeGen/src/OptimizeConstProp.cpp b/CodeGen/src/OptimizeConstProp.cpp index 362dec22..0c543572 100644 --- a/CodeGen/src/OptimizeConstProp.cpp +++ b/CodeGen/src/OptimizeConstProp.cpp @@ -17,7 +17,6 @@ LUAU_FASTINTVARIABLE(LuauCodeGenMinLinearBlockPath, 3) LUAU_FASTINTVARIABLE(LuauCodeGenReuseSlotLimit, 64) LUAU_FASTFLAGVARIABLE(DebugLuauAbortingChecks, false) -LUAU_FASTFLAGVARIABLE(LuauReuseBufferChecks, false) LUAU_FASTFLAG(LuauCodegenVector) LUAU_DYNAMIC_FASTFLAGVARIABLE(LuauCodeGenCheckGcEffectFix, false) @@ -94,7 +93,7 @@ struct ConstPropState void saveValue(IrOp op, IrOp value) { - LUAU_ASSERT(value.kind == IrOpKind::Constant); + CODEGEN_ASSERT(value.kind == IrOpKind::Constant); if (RegisterInfo* info = tryGetRegisterInfo(op)) { @@ -240,7 +239,7 @@ struct ConstPropState void createRegLink(uint32_t instIdx, IrOp regOp) { - LUAU_ASSERT(!instLink.contains(instIdx)); + CODEGEN_ASSERT(!instLink.contains(instIdx)); instLink[instIdx] = RegisterLink{uint8_t(vmRegOp(regOp)), regs[vmRegOp(regOp)].version}; } @@ -282,16 +281,16 @@ struct ConstPropState // This is used to allow instructions with register references to be compared for equality IrInst versionedVmRegLoad(IrCmd loadCmd, IrOp op) { - LUAU_ASSERT(op.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(op.kind == IrOpKind::VmReg); uint32_t version = regs[vmRegOp(op)].version; - LUAU_ASSERT(version <= 0xffffff); + CODEGEN_ASSERT(version <= 0xffffff); op.index = vmRegOp(op) | (version << 8); return IrInst{loadCmd, op}; } uint32_t* getPreviousInstIndex(const IrInst& inst) { - LUAU_ASSERT(useValueNumbering); + CODEGEN_ASSERT(useValueNumbering); if (uint32_t* prevIdx = valueMap.find(inst)) { @@ -305,7 +304,7 @@ struct ConstPropState uint32_t* getPreviousVersionedLoadIndex(IrCmd cmd, IrOp vmReg) { - LUAU_ASSERT(vmReg.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(vmReg.kind == IrOpKind::VmReg); return getPreviousInstIndex(versionedVmRegLoad(cmd, vmReg)); } @@ -352,7 +351,7 @@ struct ConstPropState // If there is no previous load, we record the current one for future lookups void substituteOrRecordVmRegLoad(IrInst& loadInst) { - LUAU_ASSERT(loadInst.a.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(loadInst.a.kind == IrOpKind::VmReg); if (!useValueNumbering) return; @@ -388,8 +387,8 @@ struct ConstPropState // VM register loads can use the value that was stored in the same Vm register earlier void forwardVmRegStoreToLoad(const IrInst& storeInst, IrCmd loadCmd) { - LUAU_ASSERT(storeInst.a.kind == IrOpKind::VmReg); - LUAU_ASSERT(storeInst.b.kind == IrOpKind::Inst); + CODEGEN_ASSERT(storeInst.a.kind == IrOpKind::VmReg); + CODEGEN_ASSERT(storeInst.b.kind == IrOpKind::Inst); if (!useValueNumbering) return; @@ -694,7 +693,7 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction& { if (inst.a.kind == IrOpKind::VmReg) { - if (FFlag::LuauReuseBufferChecks && inst.b.kind == IrOpKind::Inst) + if (inst.b.kind == IrOpKind::Inst) { if (uint32_t* prevIdx = state.getPreviousVersionedLoadIndex(IrCmd::LOAD_TVALUE, inst.a)) { @@ -964,12 +963,9 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, 
IrFunction& break; case IrCmd::CHECK_BUFFER_LEN: { - if (!FFlag::LuauReuseBufferChecks) - break; - std::optional<int> bufferOffset = function.asIntOp(inst.b.kind == IrOpKind::Constant ? inst.b : state.tryGetValue(inst.b)); int accessSize = function.intOp(inst.c); - LUAU_ASSERT(accessSize > 0); + CODEGEN_ASSERT(accessSize > 0); if (bufferOffset) { @@ -1003,8 +999,8 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction& int prevBound = function.intOp(prev.b); // Negative and overflowing constant offsets should already be replaced with unconditional jumps to a fallback - LUAU_ASSERT(currBound >= 0); - LUAU_ASSERT(prevBound >= 0); + CODEGEN_ASSERT(currBound >= 0); + CODEGEN_ASSERT(prevBound >= 0); if (unsigned(currBound) >= unsigned(prevBound)) replace(function, prev.b, inst.b); @@ -1396,7 +1392,7 @@ static void constPropInBlock(IrBuilder& build, IrBlock& block, ConstPropState& s for (uint32_t index = block.start; index <= block.finish; index++) { - LUAU_ASSERT(index < function.instructions.size()); + CODEGEN_ASSERT(index < function.instructions.size()); IrInst& inst = function.instructions[index]; applySubstitutions(function, inst); @@ -1419,7 +1415,7 @@ static void constPropInBlockChain(IrBuilder& build, std::vector<uint8_t>& visite while (block) { uint32_t blockIdx = function.getBlockIndex(*block); - LUAU_ASSERT(!visited[blockIdx]); + CODEGEN_ASSERT(!visited[blockIdx]); visited[blockIdx] = true; constPropInBlock(build, *block, state); @@ -1474,7 +1470,7 @@ static std::vector<uint32_t> collectDirectBlockJumpPath(IrFunction& function, st // Usually that would mean that we would have a conditional jump at the end of 'block' // But using check guards and fallback blocks it becomes a possible setup // We avoid this by making sure fallbacks rejoin the other immediate successor of 'block' - LUAU_ASSERT(getLiveOutValueCount(function, *block) == 0); + CODEGEN_ASSERT(getLiveOutValueCount(function, *block) == 0); std::vector<uint32_t> path; @@ -1516,7 +1512,7 @@ static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited IrFunction& function = build.function; uint32_t blockIdx = function.getBlockIndex(startingBlock); - LUAU_ASSERT(!visited[blockIdx]); + CODEGEN_ASSERT(!visited[blockIdx]); visited[blockIdx] = true; IrInst& termInst = function.instructions[startingBlock.finish]; @@ -1549,7 +1545,7 @@ static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited constPropInBlock(build, startingBlock, state); // Verify that target hasn't changed - LUAU_ASSERT(function.instructions[startingBlock.finish].a.index == targetBlockIdx); + CODEGEN_ASSERT(function.instructions[startingBlock.finish].a.index == targetBlockIdx); // Note: using startingBlock after this line is unsafe as the reference may be reallocated by build.block() below const uint32_t startingSortKey = startingBlock.sortkey; @@ -1580,8 +1576,8 @@ static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited // The information generated here is consistent with current state that could be outdated, but still useful in IR inspection if (function.cfg.in.size() == newBlock.index) { - LUAU_ASSERT(function.cfg.in.size() == function.cfg.out.size()); - LUAU_ASSERT(function.cfg.in.size() == function.cfg.def.size()); + CODEGEN_ASSERT(function.cfg.in.size() == function.cfg.out.size()); + CODEGEN_ASSERT(function.cfg.in.size() == function.cfg.def.size()); // Live in is the same as the input of the original first block function.cfg.in.push_back(function.cfg.in[path.front()]); diff --git a/CodeGen/src/OptimizeFinalX64.cpp 
b/CodeGen/src/OptimizeFinalX64.cpp index 639b7293..911750b0 100644 --- a/CodeGen/src/OptimizeFinalX64.cpp +++ b/CodeGen/src/OptimizeFinalX64.cpp @@ -17,11 +17,11 @@ namespace CodeGen // This pass might not be useful on different architectures static void optimizeMemoryOperandsX64(IrFunction& function, IrBlock& block) { - LUAU_ASSERT(block.kind != IrBlockKind::Dead); + CODEGEN_ASSERT(block.kind != IrBlockKind::Dead); for (uint32_t index = block.start; index <= block.finish; index++) { - LUAU_ASSERT(index < function.instructions.size()); + CODEGEN_ASSERT(index < function.instructions.size()); IrInst& inst = function.instructions[index]; switch (inst.cmd) diff --git a/CodeGen/src/UnwindBuilderDwarf2.cpp b/CodeGen/src/UnwindBuilderDwarf2.cpp index 08c8e831..b1522e7b 100644 --- a/CodeGen/src/UnwindBuilderDwarf2.cpp +++ b/CodeGen/src/UnwindBuilderDwarf2.cpp @@ -81,7 +81,7 @@ static uint8_t* defineCfaExpressionOffset(uint8_t* pos, uint32_t stackOffset) static uint8_t* defineSavedRegisterLocation(uint8_t* pos, int dwReg, uint32_t stackOffset) { - LUAU_ASSERT(stackOffset % kDataAlignFactor == 0 && "stack offsets have to be measured in kDataAlignFactor units"); + CODEGEN_ASSERT(stackOffset % kDataAlignFactor == 0 && "stack offsets have to be measured in kDataAlignFactor units"); if (dwReg <= 0x3f) { @@ -99,7 +99,7 @@ static uint8_t* defineSavedRegisterLocation(uint8_t* pos, int dwReg, uint32_t st static uint8_t* advanceLocation(uint8_t* pos, unsigned int offset) { - LUAU_ASSERT(offset < 256); + CODEGEN_ASSERT(offset < 256); pos = writeu8(pos, DW_CFA_advance_loc1); pos = writeu8(pos, offset); return pos; @@ -133,7 +133,7 @@ size_t UnwindBuilderDwarf2::getBeginOffset() const void UnwindBuilderDwarf2::startInfo(Arch arch) { - LUAU_ASSERT(arch == A64 || arch == X64); + CODEGEN_ASSERT(arch == A64 || arch == X64); uint8_t* cieLength = pos; pos = writeu32(pos, 0); // Length (to be filled later) @@ -191,7 +191,7 @@ void UnwindBuilderDwarf2::finishFunction(uint32_t beginOffset, uint32_t endOffse unwindFunctions.back().beginOffset = beginOffset; unwindFunctions.back().endOffset = endOffset; - LUAU_ASSERT(fdeEntryStart != nullptr); + CODEGEN_ASSERT(fdeEntryStart != nullptr); pos = alignPosition(fdeEntryStart, pos); writeu32(fdeEntryStart, unsigned(pos - fdeEntryStart - 4)); // Length field itself is excluded from length @@ -202,14 +202,14 @@ void UnwindBuilderDwarf2::finishInfo() // Terminate section pos = writeu32(pos, 0); - LUAU_ASSERT(getSize() <= kRawDataLimit); + CODEGEN_ASSERT(getSize() <= kRawDataLimit); } void UnwindBuilderDwarf2::prologueA64(uint32_t prologueSize, uint32_t stackSize, std::initializer_list<A64::RegisterA64> regs) { - LUAU_ASSERT(stackSize % 16 == 0); - LUAU_ASSERT(regs.size() >= 2 && regs.begin()[0] == A64::x29 && regs.begin()[1] == A64::x30); - LUAU_ASSERT(regs.size() * 8 <= stackSize); + CODEGEN_ASSERT(stackSize % 16 == 0); + CODEGEN_ASSERT(regs.size() >= 2 && regs.begin()[0] == A64::x29 && regs.begin()[1] == A64::x30); + CODEGEN_ASSERT(regs.size() * 8 <= stackSize); // sub sp, sp, stackSize pos = advanceLocation(pos, 4); @@ -220,7 +220,7 @@ void UnwindBuilderDwarf2::prologueA64(uint32_t prologueSize, uint32_t stackSize, for (size_t i = 0; i < regs.size(); ++i) { - LUAU_ASSERT(regs.begin()[i].kind == A64::KindA64::x); + CODEGEN_ASSERT(regs.begin()[i].kind == A64::KindA64::x); pos = defineSavedRegisterLocation(pos, regs.begin()[i].index, stackSize - unsigned(i * 8)); } } @@ -228,7 +228,7 @@ void 
UnwindBuilderDwarf2::prologueX64(uint32_t prologueSize, uint32_t stackSize, bool setupFrame, std::initializer_list<X64::RegisterX64> gpr, const std::vector<X64::RegisterX64>& simd) { - LUAU_ASSERT(stackSize > 0 && stackSize < 4096 && stackSize % 8 == 0); + CODEGEN_ASSERT(stackSize > 0 && stackSize < 4096 && stackSize % 8 == 0); unsigned int stackOffset = 8; // Return address was pushed by calling the function unsigned int prologueOffset = 0; @@ -250,7 +250,7 @@ void UnwindBuilderDwarf2::prologueX64(uint32_t prologueSize, uint32_t stackSize, // push reg for (X64::RegisterX64 reg : gpr) { - LUAU_ASSERT(reg.size == X64::SizeX64::qword); + CODEGEN_ASSERT(reg.size == X64::SizeX64::qword); stackOffset += 8; prologueOffset += 2; @@ -259,7 +259,7 @@ void UnwindBuilderDwarf2::prologueX64(uint32_t prologueSize, uint32_t stackSize, pos = defineSavedRegisterLocation(pos, regIndexToDwRegX64[reg.index], stackOffset); } - LUAU_ASSERT(simd.empty()); + CODEGEN_ASSERT(simd.empty()); // sub rsp, stackSize stackOffset += stackSize; @@ -267,8 +267,8 @@ void UnwindBuilderDwarf2::prologueX64(uint32_t prologueSize, uint32_t stackSize, pos = advanceLocation(pos, 4); pos = defineCfaExpressionOffset(pos, stackOffset); - LUAU_ASSERT(stackOffset % 16 == 0); - LUAU_ASSERT(prologueOffset == prologueSize); + CODEGEN_ASSERT(stackOffset % 16 == 0); + CODEGEN_ASSERT(prologueOffset == prologueSize); } size_t UnwindBuilderDwarf2::getSize() const diff --git a/CodeGen/src/UnwindBuilderWin.cpp b/CodeGen/src/UnwindBuilderWin.cpp index 336a4e3f..498470bd 100644 --- a/CodeGen/src/UnwindBuilderWin.cpp +++ b/CodeGen/src/UnwindBuilderWin.cpp @@ -33,7 +33,7 @@ size_t UnwindBuilderWin::getBeginOffset() const void UnwindBuilderWin::startInfo(Arch arch) { - LUAU_ASSERT(arch == X64); + CODEGEN_ASSERT(arch == X64); } void UnwindBuilderWin::startFunction() @@ -61,7 +61,7 @@ void UnwindBuilderWin::finishFunction(uint32_t beginOffset, uint32_t endOffset) unwindFunctions.back().endOffset = endOffset; // Windows unwind code count is stored in uint8_t, so we can't have more - LUAU_ASSERT(unwindCodes.size() < 256); + CODEGEN_ASSERT(unwindCodes.size() < 256); UnwindInfoWin info; info.version = 1; @@ -69,13 +69,13 @@ void UnwindBuilderWin::finishFunction(uint32_t beginOffset, uint32_t endOffset) info.prologsize = prologSize; info.unwindcodecount = uint8_t(unwindCodes.size()); - LUAU_ASSERT(frameReg.index < 16); + CODEGEN_ASSERT(frameReg.index < 16); info.framereg = frameReg.index; - LUAU_ASSERT(frameRegOffset < 16); + CODEGEN_ASSERT(frameRegOffset < 16); info.frameregoff = frameRegOffset; - LUAU_ASSERT(rawDataPos + sizeof(info) <= rawData + kRawDataLimit); + CODEGEN_ASSERT(rawDataPos + sizeof(info) <= rawData + kRawDataLimit); memcpy(rawDataPos, &info, sizeof(info)); rawDataPos += sizeof(info); @@ -84,7 +84,7 @@ void UnwindBuilderWin::finishFunction(uint32_t beginOffset, uint32_t endOffset) // Copy unwind codes in reverse order // Some unwind codes take up two array slots, we write those in reverse order uint8_t* unwindCodePos = rawDataPos + sizeof(UnwindCodeWin) * (unwindCodes.size() - 1); - LUAU_ASSERT(unwindCodePos <= rawData + kRawDataLimit); + CODEGEN_ASSERT(unwindCodePos <= rawData + kRawDataLimit); for (size_t i = 0; i < unwindCodes.size(); i++) { @@ -99,21 +99,21 @@ void UnwindBuilderWin::finishFunction(uint32_t beginOffset, uint32_t endOffset) if (unwindCodes.size() % 2 != 0) rawDataPos += sizeof(UnwindCodeWin); - LUAU_ASSERT(rawDataPos <= rawData + kRawDataLimit); + CODEGEN_ASSERT(rawDataPos <= rawData + kRawDataLimit); } void UnwindBuilderWin::finishInfo() {} void 
UnwindBuilderWin::prologueA64(uint32_t prologueSize, uint32_t stackSize, std::initializer_list<A64::RegisterA64> regs) { - LUAU_ASSERT(!"Not implemented"); + CODEGEN_ASSERT(!"Not implemented"); } void UnwindBuilderWin::prologueX64(uint32_t prologueSize, uint32_t stackSize, bool setupFrame, std::initializer_list<X64::RegisterX64> gpr, const std::vector<X64::RegisterX64>& simd) { - LUAU_ASSERT(stackSize > 0 && stackSize < 4096 && stackSize % 8 == 0); - LUAU_ASSERT(prologueSize < 256); + CODEGEN_ASSERT(stackSize > 0 && stackSize < 4096 && stackSize % 8 == 0); + CODEGEN_ASSERT(prologueSize < 256); unsigned int stackOffset = 8; // Return address was pushed by calling the function unsigned int prologueOffset = 0; @@ -135,7 +135,7 @@ void UnwindBuilderWin::prologueX64(uint32_t prologueSize, uint32_t stackSize, bo // push reg for (X64::RegisterX64 reg : gpr) { - LUAU_ASSERT(reg.size == X64::SizeX64::qword); + CODEGEN_ASSERT(reg.size == X64::SizeX64::qword); stackOffset += 8; prologueOffset += 2; @@ -143,7 +143,7 @@ void UnwindBuilderWin::prologueX64(uint32_t prologueSize, uint32_t stackSize, bo } // If frame pointer is used, simd register storage is not implemented, it will require reworking store offsets - LUAU_ASSERT(!setupFrame || simd.size() == 0); + CODEGEN_ASSERT(!setupFrame || simd.size() == 0); unsigned int simdStorageSize = unsigned(simd.size()) * 16; @@ -161,7 +161,7 @@ void UnwindBuilderWin::prologueX64(uint32_t prologueSize, uint32_t stackSize, bo else { // This command can handle allocations up to 512K-8 bytes, but that potentially requires stack probing - LUAU_ASSERT(stackSize < 4096); + CODEGEN_ASSERT(stackSize < 4096); stackOffset += stackSize; prologueOffset += 7; @@ -179,8 +179,8 @@ void UnwindBuilderWin::prologueX64(uint32_t prologueSize, uint32_t stackSize, bo // vmovaps [rsp+n], xmm for (X64::RegisterX64 reg : simd) { - LUAU_ASSERT(reg.size == X64::SizeX64::xmmword); - LUAU_ASSERT(xmmStoreOffset % 16 == 0 && "simd stores have to be performed to aligned locations"); + CODEGEN_ASSERT(reg.size == X64::SizeX64::xmmword); + CODEGEN_ASSERT(xmmStoreOffset % 16 == 0 && "simd stores have to be performed to aligned locations"); prologueOffset += xmmStoreOffset >= 128 ? 10 : 7; unwindCodes.push_back({uint8_t(xmmStoreOffset / 16), 0, 0}); @@ -188,8 +188,8 @@ void UnwindBuilderWin::prologueX64(uint32_t prologueSize, uint32_t stackSize, bo xmmStoreOffset += 16; } - LUAU_ASSERT(stackOffset % 16 == 0); - LUAU_ASSERT(prologueOffset == prologueSize); + CODEGEN_ASSERT(stackOffset % 16 == 0); + CODEGEN_ASSERT(prologueOffset == prologueSize); this->prologSize = prologueSize; } diff --git a/Common/include/Luau/Bytecode.h b/Common/include/Luau/Bytecode.h index e3c20670..5781c2b0 100644 --- a/Common/include/Luau/Bytecode.h +++ b/Common/include/Luau/Bytecode.h @@ -431,7 +431,7 @@ enum LuauBytecodeTag // Bytecode version; runtime supports [MIN, MAX], compiler emits TARGET by default but may emit a higher version when flags are enabled LBC_VERSION_MIN = 3, LBC_VERSION_MAX = 5, - LBC_VERSION_TARGET = 4, + LBC_VERSION_TARGET = 5, // Type encoding version LBC_TYPE_VERSION = 1, // Types of constant table entries diff --git a/Common/include/Luau/ExperimentalFlags.h b/Common/include/Luau/ExperimentalFlags.h index 7372cc0d..15fb9716 100644 --- a/Common/include/Luau/ExperimentalFlags.h +++ b/Common/include/Luau/ExperimentalFlags.h @@ -11,9 +11,9 @@ inline bool isFlagExperimental(const char* flag) // Flags in this list are disabled by default in various command-line tools. 
They may have behavior that is not fully final, // or critical bugs that are found after the code has been submitted. static const char* const kList[] = { - "LuauInstantiateInSubtyping", // requires some fixes to lua-apps code - "LuauTinyControlFlowAnalysis", // waiting for updates to packages depended by internal builtin plugins - "LuauFixIndexerSubtypingOrdering", // requires some small fixes to lua-apps code since this fixes a false negative + "LuauInstantiateInSubtyping", // requires some fixes to lua-apps code + "LuauTinyControlFlowAnalysis", // waiting for updates to packages depended by internal builtin plugins + "LuauFixIndexerSubtypingOrdering", // requires some small fixes to lua-apps code since this fixes a false negative "LuauUpdatedRequireByStringSemantics", // requires some small fixes to fully implement some proposed changes // makes sure we always have at least one entry nullptr, diff --git a/Common/include/Luau/VecDeque.h b/Common/include/Luau/VecDeque.h index dd973c1d..a1b555b0 100644 --- a/Common/include/Luau/VecDeque.h +++ b/Common/include/Luau/VecDeque.h @@ -284,26 +284,30 @@ public: return buffer[logicalToPhysical(pos)]; } - T& front() { + T& front() + { LUAU_ASSERT(!empty()); return buffer[head]; } - const T& front() const { + const T& front() const + { LUAU_ASSERT(!empty()); return buffer[head]; } - T& back() { + T& back() + { LUAU_ASSERT(!empty()); size_t back = logicalToPhysical(queue_size - 1); return buffer[back]; } - const T& back() const { + const T& back() const + { LUAU_ASSERT(!empty()); size_t back = logicalToPhysical(queue_size - 1); @@ -427,7 +431,7 @@ public: grow(); size_t next_back = logicalToPhysical(queue_size); - new (buffer + next_back)T(value); + new (buffer + next_back) T(value); queue_size++; } @@ -446,7 +450,7 @@ public: grow(); head = (head == 0) ? capacity() - 1 : head - 1; - new (buffer + head)T(value); + new (buffer + head) T(value); queue_size++; } diff --git a/Compiler/src/BuiltinFolding.cpp b/Compiler/src/BuiltinFolding.cpp index 2f372cd9..0886e94a 100644 --- a/Compiler/src/BuiltinFolding.cpp +++ b/Compiler/src/BuiltinFolding.cpp @@ -5,8 +5,6 @@ #include -LUAU_FASTFLAGVARIABLE(LuauVectorLiterals, false) - namespace Luau { namespace Compile @@ -473,8 +471,7 @@ Constant foldBuiltin(int bfid, const Constant* args, size_t count) break; case LBF_VECTOR: - if (FFlag::LuauVectorLiterals && count >= 3 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number && - args[2].type == Constant::Type_Number) + if (count >= 3 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number && args[2].type == Constant::Type_Number) { if (count == 3) return cvector(args[0].valueNumber, args[1].valueNumber, args[2].valueNumber, 0.0); diff --git a/Compiler/src/BytecodeBuilder.cpp b/Compiler/src/BytecodeBuilder.cpp index 703ccee9..852ac80a 100644 --- a/Compiler/src/BytecodeBuilder.cpp +++ b/Compiler/src/BytecodeBuilder.cpp @@ -7,9 +7,6 @@ #include #include -LUAU_FASTFLAG(LuauVectorLiterals) -LUAU_FASTFLAG(LuauCompileRevK) - namespace Luau { @@ -1125,7 +1122,7 @@ std::string BytecodeBuilder::getError(const std::string& message) uint8_t BytecodeBuilder::getVersion() { // This function usually returns LBC_VERSION_TARGET but may sometimes return a higher number (within LBC_VERSION_MIN/MAX) under fast flags - return (FFlag::LuauVectorLiterals || FFlag::LuauCompileRevK) ? 
5 : LBC_VERSION_TARGET; + return LBC_VERSION_TARGET; } uint8_t BytecodeBuilder::getTypeEncodingVersion() diff --git a/Compiler/src/Compiler.cpp b/Compiler/src/Compiler.cpp index af17e3f2..6d859aa2 100644 --- a/Compiler/src/Compiler.cpp +++ b/Compiler/src/Compiler.cpp @@ -26,8 +26,6 @@ LUAU_FASTINTVARIABLE(LuauCompileInlineThreshold, 25) LUAU_FASTINTVARIABLE(LuauCompileInlineThresholdMaxBoost, 300) LUAU_FASTINTVARIABLE(LuauCompileInlineDepth, 5) -LUAU_FASTFLAGVARIABLE(LuauCompileRevK, false) - namespace Luau { @@ -1517,7 +1515,7 @@ struct Compiler } else { - if (FFlag::LuauCompileRevK && (expr->op == AstExprBinary::Sub || expr->op == AstExprBinary::Div)) + if (expr->op == AstExprBinary::Sub || expr->op == AstExprBinary::Div) { int32_t lc = getConstantNumber(expr->left); diff --git a/Sources.cmake b/Sources.cmake index ae46bd36..1f9aa5a7 100644 --- a/Sources.cmake +++ b/Sources.cmake @@ -74,6 +74,7 @@ target_sources(Luau.CodeGen PRIVATE CodeGen/include/Luau/CodeAllocator.h CodeGen/include/Luau/CodeBlockUnwind.h CodeGen/include/Luau/CodeGen.h + CodeGen/include/Luau/CodeGenCommon.h CodeGen/include/Luau/ConditionA64.h CodeGen/include/Luau/ConditionX64.h CodeGen/include/Luau/IrAnalysis.h diff --git a/tests/AstJsonEncoder.test.cpp b/tests/AstJsonEncoder.test.cpp index 5d6b53a3..82e8f139 100644 --- a/tests/AstJsonEncoder.test.cpp +++ b/tests/AstJsonEncoder.test.cpp @@ -477,7 +477,7 @@ TEST_CASE_FIXTURE(JsonEncoderFixture, "encode_AstTypeFunction") AstStat* statement = expectParseStatement(R"(type fun = (string, bool, named: number) -> ())"); std::string_view expected = - R"({"type":"AstStatTypeAlias","location":"0,0 - 0,46","name":"fun","generics":[],"genericPacks":[],"type":{"type":"AstTypeFunction","location":"0,11 - 0,46","generics":[],"genericPacks":[],"argTypes":{"type":"AstTypeList","types":[{"type":"AstTypeReference","location":"0,12 - 0,18","name":"string","nameLocation":"0,12 - 0,18","parameters":[]},{"type":"AstTypeReference","location":"0,20 - 0,24","name":"bool","nameLocation":"0,20 - 0,24","parameters":[]},{"type":"AstTypeReference","location":"0,33 - 0,39","name":"number","nameLocation":"0,33 - 0,39","parameters":[]}]},"argNames":[null,null,{"type":"AstArgumentName","name":"named","location":"0,26 - 0,31"}],"returnTypes":{"type":"AstTypeList","types":[]}},"exported":false})"; + R"({"type":"AstStatTypeAlias","location":"0,0 - 0,46","name":"fun","generics":[],"genericPacks":[],"type":{"type":"AstTypeFunction","location":"0,11 - 0,46","generics":[],"genericPacks":[],"argTypes":{"type":"AstTypeList","types":[{"type":"AstTypeReference","location":"0,12 - 0,18","name":"string","nameLocation":"0,12 - 0,18","parameters":[]},{"type":"AstTypeReference","location":"0,20 - 0,24","name":"bool","nameLocation":"0,20 - 0,24","parameters":[]},{"type":"AstTypeReference","location":"0,33 - 0,39","name":"number","nameLocation":"0,33 - 0,39","parameters":[]}]},"argNames":[null,null,{"type":"AstArgumentName","name":"named","location":"0,26 - 0,31"}],"returnTypes":{"type":"AstTypeList","types":[]}},"exported":false})"; CHECK(toJson(statement) == expected); } diff --git a/tests/Compiler.test.cpp b/tests/Compiler.test.cpp index f24087bf..d6973aa8 100644 --- a/tests/Compiler.test.cpp +++ b/tests/Compiler.test.cpp @@ -15,8 +15,6 @@ namespace Luau std::string rep(const std::string& s, size_t n); } -LUAU_FASTFLAG(LuauVectorLiterals) -LUAU_FASTFLAG(LuauCompileRevK) LUAU_FASTINT(LuauCompileInlineDepth) LUAU_FASTINT(LuauCompileInlineThreshold) LUAU_FASTINT(LuauCompileInlineThresholdMaxBoost) @@ -1182,8 +1180,6 @@ 
RETURN R0 1 TEST_CASE("AndOrChainCodegen") { - ScopedFastFlag sff(FFlag::LuauCompileRevK, true); - const char* source = R"( return (1 - verticalGradientTurbulence < waterLevel + .015 and Enum.Material.Sand) @@ -2106,8 +2102,6 @@ RETURN R0 0 TEST_CASE("AndOrOptimizations") { - ScopedFastFlag sff(FFlag::LuauCompileRevK, true); - // the OR/ORK optimization triggers for cutoff since lhs is simple CHECK_EQ("\n" + compileFunction(R"( local function advancedRidgedFilter(value, cutoff) @@ -4490,8 +4484,6 @@ L0: RETURN R0 -1 TEST_CASE("VectorLiterals") { - ScopedFastFlag sff(FFlag::LuauVectorLiterals, true); - CHECK_EQ("\n" + compileFunction("return Vector3.new(1, 2, 3)", 0, 2, /*enableVectors*/ true), R"( LOADK R0 K0 [1, 2, 3] RETURN R0 1 @@ -7852,8 +7844,6 @@ RETURN R0 1 TEST_CASE("ArithRevK") { - ScopedFastFlag sff(FFlag::LuauCompileRevK, true); - // - and / have special optimized form for reverse constants; in the future, + and * will likely get compiled to ADDK/MULK // other operators are not important enough to optimize reverse constant forms for CHECK_EQ("\n" + compileFunction0(R"( diff --git a/tests/Conformance.test.cpp b/tests/Conformance.test.cpp index 8d075b51..3293b246 100644 --- a/tests/Conformance.test.cpp +++ b/tests/Conformance.test.cpp @@ -2158,7 +2158,7 @@ TEST_CASE("IrInstructionLimit") Luau::CodeGen::CodeGenCompilationResult nativeResult = Luau::CodeGen::compile(L, -1, Luau::CodeGen::CodeGen_ColdFunctions, &nativeStats); // Limit is not hit immediately, so with some functions compiled it should be a success - CHECK(nativeResult != Luau::CodeGen::CodeGenCompilationResult::CodeGenFailed); + CHECK(nativeResult == Luau::CodeGen::CodeGenCompilationResult::CodeGenOverflowInstructionLimit); // We should be able to compile at least one of our functions CHECK(nativeStats.functionsCompiled > 0); diff --git a/tests/DiffAsserts.h b/tests/DiffAsserts.h index 2c919c2a..b80ea312 100644 --- a/tests/DiffAsserts.h +++ b/tests/DiffAsserts.h @@ -26,7 +26,7 @@ std::string diff(TypeId l, TypeId r); template<> std::string diff(const Type& l, const Type& r); -} +} // namespace Luau // Note: the do-while blocks in the macros below is to scope the INFO block to // only that assertion. 
@@ -36,11 +36,11 @@ std::string diff(const Type& l, const Type& r); { \ INFO("Left and right values were not equal: ", diff(l, r)); \ CHECK_EQ(l, r); \ - } while(false); + } while (false); #define REQUIRE_EQ_DIFF(l, r) \ do \ { \ INFO("Left and right values were not equal: ", diff(l, r)); \ REQUIRE_EQ(l, r); \ - } while(false); + } while (false); diff --git a/tests/IrBuilder.test.cpp b/tests/IrBuilder.test.cpp index 6fab7440..ca8a0b0f 100644 --- a/tests/IrBuilder.test.cpp +++ b/tests/IrBuilder.test.cpp @@ -13,7 +13,6 @@ using namespace Luau::CodeGen; -LUAU_FASTFLAG(LuauReuseBufferChecks) LUAU_DYNAMIC_FASTFLAG(LuauCodeGenCheckGcEffectFix) class IrBuilderFixture @@ -2400,8 +2399,6 @@ bb_fallback_1: TEST_CASE_FIXTURE(IrBuilderFixture, "DuplicateBufferLengthChecks") { - ScopedFastFlag luauReuseBufferChecks{FFlag::LuauReuseBufferChecks, true}; - IrOp block = build.block(IrBlockKind::Internal); IrOp fallback = build.block(IrBlockKind::Fallback); @@ -2470,8 +2467,6 @@ bb_fallback_1: TEST_CASE_FIXTURE(IrBuilderFixture, "BufferLenghtChecksNegativeIndex") { - ScopedFastFlag luauReuseBufferChecks{FFlag::LuauReuseBufferChecks, true}; - IrOp block = build.block(IrBlockKind::Internal); IrOp fallback = build.block(IrBlockKind::Fallback); diff --git a/tests/IrLowering.test.cpp b/tests/IrLowering.test.cpp index 4a7f9e2d..bdb7e38c 100644 --- a/tests/IrLowering.test.cpp +++ b/tests/IrLowering.test.cpp @@ -12,8 +12,6 @@ #include -LUAU_FASTFLAG(LuauFixDivrkInference) -LUAU_FASTFLAG(LuauCompileRevK) LUAU_FASTFLAG(LuauCodegenVector) LUAU_FASTFLAG(LuauCodegenMathMemArgs) @@ -66,8 +64,6 @@ TEST_SUITE_BEGIN("IrLowering"); TEST_CASE("VectorReciprocal") { - ScopedFastFlag luauFixDivrkInference{FFlag::LuauFixDivrkInference, true}; - ScopedFastFlag luauCompileRevK{FFlag::LuauCompileRevK, true}; ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true}; CHECK_EQ("\n" + getCodegenAssembly(R"( @@ -218,8 +214,6 @@ bb_bytecode_1: TEST_CASE("VectorMulDivMixed") { ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true}; - ScopedFastFlag luauFixDivrkInference{FFlag::LuauFixDivrkInference, true}; - ScopedFastFlag luauCompileRevK{FFlag::LuauCompileRevK, true}; CHECK_EQ("\n" + getCodegenAssembly(R"( local function vec3combo(a: vector, b: vector, c: vector, d: vector) diff --git a/tests/Linter.test.cpp b/tests/Linter.test.cpp index 451de9e5..b0e01dce 100644 --- a/tests/Linter.test.cpp +++ b/tests/Linter.test.cpp @@ -18,7 +18,18 @@ function fib(n) return n < 2 and 1 or fib(n-1) + fib(n-2) end -return math.max(fib(5), 1) +)"); + + REQUIRE(0 == result.warnings.size()); +} + +TEST_CASE_FIXTURE(Fixture, "type_family_fully_reduces") +{ + LintResult result = lint(R"( +function fib(n) + return n < 2 or fib(n-2) +end + )"); REQUIRE(0 == result.warnings.size()); diff --git a/tests/TypeFamily.test.cpp b/tests/TypeFamily.test.cpp index abcd699a..dbc706ae 100644 --- a/tests/TypeFamily.test.cpp +++ b/tests/TypeFamily.test.cpp @@ -24,7 +24,8 @@ struct FamilyFixture : Fixture { swapFamily = TypeFamily{/* name */ "Swap", /* reducer */ - [](std::vector<TypeId> tys, std::vector<TypePackId> tps, NotNull<TypeFamilyContext> ctx) -> TypeFamilyReductionResult<TypeId> { + [](TypeId instance, std::vector<TypeId> tys, std::vector<TypePackId> tps, + NotNull<TypeFamilyContext> ctx) -> TypeFamilyReductionResult<TypeId> { LUAU_ASSERT(tys.size() == 1); TypeId param = follow(tys.at(0)); diff --git a/tests/TypeInfer.annotations.test.cpp b/tests/TypeInfer.annotations.test.cpp index 223a7eed..9373e4aa 100644 --- a/tests/TypeInfer.annotations.test.cpp +++ b/tests/TypeInfer.annotations.test.cpp @@ -267,8 +267,8 @@ TEST_CASE_FIXTURE(Fixture, 
"infer_type_of_value_a_via_typeof_with_assignment") CHECK("nil" == toString(requireType("b"))); LUAU_REQUIRE_ERROR_COUNT(1, result); - CHECK( - result.errors[0] == (TypeError{Location{Position{2, 29}, Position{2, 30}}, TypeMismatch{builtinTypes->nilType, builtinTypes->numberType}})); + CHECK(result.errors[0] == + (TypeError{Location{Position{2, 29}, Position{2, 30}}, TypeMismatch{builtinTypes->nilType, builtinTypes->numberType}})); } else { @@ -276,8 +276,8 @@ TEST_CASE_FIXTURE(Fixture, "infer_type_of_value_a_via_typeof_with_assignment") CHECK_EQ(*builtinTypes->numberType, *requireType("b")); LUAU_REQUIRE_ERROR_COUNT(1, result); - CHECK_EQ( - result.errors[0], (TypeError{Location{Position{4, 12}, Position{4, 17}}, TypeMismatch{builtinTypes->numberType, builtinTypes->stringType}})); + CHECK_EQ(result.errors[0], + (TypeError{Location{Position{4, 12}, Position{4, 17}}, TypeMismatch{builtinTypes->numberType, builtinTypes->stringType}})); } } diff --git a/tests/TypeInfer.intersectionTypes.test.cpp b/tests/TypeInfer.intersectionTypes.test.cpp index 80511b4b..e5d21d10 100644 --- a/tests/TypeInfer.intersectionTypes.test.cpp +++ b/tests/TypeInfer.intersectionTypes.test.cpp @@ -541,13 +541,13 @@ TEST_CASE_FIXTURE(Fixture, "intersection_of_tables") )"); LUAU_REQUIRE_ERROR_COUNT(1, result); - const std::string expected = - (FFlag::DebugLuauDeferredConstraintResolution) ? - "Type " - "'{ p: number?, q: number?, r: number? } & { p: number?, q: string? }'" - " could not be converted into " - "'{ p: nil }'; none of the intersection parts are compatible" : - R"(Type + const std::string expected = (FFlag::DebugLuauDeferredConstraintResolution) + ? "Type " + "'{ p: number?, q: number?, r: number? } & { p: number?, q: string? }'" + " could not be converted into " + "'{ p: nil }'; none of the intersection parts are compatible" + : + R"(Type '{| p: number?, q: number?, r: number? |} & {| p: number?, q: string? |}' could not be converted into '{| p: nil |}'; none of the intersection parts are compatible)"; @@ -618,13 +618,13 @@ TEST_CASE_FIXTURE(Fixture, "overloaded_functions_returning_intersections") )"); LUAU_REQUIRE_ERROR_COUNT(1, result); - const std::string expected = - (FFlag::DebugLuauDeferredConstraintResolution) ? - R"(Type + const std::string expected = (FFlag::DebugLuauDeferredConstraintResolution) ? + R"(Type '((number?) -> { p: number } & { q: number }) & ((string?) -> { p: number } & { r: number })' could not be converted into - '(number?) -> { p: number, q: number, r: number }'; none of the intersection parts are compatible)" : - R"(Type + '(number?) -> { p: number, q: number, r: number }'; none of the intersection parts are compatible)" + : + R"(Type '((number?) -> {| p: number |} & {| q: number |}) & ((string?) -> {| p: number |} & {| r: number |})' could not be converted into '(number?) 
-> {| p: number, q: number, r: number |}'; none of the intersection parts are compatible)"; diff --git a/tests/TypeInfer.provisional.test.cpp b/tests/TypeInfer.provisional.test.cpp index b81ab340..0b316150 100644 --- a/tests/TypeInfer.provisional.test.cpp +++ b/tests/TypeInfer.provisional.test.cpp @@ -273,7 +273,6 @@ TEST_CASE_FIXTURE(Fixture, "discriminate_from_x_not_equal_to_nil") // Should be {| x: nil, y: nil |} CHECK_EQ("{| x: nil, y: nil |} | {| x: string, y: number |}", toString(requireTypeAtPosition({7, 28}))); } - } TEST_CASE_FIXTURE(BuiltinsFixture, "bail_early_if_unification_is_too_complicated" * doctest::timeout(0.5)) diff --git a/tests/TypeInfer.refinements.test.cpp b/tests/TypeInfer.refinements.test.cpp index 1a9c6dcc..38c6748c 100644 --- a/tests/TypeInfer.refinements.test.cpp +++ b/tests/TypeInfer.refinements.test.cpp @@ -515,9 +515,9 @@ TEST_CASE_FIXTURE(Fixture, "free_type_is_equal_to_an_lvalue") LUAU_REQUIRE_NO_ERRORS(result); if (FFlag::DebugLuauDeferredConstraintResolution) - CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "unknown"); // a == b + CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "unknown"); // a == b else - CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "a"); // a == b + CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "a"); // a == b CHECK_EQ(toString(requireTypeAtPosition({3, 36})), "string?"); // a == b } diff --git a/tests/TypeInfer.singletons.test.cpp b/tests/TypeInfer.singletons.test.cpp index 2af18684..34fd902b 100644 --- a/tests/TypeInfer.singletons.test.cpp +++ b/tests/TypeInfer.singletons.test.cpp @@ -366,8 +366,7 @@ TEST_CASE_FIXTURE(Fixture, "parametric_tagged_union_alias") LUAU_REQUIRE_ERROR_COUNT(1, result); // FIXME: This could be improved by expanding the contents of `a` - const std::string expectedError = - "Type 'a' could not be converted into 'Err | Ok'"; + const std::string expectedError = "Type 'a' could not be converted into 'Err | Ok'"; CHECK(toString(result.errors[0]) == expectedError); } diff --git a/tests/TypeInfer.tables.test.cpp b/tests/TypeInfer.tables.test.cpp index 2cbc3088..05f6cb7e 100644 --- a/tests/TypeInfer.tables.test.cpp +++ b/tests/TypeInfer.tables.test.cpp @@ -4054,9 +4054,7 @@ TEST_CASE_FIXTURE(Fixture, "table_subtyping_error_suppression") { CHECK_EQ("{| x: any, y: string |}", toString(tm->wantedType)); CHECK_EQ("{| x: string, y: number |}", toString(tm->givenType)); - } - } TEST_SUITE_END(); diff --git a/tests/TypeInfer.test.cpp b/tests/TypeInfer.test.cpp index 7e1faa9c..8877c762 100644 --- a/tests/TypeInfer.test.cpp +++ b/tests/TypeInfer.test.cpp @@ -58,8 +58,8 @@ TEST_CASE_FIXTURE(Fixture, "tc_error") { LUAU_REQUIRE_ERROR_COUNT(1, result); - CHECK_EQ( - result.errors[0], (TypeError{Location{Position{0, 35}, Position{0, 36}}, TypeMismatch{builtinTypes->numberType, builtinTypes->stringType}})); + CHECK_EQ(result.errors[0], + (TypeError{Location{Position{0, 35}, Position{0, 36}}, TypeMismatch{builtinTypes->numberType, builtinTypes->stringType}})); } } @@ -77,9 +77,9 @@ TEST_CASE_FIXTURE(Fixture, "tc_error_2") LUAU_REQUIRE_ERROR_COUNT(1, result); CHECK_EQ(result.errors[0], (TypeError{Location{Position{0, 18}, Position{0, 22}}, TypeMismatch{ - requireType("a"), - builtinTypes->stringType, - }})); + requireType("a"), + builtinTypes->stringType, + }})); } } diff --git a/tests/TypeInfer.typestates.test.cpp b/tests/TypeInfer.typestates.test.cpp index d5229961..61d25189 100644 --- a/tests/TypeInfer.typestates.test.cpp +++ b/tests/TypeInfer.typestates.test.cpp @@ -13,7 +13,7 @@ struct TypeStateFixture : 
BuiltinsFixture { ScopedFastFlag dcr{FFlag::DebugLuauDeferredConstraintResolution, true}; }; -} +} // namespace TEST_SUITE_BEGIN("TypeStatesTest"); diff --git a/tests/TypeInfer.unionTypes.test.cpp b/tests/TypeInfer.unionTypes.test.cpp index 2cccd8f4..e441a6e8 100644 --- a/tests/TypeInfer.unionTypes.test.cpp +++ b/tests/TypeInfer.unionTypes.test.cpp @@ -499,10 +499,10 @@ end if (FFlag::DebugLuauDeferredConstraintResolution) { - CHECK_EQ(toString(result.errors[0]), - "Type 'X | Y | Z' could not be converted into '{ w: number }'; type X | Y | Z[0] (X) is not a subtype of { w: number } ({ w: number })\n\t" - "type X | Y | Z[1] (Y) is not a subtype of { w: number } ({ w: number })\n\t" - "type X | Y | Z[2] (Z) is not a subtype of { w: number } ({ w: number })"); + CHECK_EQ(toString(result.errors[0]), "Type 'X | Y | Z' could not be converted into '{ w: number }'; type X | Y | Z[0] (X) is not a subtype " + "of { w: number } ({ w: number })\n\t" + "type X | Y | Z[1] (Y) is not a subtype of { w: number } ({ w: number })\n\t" + "type X | Y | Z[2] (Z) is not a subtype of { w: number } ({ w: number })"); } else { diff --git a/tests/main.cpp b/tests/main.cpp index fa6d61b5..5d1ee6a6 100644 --- a/tests/main.cpp +++ b/tests/main.cpp @@ -202,9 +202,12 @@ struct TeamCityReporter : doctest::IReporter void test_case_end(const doctest::CurrentTestCaseStats& in) override { - printf("##teamcity[testMetadata testName='%s: %s' name='total_asserts' type='number' value='%d']\n", currentTest->m_test_suite, currentTest->m_name, in.numAssertsCurrentTest); - printf("##teamcity[testMetadata testName='%s: %s' name='failed_asserts' type='number' value='%d']\n", currentTest->m_test_suite, currentTest->m_name, in.numAssertsFailedCurrentTest); - printf("##teamcity[testMetadata testName='%s: %s' name='runtime' type='number' value='%f']\n", currentTest->m_test_suite, currentTest->m_name, in.seconds); + printf("##teamcity[testMetadata testName='%s: %s' name='total_asserts' type='number' value='%d']\n", currentTest->m_test_suite, + currentTest->m_name, in.numAssertsCurrentTest); + printf("##teamcity[testMetadata testName='%s: %s' name='failed_asserts' type='number' value='%d']\n", currentTest->m_test_suite, + currentTest->m_name, in.numAssertsFailedCurrentTest); + printf("##teamcity[testMetadata testName='%s: %s' name='runtime' type='number' value='%f']\n", currentTest->m_test_suite, currentTest->m_name, + in.seconds); if (!in.testCaseSuccess) printf("##teamcity[testFailed name='%s: %s']\n", currentTest->m_test_suite, currentTest->m_name); @@ -212,15 +215,18 @@ struct TeamCityReporter : doctest::IReporter printf("##teamcity[testFinished name='%s: %s']\n", currentTest->m_test_suite, currentTest->m_name); } - void test_case_exception(const doctest::TestCaseException& in) override { - printf("##teamcity[testFailed name='%s: %s' message='Unhandled exception' details='%s']\n", currentTest->m_test_suite, currentTest->m_name, in.error_string.c_str()); + void test_case_exception(const doctest::TestCaseException& in) override + { + printf("##teamcity[testFailed name='%s: %s' message='Unhandled exception' details='%s']\n", currentTest->m_test_suite, currentTest->m_name, + in.error_string.c_str()); } void subcase_start(const doctest::SubcaseSignature& /*in*/) override {} void subcase_end() override {} - void log_assert(const doctest::AssertData& ad) override { - if(!ad.m_failed) + void log_assert(const doctest::AssertData& ad) override + { + if (!ad.m_failed) return; if (ad.m_decomp.size()) @@ -229,7 +235,8 @@ struct TeamCityReporter : 
doctest::IReporter fprintf(stderr, "%s(%d): ERROR: %s\n", ad.m_file, ad.m_line, ad.m_expr); } - void log_message(const doctest::MessageData& md) override { + void log_message(const doctest::MessageData& md) override + { const char* severity = (md.m_severity & doctest::assertType::is_warn) ? "WARNING" : "ERROR"; bool isError = md.m_severity & (doctest::assertType::is_require | doctest::assertType::is_check); fprintf(isError ? stderr : stdout, "%s(%d): %s: %s\n", md.m_file, md.m_line, severity, md.m_string.c_str()); diff --git a/tools/faillist.txt b/tools/faillist.txt index 8299cb75..dd51634b 100644 --- a/tools/faillist.txt +++ b/tools/faillist.txt @@ -5,7 +5,6 @@ AutocompleteTest.anonymous_autofilled_generic_type_pack_vararg AutocompleteTest.autocomplete_response_perf1 AutocompleteTest.autocomplete_string_singleton_equality AutocompleteTest.do_wrong_compatible_nonself_calls -AutocompleteTest.type_correct_expected_argument_type_suggestion AutocompleteTest.type_correct_expected_argument_type_suggestion_self AutocompleteTest.type_correct_suggestion_for_overloads BuiltinTests.aliased_string_format @@ -98,8 +97,8 @@ GenericsTests.bound_tables_do_not_clone_original_fields GenericsTests.check_generic_function GenericsTests.check_generic_local_function GenericsTests.check_mutual_generic_functions -GenericsTests.check_mutual_generic_functions_unannotated GenericsTests.check_mutual_generic_functions_errors +GenericsTests.check_mutual_generic_functions_unannotated GenericsTests.check_nested_generic_function GenericsTests.check_recursive_generic_function GenericsTests.correctly_instantiate_polymorphic_member_functions @@ -165,7 +164,6 @@ IntersectionTypes.overloadeded_functions_with_weird_typepacks_3 IntersectionTypes.overloadeded_functions_with_weird_typepacks_4 IntersectionTypes.table_write_sealed_indirect IntersectionTypes.union_saturate_overloaded_functions -Linter.CleanCode Linter.DeprecatedApiFenv Linter.FormatStringTyped Linter.TableOperationsIndexer @@ -183,8 +181,6 @@ NonstrictModeTests.table_props_are_any Normalize.higher_order_function_with_annotation Normalize.negations_of_tables Normalize.specific_functions_cannot_be_negated -ParserTests.parse_nesting_based_end_detection -ParserTests.parse_nesting_based_end_detection_single_line ProvisionalTests.assign_table_with_refined_property_with_a_similar_type_is_illegal ProvisionalTests.discriminate_from_x_not_equal_to_nil ProvisionalTests.do_not_ice_when_trying_to_pick_first_of_generic_type_pack
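---

Notes on the changes above, with short sketches in C++.

The LUAU_ASSERT-to-CODEGEN_ASSERT rename routes every assertion in CodeGen through the new CodeGen/include/Luau/CodeGenCommon.h header registered in Sources.cmake. The patch does not show that header's body, so the following is only a minimal sketch of the forwarding macro, assuming it simply delegates to the existing LUAU_ASSERT:

    // CodeGenCommon.h, sketched; the real header's contents are not part of this hunk
    #pragma once

    #include "Luau/Common.h" // assumed home of LUAU_ASSERT

    // A dedicated macro gives the code generator a single point where its
    // internal checks can later be toggled or reported separately from the
    // rest of Luau; here it just forwards.
    #define CODEGEN_ASSERT(expr) LUAU_ASSERT(expr)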
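The UnwindBuilderDwarf2 hunks touch two helpers that emit DWARF call-frame instructions byte by byte: advanceLocation writes DW_CFA_advance_loc1, an opcode followed by a single-byte code delta (hence the offset < 256 assertion), and defineSavedRegisterLocation writes DW_CFA_offset, which packs a register number up to 0x3f into the low six bits of the opcode byte, followed by the save slot expressed in kDataAlignFactor units. A self-contained sketch of that encoding; the opcode values follow the DWARF specification, while kDataAlignFactor = 8 is an assumption that must match the CIE's data alignment factor:

    #include <cstdint>

    static uint8_t* writeu8(uint8_t* pos, uint8_t value)
    {
        *pos = value;
        return pos + 1;
    }

    const uint8_t kAdvanceLoc1 = 0x02;   // DW_CFA_advance_loc1
    const uint8_t kOffsetOpcode = 0x80;  // DW_CFA_offset: high two bits 10, low six bits hold the register
    const uint32_t kDataAlignFactor = 8; // assumed; factored offsets are measured in these units

    // Advance the unwinder's current code location by 'offset' bytes
    static uint8_t* advanceLoc(uint8_t* pos, unsigned int offset)
    {
        // deltas of 256 or more would need DW_CFA_advance_loc2/4 instead
        pos = writeu8(pos, kAdvanceLoc1);
        return writeu8(pos, uint8_t(offset));
    }

    // Record that DWARF register dwReg was saved at CFA minus stackOffset
    static uint8_t* saveRegister(uint8_t* pos, int dwReg, uint32_t stackOffset)
    {
        pos = writeu8(pos, uint8_t(kOffsetOpcode | dwReg)); // only registers up to 0x3f fit in the opcode
        // the operand is a ULEB128 factored offset; values below 128 fit in one byte
        return writeu8(pos, uint8_t(stackOffset / kDataAlignFactor));
    }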
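The UnwindBuilderWin assertions fall directly out of the Windows x64 unwind data layout: the unwind code count is stored in one byte (unwindCodes.size() < 256), and the frame register and its scaled offset share a single byte as two nibbles (frameReg.index < 16, frameRegOffset < 16). A sketch of the leading fields of that structure, following Microsoft's documented UNWIND_INFO format; the UnwindInfoWin struct in the patch presumably mirrors it:

    #include <cstdint>

    // Leading bytes of the Windows x64 UNWIND_INFO record, per Microsoft's
    // exception handling documentation; bitfields shown here as packed bytes.
    struct UnwindInfoSketch
    {
        uint8_t versionAndFlags;   // low 3 bits: version (currently 1), high 5 bits: flags
        uint8_t prologSize;        // prologue length in bytes
        uint8_t unwindCodeCount;   // number of 2-byte UNWIND_CODE slots, so at most 255
        uint8_t frameRegAndOffset; // low nibble: frame register, high nibble: scaled frame offset
        // followed by an array of UNWIND_CODE entries, padded to an even count
    };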
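Raising LBC_VERSION_TARGET from 4 to 5 makes the compiler emit bytecode version 5 unconditionally now that vector literals and the reverse sub/div constant forms no longer sit behind fast flags, while the runtime keeps accepting the whole [LBC_VERSION_MIN, LBC_VERSION_MAX] range. A sketch of the version gate a loader would apply given those constants; the hypothetical helper name is illustrative, and Luau's actual loader is not part of this patch:

    // Hypothetical helper; the constants come from Common/include/Luau/Bytecode.h
    inline bool isBytecodeVersionSupported(int version)
    {
        return version >= LBC_VERSION_MIN && version <= LBC_VERSION_MAX; // currently [3, 5]
    }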
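The VecDeque.h hunks reformat front/back and the placement-new calls in push_back/push_front, which outline the underlying design: a growable ring buffer where head is a physical index, logicalToPhysical maps queue positions to buffer slots modulo capacity, and new elements are constructed in place with placement new. A minimal sketch of that shape, with illustrative names and growth policy rather than the real class:

    #include <cassert>
    #include <cstddef>
    #include <new>

    template<typename T>
    class RingDeque
    {
    public:
        void push_back(const T& value)
        {
            if (queue_size == cap)
                grow();

            // construct in place one slot past the current last element
            new (buffer + logicalToPhysical(queue_size)) T(value);
            queue_size++;
        }

        void push_front(const T& value)
        {
            if (queue_size == cap)
                grow();

            head = (head == 0) ? cap - 1 : head - 1; // wrap backwards
            new (buffer + head) T(value);
            queue_size++;
        }

        T& front()
        {
            assert(queue_size != 0);
            return buffer[head];
        }

        T& back()
        {
            assert(queue_size != 0);
            return buffer[logicalToPhysical(queue_size - 1)];
        }

    private:
        size_t logicalToPhysical(size_t pos) const
        {
            return (head + pos) % cap; // wrap around the end of the storage
        }

        void grow()
        {
            size_t newCap = cap == 0 ? 4 : cap * 2;
            T* newBuffer = static_cast<T*>(::operator new(newCap * sizeof(T)));

            // unwrap live elements into a contiguous prefix of the new buffer
            for (size_t i = 0; i < queue_size; i++)
            {
                size_t src = logicalToPhysical(i);
                new (newBuffer + i) T(buffer[src]);
                buffer[src].~T();
            }

            ::operator delete(buffer);
            buffer = newBuffer;
            cap = newCap;
            head = 0;
        }

        T* buffer = nullptr;
        size_t head = 0;       // physical index of the first element
        size_t queue_size = 0; // number of live elements
        size_t cap = 0;
    };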