Mirror of https://github.com/luau-lang/luau.git (synced 2024-11-15 06:15:44 +08:00)
Sync to upstream/release/613 (#1167)
# What's changed?

* Compiler now targets bytecode version 5 by default; this includes support for vector type literals and sub/div opcodes with a constant on the lhs.

### New Type Solver

* Normalizer type inhabitance check has been optimized.
* Added the ability to reduce cyclic `and`/`or` type families.

### Native Code Generation

* `CodeGen::compile` now returns more specific causes of a code generation failure.
* Fixed linking issues on platforms that don't support unwind frame data registration.

---

### Internal Contributors

Co-authored-by: Andy Friesen <afriesen@roblox.com>
Co-authored-by: Aviral Goel <agoel@roblox.com>
Co-authored-by: Vyacheslav Egorov <vegorov@roblox.com>

---------

Co-authored-by: Aaron Weiss <aaronweiss@roblox.com>
Co-authored-by: Alexander McCord <amccord@roblox.com>
Co-authored-by: Andy Friesen <afriesen@roblox.com>
Co-authored-by: Vighnesh <vvijay@roblox.com>
Co-authored-by: Aviral Goel <agoel@roblox.com>
Co-authored-by: David Cope <dcope@roblox.com>
Co-authored-by: Lily Brown <lbrown@roblox.com>
Parent: d6c2472f0c
Commit: ea14e65ea0
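Note (editorial): two threads run through the hunks below. First, type family reducers gain a leading parameter identifying the instance being reduced, which is what enables the cyclic `and`/`or` reductions called out above. Second, failed reductions now surface as `UninhabitedTypeFamily` errors and collapse to `never` instead of asserting. A hedged toy of the second thread (invented names, not Luau's API), mirroring `handleTypeFamilyReductionResult` in the Subtyping.h hunk:

```cpp
// Toy model of the new failure path: reduce a family instance; anything still
// blocked afterwards is reported as an error and treated as `never`.
#include <string>
#include <vector>

struct ReductionResult
{
    std::vector<int> blocked; // instances we could not reduce
    std::vector<int> reduced; // instances successfully reduced
};

struct Outcome
{
    int type;                        // resulting type id (toy: -1 means never)
    std::vector<std::string> errors; // accumulated diagnostics
};

Outcome handleReduction(int family, const ReductionResult& r)
{
    Outcome out{-1, {}};
    if (!r.blocked.empty())
    {
        out.errors.push_back("UninhabitedTypeFamily");
        return out; // never plus an error, instead of asserting
    }
    for (int t : r.reduced)
        if (t == family)
            return {family, {}}; // reduced in place: keep the type
    return out; // irreducible: collapse to never
}

int main()
{
    ReductionResult blocked{{7}, {}};
    ReductionResult ok{{}, {42}};
    return handleReduction(42, blocked).errors.size() == 1 && handleReduction(42, ok).type == 42 ? 0 : 1;
}
```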
@@ -255,8 +255,8 @@ struct ReducePackConstraint
 };
 
 using ConstraintV = Variant<SubtypeConstraint, PackSubtypeConstraint, GeneralizationConstraint, InstantiationConstraint, IterableConstraint,
-    NameConstraint, TypeAliasExpansionConstraint, FunctionCallConstraint, FunctionCheckConstraint, PrimitiveTypeConstraint, HasPropConstraint, SetPropConstraint,
-    SetIndexerConstraint, SingletonOrTopTypeConstraint, UnpackConstraint, SetOpConstraint, ReduceConstraint, ReducePackConstraint>;
+    NameConstraint, TypeAliasExpansionConstraint, FunctionCallConstraint, FunctionCheckConstraint, PrimitiveTypeConstraint, HasPropConstraint,
+    SetPropConstraint, SetIndexerConstraint, SingletonOrTopTypeConstraint, UnpackConstraint, SetOpConstraint, ReduceConstraint, ReducePackConstraint>;
 
 struct Constraint
 {
@@ -118,10 +118,9 @@ struct ConstraintGenerator
 
     DcrLogger* logger;
 
-    ConstraintGenerator(ModulePtr module, NotNull<Normalizer> normalizer, NotNull<ModuleResolver> moduleResolver,
-        NotNull<BuiltinTypes> builtinTypes, NotNull<InternalErrorReporter> ice, const ScopePtr& globalScope,
-        std::function<void(const ModuleName&, const ScopePtr&)> prepareModuleScope, DcrLogger* logger, NotNull<DataFlowGraph> dfg,
-        std::vector<RequireCycle> requireCycles);
+    ConstraintGenerator(ModulePtr module, NotNull<Normalizer> normalizer, NotNull<ModuleResolver> moduleResolver, NotNull<BuiltinTypes> builtinTypes,
+        NotNull<InternalErrorReporter> ice, const ScopePtr& globalScope, std::function<void(const ModuleName&, const ScopePtr&)> prepareModuleScope,
+        DcrLogger* logger, NotNull<DataFlowGraph> dfg, std::vector<RequireCycle> requireCycles);
 
     /**
      * The entry point to the ConstraintGenerator. This will construct a set

@@ -190,8 +189,10 @@ private:
 };
 
 using RefinementContext = InsertionOrderedMap<DefId, RefinementPartition>;
-void unionRefinements(const ScopePtr& scope, Location location, const RefinementContext& lhs, const RefinementContext& rhs, RefinementContext& dest, std::vector<ConstraintV>* constraints);
-void computeRefinement(const ScopePtr& scope, Location location, RefinementId refinement, RefinementContext* refis, bool sense, bool eq, std::vector<ConstraintV>* constraints);
+void unionRefinements(const ScopePtr& scope, Location location, const RefinementContext& lhs, const RefinementContext& rhs,
+    RefinementContext& dest, std::vector<ConstraintV>* constraints);
+void computeRefinement(const ScopePtr& scope, Location location, RefinementId refinement, RefinementContext* refis, bool sense, bool eq,
+    std::vector<ConstraintV>* constraints);
 void applyRefinements(const ScopePtr& scope, Location location, RefinementId refinement);
 
 ControlFlow visitBlockWithoutChildScope(const ScopePtr& scope, AstStatBlock* block);
@@ -393,8 +393,9 @@ public:
 
     // Check for inhabitance
     bool isInhabited(TypeId ty);
-    bool isInhabited(TypeId ty, Set<TypeId> seen);
-    bool isInhabited(const NormalizedType* norm, Set<TypeId> seen = {nullptr});
+    bool isInhabited(TypeId ty, Set<TypeId>& seen);
+    bool isInhabited(const NormalizedType* norm);
+    bool isInhabited(const NormalizedType* norm, Set<TypeId>& seen);
 
     // Check for intersections being inhabited
     bool isIntersectionInhabited(TypeId left, TypeId right);
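Note: the seen-set is now threaded by reference instead of by value, so the recursion shares one set rather than copying it at every call; this is presumably part of the inhabitance-check optimization mentioned in the changelog. A self-contained toy of the pattern (stand-in types, not Luau's Set or API):

```cpp
// Toy model of the change: a recursive inhabitance-style check over a graph.
// Passing `seen` by value copies the set on every recursive call; passing it
// by reference shares one set across the whole traversal.
#include <unordered_set>
#include <vector>

struct Node
{
    bool inhabited = true;
    std::vector<const Node*> deps;
};

// Before: `std::unordered_set<const Node*> seen` (copied per call).
// After: pass by reference, as this diff does with Set<TypeId>&.
bool isInhabited(const Node* n, std::unordered_set<const Node*>& seen)
{
    if (!seen.insert(n).second)
        return true; // already being visited; assume inhabited to break the cycle

    if (!n->inhabited)
        return false;

    for (const Node* dep : n->deps)
        if (!isInhabited(dep, seen))
            return false;

    return true;
}

// Convenience wrapper mirroring the new zero-seen overload.
bool isInhabited(const Node* n)
{
    std::unordered_set<const Node*> seen;
    return isInhabited(n, seen);
}

int main()
{
    Node leaf;
    Node root{true, {&leaf}};
    return isInhabited(&root) ? 0 : 1;
}
```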
@@ -127,7 +127,8 @@ public:
    const_iterator(typename Impl::const_iterator impl, typename Impl::const_iterator end)
        : impl(impl)
        , end(end)
-    {}
+    {
+    }
 
    const T& operator*() const
    {

@@ -168,6 +169,7 @@ public:
        ++*this;
        return res;
    }
+
 private:
    typename Impl::const_iterator impl;
    typename Impl::const_iterator end;
@@ -65,7 +65,7 @@ struct SubtypingResult
    bool isSubtype = false;
    bool normalizationTooComplex = false;
    bool isCacheable = true;
+    ErrorVec errors;
    /// The reason for isSubtype to be false. May not be present even if
    /// isSubtype is false, depending on the input types.
    SubtypingReasonings reasoning{kEmptyReasoning};

@@ -78,6 +78,7 @@ struct SubtypingResult
    SubtypingResult& withBothPath(TypePath::Path path);
    SubtypingResult& withSubPath(TypePath::Path path);
    SubtypingResult& withSuperPath(TypePath::Path path);
+    SubtypingResult& withErrors(ErrorVec& err);
 
    // Only negates the `isSubtype`.
    static SubtypingResult negate(const SubtypingResult& result);
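Note: `withErrors` follows the existing chainable `with*` pattern (mutate, then return `*this`), letting reduction errors ride along on a `SubtypingResult`; see the Subtyping.cpp hunks further down for the call sites. A self-contained toy of the idiom (invented names):

```cpp
// Toy sketch of the chainable-result idiom used by SubtypingResult.
#include <string>
#include <utility>
#include <vector>

using ErrorVec = std::vector<std::string>;

struct Result
{
    bool isSubtype = false;
    ErrorVec errors;

    // Like SubtypingResult::withErrors in this diff: take ownership of the
    // accumulated errors and return *this so calls can be chained.
    Result& withErrors(ErrorVec& err)
    {
        errors = std::move(err);
        return *this;
    }
};

Result check()
{
    return Result{true, {}};
}

int main()
{
    ErrorVec errs{"UninhabitedTypeFamily"};
    Result r = check().withErrors(errs); // chains directly off the call
    return r.errors.size() == 1 ? 0 : 1;
}
```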
@@ -211,18 +212,22 @@ private:
    template<typename T, typename Container>
    TypeId makeAggregateType(const Container& container, TypeId orElse);
 
-    template<typename T>
-    T handleTypeFamilyReductionResult(const TypeFamilyInstanceType* tf)
+    std::pair<TypeId, ErrorVec> handleTypeFamilyReductionResult(const TypeFamilyInstanceType* familyInstance)
    {
        TypeFamilyContext context{arena, builtinTypes, scope, normalizer, iceReporter, NotNull{&limits}};
-        TypeFamilyReductionResult<TypeId> result = tf->family->reducer(tf->typeArguments, tf->packArguments, NotNull{&context});
-        if (!result.blockedTypes.empty())
-            unexpected(result.blockedTypes[0]);
-        else if (!result.blockedPacks.empty())
-            unexpected(result.blockedPacks[0]);
-        else if (result.uninhabited || result.result == std::nullopt)
-            return builtinTypes->neverType;
-        return *result.result;
+        TypeId family = arena->addType(*familyInstance);
+        std::string familyString = toString(family);
+        FamilyGraphReductionResult result = reduceFamilies(family, {}, context, true);
+        ErrorVec errors;
+        if (result.blockedTypes.size() != 0 || result.blockedPacks.size() != 0)
+        {
+            errors.push_back(TypeError{{}, UninhabitedTypeFamily{family}});
+            return {builtinTypes->neverType, errors};
+        }
+        if (result.reducedTypes.contains(family))
+            return {family, errors};
+        return {builtinTypes->neverType, errors};
    }
 
    [[noreturn]] void unexpected(TypeId ty);
@@ -406,7 +406,8 @@ struct Property
    // TODO: Kill all constructors in favor of `Property::rw(TypeId read, TypeId write)` and friends.
    Property();
    Property(TypeId readTy, bool deprecated = false, const std::string& deprecatedSuggestion = "", std::optional<Location> location = std::nullopt,
-        const Tags& tags = {}, const std::optional<std::string>& documentationSymbol = std::nullopt, std::optional<Location> typeLocation = std::nullopt);
+        const Tags& tags = {}, const std::optional<std::string>& documentationSymbol = std::nullopt,
+        std::optional<Location> typeLocation = std::nullopt);
 
    // DEPRECATED: Should only be called in non-RWP! We assert that the `readTy` is not nullopt.
    // TODO: Kill once we don't have non-RWP.

@@ -639,9 +640,9 @@ struct NegationType
 
 using ErrorType = Unifiable::Error;
 
-using TypeVariant =
-    Unifiable::Variant<TypeId, FreeType, LocalType, GenericType, PrimitiveType, BlockedType, PendingExpansionType, SingletonType, FunctionType, TableType,
-        MetatableType, ClassType, AnyType, UnionType, IntersectionType, LazyType, UnknownType, NeverType, NegationType, TypeFamilyInstanceType>;
+using TypeVariant = Unifiable::Variant<TypeId, FreeType, LocalType, GenericType, PrimitiveType, BlockedType, PendingExpansionType, SingletonType,
+    FunctionType, TableType, MetatableType, ClassType, AnyType, UnionType, IntersectionType, LazyType, UnknownType, NeverType, NegationType,
+    TypeFamilyInstanceType>;
 
 struct Type final
 {
@@ -76,6 +76,10 @@ struct TypeFamilyReductionResult
    std::vector<TypePackId> blockedPacks;
 };
 
+template<typename T>
+using ReducerFunction =
+    std::function<TypeFamilyReductionResult<T>(T, const std::vector<TypeId>&, const std::vector<TypePackId>&, NotNull<TypeFamilyContext>)>;
+
 /// Represents a type function that may be applied to map a series of types and
 /// type packs to a single output type.
 struct TypeFamily
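Note: the alias template centralizes the reducer signature, so the new leading `T` (the instance being reduced) is declared once and the two family structs in the next hunks shrink to `ReducerFunction<TypeId>` and `ReducerFunction<TypePackId>`. A self-contained sketch of the idiom (toy types, not Luau's):

```cpp
// Sketch of an alias template over std::function: one alias centralizes the
// reducer signature, so adding the leading `instance` parameter is a one-line
// change instead of an edit to every declaration.
#include <functional>
#include <vector>

struct ToyContext
{
};

template<typename T>
struct ToyResult
{
    T value;
};

template<typename T>
using ReducerFunction = std::function<ToyResult<T>(T instance, const std::vector<int>& typeArgs, ToyContext*)>;

struct ToyFamily
{
    // Previously this signature was spelled out in full at every use site.
    ReducerFunction<int> reducer;
};

int main()
{
    ToyFamily f;
    f.reducer = [](int instance, const std::vector<int>&, ToyContext*) {
        return ToyResult<int>{instance};
    };
    ToyContext ctx;
    return f.reducer(7, {}, &ctx).value == 7 ? 0 : 1;
}
```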
@@ -85,7 +89,7 @@ struct TypeFamily
    std::string name;
 
    /// The reducer function for the type family.
-    std::function<TypeFamilyReductionResult<TypeId>(const std::vector<TypeId>&, const std::vector<TypePackId>&, NotNull<TypeFamilyContext>)> reducer;
+    ReducerFunction<TypeId> reducer;
 };
 
 /// Represents a type function that may be applied to map a series of types and

@@ -97,7 +101,7 @@ struct TypePackFamily
    std::string name;
 
    /// The reducer function for the type pack family.
-    std::function<TypeFamilyReductionResult<TypePackId>(const std::vector<TypeId>&, const std::vector<TypePackId>&, NotNull<TypeFamilyContext>)> reducer;
+    ReducerFunction<TypePackId> reducer;
 };
 
 struct FamilyGraphReductionResult

@@ -102,9 +102,7 @@ struct Path
    std::vector<Component> components;
 
    /// Creates a new empty Path.
-    Path()
-    {
-    }
+    Path() {}
 
    /// Creates a new Path from a list of components.
    explicit Path(std::vector<Component> components)
@@ -66,9 +66,15 @@ struct ErrorSuppression
    };
 
    ErrorSuppression() = default;
-    constexpr ErrorSuppression(Value enumValue) : value(enumValue) { }
+    constexpr ErrorSuppression(Value enumValue)
+        : value(enumValue)
+    {
+    }
 
-    constexpr operator Value() const { return value; }
+    constexpr operator Value() const
+    {
+        return value;
+    }
    explicit operator bool() const = delete;
 
    ErrorSuppression orElse(const ErrorSuppression& other) const

@@ -81,6 +87,7 @@ struct ErrorSuppression
            return *this;
        }
    }
+
 private:
    Value value;
 };
@@ -161,7 +161,6 @@ static bool checkTypeMatch(TypeId subTy, TypeId superTy, NotNull<Scope> scope, T
 
    return unifier.canUnify(subTy, superTy).empty();
 }
-
 }
 
 static TypeCorrectKind checkTypeCorrectKind(

@@ -142,9 +142,7 @@ struct HasFreeType : TypeOnceVisitor
 {
    bool result = false;
 
-    HasFreeType()
-    {
-    }
+    HasFreeType() {}
 
    bool visit(TypeId ty) override
    {

@@ -288,7 +286,7 @@ std::optional<TypeId> ConstraintGenerator::lookup(const ScopePtr& scope, DefId d
            // `scope->lookup(operand)` may return nothing because we only bind a type to that operand
            // once we've seen that particular `DefId`. In this case, we need to prototype those types
            // and use those at a later time.
-            std::optional<TypeId> ty = lookup(scope, operand, /*prototype*/false);
+            std::optional<TypeId> ty = lookup(scope, operand, /*prototype*/ false);
            if (!ty)
            {
                ty = arena->addType(BlockedType{});

@@ -315,7 +313,8 @@ NotNull<Constraint> ConstraintGenerator::addConstraint(const ScopePtr& scope, st
    return NotNull{constraints.emplace_back(std::move(c)).get()};
 }
 
-void ConstraintGenerator::unionRefinements(const ScopePtr& scope, Location location, const RefinementContext& lhs, const RefinementContext& rhs, RefinementContext& dest, std::vector<ConstraintV>* constraints)
+void ConstraintGenerator::unionRefinements(const ScopePtr& scope, Location location, const RefinementContext& lhs, const RefinementContext& rhs,
+    RefinementContext& dest, std::vector<ConstraintV>* constraints)
 {
    const auto intersect = [&](const std::vector<TypeId>& types) {
        if (1 == types.size())

@@ -346,7 +345,8 @@ void ConstraintGenerator::unionRefinements(const ScopePtr& scope, Location locat
    }
 }
 
-void ConstraintGenerator::computeRefinement(const ScopePtr& scope, Location location, RefinementId refinement, RefinementContext* refis, bool sense, bool eq, std::vector<ConstraintV>* constraints)
+void ConstraintGenerator::computeRefinement(const ScopePtr& scope, Location location, RefinementId refinement, RefinementContext* refis, bool sense,
+    bool eq, std::vector<ConstraintV>* constraints)
 {
    if (!refinement)
        return;
@@ -907,19 +907,17 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatLocalFuncti
        std::make_unique<Constraint>(constraintScope, function->name->location, GeneralizationConstraint{functionType, sig.signature});
 
    Constraint* previous = nullptr;
-    forEachConstraint(start, end, this,
-        [&c, &previous](const ConstraintPtr& constraint)
-        {
-            c->dependencies.push_back(NotNull{constraint.get()});
+    forEachConstraint(start, end, this, [&c, &previous](const ConstraintPtr& constraint) {
+        c->dependencies.push_back(NotNull{constraint.get()});
 
-            if (auto psc = get<PackSubtypeConstraint>(*constraint); psc && psc->returns)
-            {
-                if (previous)
-                    constraint->dependencies.push_back(NotNull{previous});
+        if (auto psc = get<PackSubtypeConstraint>(*constraint); psc && psc->returns)
+        {
+            if (previous)
+                constraint->dependencies.push_back(NotNull{previous});
 
-                previous = constraint.get();
-            }
-        });
+            previous = constraint.get();
+        }
+    });
 
    addConstraint(scope, std::move(c));
    module->astTypes[function->func] = functionType;

@@ -1018,20 +1016,18 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatFunction* f
        std::make_unique<Constraint>(constraintScope, function->name->location, GeneralizationConstraint{generalizedType, sig.signature});
 
    Constraint* previous = nullptr;
-    forEachConstraint(start, end, this,
-        [&c, &excludeList, &previous](const ConstraintPtr& constraint)
-        {
-            if (!excludeList.contains(constraint.get()))
-                c->dependencies.push_back(NotNull{constraint.get()});
+    forEachConstraint(start, end, this, [&c, &excludeList, &previous](const ConstraintPtr& constraint) {
+        if (!excludeList.contains(constraint.get()))
+            c->dependencies.push_back(NotNull{constraint.get()});
 
-            if (auto psc = get<PackSubtypeConstraint>(*constraint); psc && psc->returns)
-            {
-                if (previous)
-                    constraint->dependencies.push_back(NotNull{previous});
+        if (auto psc = get<PackSubtypeConstraint>(*constraint); psc && psc->returns)
+        {
+            if (previous)
+                constraint->dependencies.push_back(NotNull{previous});
 
-                previous = constraint.get();
-            }
-        });
+            previous = constraint.get();
+        }
+    });
 
    addConstraint(scope, std::move(c));
 }
@@ -1470,8 +1466,7 @@ ControlFlow ConstraintGenerator::visit(const ScopePtr& scope, AstStatError* erro
    return ControlFlow::None;
 }
 
-InferencePack ConstraintGenerator::checkPack(
-    const ScopePtr& scope, AstArray<AstExpr*> exprs, const std::vector<std::optional<TypeId>>& expectedTypes)
+InferencePack ConstraintGenerator::checkPack(const ScopePtr& scope, AstArray<AstExpr*> exprs, const std::vector<std::optional<TypeId>>& expectedTypes)
 {
    std::vector<TypeId> head;
    std::optional<TypePackId> tail;

@@ -1708,14 +1703,8 @@ InferencePack ConstraintGenerator::checkPack(const ScopePtr& scope, AstExprCall*
     * 4. Solve the call
     */
 
-    NotNull<Constraint> checkConstraint = addConstraint(scope, call->func->location,
-        FunctionCheckConstraint{
-            fnType,
-            argPack,
-            call,
-            NotNull{&module->astExpectedTypes}
-        }
-    );
+    NotNull<Constraint> checkConstraint =
+        addConstraint(scope, call->func->location, FunctionCheckConstraint{fnType, argPack, call, NotNull{&module->astExpectedTypes}});
 
    forEachConstraint(funcBeginCheckpoint, funcEndCheckpoint, this, [checkConstraint](const ConstraintPtr& constraint) {
        checkConstraint->dependencies.emplace_back(constraint.get());

@@ -1743,8 +1732,7 @@ InferencePack ConstraintGenerator::checkPack(const ScopePtr& scope, AstExprCall*
    }
 }
 
-Inference ConstraintGenerator::check(
-    const ScopePtr& scope, AstExpr* expr, std::optional<TypeId> expectedType, bool forceSingleton, bool generalize)
+Inference ConstraintGenerator::check(const ScopePtr& scope, AstExpr* expr, std::optional<TypeId> expectedType, bool forceSingleton, bool generalize)
 {
    RecursionCounter counter{&recursionCount};
 

@@ -2403,11 +2391,9 @@ std::optional<TypeId> ConstraintGenerator::checkLValue(const ScopePtr& scope, As
 
    if (transform)
    {
-        addConstraint(scope, local->location, UnpackConstraint{
-            arena->addTypePack({*ty}),
-            arena->addTypePack({assignedTy}),
-            /*resultIsLValue*/ true
-        });
+        addConstraint(scope, local->location,
+            UnpackConstraint{arena->addTypePack({*ty}), arena->addTypePack({assignedTy}),
+                /*resultIsLValue*/ true});
 
        recordInferredBinding(local->local, *ty);
    }

@@ -3395,7 +3381,6 @@ void ConstraintGenerator::fillInInferredBindings(const ScopePtr& globalScope, As
 
            scope->bindings[symbol] = Binding{ty, location};
        }
-
    }
 }
 
@@ -1184,7 +1184,8 @@ bool ConstraintSolver::tryDispatch(const FunctionCheckConstraint& c, NotNull<con
            }
        }
    }
-    else if (expr->is<AstExprConstantBool>() || expr->is<AstExprConstantString>() || expr->is<AstExprConstantNumber>() || expr->is<AstExprConstantNil>() || expr->is<AstExprTable>())
+    else if (expr->is<AstExprConstantBool>() || expr->is<AstExprConstantString>() || expr->is<AstExprConstantNumber>() ||
+             expr->is<AstExprConstantNil>() || expr->is<AstExprTable>())
    {
        Unifier2 u2{arena, builtinTypes, constraint->scope, NotNull{&iceReporter}};
        u2.unify(actualArgTy, expectedArgTy);

@@ -1289,7 +1289,8 @@ ModulePtr Frontend::check(const SourceModule& sourceModule, Mode mode, std::vect
    {
        return Luau::check(sourceModule, mode, requireCycles, builtinTypes, NotNull{&iceHandler},
            NotNull{forAutocomplete ? &moduleResolverForAutocomplete : &moduleResolver}, NotNull{fileResolver},
-            environmentScope ? *environmentScope : globals.globalScope, prepareModuleScopeWrap, options, typeCheckLimits, recordJsonLog, writeJsonLog);
+            environmentScope ? *environmentScope : globals.globalScope, prepareModuleScopeWrap, options, typeCheckLimits, recordJsonLog,
+            writeJsonLog);
    }
    catch (const InternalCompilerError& err)
    {
@@ -402,7 +402,14 @@ static bool isShallowInhabited(const NormalizedType& norm)
        !get<NeverType>(norm.buffers) || !norm.functions.isNever() || !norm.tables.empty() || !norm.tyvars.empty();
 }
 
-bool Normalizer::isInhabited(const NormalizedType* norm, Set<TypeId> seen)
+bool Normalizer::isInhabited(const NormalizedType* norm)
+{
+    Set<TypeId> seen{nullptr};
+
+    return isInhabited(norm, seen);
+}
+
+bool Normalizer::isInhabited(const NormalizedType* norm, Set<TypeId>& seen)
 {
    // If normalization failed, the type is complex, and so is more likely than not to be inhabited.
    if (!norm)

@@ -436,7 +443,8 @@ bool Normalizer::isInhabited(TypeId ty)
        return *result;
    }
 
-    bool result = isInhabited(ty, {nullptr});
+    Set<TypeId> seen{nullptr};
+    bool result = isInhabited(ty, seen);
 
    if (cacheInhabitance)
        cachedIsInhabited[ty] = result;

@@ -444,7 +452,7 @@ bool Normalizer::isInhabited(TypeId ty)
    return result;
 }
 
-bool Normalizer::isInhabited(TypeId ty, Set<TypeId> seen)
+bool Normalizer::isInhabited(TypeId ty, Set<TypeId>& seen)
 {
    // TODO: use log.follow(ty), CLI-64291
    ty = follow(ty);
@@ -126,6 +126,7 @@ SubtypingResult& SubtypingResult::andAlso(const SubtypingResult& other)
    isSubtype &= other.isSubtype;
    normalizationTooComplex |= other.normalizationTooComplex;
    isCacheable &= other.isCacheable;
+    errors.insert(errors.end(), other.errors.begin(), other.errors.end());
 
    return *this;
 }

@@ -147,6 +148,7 @@ SubtypingResult& SubtypingResult::orElse(const SubtypingResult& other)
    isSubtype |= other.isSubtype;
    normalizationTooComplex |= other.normalizationTooComplex;
    isCacheable &= other.isCacheable;
+    errors.insert(errors.end(), other.errors.begin(), other.errors.end());
 
    return *this;
 }

@@ -213,6 +215,12 @@ SubtypingResult& SubtypingResult::withSuperPath(TypePath::Path path)
    return *this;
 }
 
+SubtypingResult& SubtypingResult::withErrors(ErrorVec& err)
+{
+    errors = std::move(err);
+    return *this;
+}
+
 SubtypingResult SubtypingResult::negate(const SubtypingResult& result)
 {
    return SubtypingResult{
@@ -1421,15 +1429,16 @@ bool Subtyping::bindGeneric(SubtypingEnvironment& env, TypeId subTy, TypeId supe
 SubtypingResult Subtyping::isCovariantWith(SubtypingEnvironment& env, const TypeFamilyInstanceType* subFamilyInstance, const TypeId superTy)
 {
    // Reduce the typefamily instance
-    TypeId reduced = handleTypeFamilyReductionResult<TypeId>(subFamilyInstance);
-    return isCovariantWith(env, reduced, superTy);
+    auto [ty, errors] = handleTypeFamilyReductionResult(subFamilyInstance);
+    // If we return optional, that means the type family was irreducible - we can reduce that to never
+    return isCovariantWith(env, ty, superTy).withErrors(errors);
 }
 
 SubtypingResult Subtyping::isCovariantWith(SubtypingEnvironment& env, const TypeId subTy, const TypeFamilyInstanceType* superFamilyInstance)
 {
    // Reduce the typefamily instance
-    TypeId reduced = handleTypeFamilyReductionResult<TypeId>(superFamilyInstance);
-    return isCovariantWith(env, subTy, reduced);
+    auto [ty, errors] = handleTypeFamilyReductionResult(superFamilyInstance);
+    return isCovariantWith(env, subTy, ty).withErrors(errors);
 }
 
 /*

@@ -1284,8 +1284,8 @@ void TypeStringifier::stringify(TypePackId tpid, const std::vector<std::optional
    tps.stringify(tpid);
 }
 
-static void assignCycleNames(const std::set<TypeId>& cycles, const std::set<TypePackId>& cycleTPs,
-    DenseHashMap<TypeId, std::string>& cycleNames, DenseHashMap<TypePackId, std::string>& cycleTpNames, bool exhaustive)
+static void assignCycleNames(const std::set<TypeId>& cycles, const std::set<TypePackId>& cycleTPs, DenseHashMap<TypeId, std::string>& cycleNames,
+    DenseHashMap<TypePackId, std::string>& cycleTpNames, bool exhaustive)
 {
    int nextIndex = 1;
 
@@ -2,6 +2,7 @@
 
 #include "Luau/TypeFamily.h"
 
+#include "Luau/Common.h"
 #include "Luau/ConstraintSolver.h"
 #include "Luau/DenseHash.h"
 #include "Luau/Instantiation.h"

@@ -11,10 +12,12 @@
 #include "Luau/Subtyping.h"
 #include "Luau/ToString.h"
 #include "Luau/TxnLog.h"
+#include "Luau/Type.h"
 #include "Luau/TypeCheckLimits.h"
 #include "Luau/TypeUtils.h"
 #include "Luau/Unifier2.h"
 #include "Luau/VecDeque.h"
+#include "Luau/Set.h"
 #include "Luau/VisitType.h"
 
 LUAU_DYNAMIC_FASTINTVARIABLE(LuauTypeFamilyGraphReductionMaximumSteps, 1'000'000);

@@ -26,6 +29,7 @@ struct InstanceCollector : TypeOnceVisitor
 {
    VecDeque<TypeId> tys;
    VecDeque<TypePackId> tps;
+    std::vector<TypeId> cyclicInstance;
 
    bool visit(TypeId ty, const TypeFamilyInstanceType&) override
    {
@@ -39,6 +43,14 @@ struct InstanceCollector : TypeOnceVisitor
        return true;
    }
 
+    void cycle(TypeId ty) override
+    {
+        /// Detected cyclic type pack
+        TypeId t = follow(ty);
+        if (get<TypeFamilyInstanceType>(t))
+            cyclicInstance.push_back(t);
+    }
+
    bool visit(TypeId ty, const ClassType&) override
    {
        return false;
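Note: the collector now remembers family instances that close a cycle so the reducer can apply the cyclic rules instead of deferring them forever. A self-contained toy of cycle collection during a graph walk (toy graph, not the TypeOnceVisitor API):

```cpp
// Toy sketch of cycle collection: nodes on the current DFS stack that are
// revisited close a cycle and get recorded, mirroring how
// InstanceCollector::cycle pushes into cyclicInstance.
#include <unordered_set>
#include <vector>

struct Node
{
    std::vector<Node*> children;
};

void collectCycles(Node* n, std::unordered_set<Node*>& onStack, std::vector<Node*>& cyclic)
{
    if (onStack.count(n))
    {
        cyclic.push_back(n); // back-edge: n participates in a cycle
        return;
    }
    onStack.insert(n);
    for (Node* child : n->children)
        collectCycles(child, onStack, cyclic);
    onStack.erase(n);
}

int main()
{
    Node a, b;
    a.children = {&b};
    b.children = {&a}; // a <-> b cycle
    std::unordered_set<Node*> onStack;
    std::vector<Node*> cyclic;
    collectCycles(&a, onStack, cyclic);
    return cyclic.empty() ? 1 : 0;
}
```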
@@ -63,6 +75,7 @@ struct FamilyReducer
 
    VecDeque<TypeId> queuedTys;
    VecDeque<TypePackId> queuedTps;
+    std::vector<TypeId> cyclicTypeFamilies;
    DenseHashSet<const void*> irreducible{nullptr};
    FamilyGraphReductionResult result;
    bool force = false;

@@ -70,10 +83,12 @@ struct FamilyReducer
    // Local to the constraint being reduced.
    Location location;
 
-    FamilyReducer(VecDeque<TypeId> queuedTys, VecDeque<TypePackId> queuedTps, Location location, TypeFamilyContext ctx, bool force = false)
+    FamilyReducer(VecDeque<TypeId> queuedTys, VecDeque<TypePackId> queuedTps, std::vector<TypeId> cyclicTypes, Location location,
+        TypeFamilyContext ctx, bool force = false)
        : ctx(ctx)
        , queuedTys(std::move(queuedTys))
        , queuedTps(std::move(queuedTps))
+        , cyclicTypeFamilies(std::move(cyclicTypes))
        , force(force)
        , location(location)
    {

@@ -81,6 +96,7 @@ struct FamilyReducer
 
    enum class SkipTestResult
    {
+        CyclicTypeFamily,
        Irreducible,
        Defer,
        Okay,

@@ -92,10 +108,16 @@ struct FamilyReducer
 
        if (is<TypeFamilyInstanceType>(ty))
        {
+            for (auto t : cyclicTypeFamilies)
+            {
+                if (ty == t)
+                    return SkipTestResult::CyclicTypeFamily;
+            }
+
            if (!irreducible.contains(ty))
                return SkipTestResult::Defer;
-            else
-                return SkipTestResult::Irreducible;
+
+            return SkipTestResult::Irreducible;
        }
        else if (is<GenericType>(ty))
        {

@@ -223,10 +245,12 @@ struct FamilyReducer
 
        if (const TypeFamilyInstanceType* tfit = get<TypeFamilyInstanceType>(subject))
        {
-            if (!testParameters(subject, tfit))
+            SkipTestResult testCyclic = testForSkippability(subject);
+
+            if (!testParameters(subject, tfit) && testCyclic != SkipTestResult::CyclicTypeFamily)
                return;
 
-            TypeFamilyReductionResult<TypeId> result = tfit->family->reducer(tfit->typeArguments, tfit->packArguments, NotNull{&ctx});
+            TypeFamilyReductionResult<TypeId> result = tfit->family->reducer(subject, tfit->typeArguments, tfit->packArguments, NotNull{&ctx});
            handleFamilyReduction(subject, result);
        }
    }
@@ -244,7 +268,7 @@ struct FamilyReducer
            if (!testParameters(subject, tfit))
                return;
 
-            TypeFamilyReductionResult<TypePackId> result = tfit->family->reducer(tfit->typeArguments, tfit->packArguments, NotNull{&ctx});
+            TypeFamilyReductionResult<TypePackId> result = tfit->family->reducer(subject, tfit->typeArguments, tfit->packArguments, NotNull{&ctx});
            handleFamilyReduction(subject, result);
        }
    }

@@ -259,9 +283,9 @@ struct FamilyReducer
 };
 
 static FamilyGraphReductionResult reduceFamiliesInternal(
-    VecDeque<TypeId> queuedTys, VecDeque<TypePackId> queuedTps, Location location, TypeFamilyContext ctx, bool force)
+    VecDeque<TypeId> queuedTys, VecDeque<TypePackId> queuedTps, std::vector<TypeId> cyclics, Location location, TypeFamilyContext ctx, bool force)
 {
-    FamilyReducer reducer{std::move(queuedTys), std::move(queuedTps), location, ctx, force};
+    FamilyReducer reducer{std::move(queuedTys), std::move(queuedTps), std::move(cyclics), location, ctx, force};
    int iterationCount = 0;
 
    while (!reducer.done())

@@ -295,7 +319,7 @@ FamilyGraphReductionResult reduceFamilies(TypeId entrypoint, Location location, 
    if (collector.tys.empty() && collector.tps.empty())
        return {};
 
-    return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), location, ctx, force);
+    return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), std::move(collector.cyclicInstance), location, ctx, force);
 }
 
 FamilyGraphReductionResult reduceFamilies(TypePackId entrypoint, Location location, TypeFamilyContext ctx, bool force)

@@ -314,7 +338,7 @@ FamilyGraphReductionResult reduceFamilies(TypePackId entrypoint, Location locati
    if (collector.tys.empty() && collector.tps.empty())
        return {};
 
-    return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), location, ctx, force);
+    return reduceFamiliesInternal(std::move(collector.tys), std::move(collector.tps), {}, location, ctx, force);
 }
 
 bool isPending(TypeId ty, ConstraintSolver* solver)

@@ -322,7 +346,8 @@ bool isPending(TypeId ty, ConstraintSolver* solver)
    return is<BlockedType>(ty) || is<PendingExpansionType>(ty) || is<TypeFamilyInstanceType>(ty) || (solver && solver->hasUnresolvedConstraints(ty));
 }
 
-TypeFamilyReductionResult<TypeId> notFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> notFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 1 || !packParams.empty())
    {
@@ -339,7 +364,8 @@ TypeFamilyReductionResult<TypeId> notFamilyFn(const std::vector<TypeId>& typePar
    return {ctx->builtins->booleanType, false, {}, {}};
 }
 
-TypeFamilyReductionResult<TypeId> lenFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> lenFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 1 || !packParams.empty())
    {

@@ -415,7 +441,7 @@ TypeFamilyReductionResult<TypeId> lenFamilyFn(const std::vector<TypeId>& typePar
 }
 
 TypeFamilyReductionResult<TypeId> unmFamilyFn(
-    const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 1 || !packParams.empty())
    {

@@ -486,8 +512,8 @@ TypeFamilyReductionResult<TypeId> unmFamilyFn(
    return {std::nullopt, true, {}, {}};
 }
 
-TypeFamilyReductionResult<TypeId> numericBinopFamilyFn(
-    const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx, const std::string metamethod)
+TypeFamilyReductionResult<TypeId> numericBinopFamilyFn(TypeId instance, const std::vector<TypeId>& typeParams,
+    const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx, const std::string metamethod)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -571,7 +597,8 @@ TypeFamilyReductionResult<TypeId> numericBinopFamilyFn(
    return {std::nullopt, true, {}, {}};
 }
 
-TypeFamilyReductionResult<TypeId> addFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> addFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -579,10 +606,11 @@ TypeFamilyReductionResult<TypeId> addFamilyFn(const std::vector<TypeId>& typePar
        LUAU_ASSERT(false);
    }
 
-    return numericBinopFamilyFn(typeParams, packParams, ctx, "__add");
+    return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__add");
 }
 
-TypeFamilyReductionResult<TypeId> subFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> subFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -590,10 +618,11 @@ TypeFamilyReductionResult<TypeId> subFamilyFn(const std::vector<TypeId>& typePar
        LUAU_ASSERT(false);
    }
 
-    return numericBinopFamilyFn(typeParams, packParams, ctx, "__sub");
+    return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__sub");
 }
 
-TypeFamilyReductionResult<TypeId> mulFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> mulFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -601,10 +630,11 @@ TypeFamilyReductionResult<TypeId> mulFamilyFn(const std::vector<TypeId>& typePar
        LUAU_ASSERT(false);
    }
 
-    return numericBinopFamilyFn(typeParams, packParams, ctx, "__mul");
+    return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__mul");
 }
 
-TypeFamilyReductionResult<TypeId> divFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> divFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -612,10 +642,11 @@ TypeFamilyReductionResult<TypeId> divFamilyFn(const std::vector<TypeId>& typePar
        LUAU_ASSERT(false);
    }
 
-    return numericBinopFamilyFn(typeParams, packParams, ctx, "__div");
+    return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__div");
 }
 
-TypeFamilyReductionResult<TypeId> idivFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> idivFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -623,10 +654,11 @@ TypeFamilyReductionResult<TypeId> idivFamilyFn(const std::vector<TypeId>& typePa
        LUAU_ASSERT(false);
    }
 
-    return numericBinopFamilyFn(typeParams, packParams, ctx, "__idiv");
+    return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__idiv");
 }
 
-TypeFamilyReductionResult<TypeId> powFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> powFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -634,10 +666,11 @@ TypeFamilyReductionResult<TypeId> powFamilyFn(const std::vector<TypeId>& typePar
        LUAU_ASSERT(false);
    }
 
-    return numericBinopFamilyFn(typeParams, packParams, ctx, "__pow");
+    return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__pow");
 }
 
-TypeFamilyReductionResult<TypeId> modFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> modFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -645,10 +678,11 @@ TypeFamilyReductionResult<TypeId> modFamilyFn(const std::vector<TypeId>& typePar
        LUAU_ASSERT(false);
    }
 
-    return numericBinopFamilyFn(typeParams, packParams, ctx, "__mod");
+    return numericBinopFamilyFn(instance, typeParams, packParams, ctx, "__mod");
 }
 
-TypeFamilyReductionResult<TypeId> concatFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> concatFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -733,7 +767,8 @@ TypeFamilyReductionResult<TypeId> concatFamilyFn(const std::vector<TypeId>& type
    return {ctx->builtins->stringType, false, {}, {}};
 }
 
-TypeFamilyReductionResult<TypeId> andFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> andFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {
@@ -744,6 +779,14 @@ TypeFamilyReductionResult<TypeId> andFamilyFn(const std::vector<TypeId>& typePar
    TypeId lhsTy = follow(typeParams.at(0));
    TypeId rhsTy = follow(typeParams.at(1));
+
+    // t1 = and<lhs, t1> ~> lhs
+    if (follow(rhsTy) == instance && lhsTy != rhsTy)
+        return {lhsTy, false, {}, {}};
+    // t1 = and<t1, rhs> ~> rhs
+    if (follow(lhsTy) == instance && lhsTy != rhsTy)
+        return {rhsTy, false, {}, {}};
+
 
    // check to see if both operand types are resolved enough, and wait to reduce if not
    if (isPending(lhsTy, ctx->solver))
        return {std::nullopt, false, {lhsTy}, {}};
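Note: for intuition, recursive inference (e.g. Luau source shaped like `return x and f(x)` inside `f`, a hypothetical illustration) can yield a family instance that appears among its own arguments, such as `t1 = and<boolean, t1>`; the guards above collapse it to the other operand. A runnable toy of just those guards (stand-in types, invented function name):

```cpp
// Runnable toy of exactly the guards added above: if one operand of and<a, b>
// is the family instance itself, reduction collapses to the other operand; if
// both operands are the instance, the guards do not fire and the ordinary
// (deferred) reduction path is taken.
#include <cassert>
#include <optional>

using TypeId = int; // stand-in for Luau's TypeId

std::optional<TypeId> reduceCyclicAnd(TypeId instance, TypeId lhs, TypeId rhs)
{
    if (rhs == instance && lhs != rhs)
        return lhs; // t1 = and<lhs, t1> ~> lhs
    if (lhs == instance && lhs != rhs)
        return rhs; // t1 = and<t1, rhs> ~> rhs
    return std::nullopt; // no usable cycle; continue with normal reduction
}

int main()
{
    TypeId t1 = 42, boolean = 1;
    assert(reduceCyclicAnd(t1, boolean, t1) == boolean);
    assert(!reduceCyclicAnd(t1, t1, t1));
    return 0;
}
```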
@@ -761,7 +804,8 @@ TypeFamilyReductionResult<TypeId> andFamilyFn(const std::vector<TypeId>& typePar
    return {overallResult.result, false, std::move(blockedTypes), {}};
 }
 
-TypeFamilyReductionResult<TypeId> orFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> orFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -772,6 +816,13 @@ TypeFamilyReductionResult<TypeId> orFamilyFn(const std::vector<TypeId>& typePara
    TypeId lhsTy = follow(typeParams.at(0));
    TypeId rhsTy = follow(typeParams.at(1));
+
+    // t1 = or<lhs, t1> ~> lhs
+    if (follow(rhsTy) == instance && lhsTy != rhsTy)
+        return {lhsTy, false, {}, {}};
+    // t1 = or<t1, rhs> ~> rhs
+    if (follow(lhsTy) == instance && lhsTy != rhsTy)
+        return {rhsTy, false, {}, {}};
 
    // check to see if both operand types are resolved enough, and wait to reduce if not
    if (isPending(lhsTy, ctx->solver))
        return {std::nullopt, false, {lhsTy}, {}};

@@ -789,8 +840,8 @@ TypeFamilyReductionResult<TypeId> orFamilyFn(const std::vector<TypeId>& typePara
    return {overallResult.result, false, std::move(blockedTypes), {}};
 }
 
-static TypeFamilyReductionResult<TypeId> comparisonFamilyFn(
-    const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx, const std::string metamethod)
+static TypeFamilyReductionResult<TypeId> comparisonFamilyFn(TypeId instance, const std::vector<TypeId>& typeParams,
+    const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx, const std::string metamethod)
 {
 
    if (typeParams.size() != 2 || !packParams.empty())

@@ -870,7 +921,8 @@ static TypeFamilyReductionResult<TypeId> comparisonFamilyFn(
    return {ctx->builtins->booleanType, false, {}, {}};
 }
 
-TypeFamilyReductionResult<TypeId> ltFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> ltFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -878,10 +930,11 @@ TypeFamilyReductionResult<TypeId> ltFamilyFn(const std::vector<TypeId>& typePara
        LUAU_ASSERT(false);
    }
 
-    return comparisonFamilyFn(typeParams, packParams, ctx, "__lt");
+    return comparisonFamilyFn(instance, typeParams, packParams, ctx, "__lt");
 }
 
-TypeFamilyReductionResult<TypeId> leFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> leFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -889,10 +942,11 @@ TypeFamilyReductionResult<TypeId> leFamilyFn(const std::vector<TypeId>& typePara
        LUAU_ASSERT(false);
    }
 
-    return comparisonFamilyFn(typeParams, packParams, ctx, "__le");
+    return comparisonFamilyFn(instance, typeParams, packParams, ctx, "__le");
 }
 
-TypeFamilyReductionResult<TypeId> eqFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> eqFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -995,7 +1049,8 @@ struct FindRefinementBlockers : TypeOnceVisitor
 };
 
 
-TypeFamilyReductionResult<TypeId> refineFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> refineFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -1052,7 +1107,8 @@ TypeFamilyReductionResult<TypeId> refineFamilyFn(const std::vector<TypeId>& type
    return {resultTy, false, {}, {}};
 }
 
-TypeFamilyReductionResult<TypeId> unionFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> unionFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -1082,7 +1138,8 @@ TypeFamilyReductionResult<TypeId> unionFamilyFn(const std::vector<TypeId>& typeP
 }
 
 
-TypeFamilyReductionResult<TypeId> intersectFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> intersectFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
 {
    if (typeParams.size() != 2 || !packParams.empty())
    {

@@ -1174,7 +1231,8 @@ bool computeKeysOf(TypeId ty, Set<std::string>& result, DenseHashSet<TypeId>& se
    return false;
 }
 
-TypeFamilyReductionResult<TypeId> keyofFamilyImpl(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx, bool isRaw)
+TypeFamilyReductionResult<TypeId> keyofFamilyImpl(
+    const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx, bool isRaw)
 {
    if (typeParams.size() != 1 || !packParams.empty())
    {

@@ -1190,7 +1248,8 @@ TypeFamilyReductionResult<TypeId> keyofFamilyImpl(const std::vector<TypeId>& typ
    if (!normTy)
        return {std::nullopt, false, {}, {}};
 
-    // if we don't have either just tables or just classes, we've got nothing to get keys of (at least until a future version perhaps adds classes as well)
+    // if we don't have either just tables or just classes, we've got nothing to get keys of (at least until a future version perhaps adds classes as
+    // well)
    if (normTy->hasTables() == normTy->hasClasses())
        return {std::nullopt, true, {}, {}};
 

@@ -1289,7 +1348,8 @@ TypeFamilyReductionResult<TypeId> keyofFamilyImpl(const std::vector<TypeId>& typ
    return {ctx->arena->addType(UnionType{singletons}), false, {}, {}};
 }
 
-TypeFamilyReductionResult<TypeId> keyofFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
+TypeFamilyReductionResult<TypeId> keyofFamilyFn(
+    TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
|
||||||
{
|
{
|
||||||
if (typeParams.size() != 1 || !packParams.empty())
|
if (typeParams.size() != 1 || !packParams.empty())
|
||||||
{
|
{
|
||||||
@ -1300,7 +1360,8 @@ TypeFamilyReductionResult<TypeId> keyofFamilyFn(const std::vector<TypeId>& typeP
|
|||||||
return keyofFamilyImpl(typeParams, packParams, ctx, /* isRaw */ false);
|
return keyofFamilyImpl(typeParams, packParams, ctx, /* isRaw */ false);
|
||||||
}
|
}
|
||||||
|
|
||||||
TypeFamilyReductionResult<TypeId> rawkeyofFamilyFn(const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
|
TypeFamilyReductionResult<TypeId> rawkeyofFamilyFn(
|
||||||
|
TypeId instance, const std::vector<TypeId>& typeParams, const std::vector<TypePackId>& packParams, NotNull<TypeFamilyContext> ctx)
|
||||||
{
|
{
|
||||||
if (typeParams.size() != 1 || !packParams.empty())
|
if (typeParams.size() != 1 || !packParams.empty())
|
||||||
{
|
{
|
||||||
|
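The hunks above are one mechanical change applied across the type family reducers: each reducer now receives the `TypeId instance` being reduced as a leading parameter and threads it through to helpers (`ltFamilyFn` and `leFamilyFn` forward it into `comparisonFamilyFn`, while `keyofFamilyImpl` remains instance-free). A minimal standalone sketch of the new shape — `TypeId`, `TypeFamilyContext`, and `ReductionResult` below are simplified stand-ins for illustration, not the real Luau definitions:

```cpp
#include <optional>
#include <string>
#include <vector>

// Simplified stand-ins for the real Luau types; illustration only.
using TypeId = const void*;
using TypePackId = const void*;
struct TypeFamilyContext {};

struct ReductionResult
{
    std::optional<TypeId> result; // reduced type, if reduction succeeded
    bool uninhabited = false;     // known-uninhabited marker
};

// Post-change shape: the family instance itself is available to the reducer.
ReductionResult comparisonFamilyFn(TypeId instance, const std::vector<TypeId>& typeParams,
    const std::vector<TypePackId>& packParams, TypeFamilyContext& ctx, const std::string& metamethod)
{
    if (typeParams.size() != 2 || !packParams.empty())
        return {std::nullopt, false}; // malformed instantiation: leave irreducible
    (void)instance; (void)ctx; (void)metamethod;
    return {typeParams[0], false}; // placeholder reduction
}

ReductionResult ltFamilyFn(TypeId instance, const std::vector<TypeId>& typeParams,
    const std::vector<TypePackId>& packParams, TypeFamilyContext& ctx)
{
    // Forward the instance along with the parameters, exactly as the diff does.
    return comparisonFamilyFn(instance, typeParams, packParams, ctx, "__lt");
}
```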
@@ -271,7 +271,8 @@ TypePackId follow(TypePackId tp, const void* context, TypePackId (*mapper)(const
 
         if (const Unifiable::Bound<TypePackId>* btv = get<Unifiable::Bound<TypePackId>>(mapped))
             return btv->boundTo;
-        else if (const TypePack* tp = get<TypePack>(mapped); (FFlag::DebugLuauDeferredConstraintResolution || FFlag::LuauFollowEmptyTypePacks) && tp && tp->head.empty())
+        else if (const TypePack* tp = get<TypePack>(mapped);
+                 (FFlag::DebugLuauDeferredConstraintResolution || FFlag::LuauFollowEmptyTypePacks) && tp && tp->head.empty())
             return tp->tail;
         else
             return std::nullopt;
@@ -150,7 +150,8 @@ bool Unifier2::unify(TypeId subTy, TypeId superTy)
     return true;
 }
 
-bool Unifier2::unify(TypeId subTy, const FunctionType* superFn) {
+bool Unifier2::unify(TypeId subTy, const FunctionType* superFn)
+{
     const FunctionType* subFn = get<FunctionType>(subTy);
 
     bool shouldInstantiate =
@@ -465,8 +466,8 @@ struct MutatingGeneralizer : TypeOnceVisitor
 
     bool isWithinFunction = false;
 
-    MutatingGeneralizer(
-        NotNull<BuiltinTypes> builtinTypes, NotNull<Scope> scope, DenseHashMap<TypeId, size_t> positiveTypes, DenseHashMap<TypeId, size_t> negativeTypes)
+    MutatingGeneralizer(NotNull<BuiltinTypes> builtinTypes, NotNull<Scope> scope, DenseHashMap<TypeId, size_t> positiveTypes,
+        DenseHashMap<TypeId, size_t> negativeTypes)
         : TypeOnceVisitor(/* skipBoundTypes */ true)
         , builtinTypes(builtinTypes)
         , scope(scope)
@@ -522,8 +522,8 @@ void AstStatLocal::visit(AstVisitor* visitor)
     }
 }
 
-AstStatFor::AstStatFor(const Location& location, AstLocal* var, AstExpr* from, AstExpr* to, AstExpr* step, AstStatBlock* body, bool hasDo,
-    const Location& doLocation)
+AstStatFor::AstStatFor(
+    const Location& location, AstLocal* var, AstExpr* from, AstExpr* to, AstExpr* step, AstStatBlock* body, bool hasDo, const Location& doLocation)
     : AstStat(ClassIndex(), location)
     , var(var)
     , from(from)
@@ -599,8 +599,7 @@ AstStat* Parser::parseFor()
         bool hasEnd = expectMatchEndAndConsume(Lexeme::ReservedEnd, matchDo);
         body->hasEnd = hasEnd;
 
-        return allocator.alloc<AstStatForIn>(
-            Location(start, end), copy(vars), copy(values), body, hasIn, inLocation, hasDo, matchDo.location);
+        return allocator.alloc<AstStatForIn>(Location(start, end), copy(vars), copy(values), body, hasIn, inLocation, hasDo, matchDo.location);
     }
 }
 
@@ -907,8 +906,7 @@ AstStat* Parser::parseDeclaration(const Location& start)
         {
             props.push_back(parseDeclaredClassMethod());
         }
-        else if (lexer.current().type == '[' && (lexer.lookahead().type == Lexeme::RawString ||
-                 lexer.lookahead().type == Lexeme::QuotedString))
+        else if (lexer.current().type == '[' && (lexer.lookahead().type == Lexeme::RawString || lexer.lookahead().type == Lexeme::QuotedString))
         {
             const Lexeme begin = lexer.current();
             nextLexeme(); // [
@@ -32,8 +32,8 @@ struct AddressA64
         , offset(xzr)
         , data(off)
     {
-        LUAU_ASSERT(base.kind == KindA64::x || base == sp);
-        LUAU_ASSERT(kind != AddressKindA64::reg);
+        CODEGEN_ASSERT(base.kind == KindA64::x || base == sp);
+        CODEGEN_ASSERT(kind != AddressKindA64::reg);
     }
 
     constexpr AddressA64(RegisterA64 base, RegisterA64 offset)
@@ -42,8 +42,8 @@ struct AddressA64
         , offset(offset)
        , data(0)
     {
-        LUAU_ASSERT(base.kind == KindA64::x);
-        LUAU_ASSERT(offset.kind == KindA64::x);
+        CODEGEN_ASSERT(base.kind == KindA64::x);
+        CODEGEN_ASSERT(offset.kind == KindA64::x);
    }
 
     AddressKindA64 kind;
@@ -176,7 +176,7 @@ public:
     // Extracts code offset (in bytes) from label
     uint32_t getLabelOffset(const Label& label)
     {
-        LUAU_ASSERT(label.location != ~0u);
+        CODEGEN_ASSERT(label.location != ~0u);
         return label.location * 4;
     }
 
@@ -179,7 +179,7 @@ public:
     // Extracts code offset (in bytes) from label
     uint32_t getLabelOffset(const Label& label)
     {
-        LUAU_ASSERT(label.location != ~0u);
+        CODEGEN_ASSERT(label.location != ~0u);
         return label.location;
     }
 
@@ -1,7 +1,7 @@
 // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
 #pragma once
 
-#include "Luau/Common.h"
+#include "Luau/CodeGenCommon.h"
 #include "Luau/Bytecode.h"
 
 #include <string>
@@ -49,21 +49,21 @@ public:
 
     void incCount(unsigned nesting, uint8_t op)
     {
-        LUAU_ASSERT(nesting <= getNestingLimit());
-        LUAU_ASSERT(op < getOpLimit());
+        CODEGEN_ASSERT(nesting <= getNestingLimit());
+        CODEGEN_ASSERT(op < getOpLimit());
         ++counts[nesting][op];
     }
 
     unsigned getCount(unsigned nesting, uint8_t op) const
     {
-        LUAU_ASSERT(nesting <= getNestingLimit());
-        LUAU_ASSERT(op < getOpLimit());
+        CODEGEN_ASSERT(nesting <= getNestingLimit());
+        CODEGEN_ASSERT(op < getOpLimit());
         return counts[nesting][op];
     }
 
     const std::vector<unsigned>& getCounts(unsigned nesting) const
     {
-        LUAU_ASSERT(nesting <= getNestingLimit());
+        CODEGEN_ASSERT(nesting <= getNestingLimit());
         return counts[nesting];
     }
 
@@ -23,14 +23,21 @@ enum CodeGenFlags
     CodeGen_ColdFunctions = 1 << 1,
 };
 
+// These enum values can be reported through telemetry.
+// To ensure consistency, changes should be additive.
 enum class CodeGenCompilationResult
 {
-    Success,          // Successfully generated code for at least one function
-    NothingToCompile, // There were no new functions to compile
+    Success = 0,          // Successfully generated code for at least one function
+    NothingToCompile = 1, // There were no new functions to compile
+    NotNativeModule = 2,  // Module does not have `--!native` comment
 
-    CodeGenNotInitialized, // Native codegen system is not initialized
-    CodeGenFailed,         // Native codegen failed due to an internal compiler error
-    AllocationFailed,      // Native codegen failed due to an allocation error
+    CodeGenNotInitialized = 3,                // Native codegen system is not initialized
+    CodeGenOverflowInstructionLimit = 4,      // Instruction limit overflow
+    CodeGenOverflowBlockLimit = 5,            // Block limit overflow
+    CodeGenOverflowBlockInstructionLimit = 6, // Block instruction limit overflow
+    CodeGenAssemblerFinalizationFailure = 7,  // Failure during assembler finalization
+    CodeGenLoweringFailure = 8,               // Lowering failed
+    AllocationFailed = 9,                     // Native codegen failed due to an allocation error
 };
 
 struct CompilationStats
@@ -40,6 +47,7 @@ struct CompilationStats
     size_t nativeDataSizeBytes = 0;
     size_t nativeMetadataSizeBytes = 0;
 
+    uint32_t functionsTotal = 0;
     uint32_t functionsCompiled = 0;
 };
 
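The enum rewrite above pins an explicit value on every enumerator because, per the new comment, these results can be reported through telemetry: once a value has shipped it must keep its meaning, and future changes should only append. A sketch of the consequence — `toTelemetryCode` is a hypothetical helper for illustration, not part of this commit:

```cpp
#include <cstdint>

// Stand-in copy of the post-change enum shape (the real definition lives in
// the header above); values are pinned so recorded telemetry stays comparable
// across builds.
enum class CodeGenCompilationResult
{
    Success = 0,
    NothingToCompile = 1,
    NotNativeModule = 2,
    CodeGenNotInitialized = 3,
    CodeGenOverflowInstructionLimit = 4,
    CodeGenOverflowBlockLimit = 5,
    CodeGenOverflowBlockInstructionLimit = 6,
    CodeGenAssemblerFinalizationFailure = 7,
    CodeGenLoweringFailure = 8,
    AllocationFailed = 9,
};

// Hypothetical reporting helper: because the numbering is additive and never
// reused, a plain cast is a stable wire format.
inline uint32_t toTelemetryCode(CodeGenCompilationResult r)
{
    return static_cast<uint32_t>(r);
}
```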
CodeGen/include/Luau/CodeGenCommon.h (new file, 12 lines)
@@ -0,0 +1,12 @@
+// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
+#pragma once
+
+#include "Luau/Common.h"
+
+#if defined(LUAU_ASSERTENABLED)
+#define CODEGEN_ASSERT(expr) ((void)(!!(expr) || (Luau::assertCallHandler(#expr, __FILE__, __LINE__, __FUNCTION__) && (LUAU_DEBUGBREAK(), 0))))
+#elif defined(CODEGEN_ENABLE_ASSERT_HANDLER)
+#define CODEGEN_ASSERT(expr) ((void)(!!(expr) || Luau::assertCallHandler(#expr, __FILE__, __LINE__, __FUNCTION__)))
+#else
+#define CODEGEN_ASSERT(expr) (void)sizeof(!!(expr))
+#endif
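The new header gives the code generator its own assert macro, decoupled from `LUAU_ASSERT`; the many `LUAU_ASSERT` → `CODEGEN_ASSERT` hunks below all build on it. One subtlety: in the fallback flavor the operand feeds only `sizeof`, so it must still compile but is never evaluated. A small standalone demonstration of that flavor (the `#define` is copied from the header above; the `main` harness is illustrative):

```cpp
#include <cstdio>

// Release flavor quoted from CodeGenCommon.h: the expression is type-checked
// but never executed.
#define CODEGEN_ASSERT(expr) (void)sizeof(!!(expr))

int main()
{
    int evaluations = 0;
    // The increment sits inside an unevaluated sizeof operand, so
    // `evaluations` stays 0 while the assert still catches bit-rot in the
    // asserted expression at compile time.
    CODEGEN_ASSERT(++evaluations > 0);
    std::printf("evaluated %d time(s)\n", evaluations); // prints 0
    return 0;
}
```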
@@ -1,7 +1,7 @@
 // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
 #pragma once
 
-#include "Luau/Common.h"
+#include "Luau/CodeGenCommon.h"
 
 namespace Luau
 {
@@ -102,7 +102,7 @@ inline ConditionX64 getReverseCondition(ConditionX64 cond)
     case ConditionX64::NotParity:
         return ConditionX64::Parity;
     case ConditionX64::Count:
-        LUAU_ASSERT(!"invalid ConditionX64 value");
+        CODEGEN_ASSERT(!"invalid ConditionX64 value");
     }
 
     return ConditionX64::Count;
@@ -1,7 +1,7 @@
 // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
 #pragma once
 
-#include "Luau/Common.h"
+#include "Luau/CodeGenCommon.h"
 
 #include <bitset>
 #include <queue>
@@ -167,7 +167,7 @@ struct BlockIteratorWrapper
 
     uint32_t operator[](size_t pos) const
     {
-        LUAU_ASSERT(pos < size_t(itEnd - itBegin));
+        CODEGEN_ASSERT(pos < size_t(itEnd - itBegin));
         return itBegin[pos];
     }
 };
@@ -995,13 +995,13 @@ struct IrFunction
 
     IrBlock& blockOp(IrOp op)
     {
-        LUAU_ASSERT(op.kind == IrOpKind::Block);
+        CODEGEN_ASSERT(op.kind == IrOpKind::Block);
         return blocks[op.index];
     }
 
     IrInst& instOp(IrOp op)
     {
-        LUAU_ASSERT(op.kind == IrOpKind::Inst);
+        CODEGEN_ASSERT(op.kind == IrOpKind::Inst);
         return instructions[op.index];
     }
 
@@ -1015,7 +1015,7 @@ struct IrFunction
 
     IrConst& constOp(IrOp op)
     {
-        LUAU_ASSERT(op.kind == IrOpKind::Constant);
+        CODEGEN_ASSERT(op.kind == IrOpKind::Constant);
         return constants[op.index];
     }
 
@@ -1023,7 +1023,7 @@ struct IrFunction
     {
         IrConst& value = constOp(op);
 
-        LUAU_ASSERT(value.kind == IrConstKind::Tag);
+        CODEGEN_ASSERT(value.kind == IrConstKind::Tag);
         return value.valueTag;
     }
 
@@ -1044,7 +1044,7 @@ struct IrFunction
     {
         IrConst& value = constOp(op);
 
-        LUAU_ASSERT(value.kind == IrConstKind::Int);
+        CODEGEN_ASSERT(value.kind == IrConstKind::Int);
         return value.valueInt;
     }
 
@@ -1065,7 +1065,7 @@ struct IrFunction
     {
         IrConst& value = constOp(op);
 
-        LUAU_ASSERT(value.kind == IrConstKind::Uint);
+        CODEGEN_ASSERT(value.kind == IrConstKind::Uint);
         return value.valueUint;
     }
 
@@ -1086,7 +1086,7 @@ struct IrFunction
     {
         IrConst& value = constOp(op);
 
-        LUAU_ASSERT(value.kind == IrConstKind::Double);
+        CODEGEN_ASSERT(value.kind == IrConstKind::Double);
         return value.valueDouble;
     }
 
@@ -1106,14 +1106,14 @@ struct IrFunction
     uint32_t getBlockIndex(const IrBlock& block) const
     {
         // Can only be called with blocks from our vector
-        LUAU_ASSERT(&block >= blocks.data() && &block <= blocks.data() + blocks.size());
+        CODEGEN_ASSERT(&block >= blocks.data() && &block <= blocks.data() + blocks.size());
         return uint32_t(&block - blocks.data());
     }
 
     uint32_t getInstIndex(const IrInst& inst) const
     {
         // Can only be called with instructions from our vector
-        LUAU_ASSERT(&inst >= instructions.data() && &inst <= instructions.data() + instructions.size());
+        CODEGEN_ASSERT(&inst >= instructions.data() && &inst <= instructions.data() + instructions.size());
         return uint32_t(&inst - instructions.data());
     }
 
@@ -1154,7 +1154,7 @@ struct IrFunction
 
     BytecodeTypes getBytecodeTypesAt(int pcpos) const
     {
-        LUAU_ASSERT(pcpos >= 0);
+        CODEGEN_ASSERT(pcpos >= 0);
 
         if (size_t(pcpos) < bcTypes.size())
             return bcTypes[pcpos];
@@ -1165,31 +1165,31 @@ struct IrFunction
 
 inline IrCondition conditionOp(IrOp op)
 {
-    LUAU_ASSERT(op.kind == IrOpKind::Condition);
+    CODEGEN_ASSERT(op.kind == IrOpKind::Condition);
     return IrCondition(op.index);
 }
 
 inline int vmRegOp(IrOp op)
 {
-    LUAU_ASSERT(op.kind == IrOpKind::VmReg);
+    CODEGEN_ASSERT(op.kind == IrOpKind::VmReg);
     return op.index;
 }
 
 inline int vmConstOp(IrOp op)
 {
-    LUAU_ASSERT(op.kind == IrOpKind::VmConst);
+    CODEGEN_ASSERT(op.kind == IrOpKind::VmConst);
     return op.index;
 }
 
 inline int vmUpvalueOp(IrOp op)
 {
-    LUAU_ASSERT(op.kind == IrOpKind::VmUpvalue);
+    CODEGEN_ASSERT(op.kind == IrOpKind::VmUpvalue);
     return op.index;
 }
 
 inline uint32_t vmExitOp(IrOp op)
 {
-    LUAU_ASSERT(op.kind == IrOpKind::VmExit);
+    CODEGEN_ASSERT(op.kind == IrOpKind::VmExit);
     return op.index;
 }
 
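Every `IrFunction` accessor above follows one pattern: assert the operand's `IrOpKind` (or `IrConstKind`), then index the vector that kind maps to; this commit only swaps the assert macro. A simplified standalone sketch of the pattern, with stand-in types and plain `assert` in place of `CODEGEN_ASSERT`:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Simplified stand-ins for the real IR types; illustration only.
enum class IrOpKind : uint8_t { Block, Inst, Constant };
struct IrOp { IrOpKind kind; uint32_t index; };
struct IrBlock {};
struct IrInst {};

struct IrFunctionSketch
{
    std::vector<IrBlock> blocks;
    std::vector<IrInst> instructions;

    // Same shape as blockOp/instOp above: check the operand kind, then index
    // the matching vector.
    IrBlock& blockOp(IrOp op)
    {
        assert(op.kind == IrOpKind::Block);
        return blocks[op.index];
    }

    IrInst& instOp(IrOp op)
    {
        assert(op.kind == IrOpKind::Inst);
        return instructions[op.index];
    }
};
```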
@@ -117,7 +117,7 @@ static void visitVmRegDefsUses(T& visitor, IrFunction& function, const IrInst& i
    {
        if (count >= 3)
        {
-            LUAU_ASSERT(inst.d.kind == IrOpKind::VmReg && vmRegOp(inst.d) == vmRegOp(inst.c) + 1);
+            CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmReg && vmRegOp(inst.d) == vmRegOp(inst.c) + 1);
 
            visitor.useRange(vmRegOp(inst.c), count);
        }
@@ -206,12 +206,12 @@ static void visitVmRegDefsUses(T& visitor, IrFunction& function, const IrInst& i
 
    default:
        // All instructions which reference registers have to be handled explicitly
-        LUAU_ASSERT(inst.a.kind != IrOpKind::VmReg);
-        LUAU_ASSERT(inst.b.kind != IrOpKind::VmReg);
-        LUAU_ASSERT(inst.c.kind != IrOpKind::VmReg);
-        LUAU_ASSERT(inst.d.kind != IrOpKind::VmReg);
-        LUAU_ASSERT(inst.e.kind != IrOpKind::VmReg);
-        LUAU_ASSERT(inst.f.kind != IrOpKind::VmReg);
+        CODEGEN_ASSERT(inst.a.kind != IrOpKind::VmReg);
+        CODEGEN_ASSERT(inst.b.kind != IrOpKind::VmReg);
+        CODEGEN_ASSERT(inst.c.kind != IrOpKind::VmReg);
+        CODEGEN_ASSERT(inst.d.kind != IrOpKind::VmReg);
+        CODEGEN_ASSERT(inst.e.kind != IrOpKind::VmReg);
+        CODEGEN_ASSERT(inst.f.kind != IrOpKind::VmReg);
        break;
    }
 }
@@ -1,7 +1,7 @@
 // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
 #pragma once
 
-#include "Luau/Common.h"
+#include "Luau/CodeGenCommon.h"
 #include "Luau/RegisterX64.h"
 
 #include <stdint.h>
@@ -62,9 +62,9 @@ struct OperandX64
 
     constexpr OperandX64 operator[](OperandX64&& addr) const
     {
-        LUAU_ASSERT(cat == CategoryX64::mem);
-        LUAU_ASSERT(index == noreg && scale == 1 && base == noreg && imm == 0);
-        LUAU_ASSERT(addr.memSize == SizeX64::none);
+        CODEGEN_ASSERT(cat == CategoryX64::mem);
+        CODEGEN_ASSERT(index == noreg && scale == 1 && base == noreg && imm == 0);
+        CODEGEN_ASSERT(addr.memSize == SizeX64::none);
 
        addr.cat = CategoryX64::mem;
        addr.memSize = memSize;
@@ -85,8 +85,8 @@ constexpr OperandX64 operator*(RegisterX64 reg, uint8_t scale)
    if (scale == 1)
        return OperandX64(reg);
 
-    LUAU_ASSERT(scale == 1 || scale == 2 || scale == 4 || scale == 8);
-    LUAU_ASSERT(reg.index != 0b100 && "can't scale SP");
+    CODEGEN_ASSERT(scale == 1 || scale == 2 || scale == 4 || scale == 8);
+    CODEGEN_ASSERT(reg.index != 0b100 && "can't scale SP");
 
    return OperandX64(SizeX64::none, reg, scale, noreg, 0);
 }
@@ -103,16 +103,16 @@ constexpr OperandX64 operator-(RegisterX64 reg, int32_t disp)
 
 constexpr OperandX64 operator+(RegisterX64 base, RegisterX64 index)
 {
-    LUAU_ASSERT(index.index != 4 && "sp cannot be used as index");
-    LUAU_ASSERT(base.size == index.size);
+    CODEGEN_ASSERT(index.index != 4 && "sp cannot be used as index");
+    CODEGEN_ASSERT(base.size == index.size);
 
    return OperandX64(SizeX64::none, index, 1, base, 0);
 }
 
 constexpr OperandX64 operator+(OperandX64 op, int32_t disp)
 {
-    LUAU_ASSERT(op.cat == CategoryX64::mem);
-    LUAU_ASSERT(op.memSize == SizeX64::none);
+    CODEGEN_ASSERT(op.cat == CategoryX64::mem);
+    CODEGEN_ASSERT(op.memSize == SizeX64::none);
 
    op.imm += disp;
    return op;
@@ -120,10 +120,10 @@ constexpr OperandX64 operator+(OperandX64 op, int32_t disp)
 
 constexpr OperandX64 operator+(OperandX64 op, RegisterX64 base)
 {
-    LUAU_ASSERT(op.cat == CategoryX64::mem);
-    LUAU_ASSERT(op.memSize == SizeX64::none);
-    LUAU_ASSERT(op.base == noreg);
-    LUAU_ASSERT(op.index == noreg || op.index.size == base.size);
+    CODEGEN_ASSERT(op.cat == CategoryX64::mem);
+    CODEGEN_ASSERT(op.memSize == SizeX64::none);
+    CODEGEN_ASSERT(op.base == noreg);
+    CODEGEN_ASSERT(op.index == noreg || op.index.size == base.size);
 
    op.base = base;
    return op;
@@ -131,10 +131,10 @@ constexpr OperandX64 operator+(OperandX64 op, RegisterX64 base)
 
 constexpr OperandX64 operator+(RegisterX64 base, OperandX64 op)
 {
-    LUAU_ASSERT(op.cat == CategoryX64::mem);
-    LUAU_ASSERT(op.memSize == SizeX64::none);
-    LUAU_ASSERT(op.base == noreg);
-    LUAU_ASSERT(op.index == noreg || op.index.size == base.size);
+    CODEGEN_ASSERT(op.cat == CategoryX64::mem);
+    CODEGEN_ASSERT(op.memSize == SizeX64::none);
+    CODEGEN_ASSERT(op.base == noreg);
+    CODEGEN_ASSERT(op.index == noreg || op.index.size == base.size);
 
    op.base = base;
    return op;
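The `OperandX64` operators above let an addressing expression such as `rax + rbx * 8 + 16` be written algebraically and folded into one operand, with the asserts rejecting shapes x64 cannot encode (scaling SP, attaching a second base register, size mismatches). A minimal standalone model of the folding — `Reg` and `Mem` are illustrative stand-ins, not the real types:

```cpp
#include <cassert>
#include <cstdint>

// Stand-ins mirroring how [base + index*scale + disp] is accumulated.
struct Reg { uint8_t index; };
struct Mem { Reg index; uint8_t scale; Reg base; int32_t imm; bool hasBase; };

Mem operator*(Reg r, uint8_t scale)
{
    // The real operator additionally asserts scale is 1/2/4/8 and rejects SP.
    return Mem{r, scale, Reg{0}, 0, false};
}

Mem operator+(Mem m, Reg base)
{
    // The real operator asserts a base was not already attached.
    m.base = base;
    m.hasBase = true;
    return m;
}

Mem operator+(Mem m, int32_t disp)
{
    m.imm += disp; // displacements simply accumulate into imm
    return m;
}

int main()
{
    Reg rax{0}, rbx{3};
    Mem addr = rbx * 8 + rax + 16; // index=rbx, scale=8, base=rax, disp=16
    assert(addr.scale == 8 && addr.imm == 16 && addr.hasBase);
    return 0;
}
```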
@@ -1,7 +1,7 @@
 // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
 #pragma once
 
-#include "Luau/Common.h"
+#include "Luau/CodeGenCommon.h"
 
 #include <stdint.h>
 
@@ -40,9 +40,9 @@ struct RegisterA64
 
 constexpr RegisterA64 castReg(KindA64 kind, RegisterA64 reg)
 {
-    LUAU_ASSERT(kind != reg.kind);
-    LUAU_ASSERT(kind != KindA64::none && reg.kind != KindA64::none);
-    LUAU_ASSERT((kind == KindA64::w || kind == KindA64::x) == (reg.kind == KindA64::w || reg.kind == KindA64::x));
+    CODEGEN_ASSERT(kind != reg.kind);
+    CODEGEN_ASSERT(kind != KindA64::none && reg.kind != KindA64::none);
+    CODEGEN_ASSERT((kind == KindA64::w || kind == KindA64::x) == (reg.kind == KindA64::w || reg.kind == KindA64::x));
 
    return RegisterA64{kind, reg.index};
 }
@@ -1,7 +1,7 @@
 // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
 #pragma once
 
-#include "Luau/Common.h"
+#include "Luau/CodeGenCommon.h"
 
 #include <stdint.h>
 
@ -58,13 +58,13 @@ AssemblyBuilderA64::AssemblyBuilderA64(bool logText, unsigned int features)
|
|||||||
|
|
||||||
AssemblyBuilderA64::~AssemblyBuilderA64()
|
AssemblyBuilderA64::~AssemblyBuilderA64()
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(finalized);
|
CODEGEN_ASSERT(finalized);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::mov(RegisterA64 dst, RegisterA64 src)
|
void AssemblyBuilderA64::mov(RegisterA64 dst, RegisterA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst == sp);
|
CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst == sp);
|
||||||
LUAU_ASSERT(dst.kind == src.kind || (dst.kind == KindA64::x && src == sp) || (dst == sp && src.kind == KindA64::x));
|
CODEGEN_ASSERT(dst.kind == src.kind || (dst.kind == KindA64::x && src == sp) || (dst == sp && src.kind == KindA64::x));
|
||||||
|
|
||||||
if (dst == sp || src == sp)
|
if (dst == sp || src == sp)
|
||||||
placeR1("mov", dst, src, 0b00'100010'0'000000000000);
|
placeR1("mov", dst, src, 0b00'100010'0'000000000000);
|
||||||
@ -150,14 +150,14 @@ void AssemblyBuilderA64::cmp(RegisterA64 src1, uint16_t src2)
|
|||||||
|
|
||||||
void AssemblyBuilderA64::csel(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, ConditionA64 cond)
|
void AssemblyBuilderA64::csel(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, ConditionA64 cond)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
|
CODEGEN_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
|
||||||
|
|
||||||
placeCS("csel", dst, src1, src2, cond, 0b11010'10'0, 0b00);
|
placeCS("csel", dst, src1, src2, cond, 0b11010'10'0, 0b00);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::cset(RegisterA64 dst, ConditionA64 cond)
|
void AssemblyBuilderA64::cset(RegisterA64 dst, ConditionA64 cond)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
|
CODEGEN_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
|
||||||
|
|
||||||
RegisterA64 src = dst.kind == KindA64::x ? xzr : wzr;
|
RegisterA64 src = dst.kind == KindA64::x ? xzr : wzr;
|
||||||
|
|
||||||
@ -240,24 +240,24 @@ void AssemblyBuilderA64::ror(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2
|
|||||||
|
|
||||||
void AssemblyBuilderA64::clz(RegisterA64 dst, RegisterA64 src)
|
void AssemblyBuilderA64::clz(RegisterA64 dst, RegisterA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
|
CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
|
||||||
LUAU_ASSERT(dst.kind == src.kind);
|
CODEGEN_ASSERT(dst.kind == src.kind);
|
||||||
|
|
||||||
placeR1("clz", dst, src, 0b10'11010110'00000'00010'0);
|
placeR1("clz", dst, src, 0b10'11010110'00000'00010'0);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::rbit(RegisterA64 dst, RegisterA64 src)
|
void AssemblyBuilderA64::rbit(RegisterA64 dst, RegisterA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
|
CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
|
||||||
LUAU_ASSERT(dst.kind == src.kind);
|
CODEGEN_ASSERT(dst.kind == src.kind);
|
||||||
|
|
||||||
placeR1("rbit", dst, src, 0b10'11010110'00000'0000'00);
|
placeR1("rbit", dst, src, 0b10'11010110'00000'0000'00);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::rev(RegisterA64 dst, RegisterA64 src)
|
void AssemblyBuilderA64::rev(RegisterA64 dst, RegisterA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
|
CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
|
||||||
LUAU_ASSERT(dst.kind == src.kind);
|
CODEGEN_ASSERT(dst.kind == src.kind);
|
||||||
|
|
||||||
placeR1("rev", dst, src, 0b10'11010110'00000'0000'10 | int(dst.kind == KindA64::x));
|
placeR1("rev", dst, src, 0b10'11010110'00000'0000'10 | int(dst.kind == KindA64::x));
|
||||||
}
|
}
|
||||||
@ -265,7 +265,7 @@ void AssemblyBuilderA64::rev(RegisterA64 dst, RegisterA64 src)
|
|||||||
void AssemblyBuilderA64::lsl(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
|
void AssemblyBuilderA64::lsl(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
|
||||||
{
|
{
|
||||||
int size = dst.kind == KindA64::x ? 64 : 32;
|
int size = dst.kind == KindA64::x ? 64 : 32;
|
||||||
LUAU_ASSERT(src2 < size);
|
CODEGEN_ASSERT(src2 < size);
|
||||||
|
|
||||||
placeBFM("lsl", dst, src1, src2, 0b10'100110, (-src2) & (size - 1), size - 1 - src2);
|
placeBFM("lsl", dst, src1, src2, 0b10'100110, (-src2) & (size - 1), size - 1 - src2);
|
||||||
}
|
}
|
||||||
@ -273,7 +273,7 @@ void AssemblyBuilderA64::lsl(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
|
|||||||
void AssemblyBuilderA64::lsr(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
|
void AssemblyBuilderA64::lsr(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
|
||||||
{
|
{
|
||||||
int size = dst.kind == KindA64::x ? 64 : 32;
|
int size = dst.kind == KindA64::x ? 64 : 32;
|
||||||
LUAU_ASSERT(src2 < size);
|
CODEGEN_ASSERT(src2 < size);
|
||||||
|
|
||||||
placeBFM("lsr", dst, src1, src2, 0b10'100110, src2, size - 1);
|
placeBFM("lsr", dst, src1, src2, 0b10'100110, src2, size - 1);
|
||||||
}
|
}
|
||||||
@ -281,7 +281,7 @@ void AssemblyBuilderA64::lsr(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
|
|||||||
void AssemblyBuilderA64::asr(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
|
void AssemblyBuilderA64::asr(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
|
||||||
{
|
{
|
||||||
int size = dst.kind == KindA64::x ? 64 : 32;
|
int size = dst.kind == KindA64::x ? 64 : 32;
|
||||||
LUAU_ASSERT(src2 < size);
|
CODEGEN_ASSERT(src2 < size);
|
||||||
|
|
||||||
placeBFM("asr", dst, src1, src2, 0b00'100110, src2, size - 1);
|
placeBFM("asr", dst, src1, src2, 0b00'100110, src2, size - 1);
|
||||||
}
|
}
|
||||||
@ -289,7 +289,7 @@ void AssemblyBuilderA64::asr(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
|
|||||||
void AssemblyBuilderA64::ror(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
|
void AssemblyBuilderA64::ror(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
|
||||||
{
|
{
|
||||||
int size = dst.kind == KindA64::x ? 64 : 32;
|
int size = dst.kind == KindA64::x ? 64 : 32;
|
||||||
LUAU_ASSERT(src2 < size);
|
CODEGEN_ASSERT(src2 < size);
|
||||||
|
|
||||||
// note: this is encoding src1 via immr which is a hack but the bit layout matches and a special archetype feels excessive
|
// note: this is encoding src1 via immr which is a hack but the bit layout matches and a special archetype feels excessive
|
||||||
placeBFM("ror", dst, src1, src2, 0b00'100111, src1.index, src2);
|
placeBFM("ror", dst, src1, src2, 0b00'100111, src1.index, src2);
|
||||||
@ -298,7 +298,7 @@ void AssemblyBuilderA64::ror(RegisterA64 dst, RegisterA64 src1, uint8_t src2)
|
|||||||
void AssemblyBuilderA64::ubfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w)
|
void AssemblyBuilderA64::ubfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w)
|
||||||
{
|
{
|
||||||
int size = dst.kind == KindA64::x ? 64 : 32;
|
int size = dst.kind == KindA64::x ? 64 : 32;
|
||||||
LUAU_ASSERT(w > 0 && f + w <= size);
|
CODEGEN_ASSERT(w > 0 && f + w <= size);
|
||||||
|
|
||||||
// f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability
|
// f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability
|
||||||
placeBFM("ubfiz", dst, src, f * 100 + w, 0b10'100110, (-f) & (size - 1), w - 1);
|
placeBFM("ubfiz", dst, src, f * 100 + w, 0b10'100110, (-f) & (size - 1), w - 1);
|
||||||
@ -307,7 +307,7 @@ void AssemblyBuilderA64::ubfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint
|
|||||||
void AssemblyBuilderA64::ubfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w)
|
void AssemblyBuilderA64::ubfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w)
|
||||||
{
|
{
|
||||||
int size = dst.kind == KindA64::x ? 64 : 32;
|
int size = dst.kind == KindA64::x ? 64 : 32;
|
||||||
LUAU_ASSERT(w > 0 && f + w <= size);
|
CODEGEN_ASSERT(w > 0 && f + w <= size);
|
||||||
|
|
||||||
// f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability
|
// f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability
|
||||||
placeBFM("ubfx", dst, src, f * 100 + w, 0b10'100110, f, f + w - 1);
|
placeBFM("ubfx", dst, src, f * 100 + w, 0b10'100110, f, f + w - 1);
|
||||||
@ -316,7 +316,7 @@ void AssemblyBuilderA64::ubfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8
|
|||||||
void AssemblyBuilderA64::sbfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w)
|
void AssemblyBuilderA64::sbfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w)
|
||||||
{
|
{
|
||||||
int size = dst.kind == KindA64::x ? 64 : 32;
|
int size = dst.kind == KindA64::x ? 64 : 32;
|
||||||
LUAU_ASSERT(w > 0 && f + w <= size);
|
CODEGEN_ASSERT(w > 0 && f + w <= size);
|
||||||
|
|
||||||
// f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability
|
// f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability
|
||||||
placeBFM("sbfiz", dst, src, f * 100 + w, 0b00'100110, (-f) & (size - 1), w - 1);
|
placeBFM("sbfiz", dst, src, f * 100 + w, 0b00'100110, (-f) & (size - 1), w - 1);
|
||||||
@ -325,7 +325,7 @@ void AssemblyBuilderA64::sbfiz(RegisterA64 dst, RegisterA64 src, uint8_t f, uint
|
|||||||
void AssemblyBuilderA64::sbfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w)
|
void AssemblyBuilderA64::sbfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8_t w)
|
||||||
{
|
{
|
||||||
int size = dst.kind == KindA64::x ? 64 : 32;
|
int size = dst.kind == KindA64::x ? 64 : 32;
|
||||||
LUAU_ASSERT(w > 0 && f + w <= size);
|
CODEGEN_ASSERT(w > 0 && f + w <= size);
|
||||||
|
|
||||||
// f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability
|
// f * 100 + w is only used for disassembly printout; in the future we might replace it with two separate fields for readability
|
||||||
placeBFM("sbfx", dst, src, f * 100 + w, 0b00'100110, f, f + w - 1);
|
placeBFM("sbfx", dst, src, f * 100 + w, 0b00'100110, f, f + w - 1);
|
||||||
@ -333,7 +333,7 @@ void AssemblyBuilderA64::sbfx(RegisterA64 dst, RegisterA64 src, uint8_t f, uint8
|
|||||||
|
|
||||||
void AssemblyBuilderA64::ldr(RegisterA64 dst, AddressA64 src)
|
void AssemblyBuilderA64::ldr(RegisterA64 dst, AddressA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w || dst.kind == KindA64::s || dst.kind == KindA64::d || dst.kind == KindA64::q);
|
CODEGEN_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w || dst.kind == KindA64::s || dst.kind == KindA64::d || dst.kind == KindA64::q);
|
||||||
|
|
||||||
switch (dst.kind)
|
switch (dst.kind)
|
||||||
{
|
{
|
||||||
@ -353,56 +353,56 @@ void AssemblyBuilderA64::ldr(RegisterA64 dst, AddressA64 src)
|
|||||||
placeA("ldr", dst, src, 0b00'11110011, /* sizelog= */ 4);
|
placeA("ldr", dst, src, 0b00'11110011, /* sizelog= */ 4);
|
||||||
break;
|
break;
|
||||||
case KindA64::none:
|
case KindA64::none:
|
||||||
LUAU_ASSERT(!"Unexpected register kind");
|
CODEGEN_ASSERT(!"Unexpected register kind");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::ldrb(RegisterA64 dst, AddressA64 src)
|
void AssemblyBuilderA64::ldrb(RegisterA64 dst, AddressA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::w);
|
CODEGEN_ASSERT(dst.kind == KindA64::w);
|
||||||
|
|
||||||
placeA("ldrb", dst, src, 0b00'11100001, /* sizelog= */ 0);
|
placeA("ldrb", dst, src, 0b00'11100001, /* sizelog= */ 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::ldrh(RegisterA64 dst, AddressA64 src)
|
void AssemblyBuilderA64::ldrh(RegisterA64 dst, AddressA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::w);
|
CODEGEN_ASSERT(dst.kind == KindA64::w);
|
||||||
|
|
||||||
placeA("ldrh", dst, src, 0b01'11100001, /* sizelog= */ 1);
|
placeA("ldrh", dst, src, 0b01'11100001, /* sizelog= */ 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::ldrsb(RegisterA64 dst, AddressA64 src)
|
void AssemblyBuilderA64::ldrsb(RegisterA64 dst, AddressA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
|
CODEGEN_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
|
||||||
|
|
||||||
placeA("ldrsb", dst, src, 0b00'11100010 | uint8_t(dst.kind == KindA64::w), /* sizelog= */ 0);
|
placeA("ldrsb", dst, src, 0b00'11100010 | uint8_t(dst.kind == KindA64::w), /* sizelog= */ 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::ldrsh(RegisterA64 dst, AddressA64 src)
|
void AssemblyBuilderA64::ldrsh(RegisterA64 dst, AddressA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
|
CODEGEN_ASSERT(dst.kind == KindA64::x || dst.kind == KindA64::w);
|
||||||
|
|
||||||
placeA("ldrsh", dst, src, 0b01'11100010 | uint8_t(dst.kind == KindA64::w), /* sizelog= */ 1);
|
placeA("ldrsh", dst, src, 0b01'11100010 | uint8_t(dst.kind == KindA64::w), /* sizelog= */ 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::ldrsw(RegisterA64 dst, AddressA64 src)
|
void AssemblyBuilderA64::ldrsw(RegisterA64 dst, AddressA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::x);
|
CODEGEN_ASSERT(dst.kind == KindA64::x);
|
||||||
|
|
||||||
placeA("ldrsw", dst, src, 0b10'11100010, /* sizelog= */ 2);
|
placeA("ldrsw", dst, src, 0b10'11100010, /* sizelog= */ 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::ldp(RegisterA64 dst1, RegisterA64 dst2, AddressA64 src)
|
void AssemblyBuilderA64::ldp(RegisterA64 dst1, RegisterA64 dst2, AddressA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst1.kind == KindA64::x || dst1.kind == KindA64::w);
|
CODEGEN_ASSERT(dst1.kind == KindA64::x || dst1.kind == KindA64::w);
|
||||||
LUAU_ASSERT(dst1.kind == dst2.kind);
|
CODEGEN_ASSERT(dst1.kind == dst2.kind);
|
||||||
|
|
||||||
placeP("ldp", dst1, dst2, src, 0b101'0'010'1, uint8_t(dst1.kind == KindA64::x) << 1, /* sizelog= */ dst1.kind == KindA64::x ? 3 : 2);
|
placeP("ldp", dst1, dst2, src, 0b101'0'010'1, uint8_t(dst1.kind == KindA64::x) << 1, /* sizelog= */ dst1.kind == KindA64::x ? 3 : 2);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::str(RegisterA64 src, AddressA64 dst)
|
void AssemblyBuilderA64::str(RegisterA64 src, AddressA64 dst)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(src.kind == KindA64::x || src.kind == KindA64::w || src.kind == KindA64::s || src.kind == KindA64::d || src.kind == KindA64::q);
|
CODEGEN_ASSERT(src.kind == KindA64::x || src.kind == KindA64::w || src.kind == KindA64::s || src.kind == KindA64::d || src.kind == KindA64::q);
|
||||||
|
|
||||||
switch (src.kind)
|
switch (src.kind)
|
||||||
{
|
{
|
||||||
@ -422,28 +422,28 @@ void AssemblyBuilderA64::str(RegisterA64 src, AddressA64 dst)
|
|||||||
placeA("str", src, dst, 0b00'11110010, /* sizelog= */ 4);
|
placeA("str", src, dst, 0b00'11110010, /* sizelog= */ 4);
|
||||||
break;
|
break;
|
||||||
case KindA64::none:
|
case KindA64::none:
|
||||||
LUAU_ASSERT(!"Unexpected register kind");
|
CODEGEN_ASSERT(!"Unexpected register kind");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::strb(RegisterA64 src, AddressA64 dst)
|
void AssemblyBuilderA64::strb(RegisterA64 src, AddressA64 dst)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(src.kind == KindA64::w);
|
CODEGEN_ASSERT(src.kind == KindA64::w);
|
||||||
|
|
||||||
placeA("strb", src, dst, 0b00'11100000, /* sizelog= */ 0);
|
placeA("strb", src, dst, 0b00'11100000, /* sizelog= */ 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::strh(RegisterA64 src, AddressA64 dst)
|
void AssemblyBuilderA64::strh(RegisterA64 src, AddressA64 dst)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(src.kind == KindA64::w);
|
CODEGEN_ASSERT(src.kind == KindA64::w);
|
||||||
|
|
||||||
placeA("strh", src, dst, 0b01'11100000, /* sizelog= */ 1);
|
placeA("strh", src, dst, 0b01'11100000, /* sizelog= */ 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
void AssemblyBuilderA64::stp(RegisterA64 src1, RegisterA64 src2, AddressA64 dst)
|
void AssemblyBuilderA64::stp(RegisterA64 src1, RegisterA64 src2, AddressA64 dst)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(src1.kind == KindA64::x || src1.kind == KindA64::w);
|
CODEGEN_ASSERT(src1.kind == KindA64::x || src1.kind == KindA64::w);
|
||||||
LUAU_ASSERT(src1.kind == src2.kind);
|
CODEGEN_ASSERT(src1.kind == src2.kind);
|
||||||
|
|
||||||
placeP("stp", src1, src2, dst, 0b101'0'010'0, uint8_t(src1.kind == KindA64::x) << 1, /* sizelog= */ src1.kind == KindA64::x ? 3 : 2);
|
placeP("stp", src1, src2, dst, 0b101'0'010'0, uint8_t(src1.kind == KindA64::x) << 1, /* sizelog= */ src1.kind == KindA64::x ? 3 : 2);
|
||||||
}
|
}
|
||||||
@ -538,7 +538,7 @@ void AssemblyBuilderA64::adr(RegisterA64 dst, Label& label)
|
|||||||
|
|
||||||
void AssemblyBuilderA64::fmov(RegisterA64 dst, RegisterA64 src)
|
void AssemblyBuilderA64::fmov(RegisterA64 dst, RegisterA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::d && (src.kind == KindA64::d || src.kind == KindA64::x));
|
CODEGEN_ASSERT(dst.kind == KindA64::d && (src.kind == KindA64::d || src.kind == KindA64::x));
|
||||||
|
|
||||||
if (src.kind == KindA64::d)
|
if (src.kind == KindA64::d)
|
||||||
placeR1("fmov", dst, src, 0b000'11110'01'1'0000'00'10000);
|
placeR1("fmov", dst, src, 0b000'11110'01'1'0000'00'10000);
|
||||||
@ -548,10 +548,10 @@ void AssemblyBuilderA64::fmov(RegisterA64 dst, RegisterA64 src)
|
|||||||
|
|
||||||
void AssemblyBuilderA64::fmov(RegisterA64 dst, double src)
|
void AssemblyBuilderA64::fmov(RegisterA64 dst, double src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::d);
|
CODEGEN_ASSERT(dst.kind == KindA64::d);
|
||||||
|
|
||||||
int imm = getFmovImm(src);
|
int imm = getFmovImm(src);
|
||||||
LUAU_ASSERT(imm >= 0 && imm <= 256);
|
CODEGEN_ASSERT(imm >= 0 && imm <= 256);
|
||||||
|
|
||||||
// fmov can't encode 0, but movi can; movi is otherwise not useful for 64-bit fp immediates because it encodes repeating patterns
|
// fmov can't encode 0, but movi can; movi is otherwise not useful for 64-bit fp immediates because it encodes repeating patterns
|
||||||
if (imm == 256)
|
if (imm == 256)
|
||||||
@ -562,7 +562,7 @@ void AssemblyBuilderA64::fmov(RegisterA64 dst, double src)
|
|||||||
|
|
||||||
void AssemblyBuilderA64::fabs(RegisterA64 dst, RegisterA64 src)
|
void AssemblyBuilderA64::fabs(RegisterA64 dst, RegisterA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d);
|
CODEGEN_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d);
|
||||||
|
|
||||||
placeR1("fabs", dst, src, 0b000'11110'01'1'0000'01'10000);
|
placeR1("fabs", dst, src, 0b000'11110'01'1'0000'01'10000);
|
||||||
}
|
}
|
||||||
@ -571,13 +571,13 @@ void AssemblyBuilderA64::fadd(RegisterA64 dst, RegisterA64 src1, RegisterA64 src
|
|||||||
{
|
{
|
||||||
if (dst.kind == KindA64::d)
|
if (dst.kind == KindA64::d)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
|
CODEGEN_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
|
||||||
|
|
||||||
placeR3("fadd", dst, src1, src2, 0b11110'01'1, 0b0010'10);
|
placeR3("fadd", dst, src1, src2, 0b11110'01'1, 0b0010'10);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s);
|
CODEGEN_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s);
|
||||||
|
|
||||||
placeR3("fadd", dst, src1, src2, 0b11110'00'1, 0b0010'10);
|
placeR3("fadd", dst, src1, src2, 0b11110'00'1, 0b0010'10);
|
||||||
}
|
}
|
||||||
@ -587,13 +587,13 @@ void AssemblyBuilderA64::fdiv(RegisterA64 dst, RegisterA64 src1, RegisterA64 src
|
|||||||
{
|
{
|
||||||
if (dst.kind == KindA64::d)
|
if (dst.kind == KindA64::d)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
|
CODEGEN_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
|
||||||
|
|
||||||
placeR3("fdiv", dst, src1, src2, 0b11110'01'1, 0b0001'10);
|
placeR3("fdiv", dst, src1, src2, 0b11110'01'1, 0b0001'10);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s);
|
CODEGEN_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s);
|
||||||
|
|
||||||
placeR3("fdiv", dst, src1, src2, 0b11110'00'1, 0b0001'10);
|
placeR3("fdiv", dst, src1, src2, 0b11110'00'1, 0b0001'10);
|
||||||
}
|
}
|
||||||
@ -603,13 +603,13 @@ void AssemblyBuilderA64::fmul(RegisterA64 dst, RegisterA64 src1, RegisterA64 src
|
|||||||
{
|
{
|
||||||
if (dst.kind == KindA64::d)
|
if (dst.kind == KindA64::d)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
|
CODEGEN_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
|
||||||
|
|
||||||
placeR3("fmul", dst, src1, src2, 0b11110'01'1, 0b0000'10);
|
placeR3("fmul", dst, src1, src2, 0b11110'01'1, 0b0000'10);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s);
|
CODEGEN_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s);
|
||||||
|
|
||||||
placeR3("fmul", dst, src1, src2, 0b11110'00'1, 0b0000'10);
|
placeR3("fmul", dst, src1, src2, 0b11110'00'1, 0b0000'10);
|
||||||
}
|
}
|
||||||
@ -619,13 +619,13 @@ void AssemblyBuilderA64::fneg(RegisterA64 dst, RegisterA64 src)
|
|||||||
{
|
{
|
||||||
if (dst.kind == KindA64::d)
|
if (dst.kind == KindA64::d)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(src.kind == KindA64::d);
|
CODEGEN_ASSERT(src.kind == KindA64::d);
|
||||||
|
|
||||||
placeR1("fneg", dst, src, 0b000'11110'01'1'0000'10'10000);
|
placeR1("fneg", dst, src, 0b000'11110'01'1'0000'10'10000);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::s && src.kind == KindA64::s);
|
CODEGEN_ASSERT(dst.kind == KindA64::s && src.kind == KindA64::s);
|
||||||
|
|
||||||
placeR1("fneg", dst, src, 0b000'11110'00'1'0000'10'10000);
|
placeR1("fneg", dst, src, 0b000'11110'00'1'0000'10'10000);
|
||||||
}
|
}
|
||||||
@ -633,7 +633,7 @@ void AssemblyBuilderA64::fneg(RegisterA64 dst, RegisterA64 src)
|
|||||||
|
|
||||||
void AssemblyBuilderA64::fsqrt(RegisterA64 dst, RegisterA64 src)
|
void AssemblyBuilderA64::fsqrt(RegisterA64 dst, RegisterA64 src)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d);
|
CODEGEN_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d);
|
||||||
|
|
||||||
placeR1("fsqrt", dst, src, 0b000'11110'01'1'0000'11'10000);
|
placeR1("fsqrt", dst, src, 0b000'11110'01'1'0000'11'10000);
|
||||||
}
|
}
|
||||||
@ -642,13 +642,13 @@ void AssemblyBuilderA64::fsub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src
|
|||||||
{
|
{
|
||||||
if (dst.kind == KindA64::d)
|
if (dst.kind == KindA64::d)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
|
CODEGEN_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
|
||||||
|
|
||||||
placeR3("fsub", dst, src1, src2, 0b11110'01'1, 0b0011'10);
|
placeR3("fsub", dst, src1, src2, 0b11110'01'1, 0b0011'10);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s);
|
CODEGEN_ASSERT(dst.kind == KindA64::s && src1.kind == KindA64::s && src2.kind == KindA64::s);
|
||||||
|
|
||||||
placeR3("fsub", dst, src1, src2, 0b11110'00'1, 0b0011'10);
|
placeR3("fsub", dst, src1, src2, 0b11110'00'1, 0b0011'10);
|
||||||
}
 }

@@ -656,8 +656,8 @@ void AssemblyBuilderA64::fsub(RegisterA64 dst, RegisterA64 src1, RegisterA64 src
 
 void AssemblyBuilderA64::ins_4s(RegisterA64 dst, RegisterA64 src, uint8_t index)
 {
-    LUAU_ASSERT(dst.kind == KindA64::q && src.kind == KindA64::w);
-    LUAU_ASSERT(index < 4);
+    CODEGEN_ASSERT(dst.kind == KindA64::q && src.kind == KindA64::w);
+    CODEGEN_ASSERT(index < 4);
 
     if (logText)
         logAppend(" %-12sv%d.s[%d],w%d\n", "ins", dst.index, index, src.index);
@@ -670,9 +670,9 @@ void AssemblyBuilderA64::ins_4s(RegisterA64 dst, RegisterA64 src, uint8_t index)
 
 void AssemblyBuilderA64::ins_4s(RegisterA64 dst, uint8_t dstIndex, RegisterA64 src, uint8_t srcIndex)
 {
-    LUAU_ASSERT(dst.kind == KindA64::q && src.kind == KindA64::q);
-    LUAU_ASSERT(dstIndex < 4);
-    LUAU_ASSERT(srcIndex < 4);
+    CODEGEN_ASSERT(dst.kind == KindA64::q && src.kind == KindA64::q);
+    CODEGEN_ASSERT(dstIndex < 4);
+    CODEGEN_ASSERT(srcIndex < 4);
 
     if (logText)
         logAppend(" %-12sv%d.s[%d],v%d.s[%d]\n", "ins", dst.index, dstIndex, src.index, srcIndex);
@@ -687,8 +687,8 @@ void AssemblyBuilderA64::dup_4s(RegisterA64 dst, RegisterA64 src, uint8_t index)
 {
     if (dst.kind == KindA64::s)
     {
-        LUAU_ASSERT(src.kind == KindA64::q);
-        LUAU_ASSERT(index < 4);
+        CODEGEN_ASSERT(src.kind == KindA64::q);
+        CODEGEN_ASSERT(index < 4);
 
         if (logText)
             logAppend(" %-12ss%d,v%d.s[%d]\n", "dup", dst.index, src.index, index);
@@ -699,8 +699,8 @@ void AssemblyBuilderA64::dup_4s(RegisterA64 dst, RegisterA64 src, uint8_t index)
     }
     else
    {
-        LUAU_ASSERT(src.kind == KindA64::q);
-        LUAU_ASSERT(index < 4);
+        CODEGEN_ASSERT(src.kind == KindA64::q);
+        CODEGEN_ASSERT(index < 4);
 
         if (logText)
             logAppend(" %-12sv%d.4s,v%d.s[%d]\n", "dup", dst.index, src.index, index);
@@ -715,21 +715,21 @@ void AssemblyBuilderA64::dup_4s(RegisterA64 dst, RegisterA64 src, uint8_t index)
 
 void AssemblyBuilderA64::frinta(RegisterA64 dst, RegisterA64 src)
 {
-    LUAU_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d);
+    CODEGEN_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d);
 
     placeR1("frinta", dst, src, 0b000'11110'01'1'001'100'10000);
 }
 
 void AssemblyBuilderA64::frintm(RegisterA64 dst, RegisterA64 src)
 {
-    LUAU_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d);
+    CODEGEN_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d);
 
     placeR1("frintm", dst, src, 0b000'11110'01'1'001'010'10000);
 }
 
 void AssemblyBuilderA64::frintp(RegisterA64 dst, RegisterA64 src)
 {
-    LUAU_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d);
+    CODEGEN_ASSERT(dst.kind == KindA64::d && src.kind == KindA64::d);
 
     placeR1("frintp", dst, src, 0b000'11110'01'1'001'001'10000);
 }
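
The frint trio above differs only in rounding mode: per the Arm A64 ISA, frinta rounds to nearest with ties away from zero, frintm toward minus infinity (floor), and frintp toward plus infinity (ceil). A hypothetical call site, assuming `build` is an AssemblyBuilderA64 and d0/d1 are scalar double registers (KindA64::d), as the asserts require:

    build.frinta(d0, d1); // d0 = round-half-away-from-zero(d1)
    build.frintm(d0, d1); // d0 = floor(d1)
    build.frintp(d0, d1); // d0 = ceil(d1)
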
@@ -741,67 +741,67 @@ void AssemblyBuilderA64::fcvt(RegisterA64 dst, RegisterA64 src)
     else if (dst.kind == KindA64::d && src.kind == KindA64::s)
         placeR1("fcvt", dst, src, 0b11110'00'1'0001'01'10000);
     else
-        LUAU_ASSERT(!"Unexpected register kind");
+        CODEGEN_ASSERT(!"Unexpected register kind");
 }
 
 void AssemblyBuilderA64::fcvtzs(RegisterA64 dst, RegisterA64 src)
 {
-    LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
-    LUAU_ASSERT(src.kind == KindA64::d);
+    CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
+    CODEGEN_ASSERT(src.kind == KindA64::d);
 
     placeR1("fcvtzs", dst, src, 0b000'11110'01'1'11'000'000000);
 }
 
 void AssemblyBuilderA64::fcvtzu(RegisterA64 dst, RegisterA64 src)
 {
-    LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
-    LUAU_ASSERT(src.kind == KindA64::d);
+    CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
+    CODEGEN_ASSERT(src.kind == KindA64::d);
 
     placeR1("fcvtzu", dst, src, 0b000'11110'01'1'11'001'000000);
 }
 
 void AssemblyBuilderA64::scvtf(RegisterA64 dst, RegisterA64 src)
 {
-    LUAU_ASSERT(dst.kind == KindA64::d);
-    LUAU_ASSERT(src.kind == KindA64::w || src.kind == KindA64::x);
+    CODEGEN_ASSERT(dst.kind == KindA64::d);
+    CODEGEN_ASSERT(src.kind == KindA64::w || src.kind == KindA64::x);
 
     placeR1("scvtf", dst, src, 0b000'11110'01'1'00'010'000000);
 }
 
 void AssemblyBuilderA64::ucvtf(RegisterA64 dst, RegisterA64 src)
 {
-    LUAU_ASSERT(dst.kind == KindA64::d);
-    LUAU_ASSERT(src.kind == KindA64::w || src.kind == KindA64::x);
+    CODEGEN_ASSERT(dst.kind == KindA64::d);
+    CODEGEN_ASSERT(src.kind == KindA64::w || src.kind == KindA64::x);
 
     placeR1("ucvtf", dst, src, 0b000'11110'01'1'00'011'000000);
 }
 
 void AssemblyBuilderA64::fjcvtzs(RegisterA64 dst, RegisterA64 src)
 {
-    LUAU_ASSERT(dst.kind == KindA64::w);
-    LUAU_ASSERT(src.kind == KindA64::d);
-    LUAU_ASSERT(features & Feature_JSCVT);
+    CODEGEN_ASSERT(dst.kind == KindA64::w);
+    CODEGEN_ASSERT(src.kind == KindA64::d);
+    CODEGEN_ASSERT(features & Feature_JSCVT);
 
     placeR1("fjcvtzs", dst, src, 0b000'11110'01'1'11'110'000000);
 }
 
 void AssemblyBuilderA64::fcmp(RegisterA64 src1, RegisterA64 src2)
 {
-    LUAU_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
+    CODEGEN_ASSERT(src1.kind == KindA64::d && src2.kind == KindA64::d);
 
     placeFCMP("fcmp", src1, src2, 0b11110'01'1, 0b00);
 }
 
 void AssemblyBuilderA64::fcmpz(RegisterA64 src)
 {
-    LUAU_ASSERT(src.kind == KindA64::d);
+    CODEGEN_ASSERT(src.kind == KindA64::d);
 
     placeFCMP("fcmp", src, RegisterA64{src.kind, 0}, 0b11110'01'1, 0b01);
 }
 
 void AssemblyBuilderA64::fcsel(RegisterA64 dst, RegisterA64 src1, RegisterA64 src2, ConditionA64 cond)
 {
-    LUAU_ASSERT(dst.kind == KindA64::d);
+    CODEGEN_ASSERT(dst.kind == KindA64::d);
 
     placeCS("fcsel", dst, src1, src2, cond, 0b11110'01'1, 0b11);
 }
@@ -820,7 +820,7 @@ bool AssemblyBuilderA64::finalize()
     {
         // If this assertion fires, a label was used in jmp without calling setLabel
         uint32_t label = fixup.label;
-        LUAU_ASSERT(labelLocations[label - 1] != ~0u);
+        CODEGEN_ASSERT(labelLocations[label - 1] != ~0u);
         int value = int(labelLocations[label - 1]) - int(fixup.location);
 
         patchOffset(fixup.location, value, fixup.kind);
@@ -913,9 +913,9 @@ void AssemblyBuilderA64::placeSR3(const char* name, RegisterA64 dst, RegisterA64
     if (logText)
         log(name, dst, src1, src2, shift);
 
-    LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
-    LUAU_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind);
-    LUAU_ASSERT(shift >= -63 && shift <= 63);
+    CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
+    CODEGEN_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind);
+    CODEGEN_ASSERT(shift >= -63 && shift <= 63);
 
     uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
 
@@ -929,8 +929,8 @@ void AssemblyBuilderA64::placeSR2(const char* name, RegisterA64 dst, RegisterA64
     if (logText)
         log(name, dst, src);
 
-    LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
-    LUAU_ASSERT(dst.kind == src.kind);
+    CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
+    CODEGEN_ASSERT(dst.kind == src.kind);
 
     uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
 
@@ -943,8 +943,8 @@ void AssemblyBuilderA64::placeR3(const char* name, RegisterA64 dst, RegisterA64
     if (logText)
         log(name, dst, src1, src2);
 
-    LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst.kind == KindA64::d || dst.kind == KindA64::s);
-    LUAU_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind);
+    CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst.kind == KindA64::d || dst.kind == KindA64::s);
+    CODEGEN_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind);
 
     uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
 
@@ -957,8 +957,8 @@ void AssemblyBuilderA64::placeR3(const char* name, RegisterA64 dst, RegisterA64
     if (logText)
         log(name, dst, src1, src2);
 
-    LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst.kind == KindA64::d || dst.kind == KindA64::q);
-    LUAU_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind);
+    CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst.kind == KindA64::d || dst.kind == KindA64::q);
+    CODEGEN_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind);
 
     place(dst.index | (src1.index << 5) | (op2 << 10) | (src2.index << 16) | (op << 21) | (sizes << 29));
     commit();
@@ -980,9 +980,9 @@ void AssemblyBuilderA64::placeI12(const char* name, RegisterA64 dst, RegisterA64
     if (logText)
         log(name, dst, src1, src2);
 
-    LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst == sp);
-    LUAU_ASSERT(dst.kind == src1.kind || (dst.kind == KindA64::x && src1 == sp) || (dst == sp && src1.kind == KindA64::x));
-    LUAU_ASSERT(src2 >= 0 && src2 < (1 << 12));
+    CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x || dst == sp);
+    CODEGEN_ASSERT(dst.kind == src1.kind || (dst.kind == KindA64::x && src1 == sp) || (dst == sp && src1.kind == KindA64::x));
+    CODEGEN_ASSERT(src2 >= 0 && src2 < (1 << 12));
 
     uint32_t sf = (dst.kind != KindA64::w) ? 0x80000000 : 0;
 
@@ -995,9 +995,9 @@ void AssemblyBuilderA64::placeI16(const char* name, RegisterA64 dst, int src, ui
     if (logText)
         log(name, dst, src, shift);
 
-    LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
-    LUAU_ASSERT(src >= 0 && src <= 0xffff);
-    LUAU_ASSERT(shift == 0 || shift == 16 || shift == 32 || shift == 48);
+    CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
+    CODEGEN_ASSERT(src >= 0 && src <= 0xffff);
+    CODEGEN_ASSERT(shift == 0 || shift == 16 || shift == 32 || shift == 48);
 
     uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
 
@@ -1021,14 +1021,14 @@ void AssemblyBuilderA64::placeA(const char* name, RegisterA64 dst, AddressA64 sr
         else if (src.data >= -256 && src.data <= 255)
             place(dst.index | (src.base.index << 5) | ((src.data & ((1 << 9) - 1)) << 12) | (opsize << 22));
         else
-            LUAU_ASSERT(!"Unable to encode large immediate offset");
+            CODEGEN_ASSERT(!"Unable to encode large immediate offset");
         break;
     case AddressKindA64::pre:
-        LUAU_ASSERT(src.data >= -256 && src.data <= 255);
+        CODEGEN_ASSERT(src.data >= -256 && src.data <= 255);
         place(dst.index | (src.base.index << 5) | (0b11 << 10) | ((src.data & ((1 << 9) - 1)) << 12) | (opsize << 22));
         break;
    case AddressKindA64::post:
-        LUAU_ASSERT(src.data >= -256 && src.data <= 255);
+        CODEGEN_ASSERT(src.data >= -256 && src.data <= 255);
        place(dst.index | (src.base.index << 5) | (0b01 << 10) | ((src.data & ((1 << 9) - 1)) << 12) | (opsize << 22));
         break;
     }
@@ -1060,7 +1060,7 @@ void AssemblyBuilderA64::placeBC(const char* name, Label& label, uint8_t op, uin
 
 void AssemblyBuilderA64::placeBCR(const char* name, Label& label, uint8_t op, RegisterA64 cond)
 {
-    LUAU_ASSERT(cond.kind == KindA64::w || cond.kind == KindA64::x);
+    CODEGEN_ASSERT(cond.kind == KindA64::w || cond.kind == KindA64::x);
 
     uint32_t sf = (cond.kind == KindA64::x) ? 0x80000000 : 0;
 
@@ -1078,7 +1078,7 @@ void AssemblyBuilderA64::placeBR(const char* name, RegisterA64 src, uint32_t op)
     if (logText)
         log(name, src);
 
-    LUAU_ASSERT(src.kind == KindA64::x);
+    CODEGEN_ASSERT(src.kind == KindA64::x);
 
     place((src.index << 5) | (op << 10));
     commit();
@@ -1086,8 +1086,8 @@ void AssemblyBuilderA64::placeBR(const char* name, RegisterA64 src, uint32_t op)
 
 void AssemblyBuilderA64::placeBTR(const char* name, Label& label, uint8_t op, RegisterA64 cond, uint8_t bit)
 {
-    LUAU_ASSERT(cond.kind == KindA64::x || cond.kind == KindA64::w);
-    LUAU_ASSERT(bit < (cond.kind == KindA64::x ? 64 : 32));
+    CODEGEN_ASSERT(cond.kind == KindA64::x || cond.kind == KindA64::w);
+    CODEGEN_ASSERT(bit < (cond.kind == KindA64::x ? 64 : 32));
 
     place(cond.index | ((bit & 0x1f) << 19) | (op << 24) | ((bit >> 5) << 31));
     commit();
@@ -1103,7 +1103,7 @@ void AssemblyBuilderA64::placeADR(const char* name, RegisterA64 dst, uint8_t op)
     if (logText)
         log(name, dst);
 
-    LUAU_ASSERT(dst.kind == KindA64::x);
+    CODEGEN_ASSERT(dst.kind == KindA64::x);
 
     place(dst.index | (op << 24));
     commit();
@@ -1111,7 +1111,7 @@ void AssemblyBuilderA64::placeADR(const char* name, RegisterA64 dst, uint8_t op)
 
 void AssemblyBuilderA64::placeADR(const char* name, RegisterA64 dst, uint8_t op, Label& label)
 {
-    LUAU_ASSERT(dst.kind == KindA64::x);
+    CODEGEN_ASSERT(dst.kind == KindA64::x);
 
     place(dst.index | (op << 24));
     commit();
@@ -1127,9 +1127,9 @@ void AssemblyBuilderA64::placeP(const char* name, RegisterA64 src1, RegisterA64
     if (logText)
         log(name, src1, src2, dst);
 
-    LUAU_ASSERT(dst.kind == AddressKindA64::imm);
-    LUAU_ASSERT(dst.data >= -128 * (1 << sizelog) && dst.data <= 127 * (1 << sizelog));
-    LUAU_ASSERT(dst.data % (1 << sizelog) == 0);
+    CODEGEN_ASSERT(dst.kind == AddressKindA64::imm);
+    CODEGEN_ASSERT(dst.data >= -128 * (1 << sizelog) && dst.data <= 127 * (1 << sizelog));
+    CODEGEN_ASSERT(dst.data % (1 << sizelog) == 0);
 
     place(src1.index | (dst.base.index << 5) | (src2.index << 10) | (((dst.data >> sizelog) & 127) << 15) | (op << 22) | (opc << 30));
     commit();
@@ -1141,7 +1141,7 @@ void AssemblyBuilderA64::placeCS(
     if (logText)
         log(name, dst, src1, src2, cond);
 
-    LUAU_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind);
+    CODEGEN_ASSERT(dst.kind == src1.kind && dst.kind == src2.kind);
 
     uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
 
@@ -1159,7 +1159,7 @@ void AssemblyBuilderA64::placeFCMP(const char* name, RegisterA64 src1, RegisterA
         log(name, src1, src2);
     }
 
-    LUAU_ASSERT(src1.kind == src2.kind);
+    CODEGEN_ASSERT(src1.kind == src2.kind);
 
     place((opc << 3) | (src1.index << 5) | (0b1000 << 10) | (src2.index << 16) | (op << 21));
     commit();
@@ -1179,9 +1179,9 @@ void AssemblyBuilderA64::placeBM(const char* name, RegisterA64 dst, RegisterA64
     if (logText)
         log(name, dst, src1, src2);
 
-    LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
-    LUAU_ASSERT(dst.kind == src1.kind);
-    LUAU_ASSERT(isMaskSupported(src2));
+    CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
+    CODEGEN_ASSERT(dst.kind == src1.kind);
+    CODEGEN_ASSERT(isMaskSupported(src2));
 
     uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
 
@@ -1200,8 +1200,8 @@ void AssemblyBuilderA64::placeBFM(const char* name, RegisterA64 dst, RegisterA64
     if (logText)
         log(name, dst, src1, src2);
 
-    LUAU_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
-    LUAU_ASSERT(dst.kind == src1.kind);
+    CODEGEN_ASSERT(dst.kind == KindA64::w || dst.kind == KindA64::x);
+    CODEGEN_ASSERT(dst.kind == src1.kind);
 
     uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0;
     uint32_t n = (dst.kind == KindA64::x) ? 1 << 22 : 0;
@@ -1215,9 +1215,9 @@ void AssemblyBuilderA64::placeER(const char* name, RegisterA64 dst, RegisterA64
     if (logText)
         log(name, dst, src1, src2, shift);
 
-    LUAU_ASSERT(dst.kind == KindA64::x && src1.kind == KindA64::x);
-    LUAU_ASSERT(src2.kind == KindA64::w);
-    LUAU_ASSERT(shift >= 0 && shift <= 4);
+    CODEGEN_ASSERT(dst.kind == KindA64::x && src1.kind == KindA64::x);
+    CODEGEN_ASSERT(src2.kind == KindA64::w);
+    CODEGEN_ASSERT(shift >= 0 && shift <= 4);
 
     uint32_t sf = (dst.kind == KindA64::x) ? 0x80000000 : 0; // could be useful in the future for byte->word extends
     int option = 0b010; // UXTW
@@ -1228,7 +1228,7 @@ void AssemblyBuilderA64::placeER(const char* name, RegisterA64 dst, RegisterA64
 
 void AssemblyBuilderA64::place(uint32_t word)
 {
-    LUAU_ASSERT(codePos < codeEnd);
+    CODEGEN_ASSERT(codePos < codeEnd);
     *codePos++ = word;
 }
 
@@ -1259,7 +1259,7 @@ void AssemblyBuilderA64::patchOffset(uint32_t location, int value, Patch::Kind k
     int offset = (kind == Patch::Imm26) ? 0 : 5;
     int range = (kind == Patch::Imm19) ? (1 << 19) : (kind == Patch::Imm26) ? (1 << 26) : (1 << 14);
 
-    LUAU_ASSERT((code[location] & ((range - 1) << offset)) == 0);
+    CODEGEN_ASSERT((code[location] & ((range - 1) << offset)) == 0);
 
     if (value > -(range >> 1) && value < (range >> 1))
         code[location] |= (value & (range - 1)) << offset;
@@ -1269,7 +1269,7 @@ void AssemblyBuilderA64::patchOffset(uint32_t location, int value, Patch::Kind k
 
 void AssemblyBuilderA64::commit()
 {
-    LUAU_ASSERT(codePos <= codeEnd);
+    CODEGEN_ASSERT(codePos <= codeEnd);
 
     if (codeEnd == codePos)
         extend();
@@ -1286,7 +1286,7 @@ void AssemblyBuilderA64::extend()
 
 size_t AssemblyBuilderA64::allocateData(size_t size, size_t align)
 {
-    LUAU_ASSERT(align > 0 && align <= kMaxAlign && (align & (align - 1)) == 0);
+    CODEGEN_ASSERT(align > 0 && align <= kMaxAlign && (align & (align - 1)) == 0);
 
     if (dataPos < size)
     {
@@ -1467,7 +1467,7 @@ void AssemblyBuilderA64::log(RegisterA64 reg)
         if (reg.index == 31)
             text.append("sp");
         else
-            LUAU_ASSERT(!"Unexpected register kind");
+            CODEGEN_ASSERT(!"Unexpected register kind");
         break;
     }
 }
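
Before moving on to the x64 builder: the patchOffset hunk above writes a label distance into a fixed-width immediate field (19, 26, or 14 bits depending on the branch form), after asserting that the field is still zero. A minimal sketch of the same masking for the 19-bit case, with a hypothetical free-function name and a plain assert standing in for the real member function:

    #include <cassert>
    #include <cstdint>

    // Sketch of AssemblyBuilderA64::patchOffset for Patch::Imm19: the field
    // occupies bits 5..23 of the instruction word and holds a signed offset.
    void patchImm19(uint32_t* code, uint32_t location, int value)
    {
        const int offset = 5;
        const int range = 1 << 19;

        assert((code[location] & ((range - 1) << offset)) == 0); // field must be clear
        assert(value > -(range >> 1) && value < (range >> 1));   // distance must fit

        code[location] |= (uint32_t(value) & (range - 1)) << offset;
    }
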
@@ -99,7 +99,7 @@ AssemblyBuilderX64::AssemblyBuilderX64(bool logText)
 
 AssemblyBuilderX64::~AssemblyBuilderX64()
 {
-    LUAU_ASSERT(finalized);
+    CODEGEN_ASSERT(finalized);
 }
 
 void AssemblyBuilderX64::add(OperandX64 lhs, OperandX64 rhs)
@@ -191,7 +191,7 @@ void AssemblyBuilderX64::mov(OperandX64 lhs, OperandX64 rhs)
     }
     else
     {
-        LUAU_ASSERT(size == SizeX64::qword);
+        CODEGEN_ASSERT(size == SizeX64::qword);
 
         place(OP_PLUS_REG(0xb8, lhs.base.index));
         placeImm64(rhs.imm);
@@ -218,7 +218,7 @@ void AssemblyBuilderX64::mov(OperandX64 lhs, OperandX64 rhs)
     }
     else
     {
-        LUAU_ASSERT(size == SizeX64::dword || size == SizeX64::qword);
+        CODEGEN_ASSERT(size == SizeX64::dword || size == SizeX64::qword);
 
         place(0xc7);
         placeModRegMem(lhs, 0, /*extraCodeBytes=*/4);
@@ -235,7 +235,7 @@ void AssemblyBuilderX64::mov(OperandX64 lhs, OperandX64 rhs)
     }
     else
     {
-        LUAU_ASSERT(!"No encoding for this operand combination");
+        CODEGEN_ASSERT(!"No encoding for this operand combination");
     }
 
     commit();
@@ -250,7 +250,7 @@ void AssemblyBuilderX64::mov64(RegisterX64 lhs, int64_t imm)
         logAppend(",%llXh\n", (unsigned long long)imm);
     }
 
-    LUAU_ASSERT(lhs.size == SizeX64::qword);
+    CODEGEN_ASSERT(lhs.size == SizeX64::qword);
 
     placeRex(lhs);
     place(OP_PLUS_REG(0xb8, lhs.index));
@@ -263,7 +263,7 @@ void AssemblyBuilderX64::movsx(RegisterX64 lhs, OperandX64 rhs)
     if (logText)
         log("movsx", lhs, rhs);
 
-    LUAU_ASSERT(rhs.memSize == SizeX64::byte || rhs.memSize == SizeX64::word);
+    CODEGEN_ASSERT(rhs.memSize == SizeX64::byte || rhs.memSize == SizeX64::word);
 
     placeRex(lhs, rhs);
     place(0x0f);
@@ -277,7 +277,7 @@ void AssemblyBuilderX64::movzx(RegisterX64 lhs, OperandX64 rhs)
     if (logText)
         log("movzx", lhs, rhs);
 
-    LUAU_ASSERT(rhs.memSize == SizeX64::byte || rhs.memSize == SizeX64::word);
+    CODEGEN_ASSERT(rhs.memSize == SizeX64::byte || rhs.memSize == SizeX64::word);
 
     placeRex(lhs, rhs);
     place(0x0f);
@@ -372,9 +372,9 @@ void AssemblyBuilderX64::lea(OperandX64 lhs, OperandX64 rhs)
     if (logText)
         log("lea", lhs, rhs);
 
-    LUAU_ASSERT(lhs.cat == CategoryX64::reg && rhs.cat == CategoryX64::mem && rhs.memSize == SizeX64::none);
-    LUAU_ASSERT(rhs.base == rip || rhs.base.size == lhs.base.size);
-    LUAU_ASSERT(rhs.index == noreg || rhs.index.size == lhs.base.size);
+    CODEGEN_ASSERT(lhs.cat == CategoryX64::reg && rhs.cat == CategoryX64::mem && rhs.memSize == SizeX64::none);
+    CODEGEN_ASSERT(rhs.base == rip || rhs.base.size == lhs.base.size);
+    CODEGEN_ASSERT(rhs.index == noreg || rhs.index.size == lhs.base.size);
     rhs.memSize = lhs.base.size;
     placeBinaryRegAndRegMem(lhs, rhs, 0x8d, 0x8d);
 }
@@ -384,7 +384,7 @@ void AssemblyBuilderX64::push(OperandX64 op)
     if (logText)
         log("push", op);
 
-    LUAU_ASSERT(op.cat == CategoryX64::reg && op.base.size == SizeX64::qword);
+    CODEGEN_ASSERT(op.cat == CategoryX64::reg && op.base.size == SizeX64::qword);
     placeRex(op.base);
     place(OP_PLUS_REG(0x50, op.base.index));
     commit();
@@ -395,7 +395,7 @@ void AssemblyBuilderX64::pop(OperandX64 op)
     if (logText)
         log("pop", op);
 
-    LUAU_ASSERT(op.cat == CategoryX64::reg && op.base.size == SizeX64::qword);
+    CODEGEN_ASSERT(op.cat == CategoryX64::reg && op.base.size == SizeX64::qword);
     placeRex(op.base);
     place(OP_PLUS_REG(0x58, op.base.index));
     commit();
@@ -413,7 +413,7 @@ void AssemblyBuilderX64::ret()
 void AssemblyBuilderX64::setcc(ConditionX64 cond, OperandX64 op)
 {
     SizeX64 size = op.cat == CategoryX64::reg ? op.base.size : op.memSize;
-    LUAU_ASSERT(size == SizeX64::byte);
+    CODEGEN_ASSERT(size == SizeX64::byte);
 
     if (logText)
         log(setccTextForCondition[size_t(cond)], op);
@@ -428,7 +428,7 @@ void AssemblyBuilderX64::setcc(ConditionX64 cond, OperandX64 op)
 void AssemblyBuilderX64::cmov(ConditionX64 cond, RegisterX64 lhs, OperandX64 rhs)
 {
     SizeX64 size = rhs.cat == CategoryX64::reg ? rhs.base.size : rhs.memSize;
-    LUAU_ASSERT(size != SizeX64::byte && size == lhs.size);
+    CODEGEN_ASSERT(size != SizeX64::byte && size == lhs.size);
 
     if (logText)
         log(cmovTextForCondition[size_t(cond)], lhs, rhs);
@@ -457,7 +457,7 @@ void AssemblyBuilderX64::jmp(Label& label)
 
 void AssemblyBuilderX64::jmp(OperandX64 op)
 {
-    LUAU_ASSERT((op.cat == CategoryX64::reg ? op.base.size : op.memSize) == SizeX64::qword);
+    CODEGEN_ASSERT((op.cat == CategoryX64::reg ? op.base.size : op.memSize) == SizeX64::qword);
 
     if (logText)
         log("jmp", op);
@@ -484,7 +484,7 @@ void AssemblyBuilderX64::call(Label& label)
 
 void AssemblyBuilderX64::call(OperandX64 op)
 {
-    LUAU_ASSERT((op.cat == CategoryX64::reg ? op.base.size : op.memSize) == SizeX64::qword);
+    CODEGEN_ASSERT((op.cat == CategoryX64::reg ? op.base.size : op.memSize) == SizeX64::qword);
 
     if (logText)
         log("call", op);
@@ -499,7 +499,7 @@ void AssemblyBuilderX64::call(OperandX64 op)
 
 void AssemblyBuilderX64::lea(RegisterX64 lhs, Label& label)
 {
-    LUAU_ASSERT(lhs.size == SizeX64::qword);
+    CODEGEN_ASSERT(lhs.size == SizeX64::qword);
 
     placeBinaryRegAndRegMem(lhs, OperandX64(SizeX64::qword, noreg, 1, rip, 0), 0x8d, 0x8d);
 
@@ -534,7 +534,7 @@ void AssemblyBuilderX64::bsr(RegisterX64 dst, OperandX64 src)
     if (logText)
         log("bsr", dst, src);
 
-    LUAU_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword);
+    CODEGEN_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword);
 
     placeRex(dst, src);
     place(0x0f);
@@ -548,7 +548,7 @@ void AssemblyBuilderX64::bsf(RegisterX64 dst, OperandX64 src)
     if (logText)
         log("bsf", dst, src);
 
-    LUAU_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword);
+    CODEGEN_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword);
 
     placeRex(dst, src);
     place(0x0f);
@@ -562,7 +562,7 @@ void AssemblyBuilderX64::bswap(RegisterX64 dst)
     if (logText)
         log("bswap", dst);
 
-    LUAU_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword);
+    CODEGEN_ASSERT(dst.size == SizeX64::dword || dst.size == SizeX64::qword);
 
     placeRex(dst);
     place(0x0f);
@@ -668,7 +668,7 @@ void AssemblyBuilderX64::nop(uint32_t length)
 
 void AssemblyBuilderX64::align(uint32_t alignment, AlignmentDataX64 data)
 {
-    LUAU_ASSERT((alignment & (alignment - 1)) == 0);
+    CODEGEN_ASSERT((alignment & (alignment - 1)) == 0);
 
     uint32_t size = getCodeSize();
     uint32_t pad = ((size + alignment - 1) & ~(alignment - 1)) - size;
@@ -814,9 +814,9 @@ void AssemblyBuilderX64::vcvtsi2sd(OperandX64 dst, OperandX64 src1, OperandX64 s
 void AssemblyBuilderX64::vcvtsd2ss(OperandX64 dst, OperandX64 src1, OperandX64 src2)
 {
     if (src2.cat == CategoryX64::reg)
-        LUAU_ASSERT(src2.base.size == SizeX64::xmmword);
+        CODEGEN_ASSERT(src2.base.size == SizeX64::xmmword);
     else
-        LUAU_ASSERT(src2.memSize == SizeX64::qword);
+        CODEGEN_ASSERT(src2.memSize == SizeX64::qword);
 
     placeAvx("vcvtsd2ss", dst, src1, src2, 0x5a, (src2.cat == CategoryX64::reg ? src2.base.size : src2.memSize) == SizeX64::qword, AVX_0F, AVX_F2);
 }
@@ -824,9 +824,9 @@ void AssemblyBuilderX64::vcvtsd2ss(OperandX64 dst, OperandX64 src1, OperandX64 s
 void AssemblyBuilderX64::vcvtss2sd(OperandX64 dst, OperandX64 src1, OperandX64 src2)
 {
     if (src2.cat == CategoryX64::reg)
-        LUAU_ASSERT(src2.base.size == SizeX64::xmmword);
+        CODEGEN_ASSERT(src2.base.size == SizeX64::xmmword);
     else
-        LUAU_ASSERT(src2.memSize == SizeX64::dword);
+        CODEGEN_ASSERT(src2.memSize == SizeX64::dword);
 
     placeAvx("vcvtsd2ss", dst, src1, src2, 0x5a, false, AVX_0F, AVX_F3);
 }
@@ -900,19 +900,19 @@ void AssemblyBuilderX64::vmovq(OperandX64 dst, OperandX64 src)
 {
     if (dst.base.size == SizeX64::xmmword)
     {
-        LUAU_ASSERT(dst.cat == CategoryX64::reg);
-        LUAU_ASSERT(src.base.size == SizeX64::qword);
+        CODEGEN_ASSERT(dst.cat == CategoryX64::reg);
+        CODEGEN_ASSERT(src.base.size == SizeX64::qword);
         placeAvx("vmovq", dst, src, 0x6e, true, AVX_0F, AVX_66);
     }
     else if (dst.base.size == SizeX64::qword)
     {
-        LUAU_ASSERT(src.cat == CategoryX64::reg);
-        LUAU_ASSERT(src.base.size == SizeX64::xmmword);
+        CODEGEN_ASSERT(src.cat == CategoryX64::reg);
+        CODEGEN_ASSERT(src.base.size == SizeX64::xmmword);
         placeAvx("vmovq", src, dst, 0x7e, true, AVX_0F, AVX_66);
     }
     else
     {
-        LUAU_ASSERT(!"No encoding for left operand of this category");
+        CODEGEN_ASSERT(!"No encoding for left operand of this category");
     }
 }
 
@@ -955,7 +955,7 @@ bool AssemblyBuilderX64::finalize()
     for (Label fixup : pendingLabels)
     {
         // If this assertion fires, a label was used in jmp without calling setLabel
-        LUAU_ASSERT(labelLocations[fixup.id - 1] != ~0u);
+        CODEGEN_ASSERT(labelLocations[fixup.id - 1] != ~0u);
         uint32_t value = labelLocations[fixup.id - 1] - (fixup.location + 4);
         writeu32(&code[fixup.location], value);
     }
@@ -1160,16 +1160,16 @@ void AssemblyBuilderX64::placeBinary(const char* name, OperandX64 lhs, OperandX6
     else if (lhs.cat == CategoryX64::mem && rhs.cat == CategoryX64::reg)
         placeBinaryRegMemAndReg(lhs, rhs, code8rev, coderev);
     else
-        LUAU_ASSERT(!"No encoding for this operand combination");
+        CODEGEN_ASSERT(!"No encoding for this operand combination");
 }
 
 void AssemblyBuilderX64::placeBinaryRegMemAndImm(OperandX64 lhs, OperandX64 rhs, uint8_t code8, uint8_t code, uint8_t codeImm8, uint8_t opreg)
 {
-    LUAU_ASSERT(lhs.cat == CategoryX64::reg || lhs.cat == CategoryX64::mem);
-    LUAU_ASSERT(rhs.cat == CategoryX64::imm);
+    CODEGEN_ASSERT(lhs.cat == CategoryX64::reg || lhs.cat == CategoryX64::mem);
+    CODEGEN_ASSERT(rhs.cat == CategoryX64::imm);
 
     SizeX64 size = lhs.cat == CategoryX64::reg ? lhs.base.size : lhs.memSize;
-    LUAU_ASSERT(size == SizeX64::byte || size == SizeX64::dword || size == SizeX64::qword);
+    CODEGEN_ASSERT(size == SizeX64::byte || size == SizeX64::dword || size == SizeX64::qword);
 
     placeRex(lhs);
 
@@ -1181,7 +1181,7 @@ void AssemblyBuilderX64::placeBinaryRegMemAndImm(OperandX64 lhs, OperandX64 rhs,
     }
     else
     {
-        LUAU_ASSERT(size == SizeX64::dword || size == SizeX64::qword);
+        CODEGEN_ASSERT(size == SizeX64::dword || size == SizeX64::qword);
 
         if (int8_t(rhs.imm) == rhs.imm && code != codeImm8)
        {
@@ -1202,11 +1202,11 @@ void AssemblyBuilderX64::placeBinaryRegMemAndImm(OperandX64 lhs, OperandX64 rhs,
 
 void AssemblyBuilderX64::placeBinaryRegAndRegMem(OperandX64 lhs, OperandX64 rhs, uint8_t code8, uint8_t code)
 {
-    LUAU_ASSERT(lhs.cat == CategoryX64::reg && (rhs.cat == CategoryX64::reg || rhs.cat == CategoryX64::mem));
-    LUAU_ASSERT(lhs.base.size == (rhs.cat == CategoryX64::reg ? rhs.base.size : rhs.memSize));
+    CODEGEN_ASSERT(lhs.cat == CategoryX64::reg && (rhs.cat == CategoryX64::reg || rhs.cat == CategoryX64::mem));
+    CODEGEN_ASSERT(lhs.base.size == (rhs.cat == CategoryX64::reg ? rhs.base.size : rhs.memSize));
 
     SizeX64 size = lhs.base.size;
-    LUAU_ASSERT(size == SizeX64::byte || size == SizeX64::word || size == SizeX64::dword || size == SizeX64::qword);
+    CODEGEN_ASSERT(size == SizeX64::byte || size == SizeX64::word || size == SizeX64::dword || size == SizeX64::qword);
 
     if (size == SizeX64::word)
         place(0x66);
@@ -1229,10 +1229,10 @@ void AssemblyBuilderX64::placeUnaryModRegMem(const char* name, OperandX64 op, ui
     if (logText)
         log(name, op);
 
-    LUAU_ASSERT(op.cat == CategoryX64::reg || op.cat == CategoryX64::mem);
+    CODEGEN_ASSERT(op.cat == CategoryX64::reg || op.cat == CategoryX64::mem);
 
     SizeX64 size = op.cat == CategoryX64::reg ? op.base.size : op.memSize;
-    LUAU_ASSERT(size == SizeX64::byte || size == SizeX64::dword || size == SizeX64::qword);
+    CODEGEN_ASSERT(size == SizeX64::byte || size == SizeX64::dword || size == SizeX64::qword);
 
     placeRex(op);
     place(size == SizeX64::byte ? code8 : code);
@@ -1246,8 +1246,8 @@ void AssemblyBuilderX64::placeShift(const char* name, OperandX64 lhs, OperandX64
     if (logText)
         log(name, lhs, rhs);
 
-    LUAU_ASSERT(lhs.cat == CategoryX64::reg || lhs.cat == CategoryX64::mem);
-    LUAU_ASSERT(rhs.cat == CategoryX64::imm || (rhs.cat == CategoryX64::reg && rhs.base == cl));
+    CODEGEN_ASSERT(lhs.cat == CategoryX64::reg || lhs.cat == CategoryX64::mem);
+    CODEGEN_ASSERT(rhs.cat == CategoryX64::imm || (rhs.cat == CategoryX64::reg && rhs.base == cl));
 
     SizeX64 size = lhs.base.size;
 
@@ -1260,7 +1260,7 @@ void AssemblyBuilderX64::placeShift(const char* name, OperandX64 lhs, OperandX64
     }
     else if (rhs.cat == CategoryX64::imm)
     {
-        LUAU_ASSERT(int8_t(rhs.imm) == rhs.imm);
+        CODEGEN_ASSERT(int8_t(rhs.imm) == rhs.imm);
 
         place(size == SizeX64::byte ? 0xc0 : 0xc1);
         placeModRegMem(lhs, opreg, /*extraCodeBytes=*/1);
@@ -1289,8 +1289,8 @@ void AssemblyBuilderX64::placeJcc(const char* name, Label& label, uint8_t cc)
 
 void AssemblyBuilderX64::placeAvx(const char* name, OperandX64 dst, OperandX64 src, uint8_t code, bool setW, uint8_t mode, uint8_t prefix)
 {
-    LUAU_ASSERT(dst.cat == CategoryX64::reg);
-    LUAU_ASSERT(src.cat == CategoryX64::reg || src.cat == CategoryX64::mem);
+    CODEGEN_ASSERT(dst.cat == CategoryX64::reg);
+    CODEGEN_ASSERT(src.cat == CategoryX64::reg || src.cat == CategoryX64::mem);
 
     if (logText)
         log(name, dst, src);
@@ -1305,7 +1305,7 @@ void AssemblyBuilderX64::placeAvx(const char* name, OperandX64 dst, OperandX64 s
 void AssemblyBuilderX64::placeAvx(
     const char* name, OperandX64 dst, OperandX64 src, uint8_t code, uint8_t coderev, bool setW, uint8_t mode, uint8_t prefix)
 {
-    LUAU_ASSERT((dst.cat == CategoryX64::mem && src.cat == CategoryX64::reg) || (dst.cat == CategoryX64::reg && src.cat == CategoryX64::mem));
+    CODEGEN_ASSERT((dst.cat == CategoryX64::mem && src.cat == CategoryX64::reg) || (dst.cat == CategoryX64::reg && src.cat == CategoryX64::mem));
 
     if (logText)
         log(name, dst, src);
@@ -1329,9 +1329,9 @@ void AssemblyBuilderX64::placeAvx(
 void AssemblyBuilderX64::placeAvx(
     const char* name, OperandX64 dst, OperandX64 src1, OperandX64 src2, uint8_t code, bool setW, uint8_t mode, uint8_t prefix)
 {
-    LUAU_ASSERT(dst.cat == CategoryX64::reg);
-    LUAU_ASSERT(src1.cat == CategoryX64::reg);
-    LUAU_ASSERT(src2.cat == CategoryX64::reg || src2.cat == CategoryX64::mem);
+    CODEGEN_ASSERT(dst.cat == CategoryX64::reg);
+    CODEGEN_ASSERT(src1.cat == CategoryX64::reg);
+    CODEGEN_ASSERT(src2.cat == CategoryX64::reg || src2.cat == CategoryX64::mem);
 
     if (logText)
         log(name, dst, src1, src2);
@@ -1346,9 +1346,9 @@ void AssemblyBuilderX64::placeAvx(
 void AssemblyBuilderX64::placeAvx(
     const char* name, OperandX64 dst, OperandX64 src1, OperandX64 src2, uint8_t imm8, uint8_t code, bool setW, uint8_t mode, uint8_t prefix)
 {
-    LUAU_ASSERT(dst.cat == CategoryX64::reg);
-    LUAU_ASSERT(src1.cat == CategoryX64::reg);
-    LUAU_ASSERT(src2.cat == CategoryX64::reg || src2.cat == CategoryX64::mem);
+    CODEGEN_ASSERT(dst.cat == CategoryX64::reg);
+    CODEGEN_ASSERT(src1.cat == CategoryX64::reg);
+    CODEGEN_ASSERT(src2.cat == CategoryX64::reg || src2.cat == CategoryX64::mem);
 
     if (logText)
         log(name, dst, src1, src2, imm8);
@@ -1378,7 +1378,7 @@ void AssemblyBuilderX64::placeRex(OperandX64 op)
     else if (op.cat == CategoryX64::mem)
         code = REX_W_BIT(op.memSize == SizeX64::qword) | REX_X(op.index) | REX_B(op.base);
     else
-        LUAU_ASSERT(!"No encoding for left operand of this category");
+        CODEGEN_ASSERT(!"No encoding for left operand of this category");
 
     if (code != 0)
         place(code | 0x40);
@@ -1393,7 +1393,7 @@ void AssemblyBuilderX64::placeRexNoW(OperandX64 op)
     else if (op.cat == CategoryX64::mem)
         code = REX_X(op.index) | REX_B(op.base);
     else
-        LUAU_ASSERT(!"No encoding for left operand of this category");
+        CODEGEN_ASSERT(!"No encoding for left operand of this category");
 
     if (code != 0)
         place(code | 0x40);
@@ -1414,9 +1414,9 @@ void AssemblyBuilderX64::placeRex(RegisterX64 lhs, OperandX64 rhs)
 
 void AssemblyBuilderX64::placeVex(OperandX64 dst, OperandX64 src1, OperandX64 src2, bool setW, uint8_t mode, uint8_t prefix)
 {
-    LUAU_ASSERT(dst.cat == CategoryX64::reg);
-    LUAU_ASSERT(src1.cat == CategoryX64::reg);
-    LUAU_ASSERT(src2.cat == CategoryX64::reg || src2.cat == CategoryX64::mem);
+    CODEGEN_ASSERT(dst.cat == CategoryX64::reg);
+    CODEGEN_ASSERT(src1.cat == CategoryX64::reg);
+    CODEGEN_ASSERT(src2.cat == CategoryX64::reg || src2.cat == CategoryX64::mem);
 
     place(AVX_3_1());
     place(AVX_3_2(dst.base, src2.index, src2.base, mode));
@@ -1427,13 +1427,13 @@ static uint8_t getScaleEncoding(uint8_t scale)
 {
     static const uint8_t scales[9] = {0xff, 0, 1, 0xff, 2, 0xff, 0xff, 0xff, 3};
 
-    LUAU_ASSERT(scale < 9 && scales[scale] != 0xff);
+    CODEGEN_ASSERT(scale < 9 && scales[scale] != 0xff);
     return scales[scale];
 }
 
 void AssemblyBuilderX64::placeRegAndModRegMem(OperandX64 lhs, OperandX64 rhs, int32_t extraCodeBytes)
 {
-    LUAU_ASSERT(lhs.cat == CategoryX64::reg);
+    CODEGEN_ASSERT(lhs.cat == CategoryX64::reg);
 
     placeModRegMem(rhs, lhs.base.index, extraCodeBytes);
 }
@@ -1481,8 +1481,8 @@ void AssemblyBuilderX64::placeModRegMem(OperandX64 rhs, uint8_t regop, int32_t e
     }
     else if ((base.index & 0x7) == 0b100) // r12/sp-based addressing requires SIB
     {
-        LUAU_ASSERT(rhs.scale == 1);
-        LUAU_ASSERT(index == noreg);
+        CODEGEN_ASSERT(rhs.scale == 1);
+        CODEGEN_ASSERT(index == noreg);
 
         place(MOD_RM(mod, regop, 0b100));
         place(SIB(rhs.scale, 0b100, base.index));
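
A note on the hunk just above: in x86-64 ModRM encoding, the base field value 0b100 does not name a register; it is the escape that announces a SIB byte. Since rsp and r12 both have 0b100 as their low three index bits, they can only serve as a base through SIB, even with no index register, which is why this path emits two bytes. Illustrative re-creations of the byte packers used at these call sites (their real definitions are not shown in the diff, so the shapes here are assumptions):

    #include <cstdint>

    uint8_t MOD_RM(uint8_t mod, uint8_t reg, uint8_t rm)
    {
        return uint8_t((mod << 6) | ((reg & 0x7) << 3) | (rm & 0x7));
    }

    uint8_t SIB(uint8_t scale, uint8_t index, uint8_t base)
    {
        // 1/2/4/8 -> 0/1/2/3, mirroring the getScaleEncoding table above
        uint8_t enc = scale == 1 ? 0 : scale == 2 ? 1 : scale == 4 ? 2 : 3;
        return uint8_t((enc << 6) | ((index & 0x7) << 3) | (base & 0x7));
    }
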
@@ -1516,7 +1516,7 @@ void AssemblyBuilderX64::placeModRegMem(OperandX64 rhs, uint8_t regop, int32_t e
     }
     else
     {
-        LUAU_ASSERT(!"No encoding for right operand of this category");
+        CODEGEN_ASSERT(!"No encoding for right operand of this category");
     }
 }
 
@@ -1540,21 +1540,21 @@ void AssemblyBuilderX64::placeImm8(int32_t imm)
 void AssemblyBuilderX64::placeImm16(int16_t imm)
 {
     uint8_t* pos = codePos;
-    LUAU_ASSERT(pos + sizeof(imm) < codeEnd);
+    CODEGEN_ASSERT(pos + sizeof(imm) < codeEnd);
     codePos = writeu16(pos, imm);
 }
 
 void AssemblyBuilderX64::placeImm32(int32_t imm)
 {
     uint8_t* pos = codePos;
-    LUAU_ASSERT(pos + sizeof(imm) < codeEnd);
+    CODEGEN_ASSERT(pos + sizeof(imm) < codeEnd);
     codePos = writeu32(pos, imm);
 }
 
 void AssemblyBuilderX64::placeImm64(int64_t imm)
 {
     uint8_t* pos = codePos;
-    LUAU_ASSERT(pos + sizeof(imm) < codeEnd);
+    CODEGEN_ASSERT(pos + sizeof(imm) < codeEnd);
     codePos = writeu64(pos, imm);
 }
 
@@ -1579,13 +1579,13 @@ void AssemblyBuilderX64::placeLabel(Label& label)
 
 void AssemblyBuilderX64::place(uint8_t byte)
 {
-    LUAU_ASSERT(codePos < codeEnd);
+    CODEGEN_ASSERT(codePos < codeEnd);
     *codePos++ = byte;
 }
 
 void AssemblyBuilderX64::commit()
 {
-    LUAU_ASSERT(codePos <= codeEnd);
+    CODEGEN_ASSERT(codePos <= codeEnd);
 
     ++instructionCount;
 
@@ -1604,7 +1604,7 @@ void AssemblyBuilderX64::extend()
 
 size_t AssemblyBuilderX64::allocateData(size_t size, size_t align)
 {
-    LUAU_ASSERT(align > 0 && align <= kMaxAlign && (align & (align - 1)) == 0);
+    CODEGEN_ASSERT(align > 0 && align <= kMaxAlign && (align & (align - 1)) == 0);
 
     if (dataPos < size)
     {
@@ -1732,7 +1732,7 @@ void AssemblyBuilderX64::log(OperandX64 op)
         logAppend("%Xh", op.imm);
         break;
     default:
-        LUAU_ASSERT(!"Unknown operand category");
+        CODEGEN_ASSERT(!"Unknown operand category");
     }
 }
 
@@ -1740,7 +1740,7 @@ const char* AssemblyBuilderX64::getSizeName(SizeX64 size) const
 {
     static const char* sizeNames[] = {"none", "byte", "word", "dword", "qword", "xmmword", "ymmword"};
 
-    LUAU_ASSERT(unsigned(size) < sizeof(sizeNames) / sizeof(sizeNames[0]));
+    CODEGEN_ASSERT(unsigned(size) < sizeof(sizeNames) / sizeof(sizeNames[0]));
     return sizeNames[unsigned(size)];
 }
 
@@ -1754,8 +1754,8 @@ const char* AssemblyBuilderX64::getRegisterName(RegisterX64 reg) const
         {"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"},
         {"ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7", "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15"}};
 
-    LUAU_ASSERT(reg.index < 16);
-    LUAU_ASSERT(reg.size <= SizeX64::ymmword);
+    CODEGEN_ASSERT(reg.index < 16);
+    CODEGEN_ASSERT(reg.size <= SizeX64::ymmword);
     return names[size_t(reg.size)][reg.index];
 }
@@ -7,8 +7,6 @@
 
 #include "lobject.h"
 
-LUAU_FASTFLAGVARIABLE(LuauFixDivrkInference, false)
-
 namespace Luau
 {
 namespace CodeGen
@@ -338,7 +336,7 @@ static void applyBuiltinCall(int bfid, BytecodeTypes& types)
 void buildBytecodeBlocks(IrFunction& function, const std::vector<uint8_t>& jumpTargets)
 {
     Proto* proto = function.proto;
-    LUAU_ASSERT(proto);
+    CODEGEN_ASSERT(proto);
 
     std::vector<BytecodeBlock>& bcBlocks = function.bcBlocks;
 
@@ -380,14 +378,14 @@ void buildBytecodeBlocks(IrFunction& function, const std::vector<uint8_t>& jumpT
 
         previ = i;
         i = nexti;
-        LUAU_ASSERT(i <= proto->sizecode);
+        CODEGEN_ASSERT(i <= proto->sizecode);
     }
 }
 
 void analyzeBytecodeTypes(IrFunction& function)
 {
     Proto* proto = function.proto;
-    LUAU_ASSERT(proto);
+    CODEGEN_ASSERT(proto);
 
     // Setup our current knowledge of type tags based on arguments
     uint8_t regTags[256];
@@ -398,8 +396,8 @@ void analyzeBytecodeTypes(IrFunction& function)
     // Now that we have VM basic blocks, we can attempt to track register type tags locally
     for (const BytecodeBlock& block : function.bcBlocks)
     {
-        LUAU_ASSERT(block.startpc != -1);
-        LUAU_ASSERT(block.finishpc != -1);
+        CODEGEN_ASSERT(block.startpc != -1);
+        CODEGEN_ASSERT(block.finishpc != -1);
 
         // At the block start, reset our knowledge to the starting state
         // In the future we might be able to propagate some info between the blocks as well
@@ -682,23 +680,11 @@ void analyzeBytecodeTypes(IrFunction& function)
         case LOP_DIVRK:
         {
             int ra = LUAU_INSN_A(*pc);
 
-            if (FFlag::LuauFixDivrkInference)
-            {
-                int kb = LUAU_INSN_B(*pc);
-                int rc = LUAU_INSN_C(*pc);
-
-                bcType.a = getBytecodeConstantTag(proto, kb);
-                bcType.b = regTags[rc];
-            }
-            else
-            {
-                int rb = LUAU_INSN_B(*pc);
-                int kc = LUAU_INSN_C(*pc);
-
-                bcType.a = regTags[rb];
-                bcType.b = getBytecodeConstantTag(proto, kc);
-            }
+            int kb = LUAU_INSN_B(*pc);
+            int rc = LUAU_INSN_C(*pc);
+
+            bcType.a = getBytecodeConstantTag(proto, kb);
+            bcType.b = regTags[rc];
 
             regTags[ra] = LBC_TYPE_ANY;
 
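
For context on the DIVRK hunk: DIVRK is the division form with the constant on the left-hand side, so operand B indexes the constant table (the dividend) and operand C names a register (the divisor). With the flag off, the old path decoded B as a register and C as a constant, the convention of the other *K arithmetic opcodes, so both operand types were mislabeled; the sync deletes the flag and keeps only the corrected decoding. The surviving path, re-quoted with comments (names as in the hunk; the operand roles are inferred from it):

    int ra = LUAU_INSN_A(*pc); // destination register
    int kb = LUAU_INSN_B(*pc); // constant-table index: the dividend of K / R
    int rc = LUAU_INSN_C(*pc); // register operand: the divisor

    bcType.a = getBytecodeConstantTag(proto, kb); // operand types now match operand roles
    bcType.b = regTags[rc];
    regTags[ra] = LBC_TYPE_ANY; // the division result is not narrowed further
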
@@ -771,7 +757,7 @@ void analyzeBytecodeTypes(IrFunction& function)
             int skip = LUAU_INSN_C(*pc);
 
             Instruction call = pc[skip + 1];
-            LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
+            CODEGEN_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
             int ra = LUAU_INSN_A(call);
 
             applyBuiltinCall(bfid, bcType);
@@ -788,7 +774,7 @@ void analyzeBytecodeTypes(IrFunction& function)
             int skip = LUAU_INSN_C(*pc);
 
             Instruction call = pc[skip + 1];
-            LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
+            CODEGEN_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
             int ra = LUAU_INSN_A(call);
 
             applyBuiltinCall(bfid, bcType);
@@ -803,7 +789,7 @@ void analyzeBytecodeTypes(IrFunction& function)
             int skip = LUAU_INSN_C(*pc);
 
             Instruction call = pc[skip + 1];
-            LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
+            CODEGEN_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
             int ra = LUAU_INSN_A(call);
 
             applyBuiltinCall(bfid, bcType);
@@ -886,7 +872,7 @@ void analyzeBytecodeTypes(IrFunction& function)
         case LOP_FORGPREP:
             break;
        default:
-            LUAU_ASSERT(!"Unknown instruction");
+            CODEGEN_ASSERT(!"Unknown instruction");
         }
 
         i += getOpLength(op);
@@ -50,7 +50,7 @@ FunctionBytecodeSummary FunctionBytecodeSummary::fromProto(Proto* proto, unsigne
 
 std::vector<FunctionBytecodeSummary> summarizeBytecode(lua_State* L, int idx, unsigned nestingLimit)
 {
-    LUAU_ASSERT(lua_isLfunction(L, idx));
+    CODEGEN_ASSERT(lua_isLfunction(L, idx));
     const TValue* func = luaA_toobject(L, idx);
 
     Proto* root = clvalue(func)->l.p;
@@ -1,7 +1,7 @@
 // This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
 #include "Luau/CodeAllocator.h"
 
-#include "Luau/Common.h"
+#include "Luau/CodeGenCommon.h"
 
 #include <string.h>
 
@@ -35,40 +35,40 @@ static size_t alignToPageSize(size_t size)
 #if defined(_WIN32)
 static uint8_t* allocatePagesImpl(size_t size)
 {
-    LUAU_ASSERT(size == alignToPageSize(size));
+    CODEGEN_ASSERT(size == alignToPageSize(size));
 
     return (uint8_t*)VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
 }
 
 static void freePagesImpl(uint8_t* mem, size_t size)
 {
-    LUAU_ASSERT(size == alignToPageSize(size));
+    CODEGEN_ASSERT(size == alignToPageSize(size));
 
     if (VirtualFree(mem, 0, MEM_RELEASE) == 0)
-        LUAU_ASSERT(!"failed to deallocate block memory");
+        CODEGEN_ASSERT(!"failed to deallocate block memory");
 }
 
 static void makePagesExecutable(uint8_t* mem, size_t size)
 {
-    LUAU_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0);
-    LUAU_ASSERT(size == alignToPageSize(size));
+    CODEGEN_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0);
+    CODEGEN_ASSERT(size == alignToPageSize(size));
 
     DWORD oldProtect;
     if (VirtualProtect(mem, size, PAGE_EXECUTE_READ, &oldProtect) == 0)
-        LUAU_ASSERT(!"Failed to change page protection");
+        CODEGEN_ASSERT(!"Failed to change page protection");
 }
 
 static void flushInstructionCache(uint8_t* mem, size_t size)
 {
 #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP | WINAPI_PARTITION_SYSTEM)
     if (FlushInstructionCache(GetCurrentProcess(), mem, size) == 0)
-        LUAU_ASSERT(!"Failed to flush instruction cache");
+        CODEGEN_ASSERT(!"Failed to flush instruction cache");
 #endif
 }
 #else
 static uint8_t* allocatePagesImpl(size_t size)
 {
-    LUAU_ASSERT(size == alignToPageSize(size));
+    CODEGEN_ASSERT(size == alignToPageSize(size));
 
 #ifdef __APPLE__
     void* result = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON | MAP_JIT, -1, 0);
@@ -81,19 +81,19 @@ static uint8_t* allocatePagesImpl(size_t size)
 
 static void freePagesImpl(uint8_t* mem, size_t size)
 {
-    LUAU_ASSERT(size == alignToPageSize(size));
+    CODEGEN_ASSERT(size == alignToPageSize(size));
 
     if (munmap(mem, size) != 0)
-        LUAU_ASSERT(!"Failed to deallocate block memory");
+        CODEGEN_ASSERT(!"Failed to deallocate block memory");
 }
 
 static void makePagesExecutable(uint8_t* mem, size_t size)
 {
-    LUAU_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0);
-    LUAU_ASSERT(size == alignToPageSize(size));
+    CODEGEN_ASSERT((uintptr_t(mem) & (kPageSize - 1)) == 0);
+    CODEGEN_ASSERT(size == alignToPageSize(size));
 
     if (mprotect(mem, size, PROT_READ | PROT_EXEC) != 0)
-        LUAU_ASSERT(!"Failed to change page protection");
+        CODEGEN_ASSERT(!"Failed to change page protection");
 }
 
 static void flushInstructionCache(uint8_t* mem, size_t size)
@@ -118,8 +118,8 @@ CodeAllocator::CodeAllocator(size_t blockSize, size_t maxTotalSize, AllocationCa
     , allocationCallback{allocationCallback}
     , allocationCallbackContext{allocationCallbackContext}
 {
-    LUAU_ASSERT(blockSize > kMaxReservedDataSize);
-    LUAU_ASSERT(maxTotalSize >= blockSize);
+    CODEGEN_ASSERT(blockSize > kMaxReservedDataSize);
+    CODEGEN_ASSERT(maxTotalSize >= blockSize);
 }
 
 CodeAllocator::~CodeAllocator()
@@ -154,10 +154,10 @@ bool CodeAllocator::allocate(
         if (!allocateNewBlock(startOffset))
            return false;
 
-        LUAU_ASSERT(totalSize <= size_t(blockEnd - blockPos));
+        CODEGEN_ASSERT(totalSize <= size_t(blockEnd - blockPos));
     }
 
-    LUAU_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0); // Allocation starts on page boundary
+    CODEGEN_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0); // Allocation starts on page boundary
 
     size_t dataOffset = startOffset + alignedDataSize - dataSize;
     size_t codeOffset = startOffset + alignedDataSize;
@@ -182,8 +182,8 @@ bool CodeAllocator::allocate(
     if (pageAlignedSize <= size_t(blockEnd - blockPos))
     {
         blockPos += pageAlignedSize;
-        LUAU_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0);
-        LUAU_ASSERT(blockPos <= blockEnd);
+        CODEGEN_ASSERT((uintptr_t(blockPos) & (kPageSize - 1)) == 0);
+        CODEGEN_ASSERT(blockPos <= blockEnd);
     }
     else
     {
@@ -217,7 +217,7 @@ bool CodeAllocator::allocateNewBlock(size_t& unwindInfoSize)
     // 'Round up' to preserve alignment of the following data and code
     unwindInfoSize = (unwindInfoSize + (kCodeAlignment - 1)) & ~(kCodeAlignment - 1);
 
-    LUAU_ASSERT(unwindInfoSize <= kMaxReservedDataSize);
+    CODEGEN_ASSERT(unwindInfoSize <= kMaxReservedDataSize);
 
     if (!unwindInfo)
         return false;
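A note on the rounding idiom used above in allocateNewBlock() (and again in createBlockUnwindInfo() below): for a power-of-two alignment A, the expression (x + (A - 1)) & ~(A - 1) rounds x up to the next multiple of A. A minimal self-contained sketch; the page size constant here is illustrative rather than queried from the OS:

    #include <cstddef>
    #include <cstdio>

    // Round size up to the next multiple of a power-of-two alignment.
    // Same bit trick as alignToPageSize() and the kCodeAlignment round-up above.
    static size_t alignUp(size_t size, size_t alignment)
    {
        return (size + (alignment - 1)) & ~(alignment - 1);
    }

    int main()
    {
        const size_t kPageSize = 4096; // illustrative value
        std::printf("%zu\n", alignUp(1, kPageSize));    // 4096
        std::printf("%zu\n", alignUp(4096, kPageSize)); // 4096
        std::printf("%zu\n", alignUp(4097, kPageSize)); // 8192
        return 0;
    }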
@@ -20,8 +20,8 @@
 #elif defined(__linux__) || defined(__APPLE__)
 
 // Defined in unwind.h which may not be easily discoverable on various platforms
-extern "C" void __register_frame(const void*);
-extern "C" void __deregister_frame(const void*);
+extern "C" void __register_frame(const void*) __attribute__((weak));
+extern "C" void __deregister_frame(const void*) __attribute__((weak));
 
 extern "C" void __unw_add_dynamic_fde() __attribute__((weak));
 #endif
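The __attribute__((weak)) annotations added above are what make the null checks introduced later in this file meaningful: under GCC and Clang, a weak declaration that no linked object defines resolves to a null address instead of failing the link, so the availability of __register_frame/__deregister_frame can be probed at runtime. This is how the fix addresses the linking issues on platforms without unwind frame data registration mentioned in the commit message. A minimal sketch of the pattern; maybe_provided is a hypothetical symbol, not part of any real library:

    // GCC/Clang only: a weak declaration that nothing defines has a null address.
    extern "C" void maybe_provided(const void*) __attribute__((weak));

    bool tryCall(const void* arg)
    {
        if (!maybe_provided) // the function's address compares null when the symbol is absent
            return false;

        maybe_provided(arg);
        return true;
    }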
@@ -104,7 +104,7 @@ void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, siz
     // All unwinding related data is placed together at the start of the block
     size_t unwindSize = unwind->getSize();
     unwindSize = (unwindSize + (kCodeAlignment - 1)) & ~(kCodeAlignment - 1); // Match code allocator alignment
-    LUAU_ASSERT(blockSize >= unwindSize);
+    CODEGEN_ASSERT(blockSize >= unwindSize);
 
     char* unwindData = (char*)block;
     unwind->finalize(unwindData, unwindSize, block, blockSize);
@@ -112,10 +112,13 @@ void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, siz
 #if defined(_WIN32) && defined(_M_X64)
     if (!RtlAddFunctionTable((RUNTIME_FUNCTION*)block, uint32_t(unwind->getFunctionCount()), uintptr_t(block)))
     {
-        LUAU_ASSERT(!"Failed to allocate function table");
+        CODEGEN_ASSERT(!"Failed to allocate function table");
         return nullptr;
     }
 #elif defined(__linux__) || defined(__APPLE__)
+    if (!__register_frame)
+        return nullptr;
+
     visitFdeEntries(unwindData, __register_frame);
 #endif
 
@@ -125,7 +128,7 @@ void* createBlockUnwindInfo(void* context, uint8_t* block, size_t blockSize, siz
     static unw_add_find_dynamic_unwind_sections_t unw_add_find_dynamic_unwind_sections =
         unw_add_find_dynamic_unwind_sections_t(dlsym(RTLD_DEFAULT, "__unw_add_find_dynamic_unwind_sections"));
     static int regonce = unw_add_find_dynamic_unwind_sections ? unw_add_find_dynamic_unwind_sections(findDynamicUnwindSections) : 0;
-    LUAU_ASSERT(regonce == 0);
+    CODEGEN_ASSERT(regonce == 0);
 #endif
 
     beginOffset = unwindSize + unwind->getBeginOffset();
@@ -136,8 +139,14 @@ void destroyBlockUnwindInfo(void* context, void* unwindData)
 {
 #if defined(_WIN32) && defined(_M_X64)
     if (!RtlDeleteFunctionTable((RUNTIME_FUNCTION*)unwindData))
-        LUAU_ASSERT(!"Failed to deallocate function table");
+        CODEGEN_ASSERT(!"Failed to deallocate function table");
 #elif defined(__linux__) || defined(__APPLE__)
+    if (!__deregister_frame)
+    {
+        CODEGEN_ASSERT(!"Cannot deregister unwind information");
+        return;
+    }
+
     visitFdeEntries((char*)unwindData, __deregister_frame);
 #endif
 }
@@ -45,13 +45,13 @@ LUAU_FASTFLAGVARIABLE(DebugCodegenOptSize, false)
 LUAU_FASTFLAGVARIABLE(DebugCodegenSkipNumbering, false)
 
 // Per-module IR instruction count limit
 LUAU_FASTINTVARIABLE(CodegenHeuristicsInstructionLimit, 1'048'576) // 1 M
 
 // Per-function IR block limit
 // Current value is based on some member variables being limited to 16 bits
 // Because block check is made before optimization passes and optimization can generate new blocks, limit is lowered 2x
 // The limit will probably be adjusted in the future to avoid performance issues with analysis that's more complex than O(n)
 LUAU_FASTINTVARIABLE(CodegenHeuristicsBlockLimit, 32'768) // 32 K
 
 // Per-function IR instruction limit
 // Current value is based on some member variables being limited to 16 bits
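The 32 K figure above follows directly from the two comments before it: block indices stored in 16-bit member variables allow at most 2^16 = 65,536 blocks, and because the check runs before optimization passes that can create additional blocks, the limit is halved to 65,536 / 2 = 32,768 to leave headroom.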
@@ -85,7 +85,7 @@ static NativeProto createNativeProto(Proto* proto, const IrBuilder& ir)
 
     for (int i = 0; i < sizecode; i++)
     {
-        LUAU_ASSERT(ir.function.bcMapping[i].asmLocation >= instTarget);
+        CODEGEN_ASSERT(ir.function.bcMapping[i].asmLocation >= instTarget);
 
         instOffsets[i] = ir.function.bcMapping[i].asmLocation - instTarget;
     }
@@ -104,7 +104,7 @@ static void destroyExecData(void* execdata)
 
 static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size)
 {
-    LUAU_ASSERT(p->source);
+    CODEGEN_ASSERT(p->source);
 
     const char* source = getstr(p->source);
     source = (source[0] == '=' || source[0] == '@') ? source + 1 : "[string]";
@@ -117,7 +117,8 @@ static void logPerfFunction(Proto* p, uintptr_t addr, unsigned size)
 }
 
 template<typename AssemblyBuilder>
-static std::optional<NativeProto> createNativeFunction(AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto, uint32_t& totalIrInstCount)
+static std::optional<NativeProto> createNativeFunction(
+    AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto, uint32_t& totalIrInstCount, CodeGenCompilationResult& result)
 {
     IrBuilder ir;
     ir.buildFunctionIr(proto);
@@ -125,11 +126,13 @@ static std::optional<NativeProto> createNativeFunction(AssemblyBuilder& build, M
     unsigned instCount = unsigned(ir.function.instructions.size());
 
     if (totalIrInstCount + instCount >= unsigned(FInt::CodegenHeuristicsInstructionLimit.value))
+    {
+        result = CodeGenCompilationResult::CodeGenOverflowInstructionLimit;
         return std::nullopt;
+    }
     totalIrInstCount += instCount;
 
-    if (!lowerFunction(ir, build, helpers, proto, {}, /* stats */ nullptr))
+    if (!lowerFunction(ir, build, helpers, proto, {}, /* stats */ nullptr, result))
         return std::nullopt;
 
     return createNativeProto(proto, ir);
@@ -158,8 +161,8 @@ static int onEnter(lua_State* L, Proto* proto)
 {
     NativeState* data = getNativeState(L);
 
-    LUAU_ASSERT(proto->execdata);
-    LUAU_ASSERT(L->ci->savedpc >= proto->code && L->ci->savedpc < proto->code + proto->sizecode);
+    CODEGEN_ASSERT(proto->execdata);
+    CODEGEN_ASSERT(L->ci->savedpc >= proto->code && L->ci->savedpc < proto->code + proto->sizecode);
 
     uintptr_t target = proto->exectarget + static_cast<uint32_t*>(proto->execdata)[L->ci->savedpc - proto->code];
 
@@ -266,7 +269,7 @@ bool isSupported()
 
 void create(lua_State* L, AllocationCallback* allocationCallback, void* allocationCallbackContext)
 {
-    LUAU_ASSERT(isSupported());
+    CODEGEN_ASSERT(isSupported());
 
     std::unique_ptr<NativeState> data = std::make_unique<NativeState>(allocationCallback, allocationCallbackContext);
 
@@ -309,12 +312,13 @@ void create(lua_State* L)
 
 CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, CompilationStats* stats)
 {
-    LUAU_ASSERT(lua_isLfunction(L, idx));
+    CODEGEN_ASSERT(lua_isLfunction(L, idx));
     const TValue* func = luaA_toobject(L, idx);
 
     Proto* root = clvalue(func)->l.p;
 
     if ((flags & CodeGen_OnlyNativeModules) != 0 && (root->flags & LPF_NATIVE_MODULE) == 0)
-        return CodeGenCompilationResult::NothingToCompile;
+        return CodeGenCompilationResult::NotNativeModule;
 
     // If initialization has failed, do not compile any functions
     NativeState* data = getNativeState(L);
@@ -334,6 +338,9 @@ CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, Comp
     if (protos.empty())
         return CodeGenCompilationResult::NothingToCompile;
 
+    if (stats != nullptr)
+        stats->functionsTotal = uint32_t(protos.size());
+
 #if defined(__aarch64__)
     static unsigned int cpuFeatures = getCpuFeaturesA64();
     A64::AssemblyBuilderA64 build(/* logText= */ false, cpuFeatures);
@@ -353,10 +360,19 @@ CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, Comp
 
     uint32_t totalIrInstCount = 0;
 
+    CodeGenCompilationResult codeGenCompilationResult = CodeGenCompilationResult::Success;
+
     for (Proto* p : protos)
     {
-        if (std::optional<NativeProto> np = createNativeFunction(build, helpers, p, totalIrInstCount))
+        // If compiling a proto fails, we want to propagate the failure via codeGenCompilationResult
+        // If multiple compilations fail, we only use the failure from the first unsuccessful compilation.
+        CodeGenCompilationResult temp = CodeGenCompilationResult::Success;
+
+        if (std::optional<NativeProto> np = createNativeFunction(build, helpers, p, totalIrInstCount, temp))
            results.push_back(*np);
+        // second compilation failure onwards, this condition fails and codeGenCompilationResult is not assigned.
+        else if (codeGenCompilationResult == CodeGenCompilationResult::Success)
+            codeGenCompilationResult = temp;
     }
 
     // Very large modules might result in overflowing a jump offset; in this case we currently abandon the entire module
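The compile() loop above implements a first-failure-wins policy: each proto reports into the scratch temp, and codeGenCompilationResult is assigned only while it still holds Success, so later failures cannot mask the first one. A minimal standalone sketch of the same pattern; the Status enum and compileOne() are illustrative stand-ins, not the real CodeGen types:

    #include <vector>

    enum class Status { Success, OverflowLimit, LoweringFailure };

    // Illustrative stand-in for createNativeFunction(): returns false on failure
    // and reports the cause through an out-parameter.
    static bool compileOne(int item, Status& out)
    {
        if (item < 0) // illustrative failure condition
        {
            out = Status::LoweringFailure;
            return false;
        }
        return true;
    }

    Status compileAll(const std::vector<int>& items)
    {
        Status first = Status::Success;

        for (int item : items)
        {
            Status temp = Status::Success;

            if (!compileOne(item, temp) && first == Status::Success)
                first = temp; // keep only the first unsuccessful result
        }

        return first;
    }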
@@ -365,12 +381,15 @@ CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, Comp
         for (NativeProto result : results)
             destroyExecData(result.execdata);
 
-        return CodeGenCompilationResult::CodeGenFailed;
+        return CodeGenCompilationResult::CodeGenAssemblerFinalizationFailure;
     }
 
     // If no functions were assembled, we don't need to allocate/copy executable pages for helpers
     if (results.empty())
-        return CodeGenCompilationResult::CodeGenFailed;
+    {
+        LUAU_ASSERT(codeGenCompilationResult != CodeGenCompilationResult::Success);
+        return codeGenCompilationResult;
+    }
 
     uint8_t* nativeData = nullptr;
     size_t sizeNativeData = 0;
@@ -392,7 +411,7 @@ CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, Comp
     {
         uint32_t begin = uint32_t(results[i].exectarget);
         uint32_t end = i + 1 < results.size() ? uint32_t(results[i + 1].exectarget) : uint32_t(build.code.size() * sizeof(build.code[0]));
-        LUAU_ASSERT(begin < end);
+        CODEGEN_ASSERT(begin < end);
 
         logPerfFunction(results[i].p, uintptr_t(codeStart) + begin, end - begin);
     }
@@ -421,7 +440,7 @@ CodeGenCompilationResult compile(lua_State* L, int idx, unsigned int flags, Comp
         stats->nativeDataSizeBytes += build.data.size();
     }
 
-    return CodeGenCompilationResult::Success;
+    return codeGenCompilationResult;
 }
 
 void setPerfLog(void* context, PerfLogFn logFn)
@@ -103,7 +103,7 @@ static void emitContinueCall(AssemblyBuilderA64& build, ModuleHelpers& helpers)
 
     // If the fallback yielded, we need to do this right away
     // note: it's slightly cheaper to check x0 LSB; a valid Closure pointer must be aligned to 8 bytes
-    LUAU_ASSERT(CALL_FALLBACK_YIELD == 1);
+    CODEGEN_ASSERT(CALL_FALLBACK_YIELD == 1);
     build.tbnz(x0, 0, helpers.exitNoContinueVm);
 
     // Need to update state of the current function before we jump away
@@ -114,7 +114,7 @@ static void emitContinueCall(AssemblyBuilderA64& build, ModuleHelpers& helpers)
 
     build.mov(rClosure, x0);
 
-    LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
+    CODEGEN_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
     build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code
 
     build.br(x2);
@@ -178,7 +178,7 @@ void emitReturn(AssemblyBuilderA64& build, ModuleHelpers& helpers)
 
     build.ldr(x1, mem(rClosure, offsetof(Closure, l.p))); // cl->l.p aka proto
 
-    LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
+    CODEGEN_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
     build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code
 
     // Get instruction index from instruction pointer
@@ -188,7 +188,7 @@ void emitReturn(AssemblyBuilderA64& build, ModuleHelpers& helpers)
     build.sub(x2, x2, rCode);
 
     // Get new instruction location and jump to it
-    LUAU_ASSERT(offsetof(Proto, exectarget) == offsetof(Proto, execdata) + 8);
+    CODEGEN_ASSERT(offsetof(Proto, exectarget) == offsetof(Proto, execdata) + 8);
     build.ldp(x3, x4, mem(x1, offsetof(Proto, execdata)));
     build.ldr(w2, mem(x3, x2));
     build.add(x4, x4, x2);
@@ -226,7 +226,7 @@ static EntryLocations buildEntryFunction(AssemblyBuilderA64& build, UnwindBuilde
 
     build.ldr(rBase, mem(x0, offsetof(lua_State, base))); // L->base
 
-    LUAU_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
+    CODEGEN_ASSERT(offsetof(Proto, code) == offsetof(Proto, k) + 8);
     build.ldp(rConstants, rCode, mem(x1, offsetof(Proto, k))); // proto->k, proto->code
 
     build.ldr(x9, mem(x0, offsetof(lua_State, ci))); // L->ci
@@ -270,13 +270,13 @@ bool initHeaderFunctions(NativeState& data)
 
     unwind.finishInfo();
 
-    LUAU_ASSERT(build.data.empty());
+    CODEGEN_ASSERT(build.data.empty());
 
     uint8_t* codeStart = nullptr;
     if (!data.codeAllocator.allocate(build.data.data(), int(build.data.size()), reinterpret_cast<const uint8_t*>(build.code.data()),
             int(build.code.size() * sizeof(build.code[0])), data.gateData, data.gateDataSize, codeStart))
     {
-        LUAU_ASSERT(!"Failed to create entry function");
+        CODEGEN_ASSERT(!"Failed to create entry function");
         return false;
     }
 
@@ -100,7 +100,9 @@ static std::string getAssemblyImpl(AssemblyBuilder& build, const TValue* func, A
         if (options.includeAssembly || options.includeIr)
             logFunctionHeader(build, p);
 
-        if (!lowerFunction(ir, build, helpers, p, options, stats))
+        CodeGenCompilationResult result = CodeGenCompilationResult::Success;
+
+        if (!lowerFunction(ir, build, helpers, p, options, stats, result))
         {
             if (build.logText)
                 build.logAppend("; skipping (can't lower)\n");
@@ -154,7 +156,7 @@ unsigned int getCpuFeaturesA64();
 
 std::string getAssembly(lua_State* L, int idx, AssemblyOptions options, LoweringStats* stats)
 {
-    LUAU_ASSERT(lua_isLfunction(L, idx));
+    CODEGEN_ASSERT(lua_isLfunction(L, idx));
     const TValue* func = luaA_toobject(L, idx);
 
     switch (options.target)
@@ -200,7 +202,7 @@ std::string getAssembly(lua_State* L, int idx, AssemblyOptions options, Lowering
     }
 
     default:
-        LUAU_ASSERT(!"Unknown target");
+        CODEGEN_ASSERT(!"Unknown target");
         return std::string();
     }
 }
@@ -85,7 +85,7 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
     dummy.start = ~0u;
 
     // Make sure entry block is first
-    LUAU_ASSERT(sortedBlocks[0] == 0);
+    CODEGEN_ASSERT(sortedBlocks[0] == 0);
 
     for (size_t i = 0; i < sortedBlocks.size(); ++i)
     {
@@ -95,8 +95,8 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
         if (block.kind == IrBlockKind::Dead)
             continue;
 
-        LUAU_ASSERT(block.start != ~0u);
-        LUAU_ASSERT(block.finish != ~0u);
+        CODEGEN_ASSERT(block.start != ~0u);
+        CODEGEN_ASSERT(block.finish != ~0u);
 
         // If we want to skip fallback code IR/asm, we'll record when those blocks start once we see them
         if (block.kind == IrBlockKind::Fallback && !seenFallback)
@@ -129,11 +129,11 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
         // Optimizations often propagate information between blocks
         // To make sure the register and spill state is correct when blocks are lowered, we check that sorted block order matches the expected one
         if (block.expectedNextBlock != ~0u)
-            LUAU_ASSERT(function.getBlockIndex(nextBlock) == block.expectedNextBlock);
+            CODEGEN_ASSERT(function.getBlockIndex(nextBlock) == block.expectedNextBlock);
 
         for (uint32_t index = block.start; index <= block.finish; index++)
         {
-            LUAU_ASSERT(index < function.instructions.size());
+            CODEGEN_ASSERT(index < function.instructions.size());
 
             uint32_t bcLocation = bcLocations[index];
 
@@ -165,12 +165,12 @@ inline bool lowerImpl(AssemblyBuilder& build, IrLowering& lowering, IrFunction&
             // This also prevents them from getting into text output when that's enabled
             if (isPseudo(inst.cmd))
             {
-                LUAU_ASSERT(inst.useCount == 0);
+                CODEGEN_ASSERT(inst.useCount == 0);
                 continue;
             }
 
             // Either instruction result value is not referenced or the use count is not zero
-            LUAU_ASSERT(inst.lastUse == 0 || inst.useCount != 0);
+            CODEGEN_ASSERT(inst.lastUse == 0 || inst.useCount != 0);
 
             if (options.includeIr)
             {
@@ -246,7 +246,8 @@ inline bool lowerIr(A64::AssemblyBuilderA64& build, IrBuilder& ir, const std::ve
 }
 
 template<typename AssemblyBuilder>
-inline bool lowerFunction(IrBuilder& ir, AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options, LoweringStats* stats)
+inline bool lowerFunction(IrBuilder& ir, AssemblyBuilder& build, ModuleHelpers& helpers, Proto* proto, AssemblyOptions options, LoweringStats* stats,
+    CodeGenCompilationResult& codeGenCompilationResult)
 {
     killUnusedBlocks(ir.function);
 
@@ -269,10 +270,16 @@ inline bool lowerFunction(IrBuilder& ir, AssemblyBuilder& build, ModuleHelpers&
     }
 
     if (preOptBlockCount >= unsigned(FInt::CodegenHeuristicsBlockLimit.value))
+    {
+        codeGenCompilationResult = CodeGenCompilationResult::CodeGenOverflowBlockLimit;
         return false;
+    }
 
     if (maxBlockInstructions >= unsigned(FInt::CodegenHeuristicsBlockInstructionLimit.value))
+    {
+        codeGenCompilationResult = CodeGenCompilationResult::CodeGenOverflowBlockInstructionLimit;
         return false;
+    }
 
     computeCfgInfo(ir.function);
 
@@ -318,7 +325,12 @@ inline bool lowerFunction(IrBuilder& ir, AssemblyBuilder& build, ModuleHelpers&
         }
     }
 
-    return lowerIr(build, ir, sortedBlocks, helpers, proto, options, stats);
+    bool result = lowerIr(build, ir, sortedBlocks, helpers, proto, options, stats);
+
+    if (!result)
+        codeGenCompilationResult = CodeGenCompilationResult::CodeGenLoweringFailure;
+
+    return result;
 }
 
 } // namespace CodeGen
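A design note on the lowerFunction() change above and its callers (createNativeFunction() and getAssemblyImpl() earlier in this diff): the function keeps its bool return value, so existing callers still branch on success exactly as before, while the specific cause now travels through the CodeGenCompilationResult out-parameter. Only failure paths assign to it, which is why every caller initializes its local result to Success before the call.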
@@ -198,13 +198,13 @@ bool initHeaderFunctions(NativeState& data)
 
     unwind.finishInfo();
 
-    LUAU_ASSERT(build.data.empty());
+    CODEGEN_ASSERT(build.data.empty());
 
     uint8_t* codeStart = nullptr;
     if (!data.codeAllocator.allocate(
             build.data.data(), int(build.data.size()), build.code.data(), int(build.code.size()), data.gateData, data.gateDataSize, codeStart))
     {
-        LUAU_ASSERT(!"Failed to create entry function");
+        CODEGEN_ASSERT(!"Failed to create entry function");
         return false;
     }
 
@@ -81,16 +81,16 @@ void emitBuiltin(IrRegAllocX64& regs, AssemblyBuilderX64& build, int bfid, int r
     switch (bfid)
     {
     case LBF_MATH_FREXP:
-        LUAU_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
+        CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
         return emitBuiltinMathFrexp(regs, build, ra, arg, nresults);
     case LBF_MATH_MODF:
-        LUAU_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
+        CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
         return emitBuiltinMathModf(regs, build, ra, arg, nresults);
     case LBF_MATH_SIGN:
-        LUAU_ASSERT(nparams == 1 && nresults == 1);
+        CODEGEN_ASSERT(nparams == 1 && nresults == 1);
         return emitBuiltinMathSign(regs, build, ra, arg);
     default:
-        LUAU_ASSERT(!"Missing x64 lowering");
+        CODEGEN_ASSERT(!"Missing x64 lowering");
     }
 }
 
@@ -73,7 +73,7 @@ void jumpOnNumberCmp(AssemblyBuilderX64& build, RegisterX64 tmp, OperandX64 lhs,
         build.jcc(ConditionX64::Parity, label);
         break;
     default:
-        LUAU_ASSERT(!"Unsupported condition");
+        CODEGEN_ASSERT(!"Unsupported condition");
     }
 }
 
@@ -110,15 +110,15 @@ ConditionX64 getConditionInt(IrCondition cond)
     case IrCondition::UnsignedGreaterEqual:
         return ConditionX64::AboveEqual;
     default:
-        LUAU_ASSERT(!"Unsupported condition");
+        CODEGEN_ASSERT(!"Unsupported condition");
         return ConditionX64::Zero;
     }
 }
 
 void getTableNodeAtCachedSlot(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 node, RegisterX64 table, int pcpos)
 {
-    LUAU_ASSERT(tmp != node);
-    LUAU_ASSERT(table != node);
+    CODEGEN_ASSERT(tmp != node);
+    CODEGEN_ASSERT(table != node);
 
     build.mov(node, qword[table + offsetof(Table, node)]);
 
@@ -134,7 +134,7 @@ void getTableNodeAtCachedSlot(AssemblyBuilderX64& build, RegisterX64 tmp, Regist
 
 void convertNumberToIndexOrJump(AssemblyBuilderX64& build, RegisterX64 tmp, RegisterX64 numd, RegisterX64 numi, Label& label)
 {
-    LUAU_ASSERT(numi.size == SizeX64::dword);
+    CODEGEN_ASSERT(numi.size == SizeX64::dword);
 
     // Convert to integer, NaN is converted into 0x80000000
     build.vcvttsd2si(numi, numd);
@@ -73,7 +73,7 @@ inline unsigned getNonVolXmmStorageSize(ABIX64 abi, uint8_t xmmRegCount)
     if (xmmRegCount <= kWindowsFirstNonVolXmmReg)
         return 0;
 
-    LUAU_ASSERT(xmmRegCount <= 16);
+    CODEGEN_ASSERT(xmmRegCount <= 16);
     return (xmmRegCount - kWindowsFirstNonVolXmmReg) * 16;
 }
 
@@ -160,7 +160,7 @@ inline OperandX64 luauNodeKeyTag(RegisterX64 node)
 
 inline void setLuauReg(AssemblyBuilderX64& build, RegisterX64 tmp, int ri, OperandX64 op)
 {
-    LUAU_ASSERT(op.cat == CategoryX64::mem);
+    CODEGEN_ASSERT(op.cat == CategoryX64::mem);
 
     build.vmovups(tmp, op);
     build.vmovups(luauReg(ri), tmp);
@@ -296,7 +296,7 @@ void emitInstSetList(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int
         build.jcc(ConditionX64::NotBelow, skipResize);
 
         // Argument setup reordered to avoid conflicts
-        LUAU_ASSERT(rArg3 != table);
+        CODEGEN_ASSERT(rArg3 != table);
         build.mov(dwordReg(rArg3), last);
         build.mov(rArg2, table);
         build.mov(rArg1, rState);
@@ -324,7 +324,7 @@ void emitInstSetList(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int
     }
     else
     {
-        LUAU_ASSERT(count != 0);
+        CODEGEN_ASSERT(count != 0);
 
         build.xor_(offset, offset);
         if (index != 1)
@@ -359,7 +359,7 @@ void emitInstSetList(IrRegAllocX64& regs, AssemblyBuilderX64& build, int ra, int
 void emitInstForGLoop(AssemblyBuilderX64& build, int ra, int aux, Label& loopRepeat)
 {
     // ipairs-style traversal is handled in IR
-    LUAU_ASSERT(aux >= 0);
+    CODEGEN_ASSERT(aux >= 0);
 
     // TODO: This should use IrCallWrapperX64
     RegisterX64 rArg1 = (build.abi == ABIX64::Windows) ? rcx : rdi;
@@ -33,13 +33,13 @@ void updateUseCounts(IrFunction& function)
         if (op.kind == IrOpKind::Inst)
         {
             IrInst& target = instructions[op.index];
-            LUAU_ASSERT(target.useCount < 0xffff);
+            CODEGEN_ASSERT(target.useCount < 0xffff);
             target.useCount++;
         }
         else if (op.kind == IrOpKind::Block)
         {
             IrBlock& target = blocks[op.index];
-            LUAU_ASSERT(target.useCount < 0xffff);
+            CODEGEN_ASSERT(target.useCount < 0xffff);
             target.useCount++;
         }
     };
@@ -59,10 +59,10 @@ void updateLastUseLocations(IrFunction& function, const std::vector<uint32_t>& s
 {
     std::vector<IrInst>& instructions = function.instructions;
 
-#if defined(LUAU_ASSERTENABLED)
+#if defined(CODEGEN_ASSERTENABLED)
     // Last use assignements should be called only once
     for (IrInst& inst : instructions)
-        LUAU_ASSERT(inst.lastUse == 0);
+        CODEGEN_ASSERT(inst.lastUse == 0);
 #endif
 
     for (size_t i = 0; i < sortedBlocks.size(); ++i)
@@ -73,12 +73,12 @@ void updateLastUseLocations(IrFunction& function, const std::vector<uint32_t>& s
         if (block.kind == IrBlockKind::Dead)
             continue;
 
-        LUAU_ASSERT(block.start != ~0u);
-        LUAU_ASSERT(block.finish != ~0u);
+        CODEGEN_ASSERT(block.start != ~0u);
+        CODEGEN_ASSERT(block.finish != ~0u);
 
         for (uint32_t instIdx = block.start; instIdx <= block.finish; instIdx++)
         {
-            LUAU_ASSERT(instIdx < function.instructions.size());
+            CODEGEN_ASSERT(instIdx < function.instructions.size());
             IrInst& inst = instructions[instIdx];
 
             auto checkOp = [&](IrOp op) {
@@ -101,7 +101,7 @@ void updateLastUseLocations(IrFunction& function, const std::vector<uint32_t>& s
 
 uint32_t getNextInstUse(IrFunction& function, uint32_t targetInstIdx, uint32_t startInstIdx)
 {
-    LUAU_ASSERT(startInstIdx < function.instructions.size());
+    CODEGEN_ASSERT(startInstIdx < function.instructions.size());
     IrInst& targetInst = function.instructions[targetInstIdx];
 
     for (uint32_t i = startInstIdx; i <= targetInst.lastUse; i++)
@@ -131,7 +131,7 @@ uint32_t getNextInstUse(IrFunction& function, uint32_t targetInstIdx, uint32_t s
     }
 
     // There must be a next use since there is the last use location
-    LUAU_ASSERT(!"Failed to find next use");
+    CODEGEN_ASSERT(!"Failed to find next use");
     return targetInst.lastUse;
 }
 
@@ -188,7 +188,7 @@ void requireVariadicSequence(RegisterSet& sourceRs, const RegisterSet& defRs, ui
         while (defRs.regs.test(varargStart))
             varargStart++;
 
-        LUAU_ASSERT(!sourceRs.varargSeq || sourceRs.varargStart == varargStart);
+        CODEGEN_ASSERT(!sourceRs.varargSeq || sourceRs.varargStart == varargStart);
 
         sourceRs.varargSeq = true;
         sourceRs.varargStart = varargStart;
@@ -381,7 +381,7 @@ static void computeCfgLiveInOutRegSets(IrFunction& function)
             if (curr.kind != IrBlockKind::Fallback && succ.kind == IrBlockKind::Fallback)
            {
                 // If this is the only successor, this skip will not be valid
-                LUAU_ASSERT(successorsIt.size() != 1);
+                CODEGEN_ASSERT(successorsIt.size() != 1);
                 continue;
             }
 
@@ -391,7 +391,7 @@ static void computeCfgLiveInOutRegSets(IrFunction& function)
 
             if (succRs.varargSeq)
             {
-                LUAU_ASSERT(!outRs.varargSeq || outRs.varargStart == succRs.varargStart);
+                CODEGEN_ASSERT(!outRs.varargSeq || outRs.varargStart == succRs.varargStart);
 
                 outRs.varargSeq = true;
                 outRs.varargStart = succRs.varargStart;
@@ -426,10 +426,10 @@ static void computeCfgLiveInOutRegSets(IrFunction& function)
     {
         RegisterSet& entryIn = info.in[0];
 
-        LUAU_ASSERT(!entryIn.varargSeq);
+        CODEGEN_ASSERT(!entryIn.varargSeq);
 
         for (size_t i = 0; i < entryIn.regs.size(); i++)
-            LUAU_ASSERT(!entryIn.regs.test(i) || i < function.proto->numparams);
+            CODEGEN_ASSERT(!entryIn.regs.test(i) || i < function.proto->numparams);
     }
 }
 
@@ -509,7 +509,7 @@ void computeBlockOrdering(
 {
     CfgInfo& info = function.cfg;
 
-    LUAU_ASSERT(info.idoms.size() == function.blocks.size());
+    CODEGEN_ASSERT(info.idoms.size() == function.blocks.size());
 
     ordering.clear();
     ordering.resize(function.blocks.size());
@@ -582,13 +582,13 @@ static uint32_t findCommonDominator(const std::vector<uint32_t>& idoms, const st
         while (data[a].postOrder < data[b].postOrder)
         {
            a = idoms[a];
-            LUAU_ASSERT(a != ~0u);
+            CODEGEN_ASSERT(a != ~0u);
         }
 
         while (data[b].postOrder < data[a].postOrder)
        {
             b = idoms[b];
-            LUAU_ASSERT(b != ~0u);
+            CODEGEN_ASSERT(b != ~0u);
         }
     }
 
@@ -707,10 +707,10 @@ void computeCfgDominanceTreeChildren(IrFunction& function)
 void computeIteratedDominanceFrontierForDefs(
     IdfContext& ctx, const IrFunction& function, const std::vector<uint32_t>& defBlocks, const std::vector<uint32_t>& liveInBlocks)
 {
-    LUAU_ASSERT(!function.cfg.domOrdering.empty());
+    CODEGEN_ASSERT(!function.cfg.domOrdering.empty());
 
-    LUAU_ASSERT(ctx.queue.empty());
-    LUAU_ASSERT(ctx.worklist.empty());
+    CODEGEN_ASSERT(ctx.queue.empty());
+    CODEGEN_ASSERT(ctx.worklist.empty());
 
     ctx.idf.clear();
 
@@ -728,7 +728,7 @@ void computeIteratedDominanceFrontierForDefs(
         IdfContext::BlockAndOrdering root = ctx.queue.top();
         ctx.queue.pop();
 
-        LUAU_ASSERT(ctx.worklist.empty());
+        CODEGEN_ASSERT(ctx.worklist.empty());
         ctx.worklist.push_back(root.blockIdx);
         ctx.visits[root.blockIdx].seenInWorklist = true;
 
@@ -785,7 +785,7 @@ void computeCfgInfo(IrFunction& function)
 
 BlockIteratorWrapper predecessors(const CfgInfo& cfg, uint32_t blockIdx)
 {
-    LUAU_ASSERT(blockIdx < cfg.predecessorsOffsets.size());
+    CODEGEN_ASSERT(blockIdx < cfg.predecessorsOffsets.size());
 
     uint32_t start = cfg.predecessorsOffsets[blockIdx];
     uint32_t end = blockIdx + 1 < cfg.predecessorsOffsets.size() ? cfg.predecessorsOffsets[blockIdx + 1] : uint32_t(cfg.predecessors.size());
@@ -795,7 +795,7 @@ BlockIteratorWrapper predecessors(const CfgInfo& cfg, uint32_t blockIdx)
 
 BlockIteratorWrapper successors(const CfgInfo& cfg, uint32_t blockIdx)
 {
-    LUAU_ASSERT(blockIdx < cfg.successorsOffsets.size());
+    CODEGEN_ASSERT(blockIdx < cfg.successorsOffsets.size());
 
     uint32_t start = cfg.successorsOffsets[blockIdx];
     uint32_t end = blockIdx + 1 < cfg.successorsOffsets.size() ? cfg.successorsOffsets[blockIdx + 1] : uint32_t(cfg.successors.size());
@@ -805,7 +805,7 @@ BlockIteratorWrapper successors(const CfgInfo& cfg, uint32_t blockIdx)
 
 BlockIteratorWrapper domChildren(const CfgInfo& cfg, uint32_t blockIdx)
 {
-    LUAU_ASSERT(blockIdx < cfg.domChildrenOffsets.size());
+    CODEGEN_ASSERT(blockIdx < cfg.domChildrenOffsets.size());
 
     uint32_t start = cfg.domChildrenOffsets[blockIdx];
     uint32_t end = blockIdx + 1 < cfg.domChildrenOffsets.size() ? cfg.domChildrenOffsets[blockIdx + 1] : uint32_t(cfg.domChildren.size());
|
@ -32,7 +32,7 @@ static bool hasTypedParameters(Proto* proto)
|
|||||||
|
|
||||||
static void buildArgumentTypeChecks(IrBuilder& build, Proto* proto)
|
static void buildArgumentTypeChecks(IrBuilder& build, Proto* proto)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(hasTypedParameters(proto));
|
CODEGEN_ASSERT(hasTypedParameters(proto));
|
||||||
|
|
||||||
for (int i = 0; i < proto->numparams; ++i)
|
for (int i = 0; i < proto->numparams; ++i)
|
||||||
{
|
{
|
||||||
@ -145,7 +145,7 @@ void IrBuilder::buildFunctionIr(Proto* proto)
|
|||||||
LuauOpcode op = LuauOpcode(LUAU_INSN_OP(*pc));
|
LuauOpcode op = LuauOpcode(LUAU_INSN_OP(*pc));
|
||||||
|
|
||||||
int nexti = i + getOpLength(op);
|
int nexti = i + getOpLength(op);
|
||||||
LUAU_ASSERT(nexti <= proto->sizecode);
|
CODEGEN_ASSERT(nexti <= proto->sizecode);
|
||||||
|
|
||||||
function.bcMapping[i] = {uint32_t(function.instructions.size()), ~0u};
|
function.bcMapping[i] = {uint32_t(function.instructions.size()), ~0u};
|
||||||
|
|
||||||
@ -181,7 +181,7 @@ void IrBuilder::buildFunctionIr(Proto* proto)
|
|||||||
afterInstForNLoop(*this, pc);
|
afterInstForNLoop(*this, pc);
|
||||||
|
|
||||||
i = nexti;
|
i = nexti;
|
||||||
LUAU_ASSERT(i <= proto->sizecode);
|
CODEGEN_ASSERT(i <= proto->sizecode);
|
||||||
|
|
||||||
// If we are going into a new block at the next instruction and it's a fallthrough, jump has to be placed to mark block termination
|
// If we are going into a new block at the next instruction and it's a fallthrough, jump has to be placed to mark block termination
|
||||||
if (i < int(instIndexToBlock.size()) && instIndexToBlock[i] != kNoAssociatedBlockIndex)
|
if (i < int(instIndexToBlock.size()) && instIndexToBlock[i] != kNoAssociatedBlockIndex)
|
||||||
@ -213,7 +213,7 @@ void IrBuilder::rebuildBytecodeBasicBlocks(Proto* proto)
|
|||||||
jumpTargets[target] = true;
|
jumpTargets[target] = true;
|
||||||
|
|
||||||
i += getOpLength(op);
|
i += getOpLength(op);
|
||||||
LUAU_ASSERT(i <= proto->sizecode);
|
CODEGEN_ASSERT(i <= proto->sizecode);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bytecode blocks are created at bytecode jump targets and the start of a function
|
// Bytecode blocks are created at bytecode jump targets and the start of a function
|
||||||
@ -521,7 +521,7 @@ void IrBuilder::translateInst(LuauOpcode op, const Instruction* pc, int i)
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
LUAU_ASSERT(!"Unknown instruction");
|
CODEGEN_ASSERT(!"Unknown instruction");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -556,7 +556,7 @@ void IrBuilder::beginBlock(IrOp block)
|
|||||||
IrBlock& target = function.blocks[block.index];
|
IrBlock& target = function.blocks[block.index];
|
||||||
activeBlockIdx = block.index;
|
activeBlockIdx = block.index;
|
||||||
|
|
||||||
LUAU_ASSERT(target.start == ~0u || target.start == uint32_t(function.instructions.size()));
|
CODEGEN_ASSERT(target.start == ~0u || target.start == uint32_t(function.instructions.size()));
|
||||||
|
|
||||||
target.start = uint32_t(function.instructions.size());
|
target.start = uint32_t(function.instructions.size());
|
||||||
target.sortkey = target.start;
|
target.sortkey = target.start;
|
||||||
@ -579,7 +579,7 @@ void IrBuilder::clone(const IrBlock& source, bool removeCurrentTerminator)
|
|||||||
if (const uint32_t* newIndex = instRedir.find(op.index))
|
if (const uint32_t* newIndex = instRedir.find(op.index))
|
||||||
op.index = *newIndex;
|
op.index = *newIndex;
|
||||||
else
|
else
|
||||||
LUAU_ASSERT(!"Values can only be used if they are defined in the same block");
|
CODEGEN_ASSERT(!"Values can only be used if they are defined in the same block");
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -594,13 +594,13 @@ void IrBuilder::clone(const IrBlock& source, bool removeCurrentTerminator)
|
|||||||
|
|
||||||
for (uint32_t index = source.start; index <= source.finish; index++)
|
for (uint32_t index = source.start; index <= source.finish; index++)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(index < function.instructions.size());
|
CODEGEN_ASSERT(index < function.instructions.size());
|
||||||
IrInst clone = function.instructions[index];
|
IrInst clone = function.instructions[index];
|
||||||
|
|
||||||
// Skip pseudo instructions to make clone more compact, but validate that they have no users
|
// Skip pseudo instructions to make clone more compact, but validate that they have no users
|
||||||
if (isPseudo(clone.cmd))
|
if (isPseudo(clone.cmd))
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(clone.useCount == 0);
|
CODEGEN_ASSERT(clone.useCount == 0);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -723,7 +723,7 @@ IrOp IrBuilder::inst(IrCmd cmd, IrOp a, IrOp b, IrOp c, IrOp d, IrOp e, IrOp f)
|
|||||||
uint32_t index = uint32_t(function.instructions.size());
|
uint32_t index = uint32_t(function.instructions.size());
|
||||||
function.instructions.push_back({cmd, a, b, c, d, e, f});
|
function.instructions.push_back({cmd, a, b, c, d, e, f});
|
||||||
|
|
||||||
LUAU_ASSERT(!inTerminatedBlock);
|
CODEGEN_ASSERT(!inTerminatedBlock);
|
||||||
|
|
||||||
if (isBlockTerminator(cmd))
|
if (isBlockTerminator(cmd))
|
||||||
{
|
{
|
||||||
|
@ -38,9 +38,9 @@ IrCallWrapperX64::IrCallWrapperX64(IrRegAllocX64& regs, AssemblyBuilderX64& buil
|
|||||||
void IrCallWrapperX64::addArgument(SizeX64 targetSize, OperandX64 source, IrOp sourceOp)
|
void IrCallWrapperX64::addArgument(SizeX64 targetSize, OperandX64 source, IrOp sourceOp)
|
||||||
{
|
{
|
||||||
// Instruction operands rely on current instruction index for lifetime tracking
|
// Instruction operands rely on current instruction index for lifetime tracking
|
||||||
LUAU_ASSERT(instIdx != kInvalidInstIdx || sourceOp.kind == IrOpKind::None);
|
CODEGEN_ASSERT(instIdx != kInvalidInstIdx || sourceOp.kind == IrOpKind::None);
|
||||||
|
|
||||||
LUAU_ASSERT(argCount < kMaxCallArguments);
|
CODEGEN_ASSERT(argCount < kMaxCallArguments);
|
||||||
CallArgument& arg = args[argCount++];
|
CallArgument& arg = args[argCount++];
|
||||||
arg = {targetSize, source, sourceOp};
|
arg = {targetSize, source, sourceOp};
|
||||||
|
|
||||||
@ -142,11 +142,11 @@ void IrCallWrapperX64::call(const OperandX64& func)
|
|||||||
if (CallArgument* candidate = findNonInterferingArgument())
|
if (CallArgument* candidate = findNonInterferingArgument())
|
||||||
{
|
{
|
||||||
// This section is only for handling register targets
|
// This section is only for handling register targets
|
||||||
LUAU_ASSERT(candidate->target.cat == CategoryX64::reg);
|
CODEGEN_ASSERT(candidate->target.cat == CategoryX64::reg);
|
||||||
|
|
||||||
freeSourceRegisters(*candidate);
|
freeSourceRegisters(*candidate);
|
||||||
|
|
||||||
LUAU_ASSERT(getRegisterUses(candidate->target.base) == 0);
|
CODEGEN_ASSERT(getRegisterUses(candidate->target.base) == 0);
|
||||||
regs.takeReg(candidate->target.base, kInvalidInstIdx);
|
regs.takeReg(candidate->target.base, kInvalidInstIdx);
|
||||||
|
|
||||||
moveToTarget(*candidate);
|
moveToTarget(*candidate);
|
||||||
@ -161,7 +161,7 @@ void IrCallWrapperX64::call(const OperandX64& func)
|
|||||||
else
|
else
|
||||||
{
|
{
|
||||||
for (int i = 0; i < argCount; ++i)
|
for (int i = 0; i < argCount; ++i)
|
||||||
LUAU_ASSERT(!args[i].candidate);
|
CODEGEN_ASSERT(!args[i].candidate);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -225,13 +225,13 @@ OperandX64 IrCallWrapperX64::getNextArgumentTarget(SizeX64 size) const
|
|||||||
{
|
{
|
||||||
if (size == SizeX64::xmmword)
|
if (size == SizeX64::xmmword)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(size_t(xmmPos) < kXmmOrder.size());
|
CODEGEN_ASSERT(size_t(xmmPos) < kXmmOrder.size());
|
||||||
return kXmmOrder[xmmPos];
|
return kXmmOrder[xmmPos];
|
||||||
}
|
}
|
||||||
|
|
||||||
const std::array<OperandX64, 6>& gprOrder = build.abi == ABIX64::Windows ? kWindowsGprOrder : kSystemvGprOrder;
|
const std::array<OperandX64, 6>& gprOrder = build.abi == ABIX64::Windows ? kWindowsGprOrder : kSystemvGprOrder;
|
||||||
|
|
||||||
LUAU_ASSERT(size_t(gprPos) < gprOrder.size());
|
CODEGEN_ASSERT(size_t(gprPos) < gprOrder.size());
|
||||||
OperandX64 target = gprOrder[gprPos];
|
OperandX64 target = gprOrder[gprPos];
|
||||||
|
|
||||||
// Keep requested argument size
|
// Keep requested argument size
|
||||||
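getNextArgumentTarget above hands out argument homes in ABI order, picking between the two tables depending on the target convention. For reference, the integer-argument register sequences behind those tables (the real kWindowsGprOrder/kSystemvGprOrder entries are OperandX64 values and likely also cover the stack slots Windows uses past the fourth argument; these plain arrays are only illustrative):

// Windows x64 passes the first four integer arguments in registers;
// System V (Linux/macOS) passes the first six.
static const RegisterX64 kWindowsGprOrderSketch[] = {rcx, rdx, r8, r9};
static const RegisterX64 kSystemvGprOrderSketch[] = {rdi, rsi, rdx, rcx, r8, r9};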
@@ -416,7 +416,7 @@ void IrCallWrapperX64::removeRegisterUse(RegisterX64 reg)
 {
 if (reg.size == SizeX64::xmmword)
 {
-LUAU_ASSERT(xmmUses[reg.index] != 0);
+CODEGEN_ASSERT(xmmUses[reg.index] != 0);
 xmmUses[reg.index]--;
 
 if (xmmUses[reg.index] == 0) // we don't use persistent xmm regs so no need to call shouldFreeRegister
@@ -424,7 +424,7 @@ void IrCallWrapperX64::removeRegisterUse(RegisterX64 reg)
 }
 else if (reg.size != SizeX64::none)
 {
-LUAU_ASSERT(gprUses[reg.index] != 0);
+CODEGEN_ASSERT(gprUses[reg.index] != 0);
 gprUses[reg.index]--;
 
 if (gprUses[reg.index] == 0 && regs.shouldFreeGpr(reg))
@@ -70,7 +70,7 @@ static const char* getTagName(uint8_t tag)
 case LUA_TDEADKEY:
 return "tdeadkey";
 default:
-LUAU_ASSERT(!"Unknown type tag");
+CODEGEN_ASSERT(!"Unknown type tag");
 LUAU_UNREACHABLE();
 }
 }
@@ -429,7 +429,7 @@ void toString(IrToStringContext& ctx, IrOp op)
 toString(ctx.result, ctx.constants[op.index]);
 break;
 case IrOpKind::Condition:
-LUAU_ASSERT(op.index < uint32_t(IrCondition::Count));
+CODEGEN_ASSERT(op.index < uint32_t(IrCondition::Count));
 ctx.result.append(textForCondition[op.index]);
 break;
 case IrOpKind::Inst:
@@ -506,7 +506,7 @@ const char* getBytecodeTypeName(uint8_t type)
 return "any";
 }
 
-LUAU_ASSERT(!"Unhandled type in getBytecodeTypeName");
+CODEGEN_ASSERT(!"Unhandled type in getBytecodeTypeName");
 return nullptr;
 }
 
@@ -568,7 +568,7 @@ static RegisterSet getJumpTargetExtraLiveIn(IrToStringContext& ctx, const IrBloc
 const RegisterSet& defRs = ctx.cfg.in[blockIdx];
 
 // Find first block argument, for guard instructions (isNonTerminatingJump), that's the first and only one
-LUAU_ASSERT(isNonTerminatingJump(inst.cmd));
+CODEGEN_ASSERT(isNonTerminatingJump(inst.cmd));
 IrOp op = inst.a;
 
 if (inst.b.kind == IrOpKind::Block)
@@ -55,7 +55,7 @@ inline ConditionA64 getConditionFP(IrCondition cond)
 return ConditionA64::Less;
 
 default:
-LUAU_ASSERT(!"Unexpected condition code");
+CODEGEN_ASSERT(!"Unexpected condition code");
 return ConditionA64::Always;
 }
 }
@@ -107,15 +107,15 @@ inline ConditionA64 getConditionInt(IrCondition cond)
 return ConditionA64::CarrySet;
 
 default:
-LUAU_ASSERT(!"Unexpected condition code");
+CODEGEN_ASSERT(!"Unexpected condition code");
 return ConditionA64::Always;
 }
 }
 
 static void emitAddOffset(AssemblyBuilderA64& build, RegisterA64 dst, RegisterA64 src, size_t offset)
 {
-LUAU_ASSERT(dst != src);
-LUAU_ASSERT(offset <= INT_MAX);
+CODEGEN_ASSERT(dst != src);
+CODEGEN_ASSERT(offset <= INT_MAX);
 
 if (offset <= AssemblyBuilderA64::kMaxImmediate)
 {
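The visible branch of emitAddOffset handles offsets that fit the A64 add-immediate encoding directly; the dst != src assertion exists for the other branch, where the constant has to be materialized first. A sketch of that likely else path (an assumption, since only the if is shown in this hunk):

    // offset doesn't fit the 12-bit immediate: build it in dst, then add the base.
    // dst != src guarantees the mov doesn't clobber the base register.
    build.mov(dst, int(offset));
    build.add(dst, dst, src);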
@@ -186,7 +186,7 @@ static void emitFallback(AssemblyBuilderA64& build, int offset, int pcpos)
 
 static void emitInvokeLibm1P(AssemblyBuilderA64& build, size_t func, int arg)
 {
-LUAU_ASSERT(kTempSlots >= 1);
+CODEGEN_ASSERT(kTempSlots >= 1);
 build.ldr(d0, mem(rBase, arg * sizeof(TValue) + offsetof(TValue, value.n)));
 build.add(x0, sp, sTemporary.data); // sp-relative offset
 build.ldr(x1, mem(rNativeContext, uint32_t(func)));
@@ -199,7 +199,7 @@ static bool emitBuiltin(
 switch (bfid)
 {
 case LBF_MATH_FREXP:
-LUAU_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
+CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
 emitInvokeLibm1P(build, offsetof(NativeContext, libm_frexp), arg);
 build.str(d0, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.n)));
 if (nresults == 2)
@@ -210,7 +210,7 @@ static bool emitBuiltin(
 }
 return true;
 case LBF_MATH_MODF:
-LUAU_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
+CODEGEN_ASSERT(nparams == 1 && (nresults == 1 || nresults == 2));
 emitInvokeLibm1P(build, offsetof(NativeContext, libm_modf), arg);
 build.ldr(d1, sTemporary);
 build.str(d1, mem(rBase, res * sizeof(TValue) + offsetof(TValue, value.n)));
@@ -218,7 +218,7 @@ static bool emitBuiltin(
 build.str(d0, mem(rBase, (res + 1) * sizeof(TValue) + offsetof(TValue, value.n)));
 return true;
 case LBF_MATH_SIGN:
-LUAU_ASSERT(nparams == 1 && nresults == 1);
+CODEGEN_ASSERT(nparams == 1 && nresults == 1);
 build.ldr(d0, mem(rBase, arg * sizeof(TValue) + offsetof(TValue, value.n)));
 build.fcmpz(d0);
 build.fmov(d0, 0.0);
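The LBF_MATH_SIGN sequence continues past the hunk boundary with conditional selects; in scalar form, what the fcmpz-based lowering computes is simply (a sketch of the intended semantics):

// math.sign: one comparison against zero, then select -1, 0, or 1.
double mathSign(double x)
{
    return x > 0.0 ? 1.0 : x < 0.0 ? -1.0 : 0.0; // sign(0) and sign(NaN) are 0
}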
@@ -230,7 +230,7 @@ static bool emitBuiltin(
 return true;
 
 default:
-LUAU_ASSERT(!"Missing A64 lowering");
+CODEGEN_ASSERT(!"Missing A64 lowering");
 return false;
 }
 }
@@ -342,7 +342,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 }
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 break;
 }
 case IrCmd::GET_SLOT_NODE_ADDR:
@@ -363,7 +363,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 
 // C field can be shifted as long as it's at the most significant byte of the instruction word
-LUAU_ASSERT(kOffsetOfInstructionC == 3);
+CODEGEN_ASSERT(kOffsetOfInstructionC == 3);
 build.ldrb(temp2, mem(regOp(inst.a), offsetof(Table, nodemask8)));
 build.and_(temp2, temp2, temp1w, -24);
 
@@ -419,7 +419,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 AddressA64 addr = tempAddr(inst.a, offsetof(TValue, value));
 if (inst.b.kind == IrOpKind::Constant)
 {
-LUAU_ASSERT(intOp(inst.b) == 0);
+CODEGEN_ASSERT(intOp(inst.b) == 0);
 build.str(xzr, addr);
 }
 else
@@ -479,7 +479,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 RegisterA64 temp4 = regs.allocTemp(KindA64::s);
 
 AddressA64 addr = tempAddr(inst.a, offsetof(TValue, value));
-LUAU_ASSERT(addr.kind == AddressKindA64::imm && addr.data % 4 == 0 && unsigned(addr.data + 8) / 4 <= AddressA64::kMaxOffset);
+CODEGEN_ASSERT(addr.kind == AddressKindA64::imm && addr.data % 4 == 0 && unsigned(addr.data + 8) / 4 <= AddressA64::kMaxOffset);
 
 build.fcvt(temp4, temp1);
 build.str(temp4, AddressA64(addr.base, addr.data + 0));
@@ -512,7 +512,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 if (inst.c.kind == IrOpKind::Constant)
 {
 // note: we reuse tag temp register as value for true booleans, and use built-in zero register for false values
-LUAU_ASSERT(LUA_TBOOLEAN == 1);
+CODEGEN_ASSERT(LUA_TBOOLEAN == 1);
 build.str(intOp(inst.c) ? tempt : wzr, addr);
 }
 else
@@ -529,7 +529,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 break;
 }
@@ -754,7 +754,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 if (inst.a.kind == IrOpKind::Constant)
 {
 // other cases should've been constant folded
-LUAU_ASSERT(tagOp(inst.a) == LUA_TBOOLEAN);
+CODEGEN_ASSERT(tagOp(inst.a) == LUA_TBOOLEAN);
 build.eor(inst.regA64, regOp(inst.b), 1);
 }
 else
@@ -762,7 +762,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 Label notbool, exit;
 
 // use the fact that NIL is the only value less than BOOLEAN to do two tag comparisons at once
-LUAU_ASSERT(LUA_TNIL == 0 && LUA_TBOOLEAN == 1);
+CODEGEN_ASSERT(LUA_TNIL == 0 && LUA_TBOOLEAN == 1);
 build.cmp(regOp(inst.a), LUA_TBOOLEAN);
 build.b(ConditionA64::NotEqual, notbool);
 
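The trick in the comment above leans on the tag ordering pinned down by the assertion: nil is the only tag below boolean, so one unsigned compare against LUA_TBOOLEAN classifies a value as nil, boolean, or anything else. In scalar form (illustrative):

// below  => nil (always falsy)
// equal  => boolean (the value decides)
// above  => any other type (always truthy)
bool isTruthy(unsigned tag, int value)
{
    if (tag < LUA_TBOOLEAN)
        return false; // nil
    if (tag == LUA_TBOOLEAN)
        return value != 0; // false is the only other falsy value
    return true;
}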
@@ -797,7 +797,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 else if (cond == IrCondition::Equal)
 build.ldr(x3, mem(rNativeContext, offsetof(NativeContext, luaV_equalval)));
 else
-LUAU_ASSERT(!"Unsupported condition");
+CODEGEN_ASSERT(!"Unsupported condition");
 
 build.blr(x3);
 
@@ -823,7 +823,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 RegisterA64 temp = regs.allocTemp(KindA64::w);
 build.ldr(temp, mem(rBase, vmRegOp(inst.a) * sizeof(TValue) + offsetof(TValue, tt)));
 // nil => falsy
-LUAU_ASSERT(LUA_TNIL == 0);
+CODEGEN_ASSERT(LUA_TNIL == 0);
 build.cbz(temp, labelOp(inst.c));
 // not boolean => truthy
 build.cmp(temp, LUA_TBOOLEAN);
@@ -839,7 +839,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 RegisterA64 temp = regs.allocTemp(KindA64::w);
 build.ldr(temp, mem(rBase, vmRegOp(inst.a) * sizeof(TValue) + offsetof(TValue, tt)));
 // nil => falsy
-LUAU_ASSERT(LUA_TNIL == 0);
+CODEGEN_ASSERT(LUA_TNIL == 0);
 build.cbz(temp, labelOp(inst.b));
 // not boolean => truthy
 build.cmp(temp, LUA_TBOOLEAN);
@@ -865,7 +865,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 else if (inst.a.kind == IrOpKind::Constant && inst.b.kind == IrOpKind::Inst)
 build.cmp(regOp(inst.b), tagOp(inst.a));
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 
 if (isFallthroughBlock(blockOp(inst.d), next))
 {
@@ -899,7 +899,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(unsigned(intOp(inst.b)) <= AssemblyBuilderA64::kMaxImmediate);
+CODEGEN_ASSERT(unsigned(intOp(inst.b)) <= AssemblyBuilderA64::kMaxImmediate);
 build.cmp(regOp(inst.a), uint16_t(intOp(inst.b)));
 build.b(getConditionInt(cond), labelOp(inst.d));
 }
@@ -1131,7 +1131,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 build.str(temp, mem(rState, offsetof(lua_State, top)));
 }
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 break;
 }
 case IrCmd::ADJUST_STACK_TO_TOP:
@@ -1159,7 +1159,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 else if (inst.d.kind == IrOpKind::VmConst)
 emitAddOffset(build, x4, rConstants, vmConstOp(inst.d) * sizeof(TValue));
 else
-LUAU_ASSERT(inst.d.kind == IrOpKind::Undef);
+CODEGEN_ASSERT(inst.d.kind == IrOpKind::Undef);
 
 // nparams
 if (intOp(inst.e) == LUA_MULTRET)
@@ -1228,7 +1228,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 build.adr(x2, &n, sizeof(n));
 }
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 
 build.add(x3, rBase, uint16_t(vmRegOp(inst.a) * sizeof(TValue)));
 build.ldr(x4, mem(rNativeContext, offsetof(NativeContext, luaV_gettable)));
@@ -1250,7 +1250,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 build.adr(x2, &n, sizeof(n));
 }
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 
 build.add(x3, rBase, uint16_t(vmRegOp(inst.a) * sizeof(TValue)));
 build.ldr(x4, mem(rNativeContext, offsetof(NativeContext, luaV_settable)));
@@ -1366,7 +1366,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 case IrCmd::CHECK_TRUTHY:
 {
 // Constant tags which don't require boolean value check should've been removed in constant folding
-LUAU_ASSERT(inst.a.kind != IrOpKind::Constant || tagOp(inst.a) == LUA_TBOOLEAN);
+CODEGEN_ASSERT(inst.a.kind != IrOpKind::Constant || tagOp(inst.a) == LUA_TBOOLEAN);
 
 Label fresh; // used when guard aborts execution or jumps to a VM exit
 Label& target = getTargetLabel(inst.c, fresh);
@@ -1376,7 +1376,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 if (inst.a.kind != IrOpKind::Constant)
 {
 // fail to fallback on 'nil' (falsy)
-LUAU_ASSERT(LUA_TNIL == 0);
+CODEGEN_ASSERT(LUA_TNIL == 0);
 build.cbz(regOp(inst.a), target);
 
 // skip value test if it's not a boolean (truthy)
@@ -1455,7 +1455,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 }
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 
 finalizeTargetLabel(inst.c, fresh);
 break;
@@ -1471,7 +1471,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 RegisterA64 temp1w = castReg(KindA64::w, temp1);
 RegisterA64 temp2 = regs.allocTemp(KindA64::x);
 
-LUAU_ASSERT(offsetof(LuaNode, key.value) == offsetof(LuaNode, key) && kOffsetOfTKeyTagNext >= 8 && kOffsetOfTKeyTagNext < 16);
+CODEGEN_ASSERT(offsetof(LuaNode, key.value) == offsetof(LuaNode, key) && kOffsetOfTKeyTagNext >= 8 && kOffsetOfTKeyTagNext < 16);
 build.ldp(temp1, temp2, mem(regOp(inst.a), offsetof(LuaNode, key))); // load key.value into temp1 and key.tt (alongside other bits) into temp2
 build.ubfx(temp2, temp2, (kOffsetOfTKeyTagNext - 8) * 8, kTKeyTagBits); // .tt is right before .next, and 8 bytes are skipped by ldp
 build.cmp(temp2, LUA_TSTRING);
@@ -1483,7 +1483,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 build.b(ConditionA64::NotEqual, mismatch);
 
 build.ldr(temp1w, mem(regOp(inst.a), offsetof(LuaNode, val.tt)));
-LUAU_ASSERT(LUA_TNIL == 0);
+CODEGEN_ASSERT(LUA_TNIL == 0);
 build.cbz(temp1w, mismatch);
 
 if (inst.cmd == IrCmd::JUMP_SLOT_MATCH)
@@ -1509,7 +1509,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 RegisterA64 temp = regs.allocTemp(KindA64::w);
 
 build.ldr(temp, mem(regOp(inst.a), offsetof(LuaNode, val.tt)));
-LUAU_ASSERT(LUA_TNIL == 0);
+CODEGEN_ASSERT(LUA_TNIL == 0);
 build.cbz(temp, getTargetLabel(inst.b, fresh));
 finalizeTargetLabel(inst.b, fresh);
 break;
@@ -1517,7 +1517,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 case IrCmd::CHECK_BUFFER_LEN:
 {
 int accessSize = intOp(inst.c);
-LUAU_ASSERT(accessSize > 0 && accessSize <= int(AssemblyBuilderA64::kMaxImmediate));
+CODEGEN_ASSERT(accessSize > 0 && accessSize <= int(AssemblyBuilderA64::kMaxImmediate));
 
 Label fresh; // used when guard aborts execution or jumps to a VM exit
 Label& target = getTargetLabel(inst.d, fresh);
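CHECK_BUFFER_LEN guards a buffer access of accessSize bytes at a dynamic index; the assertion keeps the constant small enough for the compare-immediate path. What the guard verifies, in scalar form (a sketch; the lowered code folds this into unsigned comparisons and constant-folds known indices):

// An access of accessSize bytes starting at index is valid only if the whole
// range [index, index + accessSize) lies inside the buffer.
bool bufferAccessInBounds(int index, int accessSize, unsigned len)
{
    return index >= 0 && unsigned(index) + unsigned(accessSize) <= len;
}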
@@ -1570,7 +1570,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 finalizeTargetLabel(inst.d, fresh);
 break;
@@ -1594,7 +1594,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 RegisterA64 temp1 = regs.allocTemp(KindA64::x);
 RegisterA64 temp2 = regs.allocTemp(KindA64::x);
 
-LUAU_ASSERT(offsetof(global_State, totalbytes) == offsetof(global_State, GCthreshold) + 8);
+CODEGEN_ASSERT(offsetof(global_State, totalbytes) == offsetof(global_State, GCthreshold) + 8);
 Label skip;
 build.ldp(temp1, temp2, mem(rGlobalState, offsetof(global_State, GCthreshold)));
 build.cmp(temp1, temp2);
@@ -1818,7 +1818,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 // clear extra variables since we might have more than two
 if (intOp(inst.b) > 2)
 {
-LUAU_ASSERT(LUA_TNIL == 0);
+CODEGEN_ASSERT(LUA_TNIL == 0);
 for (int i = 2; i < intOp(inst.b); ++i)
 build.str(wzr, mem(rBase, (vmRegOp(inst.a) + 3 + i) * sizeof(TValue) + offsetof(TValue, tt)));
 }
@@ -1875,52 +1875,52 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 
 // Full instruction fallbacks
 case IrCmd::FALLBACK_GETGLOBAL:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmConst);
 
 regs.spill(build, index);
 emitFallback(build, offsetof(NativeContext, executeGETGLOBAL), uintOp(inst.a));
 break;
 case IrCmd::FALLBACK_SETGLOBAL:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmConst);
 
 regs.spill(build, index);
 emitFallback(build, offsetof(NativeContext, executeSETGLOBAL), uintOp(inst.a));
 break;
 case IrCmd::FALLBACK_GETTABLEKS:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmConst);
 
 regs.spill(build, index);
 emitFallback(build, offsetof(NativeContext, executeGETTABLEKS), uintOp(inst.a));
 break;
 case IrCmd::FALLBACK_SETTABLEKS:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmConst);
 
 regs.spill(build, index);
 emitFallback(build, offsetof(NativeContext, executeSETTABLEKS), uintOp(inst.a));
 break;
 case IrCmd::FALLBACK_NAMECALL:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmConst);
 
 regs.spill(build, index);
 emitFallback(build, offsetof(NativeContext, executeNAMECALL), uintOp(inst.a));
 break;
 case IrCmd::FALLBACK_PREPVARARGS:
-LUAU_ASSERT(inst.b.kind == IrOpKind::Constant);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::Constant);
 
 regs.spill(build, index);
 emitFallback(build, offsetof(NativeContext, executePREPVARARGS), uintOp(inst.a));
 break;
 case IrCmd::FALLBACK_GETVARARGS:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::Constant);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::Constant);
 
 regs.spill(build, index);
 build.mov(x0, rState);
@@ -1967,8 +1967,8 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 break;
 }
 case IrCmd::FALLBACK_DUPCLOSURE:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmConst);
 
 regs.spill(build, index);
 emitFallback(build, offsetof(NativeContext, executeDUPCLOSURE), uintOp(inst.a));
@@ -1982,7 +1982,7 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 // Pseudo instructions
 case IrCmd::NOP:
 case IrCmd::SUBSTITUTE:
-LUAU_ASSERT(!"Pseudo instructions should not be lowered");
+CODEGEN_ASSERT(!"Pseudo instructions should not be lowered");
 break;
 
 case IrCmd::BITAND_UINT:
@@ -2167,14 +2167,14 @@ void IrLoweringA64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 {
 inst.regA64 = regs.allocReg(KindA64::x, index);
 
-LUAU_ASSERT(sizeof(TString*) == 8);
+CODEGEN_ASSERT(sizeof(TString*) == 8);
 
 if (inst.a.kind == IrOpKind::Inst)
 build.add(inst.regA64, rGlobalState, regOp(inst.a), 3); // implicit uxtw
 else if (inst.a.kind == IrOpKind::Constant)
 build.add(inst.regA64, rGlobalState, uint16_t(tagOp(inst.a)) * 8);
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 
 build.ldr(inst.regA64, mem(inst.regA64, offsetof(global_State, ttname)));
 break;
@@ -2330,10 +2330,10 @@ void IrLoweringA64::finishBlock(const IrBlock& curr, const IrBlock& next)
 {
 // If we have spills remaining, we have to immediately lower the successor block
 for (uint32_t predIdx : predecessors(function.cfg, function.getBlockIndex(next)))
-LUAU_ASSERT(predIdx == function.getBlockIndex(curr));
+CODEGEN_ASSERT(predIdx == function.getBlockIndex(curr));
 
 // And the next block cannot be a join block in cfg
-LUAU_ASSERT(next.useCount == 1);
+CODEGEN_ASSERT(next.useCount == 1);
 }
 }
 
@@ -2355,7 +2355,7 @@ void IrLoweringA64::finishFunction()
 
 for (ExitHandler& handler : exitHandlers)
 {
-LUAU_ASSERT(handler.pcpos != kVmExitEntryGuardPc);
+CODEGEN_ASSERT(handler.pcpos != kVmExitEntryGuardPc);
 
 build.setLabel(handler.self);
 
@@ -2465,7 +2465,7 @@ RegisterA64 IrLoweringA64::tempDouble(IrOp op)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 return noreg;
 }
 }
@@ -2482,7 +2482,7 @@ RegisterA64 IrLoweringA64::tempInt(IrOp op)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 return noreg;
 }
 }
@@ -2499,7 +2499,7 @@ RegisterA64 IrLoweringA64::tempUint(IrOp op)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 return noreg;
 }
 }
@@ -2507,9 +2507,9 @@ RegisterA64 IrLoweringA64::tempUint(IrOp op)
 AddressA64 IrLoweringA64::tempAddr(IrOp op, int offset)
 {
 // This is needed to tighten the bounds checks in the VmConst case below
-LUAU_ASSERT(offset % 4 == 0);
+CODEGEN_ASSERT(offset % 4 == 0);
 // Full encoded range is wider depending on the load size, but this assertion helps establish a smaller guaranteed working range [0..4096)
-LUAU_ASSERT(offset >= 0 && unsigned(offset / 4) <= AssemblyBuilderA64::kMaxImmediate);
+CODEGEN_ASSERT(offset >= 0 && unsigned(offset / 4) <= AssemblyBuilderA64::kMaxImmediate);
 
 if (op.kind == IrOpKind::VmReg)
 return mem(rBase, vmRegOp(op) * sizeof(TValue) + offset);
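tempAddr centralizes the offset sanity checks for register- and constant-based addressing; a compile-time restatement of what the two assertions admit (assuming kMaxImmediate is the 12-bit A64 immediate limit, 4095):

constexpr bool tempAddrOffsetOk(int offset)
{
    // 4-byte aligned, non-negative, and encodable as a scaled unsigned
    // immediate for 4-byte-wide loads and stores
    return offset >= 0 && offset % 4 == 0 && unsigned(offset / 4) <= 4095u;
}
static_assert(tempAddrOffsetOk(0) && tempAddrOffsetOk(8) && !tempAddrOffsetOk(6), "offsets must be 4-byte aligned");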
@@ -2532,7 +2532,7 @@ AddressA64 IrLoweringA64::tempAddr(IrOp op, int offset)
 return mem(regOp(op), offset);
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 return noreg;
 }
 }
@@ -2561,7 +2561,7 @@ AddressA64 IrLoweringA64::tempAddrBuffer(IrOp bufferOp, IrOp indexOp)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 return noreg;
 }
 }
@@ -2573,7 +2573,7 @@ RegisterA64 IrLoweringA64::regOp(IrOp op)
 if (inst.spilled || inst.needsReload)
 regs.restoreReg(build, inst);
 
-LUAU_ASSERT(inst.regA64 != noreg);
+CODEGEN_ASSERT(inst.regA64 != noreg);
 return inst.regA64;
 }
 
@@ -52,7 +52,7 @@ void IrLoweringX64::storeDoubleAsFloat(OperandX64 dst, IrOp src)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 build.vmovss(dst, tmp.reg);
 }
@@ -77,7 +77,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 else if (inst.a.kind == IrOpKind::Inst)
 build.mov(inst.regX64, dword[regOp(inst.a) + offsetof(TValue, tt)]);
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 break;
 case IrCmd::LOAD_POINTER:
 inst.regX64 = regs.allocReg(SizeX64::qword, index);
@@ -91,7 +91,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 else if (inst.a.kind == IrOpKind::Inst)
 build.mov(inst.regX64, qword[regOp(inst.a) + offsetof(TValue, value)]);
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 break;
 case IrCmd::LOAD_DOUBLE:
 inst.regX64 = regs.allocReg(SizeX64::xmmword, index);
@@ -101,7 +101,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 else if (inst.a.kind == IrOpKind::VmConst)
 build.vmovsd(inst.regX64, luauConstantValue(vmConstOp(inst.a)));
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 break;
 case IrCmd::LOAD_INT:
 inst.regX64 = regs.allocReg(SizeX64::dword, index);
@@ -117,7 +117,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 build.vcvtss2sd(
 inst.regX64, inst.regX64, dword[rConstants + vmConstOp(inst.a) * sizeof(TValue) + offsetof(TValue, value) + intOp(inst.b)]);
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 break;
 case IrCmd::LOAD_TVALUE:
 {
@@ -132,7 +132,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 else if (inst.a.kind == IrOpKind::Inst)
 build.vmovups(inst.regX64, xmmword[regOp(inst.a) + addrOffset]);
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 break;
 }
 case IrCmd::LOAD_ENV:
@@ -163,7 +163,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 break;
 case IrCmd::GET_SLOT_NODE_ADDR:
@@ -222,7 +222,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 break;
 case IrCmd::STORE_POINTER:
@@ -231,7 +231,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 
 if (inst.b.kind == IrOpKind::Constant)
 {
-LUAU_ASSERT(intOp(inst.b) == 0);
+CODEGEN_ASSERT(intOp(inst.b) == 0);
 build.mov(valueLhs, 0);
 }
 else if (inst.b.kind == IrOpKind::Inst)
@@ -240,7 +240,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 break;
 }
@@ -254,7 +254,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 break;
 case IrCmd::STORE_DOUBLE:
@@ -274,7 +274,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 break;
 }
@@ -284,7 +284,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 else if (inst.b.kind == IrOpKind::Inst)
 build.mov(luauRegValueInt(vmRegOp(inst.a)), regOp(inst.b));
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 break;
 case IrCmd::STORE_VECTOR:
 storeDoubleAsFloat(luauRegValueVector(vmRegOp(inst.a), 0), inst.b);
@@ -300,7 +300,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 else if (inst.a.kind == IrOpKind::Inst)
 build.vmovups(xmmword[regOp(inst.a) + addrOffset], regOp(inst.b));
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 break;
 }
 case IrCmd::STORE_SPLIT_TVALUE:
@@ -341,7 +341,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 break;
 }
@@ -374,7 +374,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 break;
 }
@@ -682,7 +682,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 if (inst.a.kind == IrOpKind::Constant)
 {
 // Other cases should've been constant folded
-LUAU_ASSERT(tagOp(inst.a) == LUA_TBOOLEAN);
+CODEGEN_ASSERT(tagOp(inst.a) == LUA_TBOOLEAN);
 }
 else
 {
@@ -731,7 +731,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 else if (cond == IrCondition::Equal)
 callWrap.call(qword[rNativeContext + offsetof(NativeContext, luaV_equalval)]);
 else
-LUAU_ASSERT(!"Unsupported condition");
+CODEGEN_ASSERT(!"Unsupported condition");
 
 emitUpdateBase(build);
 
@@ -751,7 +751,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 break;
 case IrCmd::JUMP_EQ_TAG:
 {
-LUAU_ASSERT(inst.b.kind == IrOpKind::Inst || inst.b.kind == IrOpKind::Constant);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::Inst || inst.b.kind == IrOpKind::Constant);
 OperandX64 opb = inst.b.kind == IrOpKind::Inst ? regOp(inst.b) : OperandX64(tagOp(inst.b));
 
 if (inst.a.kind == IrOpKind::Constant)
@@ -950,7 +950,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(source != IrCmd::SUBSTITUTE); // we don't process substitutions
+CODEGEN_ASSERT(source != IrCmd::SUBSTITUTE); // we don't process substitutions
 build.vcvtsi2sd(inst.regX64, inst.regX64, qwordReg(regOp(inst.a)));
 }
 break;
@@ -1001,7 +1001,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 break;
 }
@@ -1032,7 +1032,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 else if (inst.d.kind == IrOpKind::VmConst)
 args = luauConstantAddress(vmConstOp(inst.d));
 else
-LUAU_ASSERT(inst.d.kind == IrOpKind::Undef);
+CODEGEN_ASSERT(inst.d.kind == IrOpKind::Undef);
 
 int ra = vmRegOp(inst.b);
 int arg = vmRegOp(inst.c);
@@ -1102,7 +1102,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 break;
 case IrCmd::SET_TABLE:
@@ -1118,7 +1118,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 break;
 case IrCmd::GET_IMPORT:
@@ -1202,7 +1202,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 case IrCmd::CHECK_TRUTHY:
 {
 // Constant tags which don't require boolean value check should've been removed in constant folding
-LUAU_ASSERT(inst.a.kind != IrOpKind::Constant || tagOp(inst.a) == LUA_TBOOLEAN);
+CODEGEN_ASSERT(inst.a.kind != IrOpKind::Constant || tagOp(inst.a) == LUA_TBOOLEAN);
 
 Label skip;
 
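The same CHECK_TRUTHY precondition appears here as in the A64 lowering: by the time lowering runs, a constant tag only survives if it is boolean. An illustrative restatement of the constant-folding rule the comment refers to (hypothetical helper, not taken from the diff):

enum class TruthyFold { AlwaysTruthy, AlwaysFalsy, NeedsValueCheck };

TruthyFold foldTruthyCheck(uint8_t tag)
{
    if (tag == LUA_TNIL)
        return TruthyFold::AlwaysFalsy; // guard always fails
    if (tag == LUA_TBOOLEAN)
        return TruthyFold::NeedsValueCheck; // only case that reaches lowering
    return TruthyFold::AlwaysTruthy; // guard folds away entirely
}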
@@ -1250,7 +1250,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 else if (inst.b.kind == IrOpKind::Constant)
 build.cmp(dword[regOp(inst.a) + offsetof(Table, sizearray)], intOp(inst.b));
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 
 jumpOrAbortOnUndef(ConditionX64::BelowEqual, inst.c, next);
 break;
@@ -1310,7 +1310,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 case IrCmd::CHECK_BUFFER_LEN:
 {
 int accessSize = intOp(inst.c);
-LUAU_ASSERT(accessSize > 0);
+CODEGEN_ASSERT(accessSize > 0);
 
 if (inst.b.kind == IrOpKind::Inst)
 {
@@ -1361,7 +1361,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 break;
 }
@@ -1537,46 +1537,46 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 
 // Full instruction fallbacks
 case IrCmd::FALLBACK_GETGLOBAL:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmConst);
 
 emitFallback(regs, build, offsetof(NativeContext, executeGETGLOBAL), uintOp(inst.a));
 break;
 case IrCmd::FALLBACK_SETGLOBAL:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmConst);
 
 emitFallback(regs, build, offsetof(NativeContext, executeSETGLOBAL), uintOp(inst.a));
 break;
 case IrCmd::FALLBACK_GETTABLEKS:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmConst);
 
 emitFallback(regs, build, offsetof(NativeContext, executeGETTABLEKS), uintOp(inst.a));
 break;
 case IrCmd::FALLBACK_SETTABLEKS:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmConst);
 
 emitFallback(regs, build, offsetof(NativeContext, executeSETTABLEKS), uintOp(inst.a));
 break;
 case IrCmd::FALLBACK_NAMECALL:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.d.kind == IrOpKind::VmConst);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.d.kind == IrOpKind::VmConst);
 
 emitFallback(regs, build, offsetof(NativeContext, executeNAMECALL), uintOp(inst.a));
 break;
 case IrCmd::FALLBACK_PREPVARARGS:
-LUAU_ASSERT(inst.b.kind == IrOpKind::Constant);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::Constant);
 
 emitFallback(regs, build, offsetof(NativeContext, executePREPVARARGS), uintOp(inst.a));
 break;
 case IrCmd::FALLBACK_GETVARARGS:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::Constant);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::Constant);
 
 if (intOp(inst.c) == LUA_MULTRET)
 {
@@ -1623,8 +1623,8 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 break;
 }
 case IrCmd::FALLBACK_DUPCLOSURE:
-LUAU_ASSERT(inst.b.kind == IrOpKind::VmReg);
-LUAU_ASSERT(inst.c.kind == IrOpKind::VmConst);
+CODEGEN_ASSERT(inst.b.kind == IrOpKind::VmReg);
+CODEGEN_ASSERT(inst.c.kind == IrOpKind::VmConst);
 
 emitFallback(regs, build, offsetof(NativeContext, executeDUPCLOSURE), uintOp(inst.a));
 break;
@@ -1882,7 +1882,7 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 else if (inst.a.kind == IrOpKind::Constant)
 build.mov(inst.regX64, qword[inst.regX64 + tagOp(inst.a) * sizeof(TString*) + offsetof(global_State, ttname)]);
 else
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 break;
 }
 case IrCmd::GET_TYPEOF:
@@ -1990,14 +1990,14 @@ void IrLoweringX64::lowerInst(IrInst& inst, uint32_t index, const IrBlock& next)
 }
 else
 {
-LUAU_ASSERT(!"Unsupported instruction form");
+CODEGEN_ASSERT(!"Unsupported instruction form");
 }
 break;
 
 // Pseudo instructions
 case IrCmd::NOP:
 case IrCmd::SUBSTITUTE:
-LUAU_ASSERT(!"Pseudo instructions should not be lowered");
+CODEGEN_ASSERT(!"Pseudo instructions should not be lowered");
 break;
 }
 
@@ -2012,10 +2012,10 @@ void IrLoweringX64::finishBlock(const IrBlock& curr, const IrBlock& next)
 {
 // If we have spills remaining, we have to immediately lower the successor block
 for (uint32_t predIdx : predecessors(function.cfg, function.getBlockIndex(next)))
-LUAU_ASSERT(predIdx == function.getBlockIndex(curr) || function.blocks[predIdx].kind == IrBlockKind::Dead);
+CODEGEN_ASSERT(predIdx == function.getBlockIndex(curr) || function.blocks[predIdx].kind == IrBlockKind::Dead);
 
 // And the next block cannot be a join block in cfg
-LUAU_ASSERT(next.useCount == 1);
+CODEGEN_ASSERT(next.useCount == 1);
 }
 }
 
@@ -2037,7 +2037,7 @@ void IrLoweringX64::finishFunction()
 
 for (ExitHandler& handler : exitHandlers)
 {
-LUAU_ASSERT(handler.pcpos != kVmExitEntryGuardPc);
+CODEGEN_ASSERT(handler.pcpos != kVmExitEntryGuardPc);
 
 build.setLabel(handler.self);
|
|
||||||
@ -2154,7 +2154,7 @@ OperandX64 IrLoweringX64::memRegDoubleOp(IrOp op)
|
|||||||
case IrOpKind::VmConst:
|
case IrOpKind::VmConst:
|
||||||
return luauConstantValue(vmConstOp(op));
|
return luauConstantValue(vmConstOp(op));
|
||||||
default:
|
default:
|
||||||
LUAU_ASSERT(!"Unsupported operand kind");
|
CODEGEN_ASSERT(!"Unsupported operand kind");
|
||||||
}
|
}
|
||||||
|
|
||||||
return noreg;
|
return noreg;
|
||||||
@ -2171,7 +2171,7 @@ OperandX64 IrLoweringX64::memRegUintOp(IrOp op)
|
|||||||
case IrOpKind::VmReg:
|
case IrOpKind::VmReg:
|
||||||
return luauRegValueInt(vmRegOp(op));
|
return luauRegValueInt(vmRegOp(op));
|
||||||
default:
|
default:
|
||||||
LUAU_ASSERT(!"Unsupported operand kind");
|
CODEGEN_ASSERT(!"Unsupported operand kind");
|
||||||
}
|
}
|
||||||
|
|
||||||
return noreg;
|
return noreg;
|
||||||
@ -2188,7 +2188,7 @@ OperandX64 IrLoweringX64::memRegTagOp(IrOp op)
|
|||||||
case IrOpKind::VmConst:
|
case IrOpKind::VmConst:
|
||||||
return luauConstantTag(vmConstOp(op));
|
return luauConstantTag(vmConstOp(op));
|
||||||
default:
|
default:
|
||||||
LUAU_ASSERT(!"Unsupported operand kind");
|
CODEGEN_ASSERT(!"Unsupported operand kind");
|
||||||
}
|
}
|
||||||
|
|
||||||
return noreg;
|
return noreg;
|
||||||
@ -2201,7 +2201,7 @@ RegisterX64 IrLoweringX64::regOp(IrOp op)
|
|||||||
if (inst.spilled || inst.needsReload)
|
if (inst.spilled || inst.needsReload)
|
||||||
regs.restore(inst, false);
|
regs.restore(inst, false);
|
||||||
|
|
||||||
LUAU_ASSERT(inst.regX64 != noreg);
|
CODEGEN_ASSERT(inst.regX64 != noreg);
|
||||||
return inst.regX64;
|
return inst.regX64;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2212,7 +2212,7 @@ OperandX64 IrLoweringX64::bufferAddrOp(IrOp bufferOp, IrOp indexOp)
|
|||||||
else if (indexOp.kind == IrOpKind::Constant)
|
else if (indexOp.kind == IrOpKind::Constant)
|
||||||
return regOp(bufferOp) + intOp(indexOp) + offsetof(Buffer, data);
|
return regOp(bufferOp) + intOp(indexOp) + offsetof(Buffer, data);
|
||||||
|
|
||||||
LUAU_ASSERT(!"Unsupported instruction form");
|
CODEGEN_ASSERT(!"Unsupported instruction form");
|
||||||
return noreg;
|
return noreg;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -23,7 +23,7 @@ static const int8_t kInvalidSpill = 64;
 
 static int allocSpill(uint32_t& free, KindA64 kind)
 {
-    LUAU_ASSERT(kStackSize <= 256); // to support larger stack frames, we need to ensure qN is allocated at 16b boundary to fit in ldr/str encoding
+    CODEGEN_ASSERT(kStackSize <= 256); // to support larger stack frames, we need to ensure qN is allocated at 16b boundary to fit in ldr/str encoding
 
     // qN registers use two consecutive slots
     int slot = countrz(kind == KindA64::q ? free & (free >> 1) : free);
@@ -32,7 +32,7 @@ static int allocSpill(uint32_t& free, KindA64 kind)
 
     uint32_t mask = (kind == KindA64::q ? 3u : 1u) << slot;
 
-    LUAU_ASSERT((free & mask) == mask);
+    CODEGEN_ASSERT((free & mask) == mask);
     free &= ~mask;
 
     return slot;
@@ -43,7 +43,7 @@ static void freeSpill(uint32_t& free, KindA64 kind, uint8_t slot)
     // qN registers use two consecutive slots
     uint32_t mask = (kind == KindA64::q ? 3u : 1u) << slot;
 
-    LUAU_ASSERT((free & mask) == 0);
+    CODEGEN_ASSERT((free & mask) == 0);
     free |= mask;
 }
 
@@ -53,7 +53,7 @@ static int getReloadOffset(IrCmd cmd)
     {
     case IrValueKind::Unknown:
     case IrValueKind::None:
-        LUAU_ASSERT(!"Invalid operand restore value kind");
+        CODEGEN_ASSERT(!"Invalid operand restore value kind");
         break;
     case IrValueKind::Tag:
         return offsetof(TValue, tt);
@@ -67,7 +67,7 @@ static int getReloadOffset(IrCmd cmd)
         return 0;
     }
 
-    LUAU_ASSERT(!"Invalid operand restore value kind");
+    CODEGEN_ASSERT(!"Invalid operand restore value kind");
     LUAU_UNREACHABLE();
 }
 
@@ -88,7 +88,7 @@ static AddressA64 getReloadAddress(const IrFunction& function, const IrInst& ins
 static void restoreInst(AssemblyBuilderA64& build, uint32_t& freeSpillSlots, IrFunction& function, const IrRegAllocA64::Spill& s, RegisterA64 reg)
 {
     IrInst& inst = function.instructions[s.inst];
-    LUAU_ASSERT(inst.regA64 == noreg);
+    CODEGEN_ASSERT(inst.regA64 == noreg);
 
     if (s.slot >= 0)
     {
@@ -99,9 +99,9 @@ static void restoreInst(AssemblyBuilderA64& build, uint32_t& freeSpillSlots, IrF
     }
     else
     {
-        LUAU_ASSERT(!inst.spilled && inst.needsReload);
+        CODEGEN_ASSERT(!inst.spilled && inst.needsReload);
         AddressA64 addr = getReloadAddress(function, function.instructions[s.inst], /*limitToCurrentBlock*/ false);
-        LUAU_ASSERT(addr.base != xzr);
+        CODEGEN_ASSERT(addr.base != xzr);
         build.ldr(reg, addr);
     }
 
@@ -116,7 +116,7 @@ IrRegAllocA64::IrRegAllocA64(IrFunction& function, LoweringStats* stats, std::in
 {
     for (auto& p : regs)
     {
-        LUAU_ASSERT(p.first.kind == p.second.kind && p.first.index <= p.second.index);
+        CODEGEN_ASSERT(p.first.kind == p.second.kind && p.first.index <= p.second.index);
 
         Set& set = getSet(p.first.kind);
 
@@ -130,7 +130,7 @@ IrRegAllocA64::IrRegAllocA64(IrFunction& function, LoweringStats* stats, std::in
     memset(gpr.defs, -1, sizeof(gpr.defs));
     memset(simd.defs, -1, sizeof(simd.defs));
 
-    LUAU_ASSERT(kSpillSlots <= 32);
+    CODEGEN_ASSERT(kSpillSlots <= 32);
     freeSpillSlots = (kSpillSlots == 32) ? ~0u : (1u << kSpillSlots) - 1;
 }
 
@@ -172,7 +172,7 @@ RegisterA64 IrRegAllocA64::allocTemp(KindA64 kind)
 
     set.free &= ~(1u << reg);
     set.temp |= 1u << reg;
-    LUAU_ASSERT(set.defs[reg] == kInvalidInstIdx);
+    CODEGEN_ASSERT(set.defs[reg] == kInvalidInstIdx);
 
     return RegisterA64{kind, uint8_t(reg)};
 }
@@ -188,11 +188,11 @@ RegisterA64 IrRegAllocA64::allocReuse(KindA64 kind, uint32_t index, std::initial
 
         if (source.lastUse == index && !source.reusedReg && source.regA64 != noreg)
         {
-            LUAU_ASSERT(!source.spilled && !source.needsReload);
-            LUAU_ASSERT(source.regA64.kind == kind);
+            CODEGEN_ASSERT(!source.spilled && !source.needsReload);
+            CODEGEN_ASSERT(source.regA64.kind == kind);
 
             Set& set = getSet(kind);
-            LUAU_ASSERT(set.defs[source.regA64.index] == op.index);
+            CODEGEN_ASSERT(set.defs[source.regA64.index] == op.index);
             set.defs[source.regA64.index] = index;
 
             source.reusedReg = true;
@@ -207,8 +207,8 @@ RegisterA64 IrRegAllocA64::takeReg(RegisterA64 reg, uint32_t index)
 {
     Set& set = getSet(reg.kind);
 
-    LUAU_ASSERT(set.free & (1u << reg.index));
-    LUAU_ASSERT(set.defs[reg.index] == kInvalidInstIdx);
+    CODEGEN_ASSERT(set.free & (1u << reg.index));
+    CODEGEN_ASSERT(set.defs[reg.index] == kInvalidInstIdx);
 
     set.free &= ~(1u << reg.index);
     set.defs[reg.index] = index;
@@ -220,9 +220,9 @@ void IrRegAllocA64::freeReg(RegisterA64 reg)
 {
     Set& set = getSet(reg.kind);
 
-    LUAU_ASSERT((set.base & (1u << reg.index)) != 0);
-    LUAU_ASSERT((set.free & (1u << reg.index)) == 0);
-    LUAU_ASSERT((set.temp & (1u << reg.index)) == 0);
+    CODEGEN_ASSERT((set.base & (1u << reg.index)) != 0);
+    CODEGEN_ASSERT((set.free & (1u << reg.index)) == 0);
+    CODEGEN_ASSERT((set.temp & (1u << reg.index)) == 0);
 
     set.free |= 1u << reg.index;
     set.defs[reg.index] = kInvalidInstIdx;
@@ -232,7 +232,7 @@ void IrRegAllocA64::freeLastUseReg(IrInst& target, uint32_t index)
 {
     if (target.lastUse == index && !target.reusedReg)
     {
-        LUAU_ASSERT(!target.spilled && !target.needsReload);
+        CODEGEN_ASSERT(!target.spilled && !target.needsReload);
 
         // Register might have already been freed if it had multiple uses inside a single instruction
         if (target.regA64 == noreg)
@@ -260,11 +260,11 @@ void IrRegAllocA64::freeLastUseRegs(const IrInst& inst, uint32_t index)
 
 void IrRegAllocA64::freeTempRegs()
 {
-    LUAU_ASSERT((gpr.free & gpr.temp) == 0);
+    CODEGEN_ASSERT((gpr.free & gpr.temp) == 0);
     gpr.free |= gpr.temp;
     gpr.temp = 0;
 
-    LUAU_ASSERT((simd.free & simd.temp) == 0);
+    CODEGEN_ASSERT((simd.free & simd.temp) == 0);
     simd.free |= simd.temp;
     simd.temp = 0;
 }
@@ -299,7 +299,7 @@ size_t IrRegAllocA64::spill(AssemblyBuilderA64& build, uint32_t index, std::init
            continue;
 
        // free all temp registers
-        LUAU_ASSERT((set.free & set.temp) == 0);
+        CODEGEN_ASSERT((set.free & set.temp) == 0);
        set.free |= set.temp;
        set.temp = 0;
 
@@ -311,13 +311,13 @@ size_t IrRegAllocA64::spill(AssemblyBuilderA64& build, uint32_t index, std::init
            int reg = 31 - countlz(regs);
 
            uint32_t inst = set.defs[reg];
-            LUAU_ASSERT(inst != kInvalidInstIdx);
+            CODEGEN_ASSERT(inst != kInvalidInstIdx);
 
            IrInst& def = function.instructions[inst];
-            LUAU_ASSERT(def.regA64.index == reg);
-            LUAU_ASSERT(!def.reusedReg);
-            LUAU_ASSERT(!def.spilled);
-            LUAU_ASSERT(!def.needsReload);
+            CODEGEN_ASSERT(def.regA64.index == reg);
+            CODEGEN_ASSERT(!def.reusedReg);
+            CODEGEN_ASSERT(!def.spilled);
+            CODEGEN_ASSERT(!def.needsReload);
 
            if (def.lastUse == index)
            {
@@ -367,7 +367,7 @@ size_t IrRegAllocA64::spill(AssemblyBuilderA64& build, uint32_t index, std::init
            set.defs[reg] = kInvalidInstIdx;
        }
 
-        LUAU_ASSERT(set.free == set.base);
+        CODEGEN_ASSERT(set.free == set.base);
     }
 
     if (FFlag::DebugCodegenChaosA64)
@@ -386,7 +386,7 @@ size_t IrRegAllocA64::spill(AssemblyBuilderA64& build, uint32_t index, std::init
 
 void IrRegAllocA64::restore(AssemblyBuilderA64& build, size_t start)
 {
-    LUAU_ASSERT(start <= spills.size());
+    CODEGEN_ASSERT(start <= spills.size());
 
     if (start < spills.size())
     {
@@ -421,7 +421,7 @@ void IrRegAllocA64::restoreReg(AssemblyBuilderA64& build, IrInst& inst)
         }
     }
 
-    LUAU_ASSERT(!"Expected to find a spill record");
+    CODEGEN_ASSERT(!"Expected to find a spill record");
 }
 
 IrRegAllocA64::Set& IrRegAllocA64::getSet(KindA64 kind)
@@ -438,7 +438,7 @@ IrRegAllocA64::Set& IrRegAllocA64::getSet(KindA64 kind)
         return simd;
 
     default:
-        LUAU_ASSERT(!"Unexpected register kind");
+        CODEGEN_ASSERT(!"Unexpected register kind");
         LUAU_UNREACHABLE();
     }
 }
@@ -64,7 +64,7 @@ RegisterX64 IrRegAllocX64::allocReg(SizeX64 size, uint32_t instIdx)
         return takeReg(reg, instIdx);
     }
 
-    LUAU_ASSERT(!"Out of registers to allocate");
+    CODEGEN_ASSERT(!"Out of registers to allocate");
     return noreg;
 }
 
@@ -83,7 +83,7 @@ RegisterX64 IrRegAllocX64::allocRegOrReuse(SizeX64 size, uint32_t instIdx, std::
         if ((size == SizeX64::xmmword) != (source.regX64.size == SizeX64::xmmword))
             continue;
 
-        LUAU_ASSERT(source.regX64 != noreg);
+        CODEGEN_ASSERT(source.regX64 != noreg);
 
         source.reusedReg = true;
 
@@ -105,11 +105,11 @@ RegisterX64 IrRegAllocX64::takeReg(RegisterX64 reg, uint32_t instIdx)
     {
         if (!freeXmmMap[reg.index])
         {
-            LUAU_ASSERT(xmmInstUsers[reg.index] != kInvalidInstIdx);
+            CODEGEN_ASSERT(xmmInstUsers[reg.index] != kInvalidInstIdx);
             preserve(function.instructions[xmmInstUsers[reg.index]]);
         }
 
-        LUAU_ASSERT(freeXmmMap[reg.index]);
+        CODEGEN_ASSERT(freeXmmMap[reg.index]);
         freeXmmMap[reg.index] = false;
         xmmInstUsers[reg.index] = instIdx;
     }
@@ -117,11 +117,11 @@ RegisterX64 IrRegAllocX64::takeReg(RegisterX64 reg, uint32_t instIdx)
     {
         if (!freeGprMap[reg.index])
         {
-            LUAU_ASSERT(gprInstUsers[reg.index] != kInvalidInstIdx);
+            CODEGEN_ASSERT(gprInstUsers[reg.index] != kInvalidInstIdx);
             preserve(function.instructions[gprInstUsers[reg.index]]);
         }
 
-        LUAU_ASSERT(freeGprMap[reg.index]);
+        CODEGEN_ASSERT(freeGprMap[reg.index]);
         freeGprMap[reg.index] = false;
         gprInstUsers[reg.index] = instIdx;
     }
@@ -141,13 +141,13 @@ void IrRegAllocX64::freeReg(RegisterX64 reg)
 {
     if (reg.size == SizeX64::xmmword)
     {
-        LUAU_ASSERT(!freeXmmMap[reg.index]);
+        CODEGEN_ASSERT(!freeXmmMap[reg.index]);
         freeXmmMap[reg.index] = true;
         xmmInstUsers[reg.index] = kInvalidInstIdx;
     }
     else
     {
-        LUAU_ASSERT(!freeGprMap[reg.index]);
+        CODEGEN_ASSERT(!freeGprMap[reg.index]);
         freeGprMap[reg.index] = true;
         gprInstUsers[reg.index] = kInvalidInstIdx;
     }
@@ -157,7 +157,7 @@ void IrRegAllocX64::freeLastUseReg(IrInst& target, uint32_t instIdx)
 {
     if (isLastUseReg(target, instIdx))
     {
-        LUAU_ASSERT(!target.spilled && !target.needsReload);
+        CODEGEN_ASSERT(!target.spilled && !target.needsReload);
 
         // Register might have already been freed if it had multiple uses inside a single instruction
         if (target.regX64 == noreg)
@@ -210,7 +210,7 @@ void IrRegAllocX64::preserve(IrInst& inst)
         else if (spill.valueKind == IrValueKind::Tag || spill.valueKind == IrValueKind::Int)
             build.mov(dword[sSpillArea + i * 8], inst.regX64);
         else
-            LUAU_ASSERT(!"Unsupported value kind");
+            CODEGEN_ASSERT(!"Unsupported value kind");
 
         usedSpillSlots.set(i);
 
@@ -312,7 +312,7 @@ bool IrRegAllocX64::shouldFreeGpr(RegisterX64 reg) const
     if (reg == noreg)
         return false;
 
-    LUAU_ASSERT(reg.size != SizeX64::xmmword);
+    CODEGEN_ASSERT(reg.size != SizeX64::xmmword);
 
     for (RegisterX64 gpr : kGprAllocOrder)
     {
@@ -340,7 +340,7 @@ unsigned IrRegAllocX64::findSpillStackSlot(IrValueKind valueKind)
         return i;
     }
 
-    LUAU_ASSERT(!"Nowhere to spill");
+    CODEGEN_ASSERT(!"Nowhere to spill");
     return ~0u;
 }
 
@@ -364,18 +364,18 @@ bool IrRegAllocX64::hasRestoreOp(const IrInst& inst) const
 
 OperandX64 IrRegAllocX64::getRestoreAddress(const IrInst& inst, IrOp restoreOp)
 {
-    LUAU_ASSERT(restoreOp.kind != IrOpKind::None);
+    CODEGEN_ASSERT(restoreOp.kind != IrOpKind::None);
 
     switch (getCmdValueKind(inst.cmd))
     {
     case IrValueKind::Unknown:
     case IrValueKind::None:
-        LUAU_ASSERT(!"Invalid operand restore value kind");
+        CODEGEN_ASSERT(!"Invalid operand restore value kind");
         break;
     case IrValueKind::Tag:
         return restoreOp.kind == IrOpKind::VmReg ? luauRegTag(vmRegOp(restoreOp)) : luauConstantTag(vmConstOp(restoreOp));
     case IrValueKind::Int:
-        LUAU_ASSERT(restoreOp.kind == IrOpKind::VmReg);
+        CODEGEN_ASSERT(restoreOp.kind == IrOpKind::VmReg);
         return luauRegValueInt(vmRegOp(restoreOp));
     case IrValueKind::Pointer:
         return restoreOp.kind == IrOpKind::VmReg ? luauRegValue(vmRegOp(restoreOp)) : luauConstantValue(vmConstOp(restoreOp));
@@ -385,7 +385,7 @@ OperandX64 IrRegAllocX64::getRestoreAddress(const IrInst& inst, IrOp restoreOp)
         return restoreOp.kind == IrOpKind::VmReg ? luauReg(vmRegOp(restoreOp)) : luauConstant(vmConstOp(restoreOp));
     }
 
-    LUAU_ASSERT(!"Failed to find restore operand location");
+    CODEGEN_ASSERT(!"Failed to find restore operand location");
     return noreg;
 }
 
@@ -419,23 +419,23 @@ uint32_t IrRegAllocX64::findInstructionWithFurthestNextUse(const std::array<uint
 void IrRegAllocX64::assertFree(RegisterX64 reg) const
 {
     if (reg.size == SizeX64::xmmword)
-        LUAU_ASSERT(freeXmmMap[reg.index]);
+        CODEGEN_ASSERT(freeXmmMap[reg.index]);
     else
-        LUAU_ASSERT(freeGprMap[reg.index]);
+        CODEGEN_ASSERT(freeGprMap[reg.index]);
 }
 
 void IrRegAllocX64::assertAllFree() const
 {
     for (RegisterX64 reg : kGprAllocOrder)
-        LUAU_ASSERT(freeGprMap[reg.index]);
+        CODEGEN_ASSERT(freeGprMap[reg.index]);
 
     for (bool free : freeXmmMap)
-        LUAU_ASSERT(free);
+        CODEGEN_ASSERT(free);
 }
 
 void IrRegAllocX64::assertNoSpills() const
 {
-    LUAU_ASSERT(spills.empty());
+    CODEGEN_ASSERT(spills.empty());
 }
 
 ScopedRegX64::ScopedRegX64(IrRegAllocX64& owner)
@@ -465,19 +465,19 @@ ScopedRegX64::~ScopedRegX64()
 
 void ScopedRegX64::take(RegisterX64 reg)
 {
-    LUAU_ASSERT(this->reg == noreg);
+    CODEGEN_ASSERT(this->reg == noreg);
     this->reg = owner.takeReg(reg, kInvalidInstIdx);
 }
 
 void ScopedRegX64::alloc(SizeX64 size)
 {
-    LUAU_ASSERT(reg == noreg);
+    CODEGEN_ASSERT(reg == noreg);
     reg = owner.allocReg(size, kInvalidInstIdx);
 }
 
 void ScopedRegX64::free()
 {
-    LUAU_ASSERT(reg != noreg);
+    CODEGEN_ASSERT(reg != noreg);
     owner.freeReg(reg);
     reg = noreg;
 }
@@ -504,7 +504,7 @@ ScopedSpills::~ScopedSpills()
         IrSpillX64& spill = owner.spills[i];
 
         // Restoring spills inside this scope cannot create new spills
-        LUAU_ASSERT(spill.spillId < endSpillId);
+        CODEGEN_ASSERT(spill.spillId < endSpillId);
 
         // If spill was created inside current scope, it has to be restored
         if (spill.spillId >= startSpillId)
@@ -21,7 +21,7 @@ namespace CodeGen
 static void builtinCheckDouble(IrBuilder& build, IrOp arg, int pcpos)
 {
     if (arg.kind == IrOpKind::Constant)
-        LUAU_ASSERT(build.function.constOp(arg).kind == IrConstKind::Double);
+        CODEGEN_ASSERT(build.function.constOp(arg).kind == IrConstKind::Double);
     else
         build.loadAndCheckTag(arg, LUA_TNUMBER, build.vmExit(pcpos));
 }
@@ -227,7 +227,7 @@ static BuiltinImplResult translateBuiltinMathClamp(IrBuilder& build, int nparams
 
     IrOp block = build.block(IrBlockKind::Internal);
 
-    LUAU_ASSERT(args.kind == IrOpKind::VmReg);
+    CODEGEN_ASSERT(args.kind == IrOpKind::VmReg);
 
     builtinCheckDouble(build, build.vmReg(arg), pcpos);
     builtinCheckDouble(build, args, pcpos);
@@ -463,7 +463,7 @@ static BuiltinImplResult translateBuiltinBit32Extract(
     if (vb.kind == IrOpKind::Constant)
     {
         int f = int(build.function.doubleOp(vb));
-        LUAU_ASSERT(unsigned(f) < 32); // checked above
+        CODEGEN_ASSERT(unsigned(f) < 32); // checked above
 
         value = n;
 
@@ -658,7 +658,7 @@ static BuiltinImplResult translateBuiltinVector(IrBuilder& build, int nparams, i
     if (nparams < 3 || nresults > 1)
         return {BuiltinImplType::None, -1};
 
-    LUAU_ASSERT(LUA_VECTOR_SIZE == 3);
+    CODEGEN_ASSERT(LUA_VECTOR_SIZE == 3);
 
     builtinCheckDouble(build, build.vmReg(arg), pcpos);
     builtinCheckDouble(build, args, pcpos);
@@ -690,7 +690,7 @@ static BuiltinImplResult translateBuiltinTableInsert(IrBuilder& build, int npara
 
     if (args.kind == IrOpKind::Constant)
     {
-        LUAU_ASSERT(build.function.constOp(args).kind == IrConstKind::Double);
+        CODEGEN_ASSERT(build.function.constOp(args).kind == IrConstKind::Double);
 
         // No barrier necessary since numbers aren't collectable
         build.inst(IrCmd::STORE_DOUBLE, setnum, args);
@@ -702,7 +702,7 @@ static BuiltinImplResult translateBuiltinTableInsert(IrBuilder& build, int npara
         build.inst(IrCmd::STORE_TVALUE, setnum, va);
 
         // Compiler only generates FASTCALL*K for source-level constants, so dynamic imports are not affected
-        LUAU_ASSERT(build.function.proto);
+        CODEGEN_ASSERT(build.function.proto);
         IrOp argstag = args.kind == IrOpKind::VmConst ? build.constTag(build.function.proto->k[vmConstOp(args)].tt) : build.undef();
 
         build.inst(IrCmd::BARRIER_TABLE_FORWARD, table, args, argstag);
@@ -27,8 +27,8 @@ struct FallbackStreamScope
         : build(build)
         , next(next)
     {
-        LUAU_ASSERT(fallback.kind == IrOpKind::Block);
-        LUAU_ASSERT(next.kind == IrOpKind::Block);
+        CODEGEN_ASSERT(fallback.kind == IrOpKind::Block);
+        CODEGEN_ASSERT(next.kind == IrOpKind::Block);
 
         build.inst(IrCmd::JUMP, next);
         build.beginBlock(fallback);
@@ -55,10 +55,10 @@ static IrOp loadDoubleOrConstant(IrBuilder& build, IrOp arg)
 {
     if (arg.kind == IrOpKind::VmConst)
     {
-        LUAU_ASSERT(build.function.proto);
+        CODEGEN_ASSERT(build.function.proto);
         TValue protok = build.function.proto->k[vmConstOp(arg)];
 
-        LUAU_ASSERT(protok.tt == LUA_TNUMBER);
+        CODEGEN_ASSERT(protok.tt == LUA_TNUMBER);
 
         return build.constDouble(protok.value.n);
     }
@@ -312,10 +312,10 @@ void translateInstJumpxEqN(IrBuilder& build, const Instruction* pc, int pcpos)
     build.beginBlock(checkValue);
     IrOp va = build.inst(IrCmd::LOAD_DOUBLE, build.vmReg(ra));
 
-    LUAU_ASSERT(build.function.proto);
+    CODEGEN_ASSERT(build.function.proto);
     TValue protok = build.function.proto->k[aux & 0xffffff];
 
-    LUAU_ASSERT(protok.tt == LUA_TNUMBER);
+    CODEGEN_ASSERT(protok.tt == LUA_TNUMBER);
     IrOp vb = build.constDouble(protok.value.n);
 
     build.inst(IrCmd::JUMP_CMP_NUM, va, vb, build.cond(IrCondition::NotEqual), not_ ? target : next, not_ ? next : target);
@@ -468,10 +468,10 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc,
     {
         if (opb.kind == IrOpKind::VmConst)
         {
-            LUAU_ASSERT(build.function.proto);
+            CODEGEN_ASSERT(build.function.proto);
             TValue protok = build.function.proto->k[vmConstOp(opb)];
 
-            LUAU_ASSERT(protok.tt == LUA_TNUMBER);
+            CODEGEN_ASSERT(protok.tt == LUA_TNUMBER);
 
             vb = build.constDouble(protok.value.n);
         }
@@ -483,10 +483,10 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc,
 
         if (opc.kind == IrOpKind::VmConst)
         {
-            LUAU_ASSERT(build.function.proto);
+            CODEGEN_ASSERT(build.function.proto);
             TValue protok = build.function.proto->k[vmConstOp(opc)];
 
-            LUAU_ASSERT(protok.tt == LUA_TNUMBER);
+            CODEGEN_ASSERT(protok.tt == LUA_TNUMBER);
 
             // VM has special cases for exponentiation with constants
             if (tm == TM_POW && protok.value.n == 0.5)
@@ -505,7 +505,7 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc,
 
     if (result.kind == IrOpKind::None)
     {
-        LUAU_ASSERT(vc.kind != IrOpKind::None);
+        CODEGEN_ASSERT(vc.kind != IrOpKind::None);
 
         switch (tm)
         {
@@ -531,7 +531,7 @@ static void translateInstBinaryNumeric(IrBuilder& build, int ra, int rb, int rc,
             result = build.inst(IrCmd::INVOKE_LIBM, build.constUint(LBF_MATH_POW), vb, vc);
             break;
         default:
-            LUAU_ASSERT(!"Unsupported binary op");
+            CODEGEN_ASSERT(!"Unsupported binary op");
         }
     }
 
@@ -717,7 +717,7 @@ IrOp translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool
     int skip = LUAU_INSN_C(*pc);
 
     Instruction call = pc[skip + 1];
-    LUAU_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
+    CODEGEN_ASSERT(LUAU_INSN_OP(call) == LOP_CALL);
     int ra = LUAU_INSN_A(call);
 
     int nparams = customParams ? customParamCount : LUAU_INSN_B(call) - 1;
@@ -729,7 +729,7 @@ IrOp translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool
 
     if (customArgs.kind == IrOpKind::VmConst)
     {
-        LUAU_ASSERT(build.function.proto);
+        CODEGEN_ASSERT(build.function.proto);
         TValue protok = build.function.proto->k[vmConstOp(customArgs)];
 
         if (protok.tt == LUA_TNUMBER)
@@ -746,7 +746,7 @@ IrOp translateFastCallN(IrBuilder& build, const Instruction* pc, int pcpos, bool
 
     if (br.type != BuiltinImplType::None)
     {
-        LUAU_ASSERT(nparams != LUA_MULTRET && "builtins are not allowed to handle variadic arguments");
+        CODEGEN_ASSERT(nparams != LUA_MULTRET && "builtins are not allowed to handle variadic arguments");
 
         if (nresults == LUA_MULTRET)
             build.inst(IrCmd::ADJUST_STACK_TO_REG, build.vmReg(ra), build.constInt(br.actualResultCount));
@@ -808,7 +808,7 @@ void beforeInstForNPrep(IrBuilder& build, const Instruction* pc, int pcpos)
 
 void afterInstForNLoop(IrBuilder& build, const Instruction* pc)
 {
-    LUAU_ASSERT(!build.numericLoopStack.empty());
+    CODEGEN_ASSERT(!build.numericLoopStack.empty());
     build.numericLoopStack.pop_back();
 }
 
@@ -819,7 +819,7 @@ void translateInstForNPrep(IrBuilder& build, const Instruction* pc, int pcpos)
     IrOp loopStart = build.blockAtInst(pcpos + getOpLength(LuauOpcode(LUAU_INSN_OP(*pc))));
     IrOp loopExit = build.blockAtInst(getJumpTarget(*pc, pcpos));
 
-    LUAU_ASSERT(!build.numericLoopStack.empty());
+    CODEGEN_ASSERT(!build.numericLoopStack.empty());
     IrOp stepK = build.numericLoopStack.back().step;
 
     // When loop parameters are not numbers, VM tries to perform type coercion from string and raises an exception if that fails
@@ -872,7 +872,7 @@ void translateInstForNLoop(IrBuilder& build, const Instruction* pc, int pcpos)
     IrOp loopRepeat = build.blockAtInst(repeatJumpTarget);
     IrOp loopExit = build.blockAtInst(pcpos + getOpLength(LuauOpcode(LUAU_INSN_OP(*pc))));
 
-    LUAU_ASSERT(!build.numericLoopStack.empty());
+    CODEGEN_ASSERT(!build.numericLoopStack.empty());
     IrBuilder::LoopInfo loopInfo = build.numericLoopStack.back();
 
     // normally, the interrupt is placed at the beginning of the loop body by FORNPREP translation
@@ -979,7 +979,7 @@ void translateInstForGPrepInext(IrBuilder& build, const Instruction* pc, int pcp
 void translateInstForGLoopIpairs(IrBuilder& build, const Instruction* pc, int pcpos)
 {
     int ra = LUAU_INSN_A(*pc);
-    LUAU_ASSERT(int(pc[1]) < 0);
+    CODEGEN_ASSERT(int(pc[1]) < 0);
 
     IrOp loopRepeat = build.blockAtInst(getJumpTarget(*pc, pcpos));
     IrOp loopExit = build.blockAtInst(pcpos + getOpLength(LuauOpcode(LUAU_INSN_OP(*pc))));
@@ -1376,7 +1376,7 @@ void translateInstCapture(IrBuilder& build, const Instruction* pc, int pcpos)
         build.inst(IrCmd::CAPTURE, build.vmUpvalue(index), build.constUint(0));
         break;
     default:
-        LUAU_ASSERT(!"Unknown upvalue capture type");
+        CODEGEN_ASSERT(!"Unknown upvalue capture type");
     }
 }
 
@@ -1394,7 +1394,7 @@ void translateInstNamecall(IrBuilder& build, const Instruction* pc, int pcpos)
     build.loadAndCheckTag(build.vmReg(rb), LUA_TTABLE, fallback);
     IrOp table = build.inst(IrCmd::LOAD_POINTER, build.vmReg(rb));
 
-    LUAU_ASSERT(build.function.proto);
+    CODEGEN_ASSERT(build.function.proto);
     IrOp addrNodeEl = build.inst(IrCmd::GET_HASH_NODE_ADDR, table, build.constUint(tsvalue(&build.function.proto->k[aux])->hash));
 
     // We use 'jump' version instead of 'check' guard because we are jumping away into a non-fallback block
@@ -1506,7 +1506,7 @@ void translateInstOrX(IrBuilder& build, const Instruction* pc, int pcpos, IrOp c
 
 void translateInstNewClosure(IrBuilder& build, const Instruction* pc, int pcpos)
 {
-    LUAU_ASSERT(unsigned(LUAU_INSN_D(*pc)) < unsigned(build.function.proto->sizep));
+    CODEGEN_ASSERT(unsigned(LUAU_INSN_D(*pc)) < unsigned(build.function.proto->sizep));
 
     int ra = LUAU_INSN_A(*pc);
     Proto* pv = build.function.proto->p[LUAU_INSN_D(*pc)];
@@ -1522,7 +1522,7 @@ void translateInstNewClosure(IrBuilder& build, const Instruction* pc, int pcpos)
     for (int ui = 0; ui < pv->nups; ++ui)
     {
         Instruction uinsn = pc[ui + 1];
-        LUAU_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);
+        CODEGEN_ASSERT(LUAU_INSN_OP(uinsn) == LOP_CAPTURE);
 
         switch (LUAU_INSN_A(uinsn))
         {
@@ -1553,7 +1553,7 @@ void translateInstNewClosure(IrBuilder& build, const Instruction* pc, int pcpos)
         }
 
         default:
-            LUAU_ASSERT(!"Unknown upvalue capture type");
+            CODEGEN_ASSERT(!"Unknown upvalue capture type");
             LUAU_UNREACHABLE(); // improves switch() codegen by eliding opcode bounds checks
         }
     }
@@ -208,7 +208,7 @@ static void removeInstUse(IrFunction& function, uint32_t instIdx)
 {
     IrInst& inst = function.instructions[instIdx];
 
-    LUAU_ASSERT(inst.useCount);
+    CODEGEN_ASSERT(inst.useCount);
     inst.useCount--;
 
     if (inst.useCount == 0)
@@ -219,7 +219,7 @@ static void removeBlockUse(IrFunction& function, uint32_t blockIdx)
 {
     IrBlock& block = function.blocks[blockIdx];
 
-    LUAU_ASSERT(block.useCount);
+    CODEGEN_ASSERT(block.useCount);
     block.useCount--;
 
     // Entry block is never removed because is has an implicit use
@@ -245,7 +245,7 @@ void removeUse(IrFunction& function, IrOp op)
 
 bool isGCO(uint8_t tag)
 {
-    LUAU_ASSERT(tag < LUA_T_COUNT);
+    CODEGEN_ASSERT(tag < LUA_T_COUNT);
 
     // mirrors iscollectable(o) from VM/lobject.h
     return tag >= LUA_TSTRING;
@@ -253,7 +253,7 @@ bool isGCO(uint8_t tag)
 
 void kill(IrFunction& function, IrInst& inst)
 {
-    LUAU_ASSERT(inst.useCount == 0);
+    CODEGEN_ASSERT(inst.useCount == 0);
 
     inst.cmd = IrCmd::NOP;
 
@@ -277,7 +277,7 @@ void kill(IrFunction& function, uint32_t start, uint32_t end)
     // Kill instructions in reverse order to avoid killing instructions that are still marked as used
     for (int i = int(end); i >= int(start); i--)
     {
-        LUAU_ASSERT(unsigned(i) < function.instructions.size());
+        CODEGEN_ASSERT(unsigned(i) < function.instructions.size());
         IrInst& curr = function.instructions[i];
 
         if (curr.cmd == IrCmd::NOP)
@@ -289,7 +289,7 @@ void kill(IrFunction& function, uint32_t start, uint32_t end)
 
 void kill(IrFunction& function, IrBlock& block)
 {
-    LUAU_ASSERT(block.useCount == 0);
+    CODEGEN_ASSERT(block.useCount == 0);
 
     block.kind = IrBlockKind::Dead;
 
@@ -326,8 +326,8 @@ void replace(IrFunction& function, IrBlock& block, uint32_t instIdx, IrInst repl
     if (!isBlockTerminator(inst.cmd) && isBlockTerminator(replacement.cmd))
     {
         // Block has has to be fully constructed before replacement is performed
-        LUAU_ASSERT(block.finish != ~0u);
-        LUAU_ASSERT(instIdx + 1 <= block.finish);
+        CODEGEN_ASSERT(block.finish != ~0u);
+        CODEGEN_ASSERT(instIdx + 1 <= block.finish);
 
         kill(function, instIdx + 1, block.finish);
 
@@ -353,7 +353,7 @@ void replace(IrFunction& function, IrBlock& block, uint32_t instIdx, IrInst repl
 
 void substitute(IrFunction& function, IrInst& inst, IrOp replacement)
 {
-    LUAU_ASSERT(!isBlockTerminator(inst.cmd));
+    CODEGEN_ASSERT(!isBlockTerminator(inst.cmd));
 
     inst.cmd = IrCmd::SUBSTITUTE;
 
@@ -389,12 +389,12 @@ void applySubstitutions(IrFunction& function, IrOp& op)
     if (op.kind == IrOpKind::Inst)
     {
         IrInst& dst = function.instructions[op.index];
-        LUAU_ASSERT(dst.cmd != IrCmd::SUBSTITUTE && "chained substitutions are not allowed");
+        CODEGEN_ASSERT(dst.cmd != IrCmd::SUBSTITUTE && "chained substitutions are not allowed");
 
         dst.useCount++;
     }
 
-    LUAU_ASSERT(src.useCount > 0);
+    CODEGEN_ASSERT(src.useCount > 0);
     src.useCount--;
 
     if (src.useCount == 0)
@@ -443,7 +443,7 @@ bool compare(double a, double b, IrCondition cond)
     case IrCondition::NotGreaterEqual:
         return !bool(a >= b);
     default:
-        LUAU_ASSERT(!"Unsupported condition");
+        CODEGEN_ASSERT(!"Unsupported condition");
     }
 
     return false;
@@ -482,7 +482,7 @@ bool compare(int a, int b, IrCondition cond)
     case IrCondition::UnsignedGreaterEqual:
         return unsigned(a) >= unsigned(b);
     default:
-        LUAU_ASSERT(!"Unsupported condition");
+        CODEGEN_ASSERT(!"Unsupported condition");
     }
 
     return false;
@@ -871,7 +871,7 @@ uint32_t getNativeContextOffset(int bfid)
     case LBF_MATH_LDEXP:
         return offsetof(NativeContext, libm_ldexp);
     default:
-        LUAU_ASSERT(!"Unsupported bfid");
+        CODEGEN_ASSERT(!"Unsupported bfid");
     }
 
     return 0;
@@ -140,12 +140,12 @@ void IrValueLocationTracking::beforeInstLowering(IrInst& inst)
 
     default:
         // All instructions which reference registers have to be handled explicitly
-        LUAU_ASSERT(inst.a.kind != IrOpKind::VmReg);
-        LUAU_ASSERT(inst.b.kind != IrOpKind::VmReg);
-        LUAU_ASSERT(inst.c.kind != IrOpKind::VmReg);
-        LUAU_ASSERT(inst.d.kind != IrOpKind::VmReg);
-        LUAU_ASSERT(inst.e.kind != IrOpKind::VmReg);
-        LUAU_ASSERT(inst.f.kind != IrOpKind::VmReg);
+        CODEGEN_ASSERT(inst.a.kind != IrOpKind::VmReg);
+        CODEGEN_ASSERT(inst.b.kind != IrOpKind::VmReg);
+        CODEGEN_ASSERT(inst.c.kind != IrOpKind::VmReg);
+        CODEGEN_ASSERT(inst.d.kind != IrOpKind::VmReg);
+        CODEGEN_ASSERT(inst.e.kind != IrOpKind::VmReg);
+        CODEGEN_ASSERT(inst.f.kind != IrOpKind::VmReg);
         break;
     }
 }
@@ -232,7 +232,7 @@ void IrValueLocationTracking::invalidateRestoreOp(IrOp location, bool skipValueI
     }
     else if (location.kind == IrOpKind::VmConst)
     {
-        LUAU_ASSERT(!"VM constants are immutable");
+        CODEGEN_ASSERT(!"VM constants are immutable");
     }
 }
@ -17,7 +17,6 @@
|
|||||||
LUAU_FASTINTVARIABLE(LuauCodeGenMinLinearBlockPath, 3)
|
LUAU_FASTINTVARIABLE(LuauCodeGenMinLinearBlockPath, 3)
|
||||||
LUAU_FASTINTVARIABLE(LuauCodeGenReuseSlotLimit, 64)
|
LUAU_FASTINTVARIABLE(LuauCodeGenReuseSlotLimit, 64)
|
||||||
LUAU_FASTFLAGVARIABLE(DebugLuauAbortingChecks, false)
|
LUAU_FASTFLAGVARIABLE(DebugLuauAbortingChecks, false)
|
||||||
LUAU_FASTFLAGVARIABLE(LuauReuseBufferChecks, false)
|
|
||||||
LUAU_FASTFLAG(LuauCodegenVector)
|
LUAU_FASTFLAG(LuauCodegenVector)
|
||||||
LUAU_DYNAMIC_FASTFLAGVARIABLE(LuauCodeGenCheckGcEffectFix, false)
|
LUAU_DYNAMIC_FASTFLAGVARIABLE(LuauCodeGenCheckGcEffectFix, false)
|
||||||
|
|
||||||
@ -94,7 +93,7 @@ struct ConstPropState
|
|||||||
|
|
||||||
void saveValue(IrOp op, IrOp value)
|
void saveValue(IrOp op, IrOp value)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(value.kind == IrOpKind::Constant);
|
CODEGEN_ASSERT(value.kind == IrOpKind::Constant);
|
||||||
|
|
||||||
if (RegisterInfo* info = tryGetRegisterInfo(op))
|
if (RegisterInfo* info = tryGetRegisterInfo(op))
|
||||||
{
|
{
|
||||||
@ -240,7 +239,7 @@ struct ConstPropState
|
|||||||
|
|
||||||
void createRegLink(uint32_t instIdx, IrOp regOp)
|
void createRegLink(uint32_t instIdx, IrOp regOp)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(!instLink.contains(instIdx));
|
CODEGEN_ASSERT(!instLink.contains(instIdx));
|
||||||
instLink[instIdx] = RegisterLink{uint8_t(vmRegOp(regOp)), regs[vmRegOp(regOp)].version};
|
instLink[instIdx] = RegisterLink{uint8_t(vmRegOp(regOp)), regs[vmRegOp(regOp)].version};
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -282,16 +281,16 @@ struct ConstPropState
|
|||||||
// This is used to allow instructions with register references to be compared for equality
|
// This is used to allow instructions with register references to be compared for equality
|
||||||
IrInst versionedVmRegLoad(IrCmd loadCmd, IrOp op)
|
IrInst versionedVmRegLoad(IrCmd loadCmd, IrOp op)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(op.kind == IrOpKind::VmReg);
|
CODEGEN_ASSERT(op.kind == IrOpKind::VmReg);
|
||||||
uint32_t version = regs[vmRegOp(op)].version;
|
uint32_t version = regs[vmRegOp(op)].version;
|
||||||
LUAU_ASSERT(version <= 0xffffff);
|
CODEGEN_ASSERT(version <= 0xffffff);
|
||||||
op.index = vmRegOp(op) | (version << 8);
|
op.index = vmRegOp(op) | (version << 8);
|
||||||
return IrInst{loadCmd, op};
|
return IrInst{loadCmd, op};
|
||||||
}
|
}
|
||||||
|
|
||||||
uint32_t* getPreviousInstIndex(const IrInst& inst)
|
uint32_t* getPreviousInstIndex(const IrInst& inst)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(useValueNumbering);
|
CODEGEN_ASSERT(useValueNumbering);
|
||||||
|
|
||||||
if (uint32_t* prevIdx = valueMap.find(inst))
|
if (uint32_t* prevIdx = valueMap.find(inst))
|
||||||
{
|
{
|
||||||
@ -305,7 +304,7 @@ struct ConstPropState
|
|||||||
|
|
||||||
uint32_t* getPreviousVersionedLoadIndex(IrCmd cmd, IrOp vmReg)
|
uint32_t* getPreviousVersionedLoadIndex(IrCmd cmd, IrOp vmReg)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(vmReg.kind == IrOpKind::VmReg);
|
CODEGEN_ASSERT(vmReg.kind == IrOpKind::VmReg);
|
||||||
return getPreviousInstIndex(versionedVmRegLoad(cmd, vmReg));
|
return getPreviousInstIndex(versionedVmRegLoad(cmd, vmReg));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -352,7 +351,7 @@ struct ConstPropState
|
|||||||
// If there is no previous load, we record the current one for future lookups
|
// If there is no previous load, we record the current one for future lookups
|
||||||
void substituteOrRecordVmRegLoad(IrInst& loadInst)
|
void substituteOrRecordVmRegLoad(IrInst& loadInst)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(loadInst.a.kind == IrOpKind::VmReg);
|
CODEGEN_ASSERT(loadInst.a.kind == IrOpKind::VmReg);
|
||||||
|
|
||||||
if (!useValueNumbering)
|
if (!useValueNumbering)
|
||||||
return;
|
return;
|
||||||
@ -388,8 +387,8 @@ struct ConstPropState
|
|||||||
// VM register loads can use the value that was stored in the same Vm register earlier
|
// VM register loads can use the value that was stored in the same Vm register earlier
|
||||||
void forwardVmRegStoreToLoad(const IrInst& storeInst, IrCmd loadCmd)
|
void forwardVmRegStoreToLoad(const IrInst& storeInst, IrCmd loadCmd)
|
||||||
{
|
{
|
||||||
LUAU_ASSERT(storeInst.a.kind == IrOpKind::VmReg);
|
CODEGEN_ASSERT(storeInst.a.kind == IrOpKind::VmReg);
|
||||||
LUAU_ASSERT(storeInst.b.kind == IrOpKind::Inst);
|
CODEGEN_ASSERT(storeInst.b.kind == IrOpKind::Inst);
|
||||||
|
|
||||||
if (!useValueNumbering)
|
if (!useValueNumbering)
|
||||||
return;
|
return;
|
||||||
@@ -694,7 +693,7 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
 {
     if (inst.a.kind == IrOpKind::VmReg)
     {
-        if (FFlag::LuauReuseBufferChecks && inst.b.kind == IrOpKind::Inst)
+        if (inst.b.kind == IrOpKind::Inst)
         {
             if (uint32_t* prevIdx = state.getPreviousVersionedLoadIndex(IrCmd::LOAD_TVALUE, inst.a))
             {

@@ -964,12 +963,9 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
         break;
     case IrCmd::CHECK_BUFFER_LEN:
     {
-        if (!FFlag::LuauReuseBufferChecks)
-            break;
-
         std::optional<int> bufferOffset = function.asIntOp(inst.b.kind == IrOpKind::Constant ? inst.b : state.tryGetValue(inst.b));
         int accessSize = function.intOp(inst.c);
-        LUAU_ASSERT(accessSize > 0);
+        CODEGEN_ASSERT(accessSize > 0);
 
         if (bufferOffset)
         {

@@ -1003,8 +999,8 @@ static void constPropInInst(ConstPropState& state, IrBuilder& build, IrFunction&
         int prevBound = function.intOp(prev.b);
 
         // Negative and overflowing constant offsets should already be replaced with unconditional jumps to a fallback
-        LUAU_ASSERT(currBound >= 0);
-        LUAU_ASSERT(prevBound >= 0);
+        CODEGEN_ASSERT(currBound >= 0);
+        CODEGEN_ASSERT(prevBound >= 0);
 
         if (unsigned(currBound) >= unsigned(prevBound))
             replace(function, prev.b, inst.b);
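The CHECK_BUFFER_LEN hunks above are the now-unflagged buffer-check reuse: when a later check on the same buffer needs a bound at least as large as an earlier one, the earlier check's bound is raised in place (the replace(function, prev.b, inst.b) call), and the later check becomes redundant. A hedged standalone sketch of that policy, with illustrative names rather than the real IR types:

#include <cassert>

struct BufferCheck
{
    int bound; // largest offset + accessSize this check guards against the buffer length
};

// Folds the bound required by the current access into a previous check on the
// same buffer; after this call the current check can be dropped in either case
void mergeBufferChecks(BufferCheck& prev, int currBound)
{
    // negative bounds should have been rewritten to unconditional fallback jumps earlier
    assert(currBound >= 0 && prev.bound >= 0);

    if (unsigned(currBound) >= unsigned(prev.bound))
        prev.bound = currBound; // widen the dominating check to cover the new access
}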
@@ -1396,7 +1392,7 @@ static void constPropInBlock(IrBuilder& build, IrBlock& block, ConstPropState& s
 
     for (uint32_t index = block.start; index <= block.finish; index++)
     {
-        LUAU_ASSERT(index < function.instructions.size());
+        CODEGEN_ASSERT(index < function.instructions.size());
         IrInst& inst = function.instructions[index];
 
         applySubstitutions(function, inst);

@@ -1419,7 +1415,7 @@ static void constPropInBlockChain(IrBuilder& build, std::vector<uint8_t>& visite
     while (block)
     {
         uint32_t blockIdx = function.getBlockIndex(*block);
-        LUAU_ASSERT(!visited[blockIdx]);
+        CODEGEN_ASSERT(!visited[blockIdx]);
         visited[blockIdx] = true;
 
         constPropInBlock(build, *block, state);

@@ -1474,7 +1470,7 @@ static std::vector<uint32_t> collectDirectBlockJumpPath(IrFunction& function, st
     // Usually that would mean that we would have a conditional jump at the end of 'block'
     // But using check guards and fallback blocks it becomes a possible setup
     // We avoid this by making sure fallbacks rejoin the other immediate successor of 'block'
-    LUAU_ASSERT(getLiveOutValueCount(function, *block) == 0);
+    CODEGEN_ASSERT(getLiveOutValueCount(function, *block) == 0);
 
     std::vector<uint32_t> path;
 

@@ -1516,7 +1512,7 @@ static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited
     IrFunction& function = build.function;
 
     uint32_t blockIdx = function.getBlockIndex(startingBlock);
-    LUAU_ASSERT(!visited[blockIdx]);
+    CODEGEN_ASSERT(!visited[blockIdx]);
     visited[blockIdx] = true;
 
     IrInst& termInst = function.instructions[startingBlock.finish];

@@ -1549,7 +1545,7 @@ static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited
     constPropInBlock(build, startingBlock, state);
 
     // Verify that target hasn't changed
-    LUAU_ASSERT(function.instructions[startingBlock.finish].a.index == targetBlockIdx);
+    CODEGEN_ASSERT(function.instructions[startingBlock.finish].a.index == targetBlockIdx);
 
     // Note: using startingBlock after this line is unsafe as the reference may be reallocated by build.block() below
     const uint32_t startingSortKey = startingBlock.sortkey;

@@ -1580,8 +1576,8 @@ static void tryCreateLinearBlock(IrBuilder& build, std::vector<uint8_t>& visited
     // The information generated here is consistent with current state that could be outdated, but still useful in IR inspection
     if (function.cfg.in.size() == newBlock.index)
     {
-        LUAU_ASSERT(function.cfg.in.size() == function.cfg.out.size());
-        LUAU_ASSERT(function.cfg.in.size() == function.cfg.def.size());
+        CODEGEN_ASSERT(function.cfg.in.size() == function.cfg.out.size());
+        CODEGEN_ASSERT(function.cfg.in.size() == function.cfg.def.size());
 
         // Live in is the same as the input of the original first block
         function.cfg.in.push_back(function.cfg.in[path.front()]);

@@ -17,11 +17,11 @@ namespace CodeGen
 // This pass might not be useful on different architectures
 static void optimizeMemoryOperandsX64(IrFunction& function, IrBlock& block)
 {
-    LUAU_ASSERT(block.kind != IrBlockKind::Dead);
+    CODEGEN_ASSERT(block.kind != IrBlockKind::Dead);
 
     for (uint32_t index = block.start; index <= block.finish; index++)
     {
-        LUAU_ASSERT(index < function.instructions.size());
+        CODEGEN_ASSERT(index < function.instructions.size());
         IrInst& inst = function.instructions[index];
 
         switch (inst.cmd)

@@ -81,7 +81,7 @@ static uint8_t* defineCfaExpressionOffset(uint8_t* pos, uint32_t stackOffset)
 
 static uint8_t* defineSavedRegisterLocation(uint8_t* pos, int dwReg, uint32_t stackOffset)
 {
-    LUAU_ASSERT(stackOffset % kDataAlignFactor == 0 && "stack offsets have to be measured in kDataAlignFactor units");
+    CODEGEN_ASSERT(stackOffset % kDataAlignFactor == 0 && "stack offsets have to be measured in kDataAlignFactor units");
 
     if (dwReg <= 0x3f)
     {
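The dwReg <= 0x3f branch above follows from how DWARF call-frame instructions are packed. A standalone sketch (not the actual UnwindBuilderDwarf2 code) of the encoding: DW_CFA_offset stores the register number in the low 6 bits of a single opcode byte, so registers above 63 need the extended form, and the offset follows as a ULEB128 count of data-alignment units.

#include <cstdint>
#include <vector>

constexpr uint8_t kDwCfaOffsetHigh = 0x80;     // DW_CFA_offset: high bits 0b10, register in low 6 bits
constexpr uint8_t kDwCfaOffsetExtended = 0x05; // DW_CFA_offset_extended: register as a separate ULEB128

void writeUleb128(std::vector<uint8_t>& out, uint32_t value)
{
    do
    {
        uint8_t byte = value & 0x7f;
        value >>= 7;
        if (value != 0)
            byte |= 0x80; // continuation bit
        out.push_back(byte);
    } while (value != 0);
}

void emitSavedRegister(std::vector<uint8_t>& out, uint32_t dwReg, uint32_t alignedOffset)
{
    if (dwReg <= 0x3f)
    {
        out.push_back(uint8_t(kDwCfaOffsetHigh | dwReg)); // compact one-byte form
    }
    else
    {
        out.push_back(kDwCfaOffsetExtended);
        writeUleb128(out, dwReg);
    }
    writeUleb128(out, alignedOffset); // offset measured in data-alignment-factor units
}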
@@ -99,7 +99,7 @@ static uint8_t* defineSavedRegisterLocation(uint8_t* pos, int dwReg, uint32_t st
 
 static uint8_t* advanceLocation(uint8_t* pos, unsigned int offset)
 {
-    LUAU_ASSERT(offset < 256);
+    CODEGEN_ASSERT(offset < 256);
     pos = writeu8(pos, DW_CFA_advance_loc1);
     pos = writeu8(pos, offset);
     return pos;

@@ -133,7 +133,7 @@ size_t UnwindBuilderDwarf2::getBeginOffset() const
 
 void UnwindBuilderDwarf2::startInfo(Arch arch)
 {
-    LUAU_ASSERT(arch == A64 || arch == X64);
+    CODEGEN_ASSERT(arch == A64 || arch == X64);
 
     uint8_t* cieLength = pos;
     pos = writeu32(pos, 0); // Length (to be filled later)

@@ -191,7 +191,7 @@ void UnwindBuilderDwarf2::finishFunction(uint32_t beginOffset, uint32_t endOffse
     unwindFunctions.back().beginOffset = beginOffset;
     unwindFunctions.back().endOffset = endOffset;
 
-    LUAU_ASSERT(fdeEntryStart != nullptr);
+    CODEGEN_ASSERT(fdeEntryStart != nullptr);
 
     pos = alignPosition(fdeEntryStart, pos);
     writeu32(fdeEntryStart, unsigned(pos - fdeEntryStart - 4)); // Length field itself is excluded from length

@@ -202,14 +202,14 @@ void UnwindBuilderDwarf2::finishInfo()
     // Terminate section
     pos = writeu32(pos, 0);
 
-    LUAU_ASSERT(getSize() <= kRawDataLimit);
+    CODEGEN_ASSERT(getSize() <= kRawDataLimit);
 }
 
 void UnwindBuilderDwarf2::prologueA64(uint32_t prologueSize, uint32_t stackSize, std::initializer_list<A64::RegisterA64> regs)
 {
-    LUAU_ASSERT(stackSize % 16 == 0);
-    LUAU_ASSERT(regs.size() >= 2 && regs.begin()[0] == A64::x29 && regs.begin()[1] == A64::x30);
-    LUAU_ASSERT(regs.size() * 8 <= stackSize);
+    CODEGEN_ASSERT(stackSize % 16 == 0);
+    CODEGEN_ASSERT(regs.size() >= 2 && regs.begin()[0] == A64::x29 && regs.begin()[1] == A64::x30);
+    CODEGEN_ASSERT(regs.size() * 8 <= stackSize);
 
     // sub sp, sp, stackSize
     pos = advanceLocation(pos, 4);

@@ -220,7 +220,7 @@ void UnwindBuilderDwarf2::prologueA64(uint32_t prologueSize, uint32_t stackSize,
 
     for (size_t i = 0; i < regs.size(); ++i)
     {
-        LUAU_ASSERT(regs.begin()[i].kind == A64::KindA64::x);
+        CODEGEN_ASSERT(regs.begin()[i].kind == A64::KindA64::x);
         pos = defineSavedRegisterLocation(pos, regs.begin()[i].index, stackSize - unsigned(i * 8));
     }
 }

@@ -228,7 +228,7 @@ void UnwindBuilderDwarf2::prologueA64(uint32_t prologueSize, uint32_t stackSize,
 void UnwindBuilderDwarf2::prologueX64(uint32_t prologueSize, uint32_t stackSize, bool setupFrame, std::initializer_list<X64::RegisterX64> gpr,
     const std::vector<X64::RegisterX64>& simd)
 {
-    LUAU_ASSERT(stackSize > 0 && stackSize < 4096 && stackSize % 8 == 0);
+    CODEGEN_ASSERT(stackSize > 0 && stackSize < 4096 && stackSize % 8 == 0);
 
     unsigned int stackOffset = 8; // Return address was pushed by calling the function
     unsigned int prologueOffset = 0;

@@ -250,7 +250,7 @@ void UnwindBuilderDwarf2::prologueX64(uint32_t prologueSize, uint32_t stackSize,
     // push reg
     for (X64::RegisterX64 reg : gpr)
     {
-        LUAU_ASSERT(reg.size == X64::SizeX64::qword);
+        CODEGEN_ASSERT(reg.size == X64::SizeX64::qword);
 
         stackOffset += 8;
         prologueOffset += 2;

@@ -259,7 +259,7 @@ void UnwindBuilderDwarf2::prologueX64(uint32_t prologueSize, uint32_t stackSize,
         pos = defineSavedRegisterLocation(pos, regIndexToDwRegX64[reg.index], stackOffset);
     }
 
-    LUAU_ASSERT(simd.empty());
+    CODEGEN_ASSERT(simd.empty());
 
     // sub rsp, stackSize
     stackOffset += stackSize;

@@ -267,8 +267,8 @@ void UnwindBuilderDwarf2::prologueX64(uint32_t prologueSize, uint32_t stackSize,
     pos = advanceLocation(pos, 4);
     pos = defineCfaExpressionOffset(pos, stackOffset);
 
-    LUAU_ASSERT(stackOffset % 16 == 0);
-    LUAU_ASSERT(prologueOffset == prologueSize);
+    CODEGEN_ASSERT(stackOffset % 16 == 0);
+    CODEGEN_ASSERT(prologueOffset == prologueSize);
 }
 
 size_t UnwindBuilderDwarf2::getSize() const

@@ -33,7 +33,7 @@ size_t UnwindBuilderWin::getBeginOffset() const
 
 void UnwindBuilderWin::startInfo(Arch arch)
 {
-    LUAU_ASSERT(arch == X64);
+    CODEGEN_ASSERT(arch == X64);
 }
 
 void UnwindBuilderWin::startFunction()

@@ -61,7 +61,7 @@ void UnwindBuilderWin::finishFunction(uint32_t beginOffset, uint32_t endOffset)
     unwindFunctions.back().endOffset = endOffset;
 
     // Windows unwind code count is stored in uint8_t, so we can't have more
-    LUAU_ASSERT(unwindCodes.size() < 256);
+    CODEGEN_ASSERT(unwindCodes.size() < 256);
 
     UnwindInfoWin info;
     info.version = 1;

@@ -69,13 +69,13 @@ void UnwindBuilderWin::finishFunction(uint32_t beginOffset, uint32_t endOffset)
     info.prologsize = prologSize;
     info.unwindcodecount = uint8_t(unwindCodes.size());
 
-    LUAU_ASSERT(frameReg.index < 16);
+    CODEGEN_ASSERT(frameReg.index < 16);
     info.framereg = frameReg.index;
 
-    LUAU_ASSERT(frameRegOffset < 16);
+    CODEGEN_ASSERT(frameRegOffset < 16);
     info.frameregoff = frameRegOffset;
 
-    LUAU_ASSERT(rawDataPos + sizeof(info) <= rawData + kRawDataLimit);
+    CODEGEN_ASSERT(rawDataPos + sizeof(info) <= rawData + kRawDataLimit);
     memcpy(rawDataPos, &info, sizeof(info));
     rawDataPos += sizeof(info);
 
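The field-range asserts above (frameReg.index < 16, frameRegOffset < 16, unwindCodes.size() < 256) mirror how tightly the Windows x64 unwind header is packed. Below is a standalone sketch of the documented UNWIND_INFO layout; it is an illustration, not Luau's actual UnwindInfoWin definition.

#include <cstdint>

struct UnwindInfoHeader
{
    uint8_t versionAndFlags;   // version in the low 3 bits, flags in the high 5 bits
    uint8_t sizeOfProlog;      // prologue size in bytes, so prologues must stay under 256 bytes
    uint8_t countOfCodes;      // number of unwind code slots, hence the < 256 assert
    uint8_t frameRegAndOffset; // frame register in the low 4 bits, scaled offset in the high 4 bits
};

static_assert(sizeof(UnwindInfoHeader) == 4, "the header must stay densely packed");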
@@ -84,7 +84,7 @@ void UnwindBuilderWin::finishFunction(uint32_t beginOffset, uint32_t endOffset)
     // Copy unwind codes in reverse order
     // Some unwind codes take up two array slots, we write those in reverse order
     uint8_t* unwindCodePos = rawDataPos + sizeof(UnwindCodeWin) * (unwindCodes.size() - 1);
-    LUAU_ASSERT(unwindCodePos <= rawData + kRawDataLimit);
+    CODEGEN_ASSERT(unwindCodePos <= rawData + kRawDataLimit);
 
     for (size_t i = 0; i < unwindCodes.size(); i++)
     {

@@ -99,21 +99,21 @@ void UnwindBuilderWin::finishFunction(uint32_t beginOffset, uint32_t endOffset)
     if (unwindCodes.size() % 2 != 0)
         rawDataPos += sizeof(UnwindCodeWin);
 
-    LUAU_ASSERT(rawDataPos <= rawData + kRawDataLimit);
+    CODEGEN_ASSERT(rawDataPos <= rawData + kRawDataLimit);
 }
 
 void UnwindBuilderWin::finishInfo() {}
 
 void UnwindBuilderWin::prologueA64(uint32_t prologueSize, uint32_t stackSize, std::initializer_list<A64::RegisterA64> regs)
 {
-    LUAU_ASSERT(!"Not implemented");
+    CODEGEN_ASSERT(!"Not implemented");
 }
 
 void UnwindBuilderWin::prologueX64(uint32_t prologueSize, uint32_t stackSize, bool setupFrame, std::initializer_list<X64::RegisterX64> gpr,
     const std::vector<X64::RegisterX64>& simd)
 {
-    LUAU_ASSERT(stackSize > 0 && stackSize < 4096 && stackSize % 8 == 0);
-    LUAU_ASSERT(prologueSize < 256);
+    CODEGEN_ASSERT(stackSize > 0 && stackSize < 4096 && stackSize % 8 == 0);
+    CODEGEN_ASSERT(prologueSize < 256);
 
     unsigned int stackOffset = 8; // Return address was pushed by calling the function
     unsigned int prologueOffset = 0;

@@ -135,7 +135,7 @@ void UnwindBuilderWin::prologueX64(uint32_t prologueSize, uint32_t stackSize, bo
     // push reg
     for (X64::RegisterX64 reg : gpr)
     {
-        LUAU_ASSERT(reg.size == X64::SizeX64::qword);
+        CODEGEN_ASSERT(reg.size == X64::SizeX64::qword);
 
         stackOffset += 8;
         prologueOffset += 2;

@@ -143,7 +143,7 @@ void UnwindBuilderWin::prologueX64(uint32_t prologueSize, uint32_t stackSize, bo
     }
 
     // If frame pointer is used, simd register storage is not implemented, it will require reworking store offsets
-    LUAU_ASSERT(!setupFrame || simd.size() == 0);
+    CODEGEN_ASSERT(!setupFrame || simd.size() == 0);
 
     unsigned int simdStorageSize = unsigned(simd.size()) * 16;
 

@@ -161,7 +161,7 @@ void UnwindBuilderWin::prologueX64(uint32_t prologueSize, uint32_t stackSize, bo
     else
     {
         // This command can handle allocations up to 512K-8 bytes, but that potentially requires stack probing
-        LUAU_ASSERT(stackSize < 4096);
+        CODEGEN_ASSERT(stackSize < 4096);
 
         stackOffset += stackSize;
         prologueOffset += 7;

@@ -179,8 +179,8 @@ void UnwindBuilderWin::prologueX64(uint32_t prologueSize, uint32_t stackSize, bo
     // vmovaps [rsp+n], xmm
     for (X64::RegisterX64 reg : simd)
     {
-        LUAU_ASSERT(reg.size == X64::SizeX64::xmmword);
-        LUAU_ASSERT(xmmStoreOffset % 16 == 0 && "simd stores have to be performed to aligned locations");
+        CODEGEN_ASSERT(reg.size == X64::SizeX64::xmmword);
+        CODEGEN_ASSERT(xmmStoreOffset % 16 == 0 && "simd stores have to be performed to aligned locations");
 
         prologueOffset += xmmStoreOffset >= 128 ? 10 : 7;
         unwindCodes.push_back({uint8_t(xmmStoreOffset / 16), 0, 0});

@@ -188,8 +188,8 @@ void UnwindBuilderWin::prologueX64(uint32_t prologueSize, uint32_t stackSize, bo
         xmmStoreOffset += 16;
     }
 
-    LUAU_ASSERT(stackOffset % 16 == 0);
-    LUAU_ASSERT(prologueOffset == prologueSize);
+    CODEGEN_ASSERT(stackOffset % 16 == 0);
+    CODEGEN_ASSERT(prologueOffset == prologueSize);
 
     this->prologSize = prologueSize;
 }

@@ -431,7 +431,7 @@ enum LuauBytecodeTag
     // Bytecode version; runtime supports [MIN, MAX], compiler emits TARGET by default but may emit a higher version when flags are enabled
     LBC_VERSION_MIN = 3,
    LBC_VERSION_MAX = 5,
-    LBC_VERSION_TARGET = 4,
+    LBC_VERSION_TARGET = 5,
     // Type encoding version
     LBC_TYPE_VERSION = 1,
     // Types of constant table entries
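With LBC_VERSION_TARGET raised to 5, the comment above describes the compatibility contract: a runtime accepts any chunk whose version falls within [LBC_VERSION_MIN, LBC_VERSION_MAX], and the compiler stamps the target version by default. A minimal sketch of the range check a loader performs, using illustrative constants rather than the actual Luau loader code:

#include <cstdint>

enum : uint8_t
{
    kVersionMin = 3, // oldest bytecode this runtime can still execute
    kVersionMax = 5, // newest bytecode this runtime understands
};

bool isBytecodeVersionSupported(uint8_t version)
{
    // anything outside the window has to be recompiled before it can load
    return version >= kVersionMin && version <= kVersionMax;
}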
@@ -11,9 +11,9 @@ inline bool isFlagExperimental(const char* flag)
     // Flags in this list are disabled by default in various command-line tools. They may have behavior that is not fully final,
     // or critical bugs that are found after the code has been submitted.
     static const char* const kList[] = {
         "LuauInstantiateInSubtyping", // requires some fixes to lua-apps code
         "LuauTinyControlFlowAnalysis", // waiting for updates to packages depended by internal builtin plugins
-        "LuauFixIndexerSubtypingOrdering", // requires some small fixes to lua-apps code since this fixes a false negative
+        "LuauUpdatedRequireByStringSemantics", // requires some small fixes to fully implement some proposed changes
         // makes sure we always have at least one entry
         nullptr,

@@ -284,26 +284,30 @@ public:
         return buffer[logicalToPhysical(pos)];
     }
 
-    T& front() {
+    T& front()
+    {
         LUAU_ASSERT(!empty());
 
         return buffer[head];
     }
 
-    const T& front() const {
+    const T& front() const
+    {
         LUAU_ASSERT(!empty());
 
         return buffer[head];
     }
 
-    T& back() {
+    T& back()
+    {
         LUAU_ASSERT(!empty());
 
         size_t back = logicalToPhysical(queue_size - 1);
         return buffer[back];
     }
 
-    const T& back() const {
+    const T& back() const
+    {
         LUAU_ASSERT(!empty());
 
         size_t back = logicalToPhysical(queue_size - 1);

@@ -427,7 +431,7 @@ public:
             grow();
 
         size_t next_back = logicalToPhysical(queue_size);
-        new (buffer + next_back)T(value);
+        new (buffer + next_back) T(value);
         queue_size++;
     }
 

@@ -446,7 +450,7 @@ public:
             grow();
 
         head = (head == 0) ? capacity() - 1 : head - 1;
-        new (buffer + head)T(value);
+        new (buffer + head) T(value);
         queue_size++;
     }
 
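The front()/back()/push_front() methods above all lean on the same ring-buffer invariant: logical position i lives at physical slot (head + i) % capacity. A minimal standalone sketch of that index arithmetic, simplified from (and not identical to) Luau's VecDeque:

#include <cstddef>

struct RingIndex
{
    size_t head = 0;
    size_t capacity = 8; // assumed non-zero

    size_t logicalToPhysical(size_t pos) const
    {
        return (head + pos) % capacity; // front is pos 0, back is pos size-1
    }

    // push_front steps head back one slot, wrapping at zero, and returns the
    // physical slot where the new element should be placement-new'd
    size_t stepFront()
    {
        head = (head == 0) ? capacity - 1 : head - 1;
        return head;
    }
};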
@@ -5,8 +5,6 @@
 
 #include <math.h>
 
-LUAU_FASTFLAGVARIABLE(LuauVectorLiterals, false)
-
 namespace Luau
 {
 namespace Compile

@@ -473,8 +471,7 @@ Constant foldBuiltin(int bfid, const Constant* args, size_t count)
         break;
 
     case LBF_VECTOR:
-        if (FFlag::LuauVectorLiterals && count >= 3 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number &&
-            args[2].type == Constant::Type_Number)
+        if (count >= 3 && args[0].type == Constant::Type_Number && args[1].type == Constant::Type_Number && args[2].type == Constant::Type_Number)
         {
             if (count == 3)
                 return cvector(args[0].valueNumber, args[1].valueNumber, args[2].valueNumber, 0.0);
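The LBF_VECTOR hunk above unflags vector-literal folding: a constructor call whose arguments are all numeric constants becomes a single vector constant at compile time, which is what lets the VectorLiterals test further down expect one LOADK instead of a runtime call. A hedged sketch of the folding decision, with simplified stand-ins for the Compile::Constant machinery:

#include <array>
#include <optional>

struct CtorArgs
{
    std::array<double, 3> values; // x, y, z
    bool allConstantNumbers;      // true only when every argument folded to a number
};

std::optional<std::array<float, 4>> tryFoldVectorCtor(const CtorArgs& args)
{
    if (!args.allConstantNumbers)
        return std::nullopt; // any non-constant argument defeats the fold

    // the fourth component defaults to zero for the common three-argument form
    return std::array<float, 4>{float(args.values[0]), float(args.values[1]), float(args.values[2]), 0.0f};
}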
@@ -7,9 +7,6 @@
 #include <algorithm>
 #include <string.h>
 
-LUAU_FASTFLAG(LuauVectorLiterals)
-LUAU_FASTFLAG(LuauCompileRevK)
-
 namespace Luau
 {
 

@@ -1125,7 +1122,7 @@ std::string BytecodeBuilder::getError(const std::string& message)
 uint8_t BytecodeBuilder::getVersion()
 {
     // This function usually returns LBC_VERSION_TARGET but may sometimes return a higher number (within LBC_VERSION_MIN/MAX) under fast flags
-    return (FFlag::LuauVectorLiterals || FFlag::LuauCompileRevK) ? 5 : LBC_VERSION_TARGET;
+    return LBC_VERSION_TARGET;
 }
 
 uint8_t BytecodeBuilder::getTypeEncodingVersion()

@@ -26,8 +26,6 @@ LUAU_FASTINTVARIABLE(LuauCompileInlineThreshold, 25)
 LUAU_FASTINTVARIABLE(LuauCompileInlineThresholdMaxBoost, 300)
 LUAU_FASTINTVARIABLE(LuauCompileInlineDepth, 5)
 
-LUAU_FASTFLAGVARIABLE(LuauCompileRevK, false)
-
 namespace Luau
 {
 

@@ -1517,7 +1515,7 @@ struct Compiler
         }
         else
         {
-            if (FFlag::LuauCompileRevK && (expr->op == AstExprBinary::Sub || expr->op == AstExprBinary::Div))
+            if (expr->op == AstExprBinary::Sub || expr->op == AstExprBinary::Div)
             {
                 int32_t lc = getConstantNumber(expr->left);
 

@@ -74,6 +74,7 @@ target_sources(Luau.CodeGen PRIVATE
     CodeGen/include/Luau/CodeAllocator.h
     CodeGen/include/Luau/CodeBlockUnwind.h
     CodeGen/include/Luau/CodeGen.h
+    CodeGen/include/Luau/CodeGenCommon.h
     CodeGen/include/Luau/ConditionA64.h
     CodeGen/include/Luau/ConditionX64.h
     CodeGen/include/Luau/IrAnalysis.h

@@ -477,7 +477,7 @@ TEST_CASE_FIXTURE(JsonEncoderFixture, "encode_AstTypeFunction")
     AstStat* statement = expectParseStatement(R"(type fun = (string, bool, named: number) -> ())");
 
     std::string_view expected =
         R"({"type":"AstStatTypeAlias","location":"0,0 - 0,46","name":"fun","generics":[],"genericPacks":[],"type":{"type":"AstTypeFunction","location":"0,11 - 0,46","generics":[],"genericPacks":[],"argTypes":{"type":"AstTypeList","types":[{"type":"AstTypeReference","location":"0,12 - 0,18","name":"string","nameLocation":"0,12 - 0,18","parameters":[]},{"type":"AstTypeReference","location":"0,20 - 0,24","name":"bool","nameLocation":"0,20 - 0,24","parameters":[]},{"type":"AstTypeReference","location":"0,33 - 0,39","name":"number","nameLocation":"0,33 - 0,39","parameters":[]}]},"argNames":[null,null,{"type":"AstArgumentName","name":"named","location":"0,26 - 0,31"}],"returnTypes":{"type":"AstTypeList","types":[]}},"exported":false})";
 
     CHECK(toJson(statement) == expected);
 }

@@ -15,8 +15,6 @@ namespace Luau
 std::string rep(const std::string& s, size_t n);
 }
 
-LUAU_FASTFLAG(LuauVectorLiterals)
-LUAU_FASTFLAG(LuauCompileRevK)
 LUAU_FASTINT(LuauCompileInlineDepth)
 LUAU_FASTINT(LuauCompileInlineThreshold)
 LUAU_FASTINT(LuauCompileInlineThresholdMaxBoost)

@@ -1182,8 +1180,6 @@ RETURN R0 1
 
 TEST_CASE("AndOrChainCodegen")
 {
-    ScopedFastFlag sff(FFlag::LuauCompileRevK, true);
-
     const char* source = R"(
 return
 (1 - verticalGradientTurbulence < waterLevel + .015 and Enum.Material.Sand)

@@ -2106,8 +2102,6 @@ RETURN R0 0
 
 TEST_CASE("AndOrOptimizations")
 {
-    ScopedFastFlag sff(FFlag::LuauCompileRevK, true);
-
     // the OR/ORK optimization triggers for cutoff since lhs is simple
     CHECK_EQ("\n" + compileFunction(R"(
 local function advancedRidgedFilter(value, cutoff)

@@ -4490,8 +4484,6 @@ L0: RETURN R0 -1
 
 TEST_CASE("VectorLiterals")
 {
-    ScopedFastFlag sff(FFlag::LuauVectorLiterals, true);
-
     CHECK_EQ("\n" + compileFunction("return Vector3.new(1, 2, 3)", 0, 2, /*enableVectors*/ true), R"(
 LOADK R0 K0 [1, 2, 3]
 RETURN R0 1

@@ -7852,8 +7844,6 @@ RETURN R0 1
 
 TEST_CASE("ArithRevK")
 {
-    ScopedFastFlag sff(FFlag::LuauCompileRevK, true);
-
     // - and / have special optimized form for reverse constants; in the future, + and * will likely get compiled to ADDK/MULK
     // other operators are not important enough to optimize reverse constant forms for
     CHECK_EQ("\n" + compileFunction0(R"(
@@ -2158,7 +2158,7 @@ TEST_CASE("IrInstructionLimit")
     Luau::CodeGen::CodeGenCompilationResult nativeResult = Luau::CodeGen::compile(L, -1, Luau::CodeGen::CodeGen_ColdFunctions, &nativeStats);
 
     // Limit is not hit immediately, so with some functions compiled it should be a success
-    CHECK(nativeResult != Luau::CodeGen::CodeGenCompilationResult::CodeGenFailed);
+    CHECK(nativeResult == Luau::CodeGen::CodeGenCompilationResult::CodeGenOverflowInstructionLimit);
 
     // We should be able to compile at least one of our functions
     CHECK(nativeStats.functionsCompiled > 0);
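This test now pins the exact failure cause rather than merely "not CodeGenFailed", matching the changelog note that CodeGen::compile reports more specific results. A sketch of how a caller can branch on the cause; only enumerators that appear in this diff are used, and the type names in the surrounding setup are assumed to match the test's own declarations:

#include "Luau/CodeGen.h"

void reportCompileOutcome(lua_State* L)
{
    Luau::CodeGen::CompilationStats stats = {};
    Luau::CodeGen::CodeGenCompilationResult result = Luau::CodeGen::compile(L, -1, Luau::CodeGen::CodeGen_ColdFunctions, &stats);

    if (result == Luau::CodeGen::CodeGenCompilationResult::CodeGenOverflowInstructionLimit)
    {
        // some functions compiled before the limit was hit; stats.functionsCompiled says how many
    }
    else if (result == Luau::CodeGen::CodeGenCompilationResult::CodeGenFailed)
    {
        // code generation produced nothing usable for this module
    }
}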
@@ -26,7 +26,7 @@ std::string diff<TypeId, TypeId>(TypeId l, TypeId r);
 template<>
 std::string diff<const Type&, const Type&>(const Type& l, const Type& r);
 
-}
+} // namespace Luau
 
 // Note: the do-while blocks in the macros below is to scope the INFO block to
 // only that assertion.

@@ -36,11 +36,11 @@ std::string diff<const Type&, const Type&>(const Type& l, const Type& r);
     { \
         INFO("Left and right values were not equal: ", diff(l, r)); \
         CHECK_EQ(l, r); \
-    } while(false);
+    } while (false);
 
 #define REQUIRE_EQ_DIFF(l, r) \
     do \
     { \
         INFO("Left and right values were not equal: ", diff(l, r)); \
         REQUIRE_EQ(l, r); \
-    } while(false);
+    } while (false);

@@ -13,7 +13,6 @@
 
 using namespace Luau::CodeGen;
 
-LUAU_FASTFLAG(LuauReuseBufferChecks)
 LUAU_DYNAMIC_FASTFLAG(LuauCodeGenCheckGcEffectFix)
 
 class IrBuilderFixture

@@ -2400,8 +2399,6 @@ bb_fallback_1:
 
 TEST_CASE_FIXTURE(IrBuilderFixture, "DuplicateBufferLengthChecks")
 {
-    ScopedFastFlag luauReuseBufferChecks{FFlag::LuauReuseBufferChecks, true};
-
     IrOp block = build.block(IrBlockKind::Internal);
     IrOp fallback = build.block(IrBlockKind::Fallback);
 

@@ -2470,8 +2467,6 @@ bb_fallback_1:
 
 TEST_CASE_FIXTURE(IrBuilderFixture, "BufferLenghtChecksNegativeIndex")
 {
-    ScopedFastFlag luauReuseBufferChecks{FFlag::LuauReuseBufferChecks, true};
-
     IrOp block = build.block(IrBlockKind::Internal);
     IrOp fallback = build.block(IrBlockKind::Fallback);
 

@@ -12,8 +12,6 @@
 
 #include <memory>
 
-LUAU_FASTFLAG(LuauFixDivrkInference)
-LUAU_FASTFLAG(LuauCompileRevK)
 LUAU_FASTFLAG(LuauCodegenVector)
 LUAU_FASTFLAG(LuauCodegenMathMemArgs)
 

@@ -66,8 +64,6 @@ TEST_SUITE_BEGIN("IrLowering");
 
 TEST_CASE("VectorReciprocal")
 {
-    ScopedFastFlag luauFixDivrkInference{FFlag::LuauFixDivrkInference, true};
-    ScopedFastFlag luauCompileRevK{FFlag::LuauCompileRevK, true};
     ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};
 
     CHECK_EQ("\n" + getCodegenAssembly(R"(

@@ -218,8 +214,6 @@ bb_bytecode_1:
 TEST_CASE("VectorMulDivMixed")
 {
     ScopedFastFlag luauCodegenVector{FFlag::LuauCodegenVector, true};
-    ScopedFastFlag luauFixDivrkInference{FFlag::LuauFixDivrkInference, true};
-    ScopedFastFlag luauCompileRevK{FFlag::LuauCompileRevK, true};
 
     CHECK_EQ("\n" + getCodegenAssembly(R"(
 local function vec3combo(a: vector, b: vector, c: vector, d: vector)

@@ -18,7 +18,18 @@ function fib(n)
 return n < 2 and 1 or fib(n-1) + fib(n-2)
 end
 
-return math.max(fib(5), 1)
+)");
+
+    REQUIRE(0 == result.warnings.size());
+}
+
+TEST_CASE_FIXTURE(Fixture, "type_family_fully_reduces")
+{
+    LintResult result = lint(R"(
+function fib(n)
+return n < 2 or fib(n-2)
+end
+
 )");
 
     REQUIRE(0 == result.warnings.size());

@@ -24,7 +24,8 @@ struct FamilyFixture : Fixture
     {
         swapFamily = TypeFamily{/* name */ "Swap",
             /* reducer */
-            [](std::vector<TypeId> tys, std::vector<TypePackId> tps, NotNull<TypeFamilyContext> ctx) -> TypeFamilyReductionResult<TypeId> {
+            [](TypeId instance, std::vector<TypeId> tys, std::vector<TypePackId> tps,
+                NotNull<TypeFamilyContext> ctx) -> TypeFamilyReductionResult<TypeId> {
                 LUAU_ASSERT(tys.size() == 1);
                 TypeId param = follow(tys.at(0));
 

@@ -267,8 +267,8 @@ TEST_CASE_FIXTURE(Fixture, "infer_type_of_value_a_via_typeof_with_assignment")
         CHECK("nil" == toString(requireType("b")));
 
         LUAU_REQUIRE_ERROR_COUNT(1, result);
-        CHECK(
-            result.errors[0] == (TypeError{Location{Position{2, 29}, Position{2, 30}}, TypeMismatch{builtinTypes->nilType, builtinTypes->numberType}}));
+        CHECK(result.errors[0] ==
+            (TypeError{Location{Position{2, 29}, Position{2, 30}}, TypeMismatch{builtinTypes->nilType, builtinTypes->numberType}}));
     }
     else
     {

@@ -276,8 +276,8 @@ TEST_CASE_FIXTURE(Fixture, "infer_type_of_value_a_via_typeof_with_assignment")
         CHECK_EQ(*builtinTypes->numberType, *requireType("b"));
 
         LUAU_REQUIRE_ERROR_COUNT(1, result);
-        CHECK_EQ(
-            result.errors[0], (TypeError{Location{Position{4, 12}, Position{4, 17}}, TypeMismatch{builtinTypes->numberType, builtinTypes->stringType}}));
+        CHECK_EQ(result.errors[0],
+            (TypeError{Location{Position{4, 12}, Position{4, 17}}, TypeMismatch{builtinTypes->numberType, builtinTypes->stringType}}));
     }
 }
 

@@ -541,13 +541,13 @@ TEST_CASE_FIXTURE(Fixture, "intersection_of_tables")
     )");
 
     LUAU_REQUIRE_ERROR_COUNT(1, result);
-    const std::string expected =
-        (FFlag::DebugLuauDeferredConstraintResolution) ?
-        "Type "
-        "'{ p: number?, q: number?, r: number? } & { p: number?, q: string? }'"
-        " could not be converted into "
-        "'{ p: nil }'; none of the intersection parts are compatible" :
+    const std::string expected = (FFlag::DebugLuauDeferredConstraintResolution)
+        ? "Type "
+          "'{ p: number?, q: number?, r: number? } & { p: number?, q: string? }'"
+          " could not be converted into "
+          "'{ p: nil }'; none of the intersection parts are compatible"
+        :
         R"(Type
 '{| p: number?, q: number?, r: number? |} & {| p: number?, q: string? |}'
 could not be converted into
 '{| p: nil |}'; none of the intersection parts are compatible)";

@@ -618,13 +618,13 @@ TEST_CASE_FIXTURE(Fixture, "overloaded_functions_returning_intersections")
     )");
 
     LUAU_REQUIRE_ERROR_COUNT(1, result);
-    const std::string expected =
-        (FFlag::DebugLuauDeferredConstraintResolution) ?
-        R"(Type
+    const std::string expected = (FFlag::DebugLuauDeferredConstraintResolution) ?
+        R"(Type
 '((number?) -> { p: number } & { q: number }) & ((string?) -> { p: number } & { r: number })'
 could not be converted into
-'(number?) -> { p: number, q: number, r: number }'; none of the intersection parts are compatible)" :
-        R"(Type
+'(number?) -> { p: number, q: number, r: number }'; none of the intersection parts are compatible)"
+        :
+        R"(Type
 '((number?) -> {| p: number |} & {| q: number |}) & ((string?) -> {| p: number |} & {| r: number |})'
 could not be converted into
 '(number?) -> {| p: number, q: number, r: number |}'; none of the intersection parts are compatible)";

@@ -273,7 +273,6 @@ TEST_CASE_FIXTURE(Fixture, "discriminate_from_x_not_equal_to_nil")
     // Should be {| x: nil, y: nil |}
     CHECK_EQ("{| x: nil, y: nil |} | {| x: string, y: number |}", toString(requireTypeAtPosition({7, 28})));
 }
-
 }
 
 TEST_CASE_FIXTURE(BuiltinsFixture, "bail_early_if_unification_is_too_complicated" * doctest::timeout(0.5))

@@ -515,9 +515,9 @@ TEST_CASE_FIXTURE(Fixture, "free_type_is_equal_to_an_lvalue")
     LUAU_REQUIRE_NO_ERRORS(result);
 
     if (FFlag::DebugLuauDeferredConstraintResolution)
         CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "unknown"); // a == b
     else
         CHECK_EQ(toString(requireTypeAtPosition({3, 33})), "a"); // a == b
 
     CHECK_EQ(toString(requireTypeAtPosition({3, 36})), "string?"); // a == b
 }

@@ -366,8 +366,7 @@ TEST_CASE_FIXTURE(Fixture, "parametric_tagged_union_alias")
     LUAU_REQUIRE_ERROR_COUNT(1, result);
 
     // FIXME: This could be improved by expanding the contents of `a`
-    const std::string expectedError =
-        "Type 'a' could not be converted into 'Err<number> | Ok<string>'";
+    const std::string expectedError = "Type 'a' could not be converted into 'Err<number> | Ok<string>'";
 
     CHECK(toString(result.errors[0]) == expectedError);
 }

@@ -4054,9 +4054,7 @@ TEST_CASE_FIXTURE(Fixture, "table_subtyping_error_suppression")
     {
         CHECK_EQ("{| x: any, y: string |}", toString(tm->wantedType));
         CHECK_EQ("{| x: string, y: number |}", toString(tm->givenType));
-
     }
-
 }
 
 TEST_SUITE_END();

@@ -58,8 +58,8 @@ TEST_CASE_FIXTURE(Fixture, "tc_error")
     {
         LUAU_REQUIRE_ERROR_COUNT(1, result);
 
-        CHECK_EQ(
-            result.errors[0], (TypeError{Location{Position{0, 35}, Position{0, 36}}, TypeMismatch{builtinTypes->numberType, builtinTypes->stringType}}));
+        CHECK_EQ(result.errors[0],
+            (TypeError{Location{Position{0, 35}, Position{0, 36}}, TypeMismatch{builtinTypes->numberType, builtinTypes->stringType}}));
     }
 }
 

@@ -77,9 +77,9 @@ TEST_CASE_FIXTURE(Fixture, "tc_error_2")
     LUAU_REQUIRE_ERROR_COUNT(1, result);
 
     CHECK_EQ(result.errors[0], (TypeError{Location{Position{0, 18}, Position{0, 22}}, TypeMismatch{
         requireType("a"),
         builtinTypes->stringType,
     }}));
 }
 }
 

@@ -13,7 +13,7 @@ struct TypeStateFixture : BuiltinsFixture
 {
     ScopedFastFlag dcr{FFlag::DebugLuauDeferredConstraintResolution, true};
 };
-}
+} // namespace
 
 TEST_SUITE_BEGIN("TypeStatesTest");
 

@@ -499,10 +499,10 @@ end
 
     if (FFlag::DebugLuauDeferredConstraintResolution)
     {
-        CHECK_EQ(toString(result.errors[0]),
-            "Type 'X | Y | Z' could not be converted into '{ w: number }'; type X | Y | Z[0] (X) is not a subtype of { w: number } ({ w: number })\n\t"
+        CHECK_EQ(toString(result.errors[0]), "Type 'X | Y | Z' could not be converted into '{ w: number }'; type X | Y | Z[0] (X) is not a subtype "
+            "of { w: number } ({ w: number })\n\t"
             "type X | Y | Z[1] (Y) is not a subtype of { w: number } ({ w: number })\n\t"
             "type X | Y | Z[2] (Z) is not a subtype of { w: number } ({ w: number })");
     }
     else
     {

@@ -202,9 +202,12 @@ struct TeamCityReporter : doctest::IReporter
 
     void test_case_end(const doctest::CurrentTestCaseStats& in) override
     {
-        printf("##teamcity[testMetadata testName='%s: %s' name='total_asserts' type='number' value='%d']\n", currentTest->m_test_suite, currentTest->m_name, in.numAssertsCurrentTest);
-        printf("##teamcity[testMetadata testName='%s: %s' name='failed_asserts' type='number' value='%d']\n", currentTest->m_test_suite, currentTest->m_name, in.numAssertsFailedCurrentTest);
-        printf("##teamcity[testMetadata testName='%s: %s' name='runtime' type='number' value='%f']\n", currentTest->m_test_suite, currentTest->m_name, in.seconds);
+        printf("##teamcity[testMetadata testName='%s: %s' name='total_asserts' type='number' value='%d']\n", currentTest->m_test_suite,
+            currentTest->m_name, in.numAssertsCurrentTest);
+        printf("##teamcity[testMetadata testName='%s: %s' name='failed_asserts' type='number' value='%d']\n", currentTest->m_test_suite,
+            currentTest->m_name, in.numAssertsFailedCurrentTest);
+        printf("##teamcity[testMetadata testName='%s: %s' name='runtime' type='number' value='%f']\n", currentTest->m_test_suite, currentTest->m_name,
+            in.seconds);
 
         if (!in.testCaseSuccess)
             printf("##teamcity[testFailed name='%s: %s']\n", currentTest->m_test_suite, currentTest->m_name);

@@ -212,15 +215,18 @@ struct TeamCityReporter : doctest::IReporter
         printf("##teamcity[testFinished name='%s: %s']\n", currentTest->m_test_suite, currentTest->m_name);
     }
 
-    void test_case_exception(const doctest::TestCaseException& in) override {
-        printf("##teamcity[testFailed name='%s: %s' message='Unhandled exception' details='%s']\n", currentTest->m_test_suite, currentTest->m_name, in.error_string.c_str());
+    void test_case_exception(const doctest::TestCaseException& in) override
+    {
+        printf("##teamcity[testFailed name='%s: %s' message='Unhandled exception' details='%s']\n", currentTest->m_test_suite, currentTest->m_name,
+            in.error_string.c_str());
     }
 
     void subcase_start(const doctest::SubcaseSignature& /*in*/) override {}
     void subcase_end() override {}
 
-    void log_assert(const doctest::AssertData& ad) override {
-        if(!ad.m_failed)
+    void log_assert(const doctest::AssertData& ad) override
+    {
+        if (!ad.m_failed)
             return;
 
         if (ad.m_decomp.size())

@@ -229,7 +235,8 @@ struct TeamCityReporter : doctest::IReporter
             fprintf(stderr, "%s(%d): ERROR: %s\n", ad.m_file, ad.m_line, ad.m_expr);
     }
 
-    void log_message(const doctest::MessageData& md) override {
+    void log_message(const doctest::MessageData& md) override
+    {
         const char* severity = (md.m_severity & doctest::assertType::is_warn) ? "WARNING" : "ERROR";
         bool isError = md.m_severity & (doctest::assertType::is_require | doctest::assertType::is_check);
         fprintf(isError ? stderr : stdout, "%s(%d): %s: %s\n", md.m_file, md.m_line, severity, md.m_string.c_str());

@@ -5,7 +5,6 @@ AutocompleteTest.anonymous_autofilled_generic_type_pack_vararg
 AutocompleteTest.autocomplete_response_perf1
 AutocompleteTest.autocomplete_string_singleton_equality
 AutocompleteTest.do_wrong_compatible_nonself_calls
-AutocompleteTest.type_correct_expected_argument_type_suggestion
 AutocompleteTest.type_correct_expected_argument_type_suggestion_self
 AutocompleteTest.type_correct_suggestion_for_overloads
 BuiltinTests.aliased_string_format

@@ -98,8 +97,8 @@ GenericsTests.bound_tables_do_not_clone_original_fields
 GenericsTests.check_generic_function
 GenericsTests.check_generic_local_function
 GenericsTests.check_mutual_generic_functions
-GenericsTests.check_mutual_generic_functions_unannotated
 GenericsTests.check_mutual_generic_functions_errors
+GenericsTests.check_mutual_generic_functions_unannotated
 GenericsTests.check_nested_generic_function
 GenericsTests.check_recursive_generic_function
 GenericsTests.correctly_instantiate_polymorphic_member_functions

@@ -165,7 +164,6 @@ IntersectionTypes.overloadeded_functions_with_weird_typepacks_3
 IntersectionTypes.overloadeded_functions_with_weird_typepacks_4
 IntersectionTypes.table_write_sealed_indirect
 IntersectionTypes.union_saturate_overloaded_functions
-Linter.CleanCode
 Linter.DeprecatedApiFenv
 Linter.FormatStringTyped
 Linter.TableOperationsIndexer

@@ -183,8 +181,6 @@ NonstrictModeTests.table_props_are_any
 Normalize.higher_order_function_with_annotation
 Normalize.negations_of_tables
 Normalize.specific_functions_cannot_be_negated
-ParserTests.parse_nesting_based_end_detection
-ParserTests.parse_nesting_based_end_detection_single_line
 ProvisionalTests.assign_table_with_refined_property_with_a_similar_type_is_illegal
 ProvisionalTests.discriminate_from_x_not_equal_to_nil
 ProvisionalTests.do_not_ice_when_trying_to_pick_first_of_generic_type_pack