Mirror of https://github.com/luau-lang/luau.git (synced 2024-11-15 22:35:43 +08:00)
Commit 16fbfe912c
- Cleaned up `FFlag::FixFindBindingAtFunctionName`, `FFlag::LuauNormalizeBlockedTypes`, `FFlag::LuauPCallDebuggerFix`
- Added support for break and continue to control flow analysis
- The old type unification engine will now report a more fine-grained error at times, indicating that type normalization in particular failed

# New Type Solver

- Refactor of Unifier2, the new unification implementation for Luau
- Completed MVP of new unification implementation
- Dramatically simplified overload selection logic
- Type family reduction can now apply sooner to free types that have been solved
- Subtyping now supports table indexers
- Generalization now replaces bad generics with unknown

# Native Code Generation

- Reduce stack spills caused by FINDUPVAL and STORE_TAG
- Improved generation of SHL/SHR/SAR/rotates with immediate operands in X64
- Removed redundant case re-check in table lookup fallback

---------

Co-authored-by: Arseny Kapoulkine <arseny.kapoulkine@gmail.com>
Co-authored-by: Vyacheslav Egorov <vegorov@roblox.com>
Co-authored-by: Andy Friesen <afriesen@roblox.com>
Co-authored-by: Lily Brown <lbrown@roblox.com>
225 lines
6.8 KiB
C++
// This file is part of the Luau programming language and is licensed under MIT License; see LICENSE.txt for details
#pragma once

#include "Luau/Common.h"
#include "Luau/IrData.h"

namespace Luau
{
namespace CodeGen
{

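// The visitor interface assumed below (as implied by the calls made in this file rather
// than by a separate interface definition): a visitor type 'T' provides use/maybeUse,
// def/maybeDef, useRange/defRange, useVarargs and capture callbacks. The 'maybe*'
// variants receive operands that are not guaranteed to be VM registers (e.g. a VmConst
// or a pointer value) and are expected to check the operand kind themselves.

// Reports every VM register read (use) and written (def) by a single IR instruction;
// uses are reported before defs (see the comment at the top of the switch).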
template<typename T>
static void visitVmRegDefsUses(T& visitor, IrFunction& function, const IrInst& inst)
{
    // For correct analysis, all instruction uses must be handled before handling the definitions
    switch (inst.cmd)
    {
    case IrCmd::LOAD_TAG:
    case IrCmd::LOAD_POINTER:
    case IrCmd::LOAD_DOUBLE:
    case IrCmd::LOAD_INT:
    case IrCmd::LOAD_TVALUE:
        visitor.maybeUse(inst.a); // Argument can also be a VmConst
        break;
    case IrCmd::STORE_TAG:
    case IrCmd::STORE_POINTER:
    case IrCmd::STORE_DOUBLE:
    case IrCmd::STORE_INT:
    case IrCmd::STORE_VECTOR:
    case IrCmd::STORE_TVALUE:
    case IrCmd::STORE_SPLIT_TVALUE:
        visitor.maybeDef(inst.a); // Argument can also be a pointer value
        break;
    case IrCmd::CMP_ANY:
        visitor.use(inst.a);
        visitor.use(inst.b);
        break;
    case IrCmd::JUMP_IF_TRUTHY:
    case IrCmd::JUMP_IF_FALSY:
        visitor.use(inst.a);
        break;
    // A <- B, C
    case IrCmd::DO_ARITH:
    case IrCmd::GET_TABLE:
        visitor.use(inst.b);
        visitor.maybeUse(inst.c); // Argument can also be a VmConst

        visitor.def(inst.a);
        break;
    case IrCmd::SET_TABLE:
        visitor.use(inst.a);
        visitor.use(inst.b);
        visitor.maybeUse(inst.c); // Argument can also be a VmConst
        break;
    // A <- B
    case IrCmd::DO_LEN:
        visitor.use(inst.b);

        visitor.def(inst.a);
        break;
    case IrCmd::GET_IMPORT:
        visitor.def(inst.a);
        break;
    case IrCmd::CONCAT:
        visitor.useRange(vmRegOp(inst.a), function.uintOp(inst.b));

        visitor.defRange(vmRegOp(inst.a), function.uintOp(inst.b));
        break;
    case IrCmd::GET_UPVALUE:
        visitor.def(inst.a);
        break;
    case IrCmd::SET_UPVALUE:
        visitor.use(inst.b);
        break;
    case IrCmd::INTERRUPT:
        break;
    case IrCmd::BARRIER_OBJ:
    case IrCmd::BARRIER_TABLE_FORWARD:
        visitor.maybeUse(inst.b);
        break;
    case IrCmd::CLOSE_UPVALS:
        // Closing an upvalue should be counted as a register use (it copies the fresh register value)
        // But we lack the required information about the specific set of registers that are affected
        // Because we don't plan to optimize captured registers atm, we skip full dataflow analysis for them right now
        break;
    case IrCmd::CAPTURE:
        visitor.maybeUse(inst.a);

        if (function.uintOp(inst.b) == 1)
            visitor.capture(vmRegOp(inst.a));
        break;
    case IrCmd::SETLIST:
        visitor.use(inst.b);
        visitor.useRange(vmRegOp(inst.c), function.intOp(inst.d));
        break;
    case IrCmd::CALL:
        visitor.use(inst.a);
        visitor.useRange(vmRegOp(inst.a) + 1, function.intOp(inst.b));

        visitor.defRange(vmRegOp(inst.a), function.intOp(inst.c));
        break;
    case IrCmd::RETURN:
        visitor.useRange(vmRegOp(inst.a), function.intOp(inst.b));
        break;

    // TODO: FASTCALL is more restrictive than INVOKE_FASTCALL; we should either determine the exact semantics, or rework it
    case IrCmd::FASTCALL:
    case IrCmd::INVOKE_FASTCALL:
        if (int count = function.intOp(inst.e); count != -1)
        {
            if (count >= 3)
            {
                LUAU_ASSERT(inst.d.kind == IrOpKind::VmReg && vmRegOp(inst.d) == vmRegOp(inst.c) + 1);

                visitor.useRange(vmRegOp(inst.c), count);
            }
            else
            {
                if (count >= 1)
                    visitor.use(inst.c);

                if (count >= 2)
                    visitor.maybeUse(inst.d); // Argument can also be a VmConst
            }
        }
        else
        {
            visitor.useVarargs(vmRegOp(inst.c));
        }

        // Multiple return sequences (count == -1) are defined by ADJUST_STACK_TO_REG
        if (int count = function.intOp(inst.f); count != -1)
            visitor.defRange(vmRegOp(inst.b), count);
        break;
    case IrCmd::FORGLOOP:
        // First register is not used by instruction, we check that it's still 'nil' with CHECK_TAG
        visitor.use(inst.a, 1);
        visitor.use(inst.a, 2);

        visitor.def(inst.a, 2);
        visitor.defRange(vmRegOp(inst.a) + 3, function.intOp(inst.b));
        break;
    case IrCmd::FORGLOOP_FALLBACK:
        visitor.useRange(vmRegOp(inst.a), 3);

        visitor.def(inst.a, 2);
        visitor.defRange(vmRegOp(inst.a) + 3, uint8_t(function.intOp(inst.b))); // ignore most significant bit
        break;
    case IrCmd::FORGPREP_XNEXT_FALLBACK:
        visitor.use(inst.b);
        break;
    case IrCmd::FALLBACK_GETGLOBAL:
        visitor.def(inst.b);
        break;
    case IrCmd::FALLBACK_SETGLOBAL:
        visitor.use(inst.b);
        break;
    case IrCmd::FALLBACK_GETTABLEKS:
        visitor.use(inst.c);

        visitor.def(inst.b);
        break;
    case IrCmd::FALLBACK_SETTABLEKS:
        visitor.use(inst.b);
        visitor.use(inst.c);
        break;
    case IrCmd::FALLBACK_NAMECALL:
        visitor.use(inst.c);

        visitor.defRange(vmRegOp(inst.b), 2);
        break;
    case IrCmd::FALLBACK_PREPVARARGS:
        // No effect on explicitly referenced registers
        break;
    case IrCmd::FALLBACK_GETVARARGS:
        visitor.defRange(vmRegOp(inst.b), function.intOp(inst.c));
        break;
    case IrCmd::FALLBACK_DUPCLOSURE:
        visitor.def(inst.b);
        break;
    case IrCmd::FALLBACK_FORGPREP:
        visitor.use(inst.b);

        visitor.defRange(vmRegOp(inst.b), 3);
        break;
    case IrCmd::ADJUST_STACK_TO_REG:
        visitor.defRange(vmRegOp(inst.a), -1);
        break;
    case IrCmd::ADJUST_STACK_TO_TOP:
        // While this can be considered to be a vararg consumer, it is already handled in fastcall instructions
        break;
    case IrCmd::GET_TYPEOF:
        visitor.use(inst.a);
        break;

    case IrCmd::FINDUPVAL:
        visitor.use(inst.a);
        break;

    default:
        // All instructions which reference registers have to be handled explicitly
        LUAU_ASSERT(inst.a.kind != IrOpKind::VmReg);
        LUAU_ASSERT(inst.b.kind != IrOpKind::VmReg);
        LUAU_ASSERT(inst.c.kind != IrOpKind::VmReg);
        LUAU_ASSERT(inst.d.kind != IrOpKind::VmReg);
        LUAU_ASSERT(inst.e.kind != IrOpKind::VmReg);
        LUAU_ASSERT(inst.f.kind != IrOpKind::VmReg);
        break;
    }
}

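// Illustrative sketch (not part of the library): a minimal visitor that satisfies the
// callback set invoked by visitVmRegDefsUses above and simply counts register reads and
// writes. The type name and parameter names below are hypothetical; real visitors live
// in the analysis and codegen passes that include this header.
//
//     struct VmRegCountingVisitor
//     {
//         int uses = 0;
//         int defs = 0;
//
//         // Some call sites pass an extra register offset, e.g. visitor.use(inst.a, 1)
//         void use(IrOp op, int offset = 0)
//         {
//             uses++;
//         }
//         void maybeUse(IrOp op)
//         {
//             if (op.kind == IrOpKind::VmReg)
//                 uses++;
//         }
//         void def(IrOp op, int offset = 0)
//         {
//             defs++;
//         }
//         void maybeDef(IrOp op)
//         {
//             if (op.kind == IrOpKind::VmReg)
//                 defs++;
//         }
//         void useRange(int firstReg, int count)
//         {
//             if (count > 0)
//                 uses += count;
//         }
//         // 'count' can be -1 here (see ADJUST_STACK_TO_REG above), i.e. the exact
//         // range is only known once the stack top has been adjusted
//         void defRange(int firstReg, int count)
//         {
//             if (count > 0)
//                 defs += count;
//         }
//         void useVarargs(int reg) {}
//         void capture(int reg) {}
//     };
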
template<typename T>
static void visitVmRegDefsUses(T& visitor, IrFunction& function, const IrBlock& block)
{
    for (uint32_t instIdx = block.start; instIdx <= block.finish; instIdx++)
    {
        IrInst& inst = function.instructions[instIdx];

        visitVmRegDefsUses(visitor, function, inst);
    }
}

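// Typical usage (sketch only; it assumes IrFunction exposes its block list as
// 'function.blocks' in IrData.h, and real passes generally skip dead or unreferenced
// blocks before visiting):
//
//     VmRegCountingVisitor visitor; // the hypothetical visitor sketched above
//
//     for (IrBlock& block : function.blocks)
//         visitVmRegDefsUses(visitor, function, block);
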
} // namespace CodeGen
} // namespace Luau