From 51d4d18ec0c4c6d1060960fd4845b56198bee755 Mon Sep 17 00:00:00 2001
From: Vyacheslav Egorov
Date: Fri, 25 Aug 2023 18:46:28 +0300
Subject: [PATCH] Late fixes

* Unifier::tryUnify_ reports UnificationTooComplex when the supertype
  fails to normalize, instead of dereferencing a null NormalizedType
* spill slot storage on x64 is sized separately from local storage and
  is included in the full stack size computation
* the GeneratedCodeExecutionWithThrowX64Simd test is skipped when code
  generation is not supported on the host
* IrRegAllocX64 test expectations are updated for the new spill slot
  offsets
---
 Analysis/src/Unifier.cpp     | 4 ++++
 CodeGen/src/EmitCommonX64.h  | 5 +++--
 tests/CodeAllocator.test.cpp | 4 ++++
 tests/IrRegAllocX64.test.cpp | 6 +++---
 4 files changed, 14 insertions(+), 5 deletions(-)

diff --git a/Analysis/src/Unifier.cpp b/Analysis/src/Unifier.cpp
index db8e2008..bc8ef018 100644
--- a/Analysis/src/Unifier.cpp
+++ b/Analysis/src/Unifier.cpp
@@ -605,6 +605,10 @@ void Unifier::tryUnify_(TypeId subTy, TypeId superTy, bool isFunctionCall, bool
     {
         // TODO: there are probably cheaper ways to check if any <: T.
         const NormalizedType* superNorm = normalizer->normalize(superTy);
+
+        if (!superNorm)
+            return reportError(location, UnificationTooComplex{});
+
         if (!log.get<AnyType>(superNorm->tops))
             failure = true;
     }
diff --git a/CodeGen/src/EmitCommonX64.h b/CodeGen/src/EmitCommonX64.h
index 834be075..dd9b082b 100644
--- a/CodeGen/src/EmitCommonX64.h
+++ b/CodeGen/src/EmitCommonX64.h
@@ -59,7 +59,8 @@ inline uint8_t getXmmRegisterCount(ABIX64 abi)
 
 // Native code is as stackless as the interpreter, so we can place some data on the stack once and have it accessible at any point
 // Stack is separated into sections for different data. See CodeGenX64.cpp for layout overview
 constexpr unsigned kStackAlign = 8; // Bytes we need to align the stack for non-vol xmm register storage
-constexpr unsigned kStackLocalStorage = 8 * kExtraLocals + 8 * kSpillSlots;
+constexpr unsigned kStackLocalStorage = 8 * kExtraLocals;
+constexpr unsigned kStackSpillStorage = 8 * kSpillSlots;
 constexpr unsigned kStackExtraArgumentStorage = 2 * 8; // Bytes for 5th and 6th function call arguments used under Windows ABI
 constexpr unsigned kStackRegHomeStorage = 4 * 8; // Register 'home' locations that can be used by callees under Windows ABI
@@ -82,7 +83,7 @@ constexpr unsigned kStackOffsetToSpillSlots = kStackOffsetToLocals + kStackLocal
 
 inline unsigned getFullStackSize(ABIX64 abi, uint8_t xmmRegCount)
 {
-    return kStackOffsetToSpillSlots + getNonVolXmmStorageSize(abi, xmmRegCount) + kStackAlign;
+    return kStackOffsetToSpillSlots + kStackSpillStorage + getNonVolXmmStorageSize(abi, xmmRegCount) + kStackAlign;
 }
 
 constexpr OperandX64 sClosure = qword[rsp + kStackOffsetToLocals + 0]; // Closure* cl
diff --git a/tests/CodeAllocator.test.cpp b/tests/CodeAllocator.test.cpp
index 1380e669..298035c2 100644
--- a/tests/CodeAllocator.test.cpp
+++ b/tests/CodeAllocator.test.cpp
@@ -412,6 +412,10 @@ static void obscureThrowCase(int64_t (*f)(int64_t, void (*)(int64_t)))
 
 TEST_CASE("GeneratedCodeExecutionWithThrowX64Simd")
 {
+    // This test requires AVX
+    if (!Luau::CodeGen::isSupported())
+        return;
+
     using namespace X64;
 
     AssemblyBuilderX64 build(/* logText= */ false);
diff --git a/tests/IrRegAllocX64.test.cpp b/tests/IrRegAllocX64.test.cpp
index e9330c1d..bbf9c154 100644
--- a/tests/IrRegAllocX64.test.cpp
+++ b/tests/IrRegAllocX64.test.cpp
@@ -49,9 +49,9 @@ TEST_CASE_FIXTURE(IrRegAllocX64Fixture, "RelocateFix")
 
     LUAU_ASSERT(function.instructions[1].spilled);
 
     checkMatch(R"(
- vmovsd qword ptr [rsp+070h],rax
- vmovsd qword ptr [rsp+078h],rax
- vmovsd rax,qword ptr [rsp+070h]
+ vmovsd qword ptr [rsp+048h],rax
+ vmovsd qword ptr [rsp+050h],rax
+ vmovsd rax,qword ptr [rsp+048h]
)");
 }
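
A note on the Unifier.cpp hunk: normalizer->normalize returns a null pointer
when it gives up on a type that is too complex to normalize within its budget,
and the old code dereferenced the result unconditionally. Below is a minimal
standalone sketch of the guard the hunk adds, using mocked stand-ins
(hypothetical names, for illustration only) since the real NormalizedType,
Normalizer, TxnLog, and error plumbing carry far more state:

#include <cstdio>

// Mocked stand-in: the real NormalizedType lives in Luau/Analysis.
struct NormalizedType
{
    bool topsIsAny = false; // stands in for log.get<AnyType>(superNorm->tops)
};

// Stand-in for Normalizer::normalize, which returns nullptr once the
// normalizer exceeds its complexity budget.
const NormalizedType* normalize(bool exceedsBudget)
{
    static NormalizedType cached;
    return exceedsBudget ? nullptr : &cached;
}

void tryUnifyAgainstTop(bool exceedsBudget)
{
    const NormalizedType* superNorm = normalize(exceedsBudget);

    // The guard this patch adds: without it, the superNorm->topsIsAny access
    // below is a null dereference whenever normalization gives up.
    if (!superNorm)
    {
        std::puts("reported UnificationTooComplex");
        return;
    }

    if (!superNorm->topsIsAny)
        std::puts("recorded a unification failure");
}

int main()
{
    tryUnifyAgainstTop(true);  // budget exceeded: an error instead of a crash
    tryUnifyAgainstTop(false); // normal path
}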
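
A note on the stack layout change, for anyone puzzled by the test update: with
the old definitions, kStackLocalStorage already counted the spill slots, so
kStackOffsetToSpillSlots pointed past both sections and spill accesses appear
to have landed above the storage the frame reserved for them. The standalone
sketch below reproduces the arithmetic; the concrete values (kExtraLocals = 3,
kSpillSlots = 5, locals starting at rsp+030h) are assumptions back-derived
from the 070h -> 048h change in IrRegAllocX64.test.cpp, not quoted from the
headers:

#include <cstdio>

// All values below are inferred from the test expectations, not
// authoritative copies of the CodeGen constants.
constexpr unsigned kStackAlign = 8;
constexpr unsigned kExtraLocals = 3;                   // 24 bytes of locals
constexpr unsigned kSpillSlots = 5;                    // 40 bytes of spills
constexpr unsigned kStackRegHomeStorage = 4 * 8;       // Windows ABI 'home' area
constexpr unsigned kStackExtraArgumentStorage = 2 * 8; // 5th/6th argument slots

constexpr unsigned kStackOffsetToLocals = kStackRegHomeStorage + kStackExtraArgumentStorage;

// Old layout: local storage folded the spill slots in, so the computed
// offset to the spill area double-counted them.
constexpr unsigned kOldOffsetToSpillSlots = kStackOffsetToLocals + 8 * kExtraLocals + 8 * kSpillSlots;

// New layout: the two sections are sized independently, and spills start
// right after the locals.
constexpr unsigned kStackLocalStorage = 8 * kExtraLocals;
constexpr unsigned kStackSpillStorage = 8 * kSpillSlots;
constexpr unsigned kNewOffsetToSpillSlots = kStackOffsetToLocals + kStackLocalStorage;

static_assert(kOldOffsetToSpillSlots == 0x70, "old test expectation");
static_assert(kNewOffsetToSpillSlots == 0x48, "new test expectation");

// Ignoring the (unchanged) non-volatile xmm storage term, the frame size
// computed by getFullStackSize is the same before and after the patch.
static_assert(kOldOffsetToSpillSlots + kStackAlign ==
    kNewOffsetToSpillSlots + kStackSpillStorage + kStackAlign);

int main()
{
    std::printf("spill slot 0: old rsp+0%Xh, new rsp+0%Xh\n",
        kOldOffsetToSpillSlots, kNewOffsetToSpillSlots);
}

If those inferred values are right, the patch does not grow the frame at all;
it relocates the spill area inside the existing reservation, which is exactly
why the expected offsets in the register allocator test move down by
8 * kSpillSlots = 40 bytes.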