Diffstat (limited to 'src/qml')
-rw-r--r--  src/qml/jit/qv4assembler.cpp  | 113
-rw-r--r--  src/qml/jit/qv4assembler_p.h  | 167
-rw-r--r--  src/qml/jit/qv4binop.cpp      |  12
-rw-r--r--  src/qml/jit/qv4isel_masm.cpp  |  62
-rw-r--r--  src/qml/jit/qv4isel_masm_p.h  |  42
-rw-r--r--  src/qml/jit/qv4unop.cpp       |   2
6 files changed, 248 insertions, 150 deletions
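
Note on the change below: the JIT stops hard-wiring a single PlatformMacroAssembler. Assembler becomes Assembler<TargetConfiguration>, where the configuration bundles the macro assembler with its platform traits (AssemblerTargetConfiguration), and the default platform is instantiated explicitly at the end of qv4assembler.cpp. The following is a minimal, self-contained sketch of that pattern, assuming invented stand-in types (FakeMacroAssemblerX86_64, FakePlatformTraitsX86_64, MiniAssembler); it is not the real QV4 code.

// Standalone sketch of the target-configuration pattern this patch introduces.
// Every name here is a hypothetical stand-in, not the real JSC/QV4 type of the
// same role.
#include <cstdint>
#include <iostream>

struct FakeMacroAssemblerX86_64 {                 // role of JSC::MacroAssemblerX86_64
    using RegisterID = int;
    void move(std::int32_t imm, RegisterID reg) {
        std::cout << "mov $" << imm << ", r" << reg << '\n';
    }
};

struct FakePlatformTraitsX86_64 {                 // role of TargetPlatform<...>
    static constexpr int RegisterSize = 8;
    static constexpr FakeMacroAssemblerX86_64::RegisterID ReturnValueRegister = 0;
};

// Role of AssemblerTargetConfiguration: one type bundles the instruction
// emitter with the platform description (the patch notes that a target OS
// will join it later).
template <typename PlatformAssembler, typename PlatformTraits>
struct TargetConfiguration {
    using MacroAssembler = PlatformAssembler;
    using Platform = PlatformTraits;
};

// Role of Assembler<TargetConfiguration>: deriving from both members of the
// configuration lets method bodies say JITTargetPlatform::ReturnValueRegister
// instead of referring to one global platform.
template <typename Config>
class MiniAssembler : public Config::MacroAssembler, public Config::Platform {
public:
    using JITTargetPlatform = typename Config::Platform;

    void loadReturnValue(std::int32_t value) {
        this->move(value, JITTargetPlatform::ReturnValueRegister);
    }
};

using DefaultConfig =
    TargetConfiguration<FakeMacroAssemblerX86_64, FakePlatformTraitsX86_64>;

// Mirrors "template class Assembler<AssemblerTargetConfiguration<...>>" at the
// end of qv4assembler.cpp: the template definition stays in one .cpp file.
template class MiniAssembler<DefaultConfig>;

int main() {
    MiniAssembler<DefaultConfig> as;
    as.loadReturnValue(42);   // prints the pseudo-instruction it would emit
    return 0;
}

The same shape explains why the diff replaces direct uses of ScratchRegister, FramePointerRegister and friends with JITTargetPlatform-qualified names: inside a class template those members come from a dependent base, so the qualification is what keeps name lookup working.
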
diff --git a/src/qml/jit/qv4assembler.cpp b/src/qml/jit/qv4assembler.cpp index 0aacacfb42..ecab4902fc 100644 --- a/src/qml/jit/qv4assembler.cpp +++ b/src/qml/jit/qv4assembler.cpp @@ -146,9 +146,11 @@ bool CompilationUnit::memoryMapCode(QString *errorString) return true; } -const Assembler::VoidType Assembler::Void; +template <typename TargetConfiguration> +const typename Assembler<TargetConfiguration>::VoidType Assembler<TargetConfiguration>::Void; -Assembler::Assembler(QV4::Compiler::JSUnitGenerator *jsGenerator, IR::Function* function, QV4::ExecutableAllocator *executableAllocator) +template <typename TargetConfiguration> +Assembler<TargetConfiguration>::Assembler(QV4::Compiler::JSUnitGenerator *jsGenerator, IR::Function* function, QV4::ExecutableAllocator *executableAllocator) : _function(function) , _nextBlock(0) , _executableAllocator(executableAllocator) @@ -159,14 +161,16 @@ Assembler::Assembler(QV4::Compiler::JSUnitGenerator *jsGenerator, IR::Function* _labelPatches.resize(_function->basicBlockCount()); } -void Assembler::registerBlock(IR::BasicBlock* block, IR::BasicBlock *nextBlock) +template <typename TargetConfiguration> +void Assembler<TargetConfiguration>::registerBlock(IR::BasicBlock* block, IR::BasicBlock *nextBlock) { _addrs[block->index()] = label(); catchBlock = block->catchBlock; _nextBlock = nextBlock; } -void Assembler::jumpToBlock(IR::BasicBlock* current, IR::BasicBlock *target) +template <typename TargetConfiguration> +void Assembler<TargetConfiguration>::jumpToBlock(IR::BasicBlock* current, IR::BasicBlock *target) { Q_UNUSED(current); @@ -174,12 +178,14 @@ void Assembler::jumpToBlock(IR::BasicBlock* current, IR::BasicBlock *target) _patches[target->index()].push_back(jump()); } -void Assembler::addPatch(IR::BasicBlock* targetBlock, Jump targetJump) +template <typename TargetConfiguration> +void Assembler<TargetConfiguration>::addPatch(IR::BasicBlock* targetBlock, Jump targetJump) { _patches[targetBlock->index()].push_back(targetJump); } -void Assembler::addPatch(DataLabelPtr patch, Label target) +template <typename TargetConfiguration> +void Assembler<TargetConfiguration>::addPatch(DataLabelPtr patch, Label target) { DataLabelPatch p; p.dataLabel = patch; @@ -187,19 +193,22 @@ void Assembler::addPatch(DataLabelPtr patch, Label target) _dataLabelPatches.push_back(p); } -void Assembler::addPatch(DataLabelPtr patch, IR::BasicBlock *target) +template <typename TargetConfiguration> +void Assembler<TargetConfiguration>::addPatch(DataLabelPtr patch, IR::BasicBlock *target) { _labelPatches[target->index()].push_back(patch); } -void Assembler::generateCJumpOnNonZero(RegisterID reg, IR::BasicBlock *currentBlock, +template <typename TargetConfiguration> +void Assembler<TargetConfiguration>::generateCJumpOnNonZero(RegisterID reg, IR::BasicBlock *currentBlock, IR::BasicBlock *trueBlock, IR::BasicBlock *falseBlock) { - generateCJumpOnCompare(NotEqual, reg, TrustedImm32(0), currentBlock, trueBlock, falseBlock); + generateCJumpOnCompare(RelationalCondition::NotEqual, reg, TrustedImm32(0), currentBlock, trueBlock, falseBlock); } #ifdef QV4_USE_64_BIT_VALUE_ENCODING -void Assembler::generateCJumpOnCompare(RelationalCondition cond, +template <typename TargetConfiguration> +void Assembler<TargetConfiguration>::generateCJumpOnCompare(RelationalCondition cond, RegisterID left, TrustedImm64 right, IR::BasicBlock *currentBlock, @@ -217,7 +226,8 @@ void Assembler::generateCJumpOnCompare(RelationalCondition cond, } #endif -void Assembler::generateCJumpOnCompare(RelationalCondition cond, 
+template <typename TargetConfiguration> +void Assembler<TargetConfiguration>::generateCJumpOnCompare(RelationalCondition cond, RegisterID left, TrustedImm32 right, IR::BasicBlock *currentBlock, @@ -234,7 +244,8 @@ void Assembler::generateCJumpOnCompare(RelationalCondition cond, } } -void Assembler::generateCJumpOnCompare(RelationalCondition cond, +template <typename TargetConfiguration> +void Assembler<TargetConfiguration>::generateCJumpOnCompare(RelationalCondition cond, RegisterID left, RegisterID right, IR::BasicBlock *currentBlock, @@ -251,7 +262,8 @@ void Assembler::generateCJumpOnCompare(RelationalCondition cond, } } -Assembler::Pointer Assembler::loadAddress(RegisterID tmp, IR::Expr *e) +template <typename TargetConfiguration> +typename Assembler<TargetConfiguration>::Pointer Assembler<TargetConfiguration>::loadAddress(RegisterID tmp, IR::Expr *e) { IR::Temp *t = e->asTemp(); if (t) @@ -260,7 +272,8 @@ Assembler::Pointer Assembler::loadAddress(RegisterID tmp, IR::Expr *e) return loadArgLocalAddress(tmp, e->asArgLocal()); } -Assembler::Pointer Assembler::loadTempAddress(IR::Temp *t) +template <typename TargetConfiguration> +typename Assembler<TargetConfiguration>::Pointer Assembler<TargetConfiguration>::loadTempAddress(IR::Temp *t) { if (t->kind == IR::Temp::StackSlot) return stackSlotPointer(t); @@ -268,7 +281,8 @@ Assembler::Pointer Assembler::loadTempAddress(IR::Temp *t) Q_UNREACHABLE(); } -Assembler::Pointer Assembler::loadArgLocalAddress(RegisterID baseReg, IR::ArgLocal *al) +template <typename TargetConfiguration> +typename Assembler<TargetConfiguration>::Pointer Assembler<TargetConfiguration>::loadArgLocalAddress(RegisterID baseReg, IR::ArgLocal *al) { int32_t offset = 0; int scope = al->scope; @@ -298,7 +312,8 @@ Assembler::Pointer Assembler::loadArgLocalAddress(RegisterID baseReg, IR::ArgLoc return Pointer(baseReg, offset); } -Assembler::Pointer Assembler::loadStringAddress(RegisterID reg, const QString &string) +template <typename TargetConfiguration> +typename Assembler<TargetConfiguration>::Pointer Assembler<TargetConfiguration>::loadStringAddress(RegisterID reg, const QString &string) { loadPtr(Address(Assembler::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), Assembler::ScratchRegister); loadPtr(Address(Assembler::ScratchRegister, qOffsetOf(QV4::Heap::ExecutionContext, compilationUnit)), Assembler::ScratchRegister); @@ -307,12 +322,14 @@ Assembler::Pointer Assembler::loadStringAddress(RegisterID reg, const QString &s return Pointer(reg, id * sizeof(QV4::String*)); } -Assembler::Address Assembler::loadConstant(IR::Const *c, RegisterID baseReg) +template <typename TargetConfiguration> +typename Assembler<TargetConfiguration>::Address Assembler<TargetConfiguration>::loadConstant(IR::Const *c, RegisterID baseReg) { return loadConstant(convertToValue(c), baseReg); } -Assembler::Address Assembler::loadConstant(const Primitive &v, RegisterID baseReg) +template <typename TargetConfiguration> +typename Assembler<TargetConfiguration>::Address Assembler<TargetConfiguration>::loadConstant(const Primitive &v, RegisterID baseReg) { loadPtr(Address(Assembler::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), baseReg); loadPtr(Address(baseReg, qOffsetOf(QV4::Heap::ExecutionContext, constantTable)), baseReg); @@ -320,33 +337,36 @@ Assembler::Address Assembler::loadConstant(const Primitive &v, RegisterID baseRe return Address(baseReg, index * sizeof(QV4::Value)); } -void Assembler::loadStringRef(RegisterID reg, const QString &string) +template <typename 
TargetConfiguration> +void Assembler<TargetConfiguration>::loadStringRef(RegisterID reg, const QString &string) { const int id = _jsGenerator->registerString(string); move(TrustedImm32(id), reg); } -void Assembler::storeValue(QV4::Primitive value, IR::Expr *destination) +template <typename TargetConfiguration> +void Assembler<TargetConfiguration>::storeValue(QV4::Primitive value, IR::Expr *destination) { Address addr = loadAddress(ScratchRegister, destination); storeValue(value, addr); } -void Assembler::enterStandardStackFrame(const RegisterInformation ®ularRegistersToSave, +template <typename TargetConfiguration> +void Assembler<TargetConfiguration>::enterStandardStackFrame(const RegisterInformation ®ularRegistersToSave, const RegisterInformation &fpRegistersToSave) { platformEnterStandardStackFrame(this); - move(StackPointerRegister, FramePointerRegister); + move(StackPointerRegister, JITTargetPlatform::FramePointerRegister); const int frameSize = _stackLayout->calculateStackFrameSize(); subPtr(TrustedImm32(frameSize), StackPointerRegister); - Address slotAddr(FramePointerRegister, 0); + Address slotAddr(JITTargetPlatform::FramePointerRegister, 0); for (int i = 0, ei = fpRegistersToSave.size(); i < ei; ++i) { Q_ASSERT(fpRegistersToSave.at(i).isFloatingPoint()); slotAddr.offset -= sizeof(double); - JSC::MacroAssembler<PlatformMacroAssembler>::storeDouble(fpRegistersToSave.at(i).reg<FPRegisterID>(), slotAddr); + TargetConfiguration::MacroAssembler::storeDouble(fpRegistersToSave.at(i).reg<FPRegisterID>(), slotAddr); } for (int i = 0, ei = regularRegistersToSave.size(); i < ei; ++i) { Q_ASSERT(regularRegistersToSave.at(i).isRegularRegister()); @@ -355,10 +375,11 @@ void Assembler::enterStandardStackFrame(const RegisterInformation ®ularRegist } } -void Assembler::leaveStandardStackFrame(const RegisterInformation ®ularRegistersToSave, +template <typename TargetConfiguration> +void Assembler<TargetConfiguration>::leaveStandardStackFrame(const RegisterInformation ®ularRegistersToSave, const RegisterInformation &fpRegistersToSave) { - Address slotAddr(FramePointerRegister, -regularRegistersToSave.size() * RegisterSize - fpRegistersToSave.size() * sizeof(double)); + Address slotAddr(JITTargetPlatform::FramePointerRegister, -regularRegistersToSave.size() * RegisterSize - fpRegistersToSave.size() * sizeof(double)); // restore the callee saved registers for (int i = regularRegistersToSave.size() - 1; i >= 0; --i) { @@ -368,7 +389,7 @@ void Assembler::leaveStandardStackFrame(const RegisterInformation ®ularRegist } for (int i = fpRegistersToSave.size() - 1; i >= 0; --i) { Q_ASSERT(fpRegistersToSave.at(i).isFloatingPoint()); - JSC::MacroAssembler<PlatformMacroAssembler>::loadDouble(slotAddr, fpRegistersToSave.at(i).reg<FPRegisterID>()); + TargetConfiguration::MacroAssembler::loadDouble(slotAddr, fpRegistersToSave.at(i).reg<FPRegisterID>()); slotAddr.offset += sizeof(double); } @@ -393,7 +414,8 @@ void Assembler::leaveStandardStackFrame(const RegisterInformation ®ularRegist // Try to load the source expression into the destination FP register. This assumes that two // general purpose (integer) registers are available: the ScratchRegister and the // ReturnValueRegister. It returns a Jump if no conversion can be performed. 
-Assembler::Jump Assembler::genTryDoubleConversion(IR::Expr *src, Assembler::FPRegisterID dest) +template <typename TargetConfiguration> +typename Assembler<TargetConfiguration>::Jump Assembler<TargetConfiguration>::genTryDoubleConversion(IR::Expr *src, FPRegisterID dest) { switch (src->type) { case IR::DoubleType: @@ -436,11 +458,10 @@ Assembler::Jump Assembler::genTryDoubleConversion(IR::Expr *src, Assembler::FPRe isNoInt.link(this); #ifdef QV4_USE_64_BIT_VALUE_ENCODING rshift32(TrustedImm32(Value::IsDoubleTag_Shift), ScratchRegister); - Assembler::Jump isNoDbl = branch32(Equal, ScratchRegister, TrustedImm32(0)); + Assembler::Jump isNoDbl = branch32(RelationalCondition::Equal, JITTargetPlatform::ScratchRegister, TrustedImm32(0)); #else and32(Assembler::TrustedImm32(Value::NotDouble_Mask), Assembler::ScratchRegister); - Assembler::Jump isNoDbl = branch32(Assembler::Equal, Assembler::ScratchRegister, - Assembler::TrustedImm32(Value::NotDouble_Mask)); + Assembler::Jump isNoDbl = branch32(RelationalCondition::Equal, JITTargetPlatform::ScratchRegister, TrustedImm32(Value::NotDouble_Mask)); #endif toDoubleRegister(src, dest); intDone.link(this); @@ -448,10 +469,11 @@ Assembler::Jump Assembler::genTryDoubleConversion(IR::Expr *src, Assembler::FPRe return isNoDbl; } -Assembler::Jump Assembler::branchDouble(bool invertCondition, IR::AluOp op, +template <typename TargetConfiguration> +typename Assembler<TargetConfiguration>::Jump Assembler<TargetConfiguration>::branchDouble(bool invertCondition, IR::AluOp op, IR::Expr *left, IR::Expr *right) { - Assembler::DoubleCondition cond; + DoubleCondition cond; switch (op) { case IR::OpGt: cond = Assembler::DoubleGreaterThan; break; case IR::OpLt: cond = Assembler::DoubleLessThan; break; @@ -465,12 +487,13 @@ Assembler::Jump Assembler::branchDouble(bool invertCondition, IR::AluOp op, Q_UNREACHABLE(); } if (invertCondition) - cond = JSC::MacroAssembler<PlatformMacroAssembler>::invert(cond); + cond = TargetConfiguration::MacroAssembler::invert(cond); - return JSC::MacroAssembler<PlatformMacroAssembler>::branchDouble(cond, toDoubleRegister(left, FPGpr0), toDoubleRegister(right, FPGpr1)); + return TargetConfiguration::MacroAssembler::branchDouble(cond, toDoubleRegister(left, FPGpr0), toDoubleRegister(right, JITTargetPlatform::FPGpr1)); } -Assembler::Jump Assembler::branchInt32(bool invertCondition, IR::AluOp op, IR::Expr *left, IR::Expr *right) +template <typename TargetConfiguration> +typename Assembler<TargetConfiguration>::Jump Assembler<TargetConfiguration>::branchInt32(bool invertCondition, IR::AluOp op, IR::Expr *left, IR::Expr *right) { Assembler::RelationalCondition cond; switch (op) { @@ -486,14 +509,15 @@ Assembler::Jump Assembler::branchInt32(bool invertCondition, IR::AluOp op, IR::E Q_UNREACHABLE(); } if (invertCondition) - cond = JSC::MacroAssembler<PlatformMacroAssembler>::invert(cond); + cond = TargetConfiguration::MacroAssembler::invert(cond); - return JSC::MacroAssembler<PlatformMacroAssembler>::branch32(cond, - toInt32Register(left, Assembler::ScratchRegister), - toInt32Register(right, Assembler::ReturnValueRegister)); + return TargetConfiguration::MacroAssembler::branch32(cond, + toInt32Register(left, Assembler::ScratchRegister), + toInt32Register(right, Assembler::ReturnValueRegister)); } -void Assembler::setStackLayout(int maxArgCountForBuiltins, int regularRegistersToSave, int fpRegistersToSave) +template <typename TargetConfiguration> +void Assembler<TargetConfiguration>::setStackLayout(int maxArgCountForBuiltins, int 
regularRegistersToSave, int fpRegistersToSave) { _stackLayout.reset(new StackLayout(_function, maxArgCountForBuiltins, regularRegistersToSave, fpRegistersToSave)); } @@ -563,7 +587,8 @@ static void qt_closePmap() #endif -JSC::MacroAssemblerCodeRef Assembler::link(int *codeSize) +template <typename TargetConfiguration> +JSC::MacroAssemblerCodeRef Assembler<TargetConfiguration>::link(int *codeSize) { Label endOfCode = label(); @@ -577,7 +602,7 @@ JSC::MacroAssemblerCodeRef Assembler::link(int *codeSize) } JSC::JSGlobalData dummy(_executableAllocator); - JSC::LinkBuffer<JSC::MacroAssembler<PlatformMacroAssembler>> linkBuffer(dummy, this, 0); + JSC::LinkBuffer<typename TargetConfiguration::MacroAssembler> linkBuffer(dummy, this, 0); for (const DataLabelPatch &p : qAsConst(_dataLabelPatches)) linkBuffer.patch(p.dataLabel, linkBuffer.locationOf(p.target)); @@ -668,4 +693,6 @@ JSC::MacroAssemblerCodeRef Assembler::link(int *codeSize) return codeRef; } +template class QV4::JIT::Assembler<AssemblerTargetConfiguration<DefaultPlatformMacroAssembler>>; + #endif diff --git a/src/qml/jit/qv4assembler_p.h b/src/qml/jit/qv4assembler_p.h index e40609eea6..e2ebe3f58c 100644 --- a/src/qml/jit/qv4assembler_p.h +++ b/src/qml/jit/qv4assembler_p.h @@ -90,31 +90,99 @@ struct CompilationUnit : public QV4::CompiledData::CompilationUnit }; #if CPU(ARM_THUMB2) -typedef JSC::MacroAssemblerARMv7 PlatformMacroAssembler; +typedef JSC::MacroAssemblerARMv7 DefaultPlatformMacroAssembler; #elif CPU(ARM64) -typedef JSC::MacroAssemblerARM64 PlatformMacroAssembler; +typedef JSC::MacroAssemblerARM64 DefaultPlatformMacroAssembler; #elif CPU(ARM_TRADITIONAL) -typedef JSC::MacroAssemblerARM PlatformMacroAssembler; +typedef JSC::MacroAssemblerARM DefaultPlatformMacroAssembler; #elif CPU(MIPS) -typedef JSC::MacroAssemblerMIPS PlatformMacroAssembler; +typedef JSC::MacroAssemblerMIPS DefaultPlatformMacroAssembler; #elif CPU(X86) -typedef JSC::MacroAssemblerX86 PlatformMacroAssembler; +typedef JSC::MacroAssemblerX86 DefaultPlatformMacroAssembler; #elif CPU(X86_64) -typedef JSC::MacroAssemblerX86_64 PlatformMacroAssembler; +typedef JSC::MacroAssemblerX86_64 DefaultPlatformMacroAssembler; #elif CPU(SH4) -typedef JSC::MacroAssemblerSH4 PlatformMacroAssembler; +typedef JSC::MacroAssemblerSH4 DefaultPlatformMacroAssembler; #endif -class Assembler : public JSC::MacroAssembler<PlatformMacroAssembler>, public TargetPlatform<PlatformMacroAssembler> +template <typename PlatformAssembler> +struct AssemblerTargetConfiguration +{ + typedef JSC::MacroAssembler<PlatformAssembler> MacroAssembler; + typedef TargetPlatform<PlatformAssembler> Platform; + // More things coming here in the future, such as Target OS +}; + +template <typename TargetConfiguration> +class Assembler : public TargetConfiguration::MacroAssembler, public TargetConfiguration::Platform { Q_DISABLE_COPY(Assembler) public: Assembler(QV4::Compiler::JSUnitGenerator *jsGenerator, IR::Function* function, QV4::ExecutableAllocator *executableAllocator); - using MacroAssembler = JSC::MacroAssembler<PlatformMacroAssembler>; - using RegisterID = MacroAssembler::RegisterID; - using FPRegisterID = MacroAssembler::FPRegisterID; + using MacroAssembler = typename TargetConfiguration::MacroAssembler; + using RegisterID = typename MacroAssembler::RegisterID; + using FPRegisterID = typename MacroAssembler::FPRegisterID; + using Address = typename MacroAssembler::Address; + using Label = typename MacroAssembler::Label; + using Jump = typename MacroAssembler::Jump; + using DataLabelPtr = typename 
MacroAssembler::DataLabelPtr; + using TrustedImm32 = typename MacroAssembler::TrustedImm32; + using TrustedImm64 = typename MacroAssembler::TrustedImm64; + using TrustedImmPtr = typename MacroAssembler::TrustedImmPtr; + using RelationalCondition = typename MacroAssembler::RelationalCondition; + using typename MacroAssembler::DoubleCondition; + using MacroAssembler::label; + using MacroAssembler::move; + using MacroAssembler::jump; +#ifdef QV4_USE_64_BIT_VALUE_ENCODING + using MacroAssembler::moveDoubleTo64; + using MacroAssembler::move64ToDouble; + using MacroAssembler::xor64; + using MacroAssembler::store64; + using MacroAssembler::load64; + using MacroAssembler::branch64; +#endif + using MacroAssembler::add32; + using MacroAssembler::and32; + using MacroAssembler::store32; + using MacroAssembler::loadPtr; + using MacroAssembler::load32; + using MacroAssembler::branch32; + using MacroAssembler::subDouble; + using MacroAssembler::subPtr; + using MacroAssembler::addPtr; + using MacroAssembler::call; + using MacroAssembler::poke; + using MacroAssembler::xorPtr; + using MacroAssembler::branchTruncateDoubleToUint32; + using MacroAssembler::or32; + using MacroAssembler::moveDouble; + using MacroAssembler::convertUInt32ToDouble; + using MacroAssembler::invert; + using MacroAssembler::convertInt32ToDouble; + using MacroAssembler::rshift32; + using MacroAssembler::storePtr; + +#if !defined(VALUE_FITS_IN_REGISTER) + using MacroAssembler::moveIntsToDouble; +#endif + + using JITTargetPlatform = typename TargetConfiguration::Platform; + using JITTargetPlatform::RegisterArgumentCount; + using JITTargetPlatform::StackSpaceAllocatedUponFunctionEntry; + using JITTargetPlatform::RegisterSize; + using JITTargetPlatform::StackAlignment; + using JITTargetPlatform::ReturnValueRegister; + using JITTargetPlatform::StackPointerRegister; + using JITTargetPlatform::ScratchRegister; + using JITTargetPlatform::EngineRegister; + using JITTargetPlatform::StackShadowSpace; + using JITTargetPlatform::registerForArgument; + using JITTargetPlatform::FPGpr0; + using JITTargetPlatform::platformEnterStandardStackFrame; + using JITTargetPlatform::platformLeaveStandardStackFrame; struct LookupCall { Address addr; @@ -350,9 +418,9 @@ public: void generateCJumpOnCompare(RelationalCondition cond, RegisterID left, RegisterID right, IR::BasicBlock *currentBlock, IR::BasicBlock *trueBlock, IR::BasicBlock *falseBlock); - Jump genTryDoubleConversion(IR::Expr *src, Assembler::FPRegisterID dest); - Assembler::Jump branchDouble(bool invertCondition, IR::AluOp op, IR::Expr *left, IR::Expr *right); - Assembler::Jump branchInt32(bool invertCondition, IR::AluOp op, IR::Expr *left, IR::Expr *right); + Jump genTryDoubleConversion(IR::Expr *src, FPRegisterID dest); + Jump branchDouble(bool invertCondition, IR::AluOp op, IR::Expr *left, IR::Expr *right); + Jump branchInt32(bool invertCondition, IR::AluOp op, IR::Expr *left, IR::Expr *right); Pointer loadAddress(RegisterID tmp, IR::Expr *t); Pointer loadTempAddress(IR::Temp *t); @@ -697,8 +765,8 @@ public: store64(ReturnValueRegister, addr); } #else - using JSC::MacroAssembler<PlatformMacroAssembler>::loadDouble; - using JSC::MacroAssembler<PlatformMacroAssembler>::storeDouble; + using MacroAssembler::loadDouble; + using MacroAssembler::storeDouble; #endif template <typename Result, typename Source> @@ -711,8 +779,8 @@ public: { Q_ASSERT(!source->asTemp() || source->asTemp()->kind != IR::Temp::PhysicalRegister); Q_ASSERT(target.base != scratchRegister); - 
JSC::MacroAssembler<PlatformMacroAssembler>::loadDouble(loadAddress(scratchRegister, source), FPGpr0); - JSC::MacroAssembler<PlatformMacroAssembler>::storeDouble(FPGpr0, target); + TargetConfiguration::MacroAssembler::loadDouble(loadAddress(scratchRegister, source), FPGpr0); + TargetConfiguration::MacroAssembler::storeDouble(FPGpr0, target); } void storeValue(QV4::Primitive value, RegisterID destination) @@ -742,7 +810,7 @@ public: void checkException() { load32(Address(EngineRegister, qOffsetOf(QV4::ExecutionEngine, hasException)), ScratchRegister); - Jump exceptionThrown = branch32(NotEqual, ScratchRegister, TrustedImm32(0)); + Jump exceptionThrown = branch32(RelationalCondition::NotEqual, ScratchRegister, TrustedImm32(0)); if (catchBlock) addPatch(catchBlock, exceptionThrown); else @@ -801,6 +869,27 @@ public: enum { Size = 0 }; }; + template <typename T> bool prepareCall(T &) + { return true; } + + bool prepareCall(LookupCall &lookupCall) + { + // IMPORTANT! See generateLookupCall in qv4isel_masm_p.h for details! + + // load the table from the context + loadPtr(Address(EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), ScratchRegister); + loadPtr(Address(ScratchRegister, qOffsetOf(QV4::Heap::ExecutionContext, lookups)), + lookupCall.addr.base); + // pre-calculate the indirect address for the lookupCall table: + if (lookupCall.addr.offset) + addPtr(TrustedImm32(lookupCall.addr.offset), lookupCall.addr.base); + // store it as the first argument + loadArgumentOnStackOrRegister<0>(lookupCall.addr.base); + // set the destination addresses offset to the getterSetterOffset. The base is the lookupCall table's address + lookupCall.addr.offset = lookupCall.getterSetterOffset; + return false; + } + template <typename ArgRet, typename Callable, typename Arg1, typename Arg2, typename Arg3, typename Arg4, typename Arg5, typename Arg6> void generateFunctionCallImp(bool needsExceptionCheck, ArgRet r, const char* functionName, Callable function, Arg1 arg1, Arg2 arg2, Arg3 arg3, Arg4 arg4, Arg5 arg5, Arg6 arg6) { @@ -833,7 +922,7 @@ public: loadArgumentOnStackOrRegister<2>(arg3); loadArgumentOnStackOrRegister<1>(arg2); - if (prepareCall(function, this)) + if (prepareCall(function)) loadArgumentOnStackOrRegister<0>(arg1); #ifdef RESTORE_EBX_ON_CALL @@ -977,7 +1066,7 @@ public: void storeUInt32(RegisterID reg, Pointer addr) { // The UInt32 representation in QV4::Value is really convoluted. See also toUInt32Register. 
- Jump intRange = branch32(GreaterThanOrEqual, reg, TrustedImm32(0)); + Jump intRange = branch32(RelationalCondition::GreaterThanOrEqual, reg, TrustedImm32(0)); convertUInt32ToDouble(reg, FPGpr0, ReturnValueRegister); storeDouble(FPGpr0, addr); Jump done = jump(); @@ -1007,7 +1096,7 @@ public: move(TrustedImm64(i), ReturnValueRegister); move64ToDouble(ReturnValueRegister, target); #else - JSC::MacroAssembler<PlatformMacroAssembler>::loadDouble(loadConstant(c, ScratchRegister), target); + MacroAssembler::loadDouble(loadConstant(c, ScratchRegister), target); #endif return target; } @@ -1067,7 +1156,7 @@ public: Pointer tagAddr = addr; tagAddr.offset += 4; load32(tagAddr, scratchReg); - Jump inIntRange = branch32(Equal, scratchReg, TrustedImm32(QV4::Value::Integer_Type_Internal)); + Jump inIntRange = branch32(RelationalCondition::Equal, scratchReg, TrustedImm32(QV4::Value::Integer_Type_Internal)); // it's not in signed int range, so load it as a double, and truncate it down loadDouble(addr, FPGpr0); @@ -1115,8 +1204,9 @@ private: QV4::Compiler::JSUnitGenerator *_jsGenerator; }; +template <typename TargetConfiguration> template <typename Result, typename Source> -void Assembler::copyValue(Result result, Source source) +void Assembler<TargetConfiguration>::copyValue(Result result, Source source) { #ifdef VALUE_FITS_IN_REGISTER // Use ReturnValueRegister as "scratch" register because loadArgument @@ -1129,8 +1219,9 @@ void Assembler::copyValue(Result result, Source source) #endif } +template <typename TargetConfiguration> template <typename Result> -void Assembler::copyValue(Result result, IR::Expr* source) +void Assembler<TargetConfiguration>::copyValue(Result result, IR::Expr* source) { if (source->type == IR::BoolType) { RegisterID reg = toInt32Register(source, ScratchRegister); @@ -1161,34 +1252,12 @@ void Assembler::copyValue(Result result, IR::Expr* source) } } -inline Assembler::RuntimeCall::RuntimeCall(uint offset) +template <typename TargetConfiguration> +inline Assembler<TargetConfiguration>::RuntimeCall::RuntimeCall(uint offset) : addr(Assembler::EngineRegister, offset + qOffsetOf(QV4::ExecutionEngine, runtime)) { } - - -template <typename T> inline bool prepareCall(T &, Assembler *) -{ return true; } - -template <> inline bool prepareCall(Assembler::LookupCall &lookupCall, Assembler *as) -{ - // IMPORTANT! See generateLookupCall in qv4isel_masm_p.h for details! - - // load the table from the context - as->loadPtr(Assembler::Address(Assembler::EngineRegister, qOffsetOf(QV4::ExecutionEngine, current)), Assembler::ScratchRegister); - as->loadPtr(Assembler::Address(Assembler::ScratchRegister, qOffsetOf(QV4::Heap::ExecutionContext, lookups)), - lookupCall.addr.base); - // pre-calculate the indirect address for the lookupCall table: - if (lookupCall.addr.offset) - as->addPtr(Assembler::TrustedImm32(lookupCall.addr.offset), lookupCall.addr.base); - // store it as the first argument - as->loadArgumentOnStackOrRegister<0>(lookupCall.addr.base); - // set the destination addresses offset to the getterSetterOffset. 
The base is the lookupCall table's address - lookupCall.addr.offset = lookupCall.getterSetterOffset; - return false; -} - } // end of namespace JIT } // end of namespace QV4 diff --git a/src/qml/jit/qv4binop.cpp b/src/qml/jit/qv4binop.cpp index df19ddb31c..dfaf7f518c 100644 --- a/src/qml/jit/qv4binop.cpp +++ b/src/qml/jit/qv4binop.cpp @@ -455,18 +455,18 @@ bool Binop<JITAssembler>::int32Binop(IR::Expr *leftSource, IR::Expr *rightSource // Not all CPUs accept shifts over more than 31 bits, and some CPUs (like ARM) will do // surprising stuff when shifting over 0 bits. #define CHECK_RHS(op) { \ - as->and32(TrustedImm32(0x1f), r, Assembler::ScratchRegister); \ - Jump notZero = as->branch32(RelationalCondition::NotEqual, Assembler::ScratchRegister, TrustedImm32(0)); \ + as->and32(TrustedImm32(0x1f), r, JITAssembler::ScratchRegister); \ + Jump notZero = as->branch32(RelationalCondition::NotEqual, JITAssembler::ScratchRegister, TrustedImm32(0)); \ as->move(l, targetReg); \ Jump done = as->jump(); \ notZero.link(as); \ op; \ done.link(as); \ } - case IR::OpLShift: CHECK_RHS(as->lshift32(l, Assembler::ScratchRegister, targetReg)); break; - case IR::OpRShift: CHECK_RHS(as->rshift32(l, Assembler::ScratchRegister, targetReg)); break; + case IR::OpLShift: CHECK_RHS(as->lshift32(l, JITAssembler::ScratchRegister, targetReg)); break; + case IR::OpRShift: CHECK_RHS(as->rshift32(l, JITAssembler::ScratchRegister, targetReg)); break; case IR::OpURShift: - CHECK_RHS(as->urshift32(l, Assembler::ScratchRegister, targetReg)); + CHECK_RHS(as->urshift32(l, JITAssembler::ScratchRegister, targetReg)); as->storeUInt32(targetReg, target); // IMPORTANT: do NOT do a break here! The stored type of an urshift is different from the other binary operations! return true; @@ -576,6 +576,6 @@ typename JITAssembler::Jump Binop<JITAssembler>::genInlineBinop(IR::Expr *leftSo return done; } -template struct QV4::JIT::Binop<QV4::JIT::Assembler>; +template struct QV4::JIT::Binop<QV4::JIT::Assembler<AssemblerTargetConfiguration<DefaultPlatformMacroAssembler>>>; #endif diff --git a/src/qml/jit/qv4isel_masm.cpp b/src/qml/jit/qv4isel_masm.cpp index a5c3d94648..6eadee4dfa 100644 --- a/src/qml/jit/qv4isel_masm.cpp +++ b/src/qml/jit/qv4isel_masm.cpp @@ -108,8 +108,8 @@ void InstructionSelection::run(int functionIndex) BitVector removableJumps = opt.calculateOptionalJumps(); qSwap(_removableJumps, removableJumps); - Assembler* oldAssembler = _as; - _as = new Assembler(jsGenerator, _function, executableAllocator); + JITAssembler* oldAssembler = _as; + _as = new JITAssembler(jsGenerator, _function, executableAllocator); _as->setStackLayout(6, // 6 == max argc for calls to built-ins with an argument array regularRegistersToSave.size(), fpRegistersToSave.size()); @@ -293,7 +293,7 @@ void InstructionSelection::callBuiltinUnwindException(IR::Expr *result) void InstructionSelection::callBuiltinPushCatchScope(const QString &exceptionName) { - generateRuntimeCall(Assembler::Void, pushCatchScope, JITTargetPlatform::EngineRegister, StringToIndex(exceptionName)); + generateRuntimeCall(JITAssembler::Void, pushCatchScope, JITTargetPlatform::EngineRegister, StringToIndex(exceptionName)); } void InstructionSelection::callBuiltinForeachIteratorObject(IR::Expr *arg, IR::Expr *result) @@ -316,17 +316,17 @@ void InstructionSelection::callBuiltinPushWithScope(IR::Expr *arg) { Q_ASSERT(arg); - generateRuntimeCall(Assembler::Void, pushWithScope, Reference(arg), JITTargetPlatform::EngineRegister); + generateRuntimeCall(JITAssembler::Void, pushWithScope, 
Reference(arg), JITTargetPlatform::EngineRegister); } void InstructionSelection::callBuiltinPopScope() { - generateRuntimeCall(Assembler::Void, popScope, JITTargetPlatform::EngineRegister); + generateRuntimeCall(JITAssembler::Void, popScope, JITTargetPlatform::EngineRegister); } void InstructionSelection::callBuiltinDeclareVar(bool deletable, const QString &name) { - generateRuntimeCall(Assembler::Void, declareVar, JITTargetPlatform::EngineRegister, + generateRuntimeCall(JITAssembler::Void, declareVar, JITTargetPlatform::EngineRegister, TrustedImm32(deletable), StringToIndex(name)); } @@ -427,7 +427,7 @@ void InstructionSelection::callBuiltinSetupArgumentObject(IR::Expr *result) void InstructionSelection::callBuiltinConvertThisToObject() { - generateRuntimeCall(Assembler::Void, convertThisToObject, JITTargetPlatform::EngineRegister); + generateRuntimeCall(JITAssembler::Void, convertThisToObject, JITTargetPlatform::EngineRegister); } void InstructionSelection::callValue(IR::Expr *value, IR::ExprList *args, IR::Expr *result) @@ -524,7 +524,7 @@ void InstructionSelection::getActivationProperty(const IR::Name *name, IR::Expr { if (useFastLookups && name->global) { uint index = registerGlobalGetterLookup(*name->id); - generateLookupCall(target, index, qOffsetOf(QV4::Lookup, globalGetter), JITTargetPlatform::EngineRegister, Assembler::Void); + generateLookupCall(target, index, qOffsetOf(QV4::Lookup, globalGetter), JITTargetPlatform::EngineRegister, JITAssembler::Void); return; } generateRuntimeCall(target, getActivationProperty, JITTargetPlatform::EngineRegister, StringToIndex(*name->id)); @@ -533,7 +533,7 @@ void InstructionSelection::getActivationProperty(const IR::Name *name, IR::Expr void InstructionSelection::setActivationProperty(IR::Expr *source, const QString &targetName) { // ### should use a lookup call here - generateRuntimeCall(Assembler::Void, setActivationProperty, + generateRuntimeCall(JITAssembler::Void, setActivationProperty, JITTargetPlatform::EngineRegister, StringToIndex(targetName), PointerToValue(source)); } @@ -547,7 +547,7 @@ void InstructionSelection::getProperty(IR::Expr *base, const QString &name, IR:: { if (useFastLookups) { uint index = registerGetterLookup(name); - generateLookupCall(target, index, qOffsetOf(QV4::Lookup, getter), JITTargetPlatform::EngineRegister, PointerToValue(base), Assembler::Void); + generateLookupCall(target, index, qOffsetOf(QV4::Lookup, getter), JITTargetPlatform::EngineRegister, PointerToValue(base), JITAssembler::Void); } else { generateRuntimeCall(target, getProperty, JITTargetPlatform::EngineRegister, PointerToValue(base), StringToIndex(name)); @@ -583,12 +583,12 @@ void InstructionSelection::setProperty(IR::Expr *source, IR::Expr *targetBase, { if (useFastLookups) { uint index = registerSetterLookup(targetName); - generateLookupCall(Assembler::Void, index, qOffsetOf(QV4::Lookup, setter), + generateLookupCall(JITAssembler::Void, index, qOffsetOf(QV4::Lookup, setter), JITTargetPlatform::EngineRegister, PointerToValue(targetBase), PointerToValue(source)); } else { - generateRuntimeCall(Assembler::Void, setProperty, JITTargetPlatform::EngineRegister, + generateRuntimeCall(JITAssembler::Void, setProperty, JITTargetPlatform::EngineRegister, PointerToValue(targetBase), StringToIndex(targetName), PointerToValue(source)); } @@ -597,10 +597,10 @@ void InstructionSelection::setProperty(IR::Expr *source, IR::Expr *targetBase, void InstructionSelection::setQmlContextProperty(IR::Expr *source, IR::Expr *targetBase, IR::Member::MemberKind kind, int 
propertyIndex) { if (kind == IR::Member::MemberOfQmlScopeObject) - generateRuntimeCall(Assembler::Void, setQmlScopeObjectProperty, JITTargetPlatform::EngineRegister, PointerToValue(targetBase), + generateRuntimeCall(JITAssembler::Void, setQmlScopeObjectProperty, JITTargetPlatform::EngineRegister, PointerToValue(targetBase), TrustedImm32(propertyIndex), PointerToValue(source)); else if (kind == IR::Member::MemberOfQmlContextObject) - generateRuntimeCall(Assembler::Void, setQmlContextObjectProperty, JITTargetPlatform::EngineRegister, PointerToValue(targetBase), + generateRuntimeCall(JITAssembler::Void, setQmlContextObjectProperty, JITTargetPlatform::EngineRegister, PointerToValue(targetBase), TrustedImm32(propertyIndex), PointerToValue(source)); else Q_ASSERT(false); @@ -608,7 +608,7 @@ void InstructionSelection::setQmlContextProperty(IR::Expr *source, IR::Expr *tar void InstructionSelection::setQObjectProperty(IR::Expr *source, IR::Expr *targetBase, int propertyIndex) { - generateRuntimeCall(Assembler::Void, setQmlQObjectProperty, JITTargetPlatform::EngineRegister, PointerToValue(targetBase), + generateRuntimeCall(JITAssembler::Void, setQmlQObjectProperty, JITTargetPlatform::EngineRegister, PointerToValue(targetBase), TrustedImm32(propertyIndex), PointerToValue(source)); } @@ -630,12 +630,12 @@ void InstructionSelection::setElement(IR::Expr *source, IR::Expr *targetBase, IR { if (useFastLookups) { uint lookup = registerIndexedSetterLookup(); - generateLookupCall(Assembler::Void, lookup, qOffsetOf(QV4::Lookup, indexedSetter), + generateLookupCall(JITAssembler::Void, lookup, qOffsetOf(QV4::Lookup, indexedSetter), PointerToValue(targetBase), PointerToValue(targetIndex), PointerToValue(source)); return; } - generateRuntimeCall(Assembler::Void, setElement, JITTargetPlatform::EngineRegister, + generateRuntimeCall(JITAssembler::Void, setElement, JITTargetPlatform::EngineRegister, PointerToValue(targetBase), PointerToValue(targetIndex), PointerToValue(source)); } @@ -735,10 +735,10 @@ void InstructionSelection::swapValues(IR::Expr *source, IR::Expr *target) Pointer sAddr = _as->loadAddress(JITTargetPlatform::ScratchRegister, source); Pointer tAddr = _as->loadAddress(JITTargetPlatform::ReturnValueRegister, target); // use the implementation in JSC::MacroAssembler, as it doesn't do bit swizzling - _as->JSC::MacroAssembler<PlatformMacroAssembler>::loadDouble(sAddr, JITTargetPlatform::FPGpr0); - _as->JSC::MacroAssembler<PlatformMacroAssembler>::loadDouble(tAddr, JITTargetPlatform::FPGpr1); - _as->JSC::MacroAssembler<PlatformMacroAssembler>::storeDouble(JITTargetPlatform::FPGpr1, sAddr); - _as->JSC::MacroAssembler<PlatformMacroAssembler>::storeDouble(JITTargetPlatform::FPGpr0, tAddr); + _as->MacroAssembler::loadDouble(sAddr, JITTargetPlatform::FPGpr0); + _as->MacroAssembler::loadDouble(tAddr, JITTargetPlatform::FPGpr1); + _as->MacroAssembler::storeDouble(JITTargetPlatform::FPGpr1, sAddr); + _as->MacroAssembler::storeDouble(JITTargetPlatform::FPGpr0, tAddr); return; } } @@ -783,25 +783,25 @@ void InstructionSelection::swapValues(IR::Expr *source, IR::Expr *target) #define setOp(op, opName, operation) \ do { \ - op = Assembler::RuntimeCall(qOffsetOf(QV4::Runtime, operation)); opName = "Runtime::" isel_stringIfy(operation); \ + op = JITAssembler::RuntimeCall(qOffsetOf(QV4::Runtime, operation)); opName = "Runtime::" isel_stringIfy(operation); \ needsExceptionCheck = QV4::Runtime::Method_##operation##_NeedsExceptionCheck; \ } while (0) #define setOpContext(op, opName, operation) \ do { \ - opContext = 
Assembler::RuntimeCall(qOffsetOf(QV4::Runtime, operation)); opName = "Runtime::" isel_stringIfy(operation); \ + opContext = JITAssembler::RuntimeCall(qOffsetOf(QV4::Runtime, operation)); opName = "Runtime::" isel_stringIfy(operation); \ needsExceptionCheck = QV4::Runtime::Method_##operation##_NeedsExceptionCheck; \ } while (0) void InstructionSelection::unop(IR::AluOp oper, IR::Expr *source, IR::Expr *target) { - QV4::JIT::Unop<Assembler> unop(_as, oper); + QV4::JIT::Unop<JITAssembler> unop(_as, oper); unop.generate(source, target); } void InstructionSelection::binop(IR::AluOp oper, IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target) { - QV4::JIT::Binop<Assembler> binop(_as, oper); + QV4::JIT::Binop<JITAssembler> binop(_as, oper); binop.generate(leftSource, rightSource, target); } @@ -1045,7 +1045,7 @@ void InstructionSelection::convertTypeToSInt32(IR::Expr *source, IR::Expr *targe _as->urshift64(TrustedImm32( QV4::Value::IsDoubleTag_Shift - QV4::Value::IsIntegerConvertible_Shift), JITTargetPlatform::ScratchRegister); - Jump fallback = _as->branch32(Assembler::GreaterThan, JITTargetPlatform::ScratchRegister, TrustedImm32(0)); + Jump fallback = _as->branch32(RelationalCondition::GreaterThan, JITTargetPlatform::ScratchRegister, TrustedImm32(0)); // it's a double _as->move(TrustedImm64(QV4::Value::NaNEncodeMask), JITTargetPlatform::ScratchRegister); @@ -1296,8 +1296,8 @@ void InstructionSelection::visitCJump(IR::CJump *s) return; } - Assembler::RuntimeCall op; - Assembler::RuntimeCall opContext; + JITAssembler::RuntimeCall op; + JITAssembler::RuntimeCall opContext; const char *opName = 0; bool needsExceptionCheck; switch (b->op) { @@ -1363,7 +1363,7 @@ void InstructionSelection::visitRet(IR::Ret *s) break; case IR::UInt32Type: { RegisterID srcReg = (RegisterID) t->index; - Jump intRange = _as->branch32(Assembler::GreaterThanOrEqual, srcReg, TrustedImm32(0)); + Jump intRange = _as->branch32(JITAssembler::GreaterThanOrEqual, srcReg, TrustedImm32(0)); _as->convertUInt32ToDouble(srcReg, JITTargetPlatform::FPGpr0, JITTargetPlatform::ReturnValueRegister); _as->moveDoubleToInts(JITTargetPlatform::FPGpr0, lowReg, highReg); Jump done = _as->jump(); @@ -1399,7 +1399,7 @@ void InstructionSelection::visitRet(IR::Ret *s) _as->xor64(JITTargetPlatform::ScratchRegister, JITTargetPlatform::ReturnValueRegister); } else if (t->type == IR::UInt32Type) { RegisterID srcReg = (RegisterID) t->index; - Jump intRange = _as->branch32(Assembler::GreaterThanOrEqual, srcReg, TrustedImm32(0)); + Jump intRange = _as->branch32(RelationalCondition::GreaterThanOrEqual, srcReg, TrustedImm32(0)); _as->convertUInt32ToDouble(srcReg, JITTargetPlatform::FPGpr0, JITTargetPlatform::ReturnValueRegister); _as->moveDoubleTo64(JITTargetPlatform::FPGpr0, JITTargetPlatform::ReturnValueRegister); _as->move(TrustedImm64(QV4::Value::NaNEncodeMask), JITTargetPlatform::ScratchRegister); @@ -1687,7 +1687,7 @@ bool InstructionSelection::visitCJumpStrictUndefined(IR::Binop *binop, #else // !QV4_USE_64_BIT_VALUE_ENCODING Pointer tagAddr = _as->loadAddress(JITTargetPlatform::ScratchRegister, varSrc); _as->load32(tagAddr, tagReg); - Jump j = _as->branch32(Assembler::invert(cond), tagReg, TrustedImm32(0)); + Jump j = _as->branch32(JITAssembler::invert(cond), tagReg, TrustedImm32(0)); _as->addPatch(falseBlock, j); tagAddr.offset += 4; diff --git a/src/qml/jit/qv4isel_masm_p.h b/src/qml/jit/qv4isel_masm_p.h index 505351f23a..806f7cf291 100644 --- a/src/qml/jit/qv4isel_masm_p.h +++ b/src/qml/jit/qv4isel_masm_p.h @@ -136,22 +136,24 @@ protected: 
void unop(IR::AluOp oper, IR::Expr *sourceTemp, IR::Expr *target) override; void binop(IR::AluOp oper, IR::Expr *leftSource, IR::Expr *rightSource, IR::Expr *target) override; - using Address = Assembler::Address; - using Pointer = Assembler::Pointer; - using PointerToValue = Assembler::PointerToValue; - using RegisterID = Assembler::RegisterID; - using FPRegisterID = Assembler::FPRegisterID; - using ResultCondition = Assembler::ResultCondition; - using TrustedImm32 = Assembler::TrustedImm32; - using TrustedImm64 = Assembler::TrustedImm64; - using Label = Assembler::Label; - using Jump = Assembler::Jump; - using StringToIndex = Assembler::StringToIndex; - using Reference = Assembler::Reference; - using RelationalCondition = Assembler::RelationalCondition; - using BranchTruncateType = Assembler::BranchTruncateType; - - using JITTargetPlatform = Assembler::TargetPlatform; + using JITAssembler = Assembler<AssemblerTargetConfiguration<DefaultPlatformMacroAssembler>>; + using Address = JITAssembler::Address; + using Pointer = JITAssembler::Pointer; + using PointerToValue = JITAssembler::PointerToValue; + using RegisterID = JITAssembler::RegisterID; + using FPRegisterID = JITAssembler::FPRegisterID; + using ResultCondition = JITAssembler::ResultCondition; + using TrustedImm32 = JITAssembler::TrustedImm32; + using TrustedImm64 = JITAssembler::TrustedImm64; + using Label = JITAssembler::Label; + using Jump = JITAssembler::Jump; + using StringToIndex = JITAssembler::StringToIndex; + using Reference = JITAssembler::Reference; + using RelationalCondition = JITAssembler::RelationalCondition; + using BranchTruncateType = JITAssembler::BranchTruncateType; + using RuntimeCall = JITAssembler::RuntimeCall; + + using JITTargetPlatform = AssemblerTargetConfiguration<DefaultPlatformMacroAssembler>::Platform; #if !defined(ARGUMENTS_IN_REGISTERS) Address addressForArgument(int index) const @@ -259,7 +261,7 @@ private: #define isel_stringIfy(s) isel_stringIfyx(s) #define generateRuntimeCall(t, function, ...) 
\ - _as->generateFunctionCallImp(Runtime::Method_##function##_NeedsExceptionCheck, t, "Runtime::" isel_stringIfy(function), Assembler::RuntimeCall(qOffsetOf(QV4::Runtime, function)), __VA_ARGS__) + _as->generateFunctionCallImp(Runtime::Method_##function##_NeedsExceptionCheck, t, "Runtime::" isel_stringIfy(function), RuntimeCall(qOffsetOf(QV4::Runtime, function)), __VA_ARGS__) int prepareVariableArguments(IR::ExprList* args); int prepareCallData(IR::ExprList* args, IR::Expr *thisObject); @@ -276,19 +278,19 @@ private: Pointer lookupAddr(JITTargetPlatform::ReturnValueRegister, index * sizeof(QV4::Lookup)); _as->generateFunctionCallImp(true, retval, "lookup getter/setter", - Assembler::LookupCall(lookupAddr, getterSetterOffset), lookupAddr, + JITAssembler::LookupCall(lookupAddr, getterSetterOffset), lookupAddr, arg1, arg2, arg3); } template <typename Retval, typename Arg1, typename Arg2> void generateLookupCall(Retval retval, uint index, uint getterSetterOffset, Arg1 arg1, Arg2 arg2) { - generateLookupCall(retval, index, getterSetterOffset, arg1, arg2, Assembler::VoidType()); + generateLookupCall(retval, index, getterSetterOffset, arg1, arg2, JITAssembler::VoidType()); } IR::BasicBlock *_block; BitVector _removableJumps; - Assembler* _as; + JITAssembler* _as; QScopedPointer<CompilationUnit> compilationUnit; QQmlEnginePrivate *qmlEngine; diff --git a/src/qml/jit/qv4unop.cpp b/src/qml/jit/qv4unop.cpp index 937441b7be..a27a3d1be2 100644 --- a/src/qml/jit/qv4unop.cpp +++ b/src/qml/jit/qv4unop.cpp @@ -144,6 +144,6 @@ void Unop<JITAssembler>::generateCompl(IR::Expr *source, IR::Expr *target) generateRuntimeCall(target, complement, PointerToValue(source)); } -template struct QV4::JIT::Unop<QV4::JIT::Assembler>; +template struct QV4::JIT::Unop<QV4::JIT::Assembler<AssemblerTargetConfiguration<DefaultPlatformMacroAssembler>>>; #endif |
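
One detail from qv4assembler_p.h worth calling out: the old free-function template prepareCall(T &, Assembler *) with an explicit specialization for LookupCall moves into the now-templated Assembler as ordinary member functions, because a member function template cannot be explicitly specialized at class scope, while a plain overload dispatches just as well. A rough sketch of that shape, using invented placeholder types (LookupCall, RegularCall, MiniAssembler) rather than the real ones:

// Rough sketch of the prepareCall() move; the types and the printed
// "address calculation" are placeholders, not QV4 code.
#include <iostream>

struct LookupCall { int tableOffset; };   // stand-in for Assembler::LookupCall
struct RegularCall {};                    // stand-in for any other callable

template <typename Config>
class MiniAssembler {
public:
    // Generic case: nothing to prepare; returning true tells the caller that
    // argument 0 still needs to be loaded.
    template <typename Call>
    bool prepareCall(Call &) { return true; }

    // Overload chosen for lookup calls: resolve the indirect call address up
    // front and return false, i.e. argument 0 has already been placed
    // (mirrors generateFunctionCallImp skipping loadArgumentOnStackOrRegister<0>).
    bool prepareCall(LookupCall &call) {
        std::cout << "resolve lookup entry at offset " << call.tableOffset << '\n';
        return false;
    }
};

int main() {
    MiniAssembler<int> as;
    RegularCall regular;
    LookupCall lookup{24};
    std::cout << as.prepareCall(regular) << '\n';   // 1: generic path
    std::cout << as.prepareCall(lookup) << '\n';    // 0: after printing the resolve step
    return 0;
}

Kept this way, a future call kind only needs another overload inside the class, with no namespace-scope specializations to maintain.
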