Mirror of https://github.com/stenzek/duckstation.git
CPU/Recompiler: Fix ARM32 build (again)
commit b814666134
parent 1bea8817f1
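
The change itself is mechanical: every bare vixl::aarch32 name used by the emitter helpers (al, Label, MemOperand, Assembler) is now written out in full, the two "using namespace CPU::Recompiler;" directives are dropped, and armGetJumpTrampoline loses its CPU::Recompiler:: qualifier. The apparent cause, inferred from the diff rather than stated in the commit message, is that these helpers no longer sit inside a scope that pulls in the matching using-directives, so the unqualified names stopped resolving on the ARM32 build. A minimal sketch of the qualification pattern, using a hypothetical helper name and an assumed vixl include path rather than the actual DuckStation sources:

// Minimal sketch of the qualification pattern (hypothetical helper, assumed
// include path; not the actual DuckStation source). Without a surrounding
// "using namespace vixl::aarch32;", bare names such as 'al' do not resolve,
// so every vixl name is spelled out in full.
#include <cstdint>
#include "vixl/aarch32/assembler-aarch32.h"

static void emitLoadConstant(vixl::aarch32::Assembler* armAsm,
                             const vixl::aarch32::Register& rd, uint32_t imm)
{
  // mov writes the low halfword, movt the high halfword; 'al' (execute
  // always) is the condition code and now needs its namespace prefix.
  armAsm->mov(vixl::aarch32::al, rd, imm & 0xffff);
  if ((imm >> 16) != 0)
    armAsm->movt(vixl::aarch32::al, rd, imm >> 16);
}

The hunks below apply the same treatment to the real emitter helpers and trampoline code.
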
@@ -92,12 +92,12 @@ void armEmitMov(vixl::aarch32::Assembler* armAsm, const vixl::aarch32::Register&
 {
   if (vixl::IsUintN(16, imm))
   {
-    armAsm->mov(al, rd, imm & 0xffff);
+    armAsm->mov(vixl::aarch32::al, rd, imm & 0xffff);
     return;
   }
 
-  armAsm->mov(al, rd, imm & 0xffff);
-  armAsm->movt(al, rd, imm >> 16);
+  armAsm->mov(vixl::aarch32::al, rd, imm & 0xffff);
+  armAsm->movt(vixl::aarch32::al, rd, imm >> 16);
 }
 
 void armMoveAddressToReg(vixl::aarch32::Assembler* armAsm, const vixl::aarch32::Register& reg, const void* addr)
@@ -126,7 +126,7 @@ void armEmitJmp(vixl::aarch32::Assembler* armAsm, const void* ptr, bool force_in
   }
   else
   {
-    Label label(displacement + armAsm->GetCursorOffset());
+    vixl::aarch32::Label label(displacement + armAsm->GetCursorOffset());
     armAsm->b(&label);
   }
 }
@@ -152,7 +152,7 @@ void armEmitCall(vixl::aarch32::Assembler* armAsm, const void* ptr, bool force_i
   }
   else
   {
-    Label label(displacement + armAsm->GetCursorOffset());
+    vixl::aarch32::Label label(displacement + armAsm->GetCursorOffset());
     armAsm->bl(&label);
   }
 }
@@ -167,7 +167,7 @@ void armEmitCondBranch(vixl::aarch32::Assembler* armAsm, vixl::aarch32::Conditio
   }
   else
   {
-    Label label(displacement + armAsm->GetCursorOffset());
+    vixl::aarch32::Label label(displacement + armAsm->GetCursorOffset());
     armAsm->b(cond, &label);
   }
 }
@@ -175,14 +175,14 @@ void armEmitCondBranch(vixl::aarch32::Assembler* armAsm, vixl::aarch32::Conditio
 void armEmitFarLoad(vixl::aarch32::Assembler* armAsm, const vixl::aarch32::Register& reg, const void* addr)
 {
   armMoveAddressToReg(armAsm, reg, addr);
-  armAsm->ldr(reg, MemOperand(reg));
+  armAsm->ldr(reg, vixl::aarch32::MemOperand(reg));
 }
 
 void armEmitFarStore(vixl::aarch32::Assembler* armAsm, const vixl::aarch32::Register& reg, const void* addr,
                      const vixl::aarch32::Register& tempreg)
 {
   armMoveAddressToReg(armAsm, tempreg, addr);
-  armAsm->str(reg, MemOperand(tempreg));
+  armAsm->str(reg, vixl::aarch32::MemOperand(tempreg));
 }
 
 void CPU::CodeCache::DisassembleAndLogHostCode(const void* start, u32 size)
@@ -204,7 +204,6 @@ u32 CPU::CodeCache::GetHostInstructionCount(const void* start, u32 size)
 u32 CPU::CodeCache::EmitJump(void* code, const void* dst, bool flush_icache)
 {
   using namespace vixl::aarch32;
-  using namespace CPU::Recompiler;
 
   const s32 disp = armGetPCDisplacement(code, dst);
   DebugAssert(armIsPCDisplacementInImmediateRange(disp));
@@ -222,7 +221,7 @@ u32 CPU::CodeCache::EmitJump(void* code, const void* dst, bool flush_icache)
   return kA32InstructionSizeInBytes;
 }
 
-u8* CPU::Recompiler::armGetJumpTrampoline(const void* target)
+u8* armGetJumpTrampoline(const void* target)
 {
   auto it = s_trampoline_targets.find(target);
   if (it != s_trampoline_targets.end())
@@ -239,7 +238,7 @@ u8* CPU::Recompiler::armGetJumpTrampoline(const void* target)
   }
 
   u8* start = s_trampoline_start_ptr + offset;
-  Assembler armAsm(start, TRAMPOLINE_AREA_SIZE - offset);
+  vixl::aarch32::Assembler armAsm(start, TRAMPOLINE_AREA_SIZE - offset);
   armMoveAddressToReg(&armAsm, RSCRATCH, target);
   armAsm.bx(RSCRATCH);
 
@@ -255,7 +254,6 @@ u8* CPU::Recompiler::armGetJumpTrampoline(const void* target)
 u32 CPU::CodeCache::EmitASMFunctions(void* code, u32 code_size)
 {
   using namespace vixl::aarch32;
-  using namespace CPU::Recompiler;
 
   Assembler actual_asm(static_cast<u8*>(code), code_size);
   Assembler* armAsm = &actual_asm;
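
For context on the trampoline hunks: armGetJumpTrampoline materializes the full 32-bit target address in a scratch register and branches through it, which is how jumps reach destinations beyond the roughly +/-32 MB range of a direct A32 branch (the in-range case is guarded by armIsPCDisplacementInImmediateRange above). A hedged sketch of that emission pattern follows; the helper name, the choice of r12 as the scratch register, and the include path are assumptions, not DuckStation's actual code:

// Hedged sketch of a far-jump trampoline; register choice and buffer
// management are assumptions, not the project's actual implementation.
#include <cstdint>
#include "vixl/aarch32/assembler-aarch32.h"

static void emitFarJumpTrampoline(vixl::aarch32::Assembler* armAsm, const void* target)
{
  const uint32_t addr = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(target));
  const vixl::aarch32::Register scratch = vixl::aarch32::r12; // intra-procedure scratch (ip)

  // mov/movt build the full 32-bit address, then bx jumps through the
  // register, so the displacement limit of a direct branch does not apply.
  armAsm->mov(vixl::aarch32::al, scratch, addr & 0xffff);
  armAsm->movt(vixl::aarch32::al, scratch, addr >> 16);
  armAsm->bx(scratch);
}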
|
Loading…
x
Reference in New Issue
Block a user