author     Efraim Flashner <efraim@flashner.co.il>  2023-07-03 22:03:40 +0300
committer  Efraim Flashner <efraim@flashner.co.il>  2025-03-13 14:34:58 +0200
commit     5f54bb4c8e007ea463e41f597db23ac80e77e6ad (patch)
tree       d237eb211bc5cb598f59d512979bf184766936c5
parent     5bf81d0bd73bfddd693a3297383c2e01763a1bf3 (diff)
fixup node-12 riscv64 support
-rw-r--r--  gnu/packages/patches/node-12-riscv64-support.patch  373
1 file changed, 260 insertions, 113 deletions
diff --git a/gnu/packages/patches/node-12-riscv64-support.patch b/gnu/packages/patches/node-12-riscv64-support.patch index ba8b67bbdb..bf10b4608a 100644 --- a/gnu/packages/patches/node-12-riscv64-support.patch +++ b/gnu/packages/patches/node-12-riscv64-support.patch @@ -10456,10 +10456,10 @@ index 00000000000..26730aceca7 +#endif // V8_TARGET_ARCH_RISCV64 diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc new file mode 100644 -index 00000000000..e3c760156f8 +index 00000000000..ade49800c6b --- /dev/null +++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc -@@ -0,0 +1,4576 @@ +@@ -0,0 +1,4589 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. @@ -10478,7 +10478,6 @@ index 00000000000..e3c760156f8 +#include "src/codegen/register-configuration.h" +#include "src/debug/debug.h" +#include "src/execution/frames-inl.h" -+#include "src/heap/memory-chunk.h" +#include "src/init/bootstrapper.h" +#include "src/logging/counters.h" +#include "src/objects/heap-number.h" @@ -14164,6 +14163,20 @@ index 00000000000..e3c760156f8 + xor_(overflow, overflow, dst); +} + ++void TurboAssembler::CallRuntimeWithCEntry(Runtime::FunctionId fid, ++ Register centry) { ++ const Runtime::Function* f = Runtime::FunctionForId(fid); ++ // TODO(1236192): Most runtime routines don't need the number of ++ // arguments passed in because it is constant. At some point we ++ // should remove this need and make the runtime routine entry code ++ // smarter. ++ PrepareCEntryArgs(f->nargs); ++ PrepareCEntryFunction(ExternalReference::Create(f)); ++ DCHECK(!AreAliased(centry, a0, a1)); ++ Daddu(centry, centry, Operand(Code::kHeaderSize - kHeapObjectTag)); ++ Call(centry); ++} ++ +void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments, + SaveFPRegsMode save_doubles) { + // All parameters are on the stack. a0 has the return value after call. @@ -15038,10 +15051,10 @@ index 00000000000..e3c760156f8 +#endif // V8_TARGET_ARCH_RISCV64 diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h new file mode 100644 -index 00000000000..0ee08ca7b8e +index 00000000000..9f7d65ce6bd --- /dev/null +++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h -@@ -0,0 +1,1213 @@ +@@ -0,0 +1,1217 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. @@ -15545,6 +15558,10 @@ index 00000000000..0ee08ca7b8e + Label* condition_met); +#undef COND_ARGS + ++ // Call a runtime routine. This expects {centry} to contain a fitting CEntry ++ // builtin for the target runtime function and uses an indirect call. ++ void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry); ++ + // Performs a truncating conversion of a floating point number as used by + // the JS bitwise operations. See ECMA-262 9.5: ToInt32. + // Exits with 'result' holding the answer. 
@@ -16658,10 +16675,10 @@ index d565a469639..837d192afb3 100644 #if !V8_TARGET_ARCH_ARM64 diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc new file mode 100644 -index 00000000000..cc83f22c657 +index 00000000000..f626c36544c --- /dev/null +++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc -@@ -0,0 +1,2775 @@ +@@ -0,0 +1,2774 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. @@ -16676,7 +16693,6 @@ index 00000000000..cc83f22c657 +#include "src/compiler/backend/gap-resolver.h" +#include "src/compiler/node-matchers.h" +#include "src/compiler/osr.h" -+#include "src/heap/memory-chunk.h" +#include "src/wasm/wasm-code-manager.h" + +namespace v8 { @@ -33046,10 +33062,10 @@ index 766ce71db11..7a1376ce1c0 100644 #endif diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h new file mode 100644 -index 00000000000..2f624f79f5c +index 00000000000..837ff0d5718 --- /dev/null +++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h -@@ -0,0 +1,2516 @@ +@@ -0,0 +1,2642 @@ +// Copyright 2021 the V8 project authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. @@ -33057,8 +33073,6 @@ index 00000000000..2f624f79f5c +#ifndef V8_WASM_BASELINE_RISCV64_LIFTOFF_ASSEMBLER_RISCV64_H_ +#define V8_WASM_BASELINE_RISCV64_LIFTOFF_ASSEMBLER_RISCV64_H_ + -+#include "src/base/platform/wrappers.h" -+#include "src/heap/memory-chunk.h" +#include "src/wasm/baseline/liftoff-assembler.h" + +namespace v8 { @@ -33067,7 +33081,15 @@ index 00000000000..2f624f79f5c + +namespace liftoff { + -+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) { ++// fp-8 holds the stack marker, fp-16 is the instance parameter, first stack ++// slot is located at fp-24. ++// Copied from mips64, hopefully it's correct ++constexpr int32_t kConstantStackSpace = 16; ++constexpr int32_t kFirstStackSlotOffset = ++ kConstantStackSpace + LiftoffAssembler::kStackSlotSize; ++ ++/* ++inline constexpr Condition ToCondition(Condition liftoff_cond) { + switch (liftoff_cond) { + case kEqual: + return eq; @@ -33091,6 +33113,7 @@ index 00000000000..2f624f79f5c + return uge; + } +} ++*/ + +// Liftoff Frames. 
+// @@ -33144,20 +33167,20 @@ index 00000000000..2f624f79f5c + +inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src, + ValueType type) { -+ switch (type.kind()) { -+ case ValueType::kI32: ++ switch (type) { ++ case kWasmI32: + assm->Lw(dst.gp(), src); + break; -+ case ValueType::kI64: -+ case ValueType::kRef: -+ case ValueType::kOptRef: -+ case ValueType::kRtt: ++ case kWasmI64: ++ //case kRef: ++ //case kOptRef: ++ //case kRtt: + assm->Ld(dst.gp(), src); + break; -+ case ValueType::kF32: ++ case kWasmF32: + assm->LoadFloat(dst.fp(), src); + break; -+ case ValueType::kF64: ++ case kWasmF64: + assm->LoadDouble(dst.fp(), src); + break; + default: @@ -33168,20 +33191,20 @@ index 00000000000..2f624f79f5c +inline void Store(LiftoffAssembler* assm, Register base, int32_t offset, + LiftoffRegister src, ValueType type) { + MemOperand dst(base, offset); -+ switch (type.kind()) { -+ case ValueType::kI32: ++ switch (type) { ++ case kWasmI32: + assm->Usw(src.gp(), dst); + break; -+ case ValueType::kI64: -+ case ValueType::kOptRef: -+ case ValueType::kRef: -+ case ValueType::kRtt: ++ case kWasmI64: ++ //case kOptRef: ++ //case kRef: ++ //case kRtt: + assm->Usd(src.gp(), dst); + break; -+ case ValueType::kF32: ++ case kWasmF32: + assm->UStoreFloat(src.fp(), dst); + break; -+ case ValueType::kF64: ++ case kWasmF64: + assm->UStoreDouble(src.fp(), dst); + break; + default: @@ -33190,22 +33213,22 @@ index 00000000000..2f624f79f5c +} + +inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueType type) { -+ switch (type.kind()) { -+ case ValueType::kI32: ++ switch (type) { ++ case kWasmI32: + assm->addi(sp, sp, -kSystemPointerSize); + assm->Sw(reg.gp(), MemOperand(sp, 0)); + break; -+ case ValueType::kI64: -+ case ValueType::kOptRef: -+ case ValueType::kRef: -+ case ValueType::kRtt: ++ case kWasmI64: ++ //case kOptRef: ++ //case kRef: ++ //case kRtt: + assm->push(reg.gp()); + break; -+ case ValueType::kF32: ++ case kWasmF32: + assm->addi(sp, sp, -kSystemPointerSize); + assm->StoreFloat(reg.fp(), MemOperand(sp, 0)); + break; -+ case ValueType::kF64: ++ case kWasmF64: + assm->addi(sp, sp, -kSystemPointerSize); + assm->StoreDouble(reg.fp(), MemOperand(sp, 0)); + break; @@ -33338,6 +33361,7 @@ index 00000000000..2f624f79f5c + return offset; +} + ++/* +void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params, + int stack_param_delta) { + UseScratchRegisterScope temps(this); @@ -33360,9 +33384,11 @@ index 00000000000..2f624f79f5c + Add64(sp, fp, -stack_param_delta * 8); + Pop(ra, fp); +} ++*/ + -+void LiftoffAssembler::AlignFrameSize() {} ++//void LiftoffAssembler::AlignFrameSize() {} + ++/* +void LiftoffAssembler::PatchPrepareStackFrame(int offset) { + int frame_size = GetTotalFrameSize() - kSystemPointerSize; + // We can't run out of space, just pass anything big enough to not cause the @@ -33376,49 +33402,72 @@ index 00000000000..2f624f79f5c + // register and, as third instruction, daddu will be generated. + patching_assembler.Add64(sp, sp, Operand(-frame_size)); +} ++*/ ++ ++void LiftoffAssembler::PatchPrepareStackFrame(int offset, ++ uint32_t stack_slots) { ++ uint64_t bytes = liftoff::kConstantStackSpace + kStackSlotSize * stack_slots; ++ DCHECK_LE(bytes, kMaxInt); ++ // We can't run out of space, just pass anything big enough to not cause the ++ // assembler to try to grow the buffer. 
++ constexpr int kAvailableSpace = 256; ++ TurboAssembler patching_assembler( ++ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo, ++ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace)); ++ // If bytes can be represented as 16bit, daddiu will be generated and two ++ // nops will stay untouched. Otherwise, lui-ori sequence will load it to ++ // register and, as third instruction, daddu will be generated. ++ patching_assembler.Add64(sp, sp, Operand(-bytes)); ++} + +void LiftoffAssembler::FinishCode() {} + +void LiftoffAssembler::AbortCompilation() {} + +// static ++/* +constexpr int LiftoffAssembler::StaticStackFrameSize() { + return liftoff::kInstanceOffset; +} ++*/ + ++/* +int LiftoffAssembler::SlotSizeForType(ValueType type) { + switch (type.kind()) { -+ case ValueType::kS128: ++ case ValueType::kWasmS128: + return type.element_size_bytes(); + default: + return kStackSlotSize; + } +} ++*/ + ++/* +bool LiftoffAssembler::NeedsAlignment(ValueType type) { + switch (type.kind()) { -+ case ValueType::kS128: ++ case ValueType::kWasmS128: + return true; + default: + // No alignment because all other types are kStackSlotSize. + return false; + } +} ++*/ + +void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value, + RelocInfo::Mode rmode) { -+ switch (value.type().kind()) { -+ case ValueType::kI32: ++ switch (value.type()) { ++ case kWasmI32: + TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode)); + break; -+ case ValueType::kI64: ++ case kWasmI64: + TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode)); + break; -+ case ValueType::kF32: ++ case kWasmF32: + TurboAssembler::LoadFPRImmediate(reg.fp(), + value.to_f32_boxed().get_bits()); + break; -+ case ValueType::kF64: ++ case kWasmF64: + TurboAssembler::LoadFPRImmediate(reg.fp(), + value.to_f64_boxed().get_bits()); + break; @@ -33427,7 +33476,7 @@ index 00000000000..2f624f79f5c + } +} + -+void LiftoffAssembler::LoadFromInstance(Register dst, int32_t offset, ++void LiftoffAssembler::LoadFromInstance(Register dst, uint32_t offset, + int size) { + DCHECK_LE(0, offset); + Ld(dst, liftoff::GetInstanceOperand()); @@ -33440,7 +33489,7 @@ index 00000000000..2f624f79f5c +} + +void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst, -+ int32_t offset) { ++ uint32_t offset) { + LoadFromInstance(dst, offset, kTaggedSize); +} + @@ -33454,13 +33503,14 @@ index 00000000000..2f624f79f5c + +void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr, + Register offset_reg, -+ int32_t offset_imm, ++ uint32_t offset_imm, + LiftoffRegList pinned) { + STATIC_ASSERT(kTaggedSize == kInt64Size); + MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm); + Ld(dst, src_op); +} + ++/* +void LiftoffAssembler::StoreTaggedPointer(Register dst_addr, + Register offset_reg, + int32_t offset_imm, @@ -33486,9 +33536,10 @@ index 00000000000..2f624f79f5c + wasm::WasmCode::kRecordWrite); + bind(&exit); +} ++*/ + +void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr, -+ Register offset_reg, uintptr_t offset_imm, ++ Register offset_reg, uint32_t offset_imm, + LoadType type, LiftoffRegList pinned, + uint32_t* protected_load_pc, bool is_load_mem) { + MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm); @@ -33540,7 +33591,7 @@ index 00000000000..2f624f79f5c +} + +void LiftoffAssembler::Store(Register dst_addr, Register offset_reg, -+ uintptr_t offset_imm, LiftoffRegister src, ++ uint32_t offset_imm, LiftoffRegister src, + StoreType type, LiftoffRegList 
pinned, + uint32_t* protected_store_pc, bool is_store_mem) { + MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm); @@ -33587,6 +33638,7 @@ index 00000000000..2f624f79f5c + } +} + ++/* +void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr, + Register offset_reg, uintptr_t offset_imm, + LoadType type, LiftoffRegList pinned) { @@ -33644,6 +33696,7 @@ index 00000000000..2f624f79f5c +} + +void LiftoffAssembler::AtomicFence() { sync(); } ++*/ + +void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst, + uint32_t caller_slot_idx, @@ -33652,17 +33705,21 @@ index 00000000000..2f624f79f5c + liftoff::Load(this, dst, src, type); +} + ++/* +void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src, + uint32_t caller_slot_idx, + ValueType type) { + int32_t offset = kSystemPointerSize * (caller_slot_idx + 1); + liftoff::Store(this, fp, offset, src, type); +} ++*/ + ++/* +void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset, + ValueType type) { + liftoff::Load(this, dst, MemOperand(sp, offset), type); +} ++*/ + +void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset, + ValueType type) { @@ -33684,27 +33741,36 @@ index 00000000000..2f624f79f5c + TurboAssembler::Move(dst, src); +} + -+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueType type) { -+ RecordUsedSpillOffset(offset); ++// Call this method whenever spilling something, such that the number of used ++// spill slot can be tracked and the stack frame will be allocated big enough. ++// copied from src/wasm/baseline/liftoff-assembler.h from v8 9.0.259 ++/* ++void RecordUsedSpillOffset(int offset) { ++ if (offset >= max_used_spill_offset_) max_used_spill_offset_ = offset; ++} ++*/ ++ ++void LiftoffAssembler::Spill(uint32_t offset, LiftoffRegister reg, ValueType type) { ++ //RecordUsedSpillOffset(offset); + MemOperand dst = liftoff::GetStackSlot(offset); -+ switch (type.kind()) { -+ case ValueType::kI32: ++ switch (type) { ++ case ValueType::kWasmI32: + Sw(reg.gp(), dst); + break; -+ case ValueType::kI64: -+ case ValueType::kRef: -+ case ValueType::kOptRef: -+ case ValueType::kRtt: -+ case ValueType::kRttWithDepth: ++ case kWasmI64: ++ //case kRef: ++ //case kOptRef: ++ //case kRtt: ++ //case kRttWithDepth: + Sd(reg.gp(), dst); + break; -+ case ValueType::kF32: ++ case kWasmF32: + StoreFloat(reg.fp(), dst); + break; -+ case ValueType::kF64: ++ case kWasmF64: + TurboAssembler::StoreDouble(reg.fp(), dst); + break; -+ case ValueType::kS128: ++ case kWasmS128: + bailout(kSimd, "Spill S128"); + break; + default: @@ -33712,19 +33778,20 @@ index 00000000000..2f624f79f5c + } +} + -+void LiftoffAssembler::Spill(int offset, WasmValue value) { -+ RecordUsedSpillOffset(offset); ++void LiftoffAssembler::Spill(uint32_t offset, WasmValue value) { ++ //RecordUsedSpillOffset(offset); + MemOperand dst = liftoff::GetStackSlot(offset); -+ switch (value.type().kind()) { -+ case ValueType::kI32: { ++ switch (value.type()) { ++ case kWasmI32: { + LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); + TurboAssembler::li(tmp.gp(), Operand(value.to_i32())); + Sw(tmp.gp(), dst); + break; + } -+ case ValueType::kI64: -+ case ValueType::kRef: -+ case ValueType::kOptRef: { ++ case kWasmI64: ++ //case kRef: ++ //case kOptRef: ++ { + LiftoffRegister tmp = GetUnusedRegister(kGpReg, {}); + TurboAssembler::li(tmp.gp(), value.to_i64()); + Sd(tmp.gp(), dst); @@ -33737,21 +33804,21 @@ index 00000000000..2f624f79f5c + } +} + -+void LiftoffAssembler::Fill(LiftoffRegister 
reg, int offset, ValueType type) { ++void LiftoffAssembler::Fill(LiftoffRegister reg, uint32_t offset, ValueType type) { + MemOperand src = liftoff::GetStackSlot(offset); -+ switch (type.kind()) { -+ case ValueType::kI32: ++ switch (type) { ++ case kWasmI32: + Lw(reg.gp(), src); + break; -+ case ValueType::kI64: -+ case ValueType::kRef: -+ case ValueType::kOptRef: ++ case kWasmI64: ++ //case kRef: ++ //case kOptRef: + Ld(reg.gp(), src); + break; -+ case ValueType::kF32: ++ case kWasmF32: + LoadFloat(reg.fp(), src); + break; -+ case ValueType::kF64: ++ case kWasmF64: + TurboAssembler::LoadDouble(reg.fp(), src); + break; + default: @@ -33759,10 +33826,11 @@ index 00000000000..2f624f79f5c + } +} + -+void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) { ++void LiftoffAssembler::FillI64Half(Register, uint32_t offset, RegPairHalf) { + UNREACHABLE(); +} + ++/* +void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) { + DCHECK_LT(0, size); + RecordUsedSpillOffset(start + size); @@ -33794,20 +33862,27 @@ index 00000000000..2f624f79f5c + Pop(a1, a0); + } +} ++*/ + ++/* +void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) { + TurboAssembler::Clz64(dst.gp(), src.gp()); +} ++*/ + ++/* +void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) { + TurboAssembler::Ctz64(dst.gp(), src.gp()); +} ++*/ + ++/* +bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst, + LiftoffRegister src) { + TurboAssembler::Popcnt64(dst.gp(), src.gp()); + return true; +} ++*/ + +void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) { + TurboAssembler::Mul32(dst, lhs, rhs); @@ -33863,14 +33938,14 @@ index 00000000000..2f624f79f5c +#undef I32_BINOP + +#define I32_BINOP_I(name, instruction) \ -+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \ ++ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \ + int32_t imm) { \ + instruction(dst, lhs, Operand(imm)); \ + } + +// clang-format off +I32_BINOP_I(add, Add32) -+I32_BINOP_I(sub, Sub32) ++//I32_BINOP_I(sub, Sub32) +I32_BINOP_I(and, And) +I32_BINOP_I(or, Or) +I32_BINOP_I(xor, Xor) @@ -33878,11 +33953,11 @@ index 00000000000..2f624f79f5c + +#undef I32_BINOP_I + -+void LiftoffAssembler::emit_i32_clz(Register dst, Register src) { ++bool LiftoffAssembler::emit_i32_clz(Register dst, Register src) { + TurboAssembler::Clz32(dst, src); +} + -+void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { ++bool LiftoffAssembler::emit_i32_ctz(Register dst, Register src) { + TurboAssembler::Ctz32(dst, src); +} + @@ -33893,11 +33968,11 @@ index 00000000000..2f624f79f5c + +#define I32_SHIFTOP(name, instruction) \ + void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \ -+ Register amount) { \ ++ Register amount, LiftoffRegList pinned) { \ + instruction(dst, src, amount); \ + } +#define I32_SHIFTOP_I(name, instruction) \ -+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \ ++ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \ + int amount) { \ + instruction(dst, src, amount & 31); \ + } @@ -33906,10 +33981,12 @@ index 00000000000..2f624f79f5c +I32_SHIFTOP(sar, sraw) +I32_SHIFTOP(shr, srlw) + -+I32_SHIFTOP_I(shl, slliw) -+I32_SHIFTOP_I(sar, sraiw) ++ ++//I32_SHIFTOP_I(shl, slliw) ++//I32_SHIFTOP_I(sar, sraiw) +I32_SHIFTOP_I(shr, srliw) + ++ +#undef I32_SHIFTOP +#undef I32_SHIFTOP_I + @@ -33977,12 +34054,13 @@ index 00000000000..2f624f79f5c +#undef I64_BINOP + +#define I64_BINOP_I(name, instruction) 
\ -+ void LiftoffAssembler::emit_i64_##name##i( \ ++ void LiftoffAssembler::emit_i64_##name( \ + LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \ + instruction(dst.gp(), lhs.gp(), Operand(imm)); \ + } + +// clang-format off ++I64_BINOP_I(add, Add64) +I64_BINOP_I(and, And) +I64_BINOP_I(or, Or) +I64_BINOP_I(xor, Xor) @@ -33992,11 +34070,11 @@ index 00000000000..2f624f79f5c + +#define I64_SHIFTOP(name, instruction) \ + void LiftoffAssembler::emit_i64_##name( \ -+ LiftoffRegister dst, LiftoffRegister src, Register amount) { \ ++ LiftoffRegister dst, LiftoffRegister src, Register amount, LiftoffRegList pinned) { \ + instruction(dst.gp(), src.gp(), amount); \ + } +#define I64_SHIFTOP_I(name, instruction) \ -+ void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \ ++ void LiftoffAssembler::emit_i64_##name(LiftoffRegister dst, \ + LiftoffRegister src, int amount) { \ + DCHECK(is_uint6(amount)); \ + instruction(dst.gp(), src.gp(), amount); \ @@ -34006,18 +34084,21 @@ index 00000000000..2f624f79f5c +I64_SHIFTOP(sar, sra) +I64_SHIFTOP(shr, srl) + -+I64_SHIFTOP_I(shl, slli) -+I64_SHIFTOP_I(sar, srai) ++//I64_SHIFTOP_I(shl, slli) ++//I64_SHIFTOP_I(sar, srai) +I64_SHIFTOP_I(shr, srli) + +#undef I64_SHIFTOP +#undef I64_SHIFTOP_I + ++/* +void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs, + int64_t imm) { + TurboAssembler::Add64(dst.gp(), lhs.gp(), Operand(imm)); +} -+void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) { ++*/ ++ ++void LiftoffAssembler::emit_i32_to_intptr(Register dst, Register src) { + addw(dst, src, zero_reg); +} + @@ -34257,9 +34338,10 @@ index 00000000000..2f624f79f5c + TurboAssembler::Jump(target); +} + -+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond, ++void LiftoffAssembler::emit_cond_jump(Condition liftoff_cond, + Label* label, ValueType type, + Register lhs, Register rhs) { ++ /* + Condition cond = liftoff::ToCondition(liftoff_cond); + if (rhs == no_reg) { + DCHECK(type == kWasmI32 || type == kWasmI64); @@ -34270,78 +34352,105 @@ index 00000000000..2f624f79f5c + (liftoff_cond == kEqual || liftoff_cond == kUnequal))); + TurboAssembler::Branch(label, cond, lhs, Operand(rhs)); + } ++ */ ++ if (rhs != no_reg) { ++ TurboAssembler::Branch(label, liftoff_cond, lhs, Operand(rhs)); ++ } else { ++ TurboAssembler::Branch(label, liftoff_cond, lhs, Operand(zero_reg)); ++ } +} + -+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond, ++/* ++void LiftoffAssembler::emit_i32_cond_jumpi(Condition liftoff_cond, + Label* label, Register lhs, + int32_t imm) { + Condition cond = liftoff::ToCondition(liftoff_cond); + TurboAssembler::Branch(label, cond, lhs, Operand(imm)); +} ++*/ + +void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) { + TurboAssembler::Sltu(dst, src, 1); +} + -+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond, ++void LiftoffAssembler::emit_i32_set_cond(Condition liftoff_cond, + Register dst, Register lhs, + Register rhs) { -+ Condition cond = liftoff::ToCondition(liftoff_cond); -+ TurboAssembler::CompareI(dst, lhs, Operand(rhs), cond); ++ bailout(kUnsupportedArchitecture, "emit_i32_set_cond"); ++ //Condition cond = ConditionToConditionCmpFPU(liftoff_cond); ++ //TurboAssembler::CompareI(dst, lhs, Operand(rhs), cond); +} + +void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) { + TurboAssembler::Sltu(dst, src.gp(), 1); +} + -+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond, ++void 
LiftoffAssembler::emit_i64_set_cond(Condition liftoff_cond, + Register dst, LiftoffRegister lhs, + LiftoffRegister rhs) { -+ Condition cond = liftoff::ToCondition(liftoff_cond); -+ TurboAssembler::CompareI(dst, lhs.gp(), Operand(rhs.gp()), cond); ++ bailout(kUnsupportedArchitecture, "emit_i64_set_cond"); ++ //Condition cond = ConditionToConditionCmpFPU(liftoff_cond); ++ //TurboAssembler::CompareI(dst, lhs.gp(), Operand(rhs.gp()), cond); +} + -+static FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition) { ++namespace liftoff { ++ ++inline FPUCondition ConditionToConditionCmpFPU(Condition condition, ++ bool* predicate) { + switch (condition) { + case kEqual: ++ *predicate = true; + return EQ; + case kUnequal: ++ *predicate = false; + return NE; + case kUnsignedLessThan: ++ *predicate = true; + return LT; + case kUnsignedGreaterEqual: ++ *predicate = false; + return GE; + case kUnsignedLessEqual: ++ *predicate = true; + return LE; + case kUnsignedGreaterThan: ++ *predicate = false; + return GT; + default: ++ *predicate = true; + break; + } + UNREACHABLE(); +} + -+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond, ++} // namespace liftoff ++ ++void LiftoffAssembler::emit_f32_set_cond(Condition liftoff_cond, + Register dst, DoubleRegister lhs, + DoubleRegister rhs) { -+ FPUCondition fcond = ConditionToConditionCmpFPU(liftoff_cond); ++ bool predicate; ++ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate); + TurboAssembler::CompareF32(dst, fcond, lhs, rhs); +} + -+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond, ++void LiftoffAssembler::emit_f64_set_cond(Condition liftoff_cond, + Register dst, DoubleRegister lhs, + DoubleRegister rhs) { -+ FPUCondition fcond = ConditionToConditionCmpFPU(liftoff_cond); ++ bool predicate; ++ FPUCondition fcond = liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate); + TurboAssembler::CompareF64(dst, fcond, lhs, rhs); +} + ++/* +bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition, + LiftoffRegister true_value, + LiftoffRegister false_value, + ValueType type) { + return false; +} ++*/ + ++/* +void LiftoffAssembler::emit_smi_check(Register obj, Label* target, + SmiCheckMode mode) { + UseScratchRegisterScope temps(this); @@ -34350,7 +34459,9 @@ index 00000000000..2f624f79f5c + Condition condition = mode == kJumpOnSmi ? 
eq : ne; + Branch(target, condition, scratch, Operand(zero_reg)); +} ++*/ + ++/* +void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr, + Register offset_reg, uintptr_t offset_imm, + LoadType type, @@ -34358,21 +34469,27 @@ index 00000000000..2f624f79f5c + uint32_t* protected_load_pc) { + bailout(kSimd, "load extend and load splat unimplemented"); +} ++*/ + ++/* +void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src, + Register addr, Register offset_reg, + uintptr_t offset_imm, LoadType type, + uint8_t laneidx, uint32_t* protected_load_pc) { + bailout(kSimd, "loadlane"); +} ++*/ + ++/* +void LiftoffAssembler::StoreLane(Register dst, Register offset, + uintptr_t offset_imm, LiftoffRegister src, + StoreType type, uint8_t lane, + uint32_t* protected_store_pc) { + bailout(kSimd, "StoreLane"); +} ++*/ + ++/* +void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst, + LiftoffRegister lhs, + LiftoffRegister rhs, @@ -34380,48 +34497,66 @@ index 00000000000..2f624f79f5c + bool is_swizzle) { + bailout(kSimd, "emit_i8x16_shuffle"); +} ++*/ + ++/* +void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst, + LiftoffRegister lhs, + LiftoffRegister rhs) { + bailout(kSimd, "emit_i8x16_swizzle"); +} ++*/ + ++/* +void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst, + LiftoffRegister src) { + bailout(kSimd, "emit_i8x16_splat"); +} ++*/ + ++/* +void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst, + LiftoffRegister src) { + bailout(kSimd, "emit_i16x8_splat"); +} ++*/ + ++/* +void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst, + LiftoffRegister src) { + bailout(kSimd, "emit_i32x4_splat"); +} ++*/ + ++/* +void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst, + LiftoffRegister src) { + bailout(kSimd, "emit_i64x2_splat"); +} ++*/ + ++/* +void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs, + LiftoffRegister rhs) { + bailout(kSimd, "emit_i64x2_eq"); +} ++*/ + ++/* +void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst, + LiftoffRegister src) { + bailout(kSimd, "emit_f32x4_splat"); +} ++*/ + ++/* +void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst, + LiftoffRegister src) { + bailout(kSimd, "emit_f64x2_splat"); +} ++*/ + ++/* +#define SIMD_BINOP(name1, name2) \ + void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \ + LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \ @@ -35368,6 +35503,7 @@ index 00000000000..2f624f79f5c + uint8_t imm_lane_idx) { + bailout(kSimd, "emit_f64x2_replace_lane"); +} ++*/ + +void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) { + TurboAssembler::Uld(limit_address, MemOperand(limit_address)); @@ -35433,6 +35569,7 @@ index 00000000000..2f624f79f5c + Add64(sp, sp, Operand(gp_offset)); +} + ++/* +void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint, + LiftoffRegList all_spills, + LiftoffRegList ref_spills, @@ -35450,12 +35587,13 @@ index 00000000000..2f624f79f5c + // Record the number of additional spill slots. 
+ RecordOolSpillSpaceSize(spill_space_size); +} ++*/ + +void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) { + TurboAssembler::DropAndRet(static_cast<int>(num_stack_slots)); +} + -+void LiftoffAssembler::CallC(const wasm::FunctionSig* sig, ++void LiftoffAssembler::CallC(wasm::FunctionSig* sig, + const LiftoffRegister* args, + const LiftoffRegister* rets, + ValueType out_argument_type, int stack_bytes, @@ -35465,7 +35603,7 @@ index 00000000000..2f624f79f5c + int arg_bytes = 0; + for (ValueType param_type : sig->parameters()) { + liftoff::Store(this, sp, arg_bytes, *args++, param_type); -+ arg_bytes += param_type.element_size_bytes(); ++ arg_bytes += ValueTypes::MemSize(param_type); + } + DCHECK_LE(arg_bytes, stack_bytes); + @@ -35502,11 +35640,13 @@ index 00000000000..2f624f79f5c + Call(addr, RelocInfo::WASM_CALL); +} + ++/* +void LiftoffAssembler::TailCallNativeWasmCode(Address addr) { + Jump(addr, RelocInfo::WASM_CALL); +} ++*/ + -+void LiftoffAssembler::CallIndirect(const wasm::FunctionSig* sig, ++void LiftoffAssembler::CallIndirect(wasm::FunctionSig* sig, + compiler::CallDescriptor* call_descriptor, + Register target) { + if (target == no_reg) { @@ -35517,6 +35657,7 @@ index 00000000000..2f624f79f5c + } +} + ++/* +void LiftoffAssembler::TailCallIndirect(Register target) { + if (target == no_reg) { + Pop(kScratchReg); @@ -35525,6 +35666,7 @@ index 00000000000..2f624f79f5c + Jump(target); + } +} ++*/ + +void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) { + // A direct call to a wasm runtime stub defined in this module. @@ -35546,7 +35688,7 @@ index 00000000000..2f624f79f5c + const LiftoffAssembler::VarState& src = slot.src_; + switch (src.loc()) { + case LiftoffAssembler::VarState::kStack: -+ asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_offset_)); ++ asm_->Ld(kScratchReg, liftoff::GetStackSlot(slot.src_index_)); + asm_->push(kScratchReg); + break; + case LiftoffAssembler::VarState::kRegister: @@ -35567,10 +35709,10 @@ index 00000000000..2f624f79f5c + +#endif // V8_WASM_BASELINE_RISCV64_LIFTOFF_ASSEMBLER_RISCV64_H_ diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc -index 7c41c0a209c..a4a1f8c87ff 100644 +index 7c41c0a209c..9ed15aca2fa 100644 --- a/deps/v8/src/wasm/jump-table-assembler.cc +++ b/deps/v8/src/wasm/jump-table-assembler.cc -@@ -217,6 +217,46 @@ void JumpTableAssembler::NopBytes(int bytes) { +@@ -217,6 +217,51 @@ void JumpTableAssembler::NopBytes(int bytes) { } } @@ -35586,11 +35728,15 @@ index 7c41c0a209c..a4a1f8c87ff 100644 + for (int i = 0; i < nop_bytes; i += kInstrSize) nop(); +} + -+bool JumpTableAssembler::EmitJumpSlot(Address target) { ++void JumpTableAssembler::EmitJumpSlot(Address target) { + PatchAndJump(target); -+ return true; +} + ++void JumpTableAssembler::EmitRuntimeStubSlot(Address builtin_target) { ++ JumpToInstructionStream(builtin_target); ++} ++ ++/* +void JumpTableAssembler::EmitFarJumpSlot(Address target) { + UseScratchRegisterScope temp(this); + Register rd = temp.Acquire(); @@ -35605,6 +35751,7 @@ index 7c41c0a209c..a4a1f8c87ff 100644 +void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) { + UNREACHABLE(); +} ++*/ + +void JumpTableAssembler::NopBytes(int bytes) { + DCHECK_LE(0, bytes); |