diff --git a/common.gypi b/common.gypi
index cefbc4318b7f7e..c7b44c876678da 100644
--- a/common.gypi
+++ b/common.gypi
@@ -38,7 +38,7 @@

     # Reset this number to 0 on major V8 upgrades.
     # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.15',
+    'v8_embedder_string': '-node.16',

     ##### V8 defaults for Node.js #####
diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc
index 9b6cbf74bf25fc..75873f531d8820 100644
--- a/deps/v8/src/codegen/constant-pool.cc
+++ b/deps/v8/src/codegen/constant-pool.cc
@@ -554,11 +554,22 @@ void ConstantPool::EmitAndClear(Jump require_jump) {
   EmitPrologue(require_alignment);
   if (require_alignment == Alignment::kRequired) assm_->DataAlign(kInt64Size);
   EmitEntries();
+  // Emit padding data to ensure the constant pool size matches the expected
+  // constant count during disassembly.
+  if (v8_flags.riscv_c_extension) {
+    int code_size = assm_->SizeOfCodeGeneratedSince(&size_check);
+    DCHECK_LE(code_size, size);
+
+    while (code_size < size) {
+      assm_->db(0xcc);
+      code_size++;
+    }
+  }
   assm_->RecordComment("]");
   assm_->bind(&after_pool);
   DEBUG_PRINTF("\tConstant Pool end\n")
-  DCHECK_LE(assm_->SizeOfCodeGeneratedSince(&size_check) - size, 3);
+  DCHECK_EQ(size, assm_->SizeOfCodeGeneratedSince(&size_check));

   Clear();
 }
@@ -666,7 +677,16 @@ bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
 int ConstantPool::ComputeSize(Jump require_jump,
                               Alignment require_alignment) const {
   int size_up_to_marker = PrologueSize(require_jump);
-  int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
+  // With RVC enabled, constant pool alignment must use kInt64Size to ensure
+  // sufficient padding space for 8-byte alignment; otherwise, alignment may
+  // fail.
+  //
+  // Example:
+  //   pc_offset = 0x22
+  //   Aligned(0x22, kInt64Size) = 0x28 → 6 bytes of padding needed.
+  int alignment = require_alignment == Alignment::kRequired
+                      ? (v8_flags.riscv_c_extension ? kInt64Size : kInstrSize)
+                      : 0;
   size_t size_after_marker =
       Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
   return size_up_to_marker + static_cast<int>(size_after_marker);
@@ -674,9 +694,13 @@ int ConstantPool::ComputeSize(Jump require_jump,

 Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
                                                        int pc_offset) const {
+  // When the RVC extension is enabled, constant pool entries must be aligned
+  // to kInstrSize to prevent unaligned 32-bit memory accesses.
   int size_up_to_marker = PrologueSize(require_jump);
-  if (Entry64Count() != 0 &&
-      !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
+  if ((Entry64Count() != 0 &&
+       !IsAligned(pc_offset + size_up_to_marker, kInt64Size)) ||
+      (Entry32Count() != 0 && v8_flags.riscv_c_extension &&
+       !IsAligned(pc_offset + size_up_to_marker, kInstrSize))) {
     return Alignment::kRequired;
   }
   return Alignment::kOmitted;
diff --git a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc
index 616faf5dd46ba9..a1da242a9f6b8b 100644
--- a/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc
+++ b/deps/v8/src/codegen/riscv/macro-assembler-riscv.cc
@@ -5370,14 +5370,12 @@ void MacroAssembler::StoreReturnAddressAndCall(Register target) {
   // trigger GC, since the callee function will return to it.
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);

-  int kNumInstructions = v8_flags.riscv_c_extension ? 5 : 6;
-  Label start;
+  Label start, end;
   // Make 'ra' point to the correct return location, just after the 'jalr t6'
   // instruction that does the call, and store 'ra' at the top of the stack.
   bind(&start);
-  auipc(ra, 0);  // Set 'ra' the current 'pc'.
-  AddWord(ra, ra, kNumInstructions * kInstrSize);
+  LoadAddress(ra, &end);
   StoreWord(ra, MemOperand(sp));  // Reserved in EnterExitFrame.

   AddWord(sp, sp, -kCArgsSlotsSize);  // Preserves stack alignment.
@@ -5385,9 +5383,9 @@ void MacroAssembler::StoreReturnAddressAndCall(Register target) {

   Mv(t6, target);  // Function pointer in 't6' to conform to ABI for PIC.
   jalr(t6);
-  // Make sure the stored 'ra' points to this position. This way, the 'ra'
-  // value we stored on the stack matches the value of 'ra' during the call.
-  DCHECK_EQ(kNumInstructions, InstructionsGeneratedSince(&start));
+  // The 'ra' value we stored on the stack matches the value of 'ra' during the
+  // call.
+  bind(&end);
 }

 void MacroAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
@@ -7358,7 +7356,11 @@ int MacroAssembler::CallCFunctionHelper(
     AddWord(sp, sp, Operand(stack_passed_arguments * kSystemPointerSize));
   }
   if (kMaxSizeOfMoveAfterFastCall > pc_offset() - before_offset) {
-    nop();
+    // If the RVC extension is enabled, we may have to emit multiple NOPs to
+    // have enough space for patching in the deopt trampoline.
+    do {
+      NOP();
+    } while (pc_offset() - before_offset != kMaxSizeOfMoveAfterFastCall);
   }
   // We assume that with the nop padding, the move instruction uses
   // kMaxSizeOfMoveAfterFastCall bytes. When we patch in the deopt trampoline,
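
For context, a minimal standalone sketch of the alignment arithmetic that the `ComputeSize` comment above describes. This is not V8 code: `PaddingFor` is a hypothetical helper, and the constants simply mirror RISC-V sizes (a full instruction is 4 bytes, a 64-bit pool entry is 8 bytes). It illustrates why, with 2-byte compressed (RVC) instructions, reserving only `kInstrSize` of padding can be insufficient.

```cpp
#include <cassert>
#include <cstdio>

constexpr int kInstrSize = 4;  // full-width RISC-V instruction
constexpr int kInt64Size = 8;  // 64-bit constant pool entry

// Bytes needed to round pc_offset up to the given alignment.
int PaddingFor(int pc_offset, int alignment) {
  return (alignment - (pc_offset % alignment)) % alignment;
}

int main() {
  // Without RVC, the pc only advances in 4-byte steps, so at most one
  // kInstrSize slot of padding is ever needed to reach 8-byte alignment.
  assert(PaddingFor(0x24, kInt64Size) <= kInstrSize);

  // With RVC, 2-byte instructions allow offsets like 0x22, where rounding
  // up to 8 bytes costs 6 bytes -- more than one kInstrSize slot. This is
  // why ComputeSize must reserve kInt64Size when the C extension is on.
  int pc_offset = 0x22;
  int pad = PaddingFor(pc_offset, kInt64Size);
  printf("Aligned(0x%x, %d) = 0x%x -> %d bytes of padding\n", pc_offset,
         kInt64Size, pc_offset + pad, pad);  // 0x28 -> 6 bytes
  return 0;
}
```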