2 changes: 1 addition & 1 deletion common.gypi
@@ -36,7 +36,7 @@
 
     # Reset this number to 0 on major V8 upgrades.
    # Increment by one for each non-official patch applied to deps/v8.
-    'v8_embedder_string': '-node.13',
+    'v8_embedder_string': '-node.17',
 
    ##### V8 defaults for Node.js #####
 
3 changes: 2 additions & 1 deletion deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -766,7 +766,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   liftoff::LoadInternal(this, dst, src_addr, offset_reg,
29 changes: 20 additions & 9 deletions deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -126,13 +126,23 @@ inline CPURegister AcquireByType(UseScratchRegisterScope* temps,
 template <typename T>
 inline MemOperand GetMemOp(LiftoffAssembler* assm,
                            UseScratchRegisterScope* temps, Register addr,
-                           Register offset, T offset_imm) {
+                           Register offset, T offset_imm,
+                           bool i64_offset = false) {
   if (offset.is_valid()) {
-    if (offset_imm == 0) return MemOperand(addr.X(), offset.W(), UXTW);
-    Register tmp = temps->AcquireX();
+    if (offset_imm == 0) {
+      return i64_offset ? MemOperand(addr.X(), offset.X())
+                        : MemOperand(addr.X(), offset.W(), UXTW);
+    }
     DCHECK_GE(kMaxUInt32, offset_imm);
-    assm->Add(tmp, offset.X(), offset_imm);
-    return MemOperand(addr.X(), tmp);
+    if (i64_offset) {
+      Register tmp = temps->AcquireX();
+      assm->Add(tmp, offset.X(), offset_imm);
+      return MemOperand(addr.X(), tmp);
+    } else {
+      Register tmp = temps->AcquireW();
+      assm->Add(tmp, offset.W(), offset_imm);
+      return MemOperand(addr.X(), tmp, UXTW);
+    }
   }
   return MemOperand(addr.X(), offset_imm);
 }
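
Aside: the new UXTW/W-register paths above are the heart of the arm64 fix. A 64-bit register that holds a 32-bit wasm index can carry stale upper bits, so unless the index really is an i64, the address computation must consume only the low 32 bits. A minimal standalone sketch of that invariant (plain C++, not V8 code; names are illustrative):

// Models the effective-address rule GetMemOp enforces: with
// i64_offset == false, only the low 32 bits of the index register may
// participate (hardware form: a UXTW extend or a W-register add).
#include <cassert>
#include <cstdint>

uint64_t EffectiveAddress(uint64_t mem_start, uint64_t index_reg,
                          uint64_t offset_imm, bool i64_offset) {
  uint64_t index = i64_offset ? index_reg : (index_reg & 0xFFFFFFFFull);
  return mem_start + index + offset_imm;
}

int main() {
  // Stale upper bits would address far outside wasm memory if not cleared.
  uint64_t dirty_index = 0xDEADBEEF00000000ull | 8;
  assert(EffectiveAddress(0x10000, dirty_index, 4, false) == 0x1000C);
  return 0;
}
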
@@ -490,10 +500,11 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   UseScratchRegisterScope temps(this);
-  MemOperand src_op =
-      liftoff::GetMemOp(this, &temps, src_addr, offset_reg, offset_imm);
+  MemOperand src_op = liftoff::GetMemOp(this, &temps, src_addr, offset_reg,
+                                        offset_imm, i64_offset);
   if (protected_load_pc) *protected_load_pc = pc_offset();
   switch (type.value()) {
     case LoadType::kI32Load8U:
@@ -1333,7 +1344,7 @@ bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
                                             LiftoffRegister src, Label* trap) {
   switch (opcode) {
     case kExprI32ConvertI64:
-      if (src != dst) Mov(dst.gp().W(), src.gp().W());
+      Mov(dst.gp().W(), src.gp().W());
       return true;
     case kExprI32SConvertF32:
      Fcvtzs(dst.gp().W(), src.fp().S());  // f32 -> i32 round to zero.
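
The emit_type_conversion change above is the companion arm64 correctness fix: i32.wrap_i64 must leave a zero-extended 32-bit value even when source and destination are the same register, and the old code skipped the Mov in exactly that case. On arm64 a write to the W view of a register clears the upper half. A standalone sketch of the intended semantics (illustrative C++, not V8 code):

// Models Mov(dst.W(), src.W()): keep the low 32 bits, zero the rest,
// unconditionally -- even when src == dst.
#include <cassert>
#include <cstdint>

uint64_t WrapI64ToI32(uint64_t reg) {
  return static_cast<uint32_t>(reg);
}

int main() {
  assert(WrapI64ToI32(0xFFFFFFFF00000001ull) == 1);  // upper bits dropped
  return 0;
}
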
3 changes: 2 additions & 1 deletion deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -388,7 +388,8 @@ void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uint32_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
   // Offsets >=2GB are statically OOB on 32-bit systems.
   DCHECK_LE(offset_imm, std::numeric_limits<int32_t>::max());
   DCHECK_EQ(type.value_type() == kWasmI64, dst.is_gp_pair());
2 changes: 1 addition & 1 deletion deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -669,7 +669,7 @@ class LiftoffAssembler : public TurboAssembler {
   inline void Load(LiftoffRegister dst, Register src_addr, Register offset_reg,
                    uintptr_t offset_imm, LoadType type, LiftoffRegList pinned,
                    uint32_t* protected_load_pc = nullptr,
-                   bool is_load_mem = false);
+                   bool is_load_mem = false, bool i64_offset = false);
   inline void Store(Register dst_addr, Register offset_reg,
                     uintptr_t offset_imm, LiftoffRegister src, StoreType type,
                     LiftoffRegList pinned,
142 changes: 100 additions & 42 deletions deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -2767,33 +2767,75 @@ class LiftoffCompiler {
     return index;
   }
 
+  bool IndexStaticallyInBounds(const LiftoffAssembler::VarState& index_slot,
+                               int access_size, uintptr_t* offset) {
+    if (!index_slot.is_const()) return false;
+
+    // Potentially zero extend index (which is a 32-bit constant).
+    const uintptr_t index = static_cast<uint32_t>(index_slot.i32_const());
+    const uintptr_t effective_offset = index + *offset;
+
+    if (effective_offset < index  // overflow
+        || !base::IsInBounds<uintptr_t>(effective_offset, access_size,
+                                        env_->min_memory_size)) {
+      return false;
+    }
+
+    *offset = effective_offset;
+    return true;
+  }
+
   void LoadMem(FullDecoder* decoder, LoadType type,
                const MemoryAccessImmediate<validate>& imm,
                const Value& index_val, Value* result) {
     ValueKind kind = type.value_type().kind();
+    RegClass rc = reg_class_for(kind);
     if (!CheckSupportedType(decoder, kind, "load")) return;
-    LiftoffRegister full_index = __ PopToRegister();
-    Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
-                                    full_index, {}, kDontForceCheck);
-    if (index == no_reg) return;
 
     uintptr_t offset = imm.offset;
-    LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
-    index = AddMemoryMasking(index, &offset, &pinned);
-    DEBUG_CODE_COMMENT("load from memory");
-    Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
-    LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
-    RegClass rc = reg_class_for(kind);
-    LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
-    uint32_t protected_load_pc = 0;
-    __ Load(value, addr, index, offset, type, pinned, &protected_load_pc, true);
-    if (env_->use_trap_handler) {
-      AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
-                       protected_load_pc);
+    Register index = no_reg;
+
+    // Only look at the slot, do not pop it yet (will happen in PopToRegister
+    // below, if this is not a statically-in-bounds index).
+    auto& index_slot = __ cache_state()->stack_state.back();
+    bool i64_offset = index_val.type == kWasmI64;
+    if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
+      __ cache_state()->stack_state.pop_back();
+      DEBUG_CODE_COMMENT("load from memory (constant offset)");
+      LiftoffRegList pinned;
+      Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+      LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
+      LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
+      __ Load(value, mem, no_reg, offset, type, pinned, nullptr, true,
+              i64_offset);
+      __ PushRegister(kind, value);
+    } else {
+      LiftoffRegister full_index = __ PopToRegister();
+      index = BoundsCheckMem(decoder, type.size(), offset, full_index, {},
+                             kDontForceCheck);
+      if (index == no_reg) return;
+
+      DEBUG_CODE_COMMENT("load from memory");
+      LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
+      index = AddMemoryMasking(index, &offset, &pinned);
+
+      // Load the memory start address only now to reduce register pressure
+      // (important on ia32).
+      Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+      LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
+      LiftoffRegister value = pinned.set(__ GetUnusedRegister(rc, pinned));
+
+      uint32_t protected_load_pc = 0;
+      __ Load(value, mem, index, offset, type, pinned, &protected_load_pc, true,
+              i64_offset);
+      if (env_->use_trap_handler) {
+        AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
+                         protected_load_pc);
+      }
+      __ PushRegister(kind, value);
     }
-    __ PushRegister(kind, value);
 
-    if (FLAG_trace_wasm_memory) {
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       TraceMemoryOperation(false, type.mem_type().representation(), index,
                            offset, decoder->position());
     }
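
IndexStaticallyInBounds above adds a fast path for constant indexes: the index is folded into the immediate offset, and both the bounds check and the index register disappear, but only when index + offset neither wraps around nor reaches past the module's minimum memory size. A standalone model of that check (plain C++; base::IsInBounds is hand-inlined here, so treat the details as an assumption):

#include <cstdint>

// Returns true and folds the constant index into *offset when the access
// [index + *offset, index + *offset + access_size) provably fits in the
// smallest memory the module can have.
bool ConstIndexInBounds(uint32_t index, uintptr_t access_size,
                        uintptr_t min_memory_size, uintptr_t* offset) {
  const uintptr_t effective_offset = index + *offset;
  if (effective_offset < index) return false;  // index + *offset wrapped
  // effective_offset + access_size <= min_memory_size, phrased so the
  // addition itself cannot overflow.
  if (access_size > min_memory_size ||
      effective_offset > min_memory_size - access_size) {
    return false;
  }
  *offset = effective_offset;
  return true;
}
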
@@ -2836,7 +2878,7 @@
     }
     __ PushRegister(kS128, value);
 
-    if (FLAG_trace_wasm_memory) {
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       // Again load extend is different.
       MachineRepresentation mem_rep =
           transform == LoadTransformationKind::kExtend
@@ -2878,7 +2920,7 @@
 
     __ PushRegister(kS128, result);
 
-    if (FLAG_trace_wasm_memory) {
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       TraceMemoryOperation(false, type.mem_type().representation(), index,
                            offset, decoder->position());
     }
@@ -2889,29 +2931,45 @@
                 const Value& index_val, const Value& value_val) {
     ValueKind kind = type.value_type().kind();
     if (!CheckSupportedType(decoder, kind, "store")) return;
+
     LiftoffRegList pinned;
     LiftoffRegister value = pinned.set(__ PopToRegister());
-    LiftoffRegister full_index = __ PopToRegister(pinned);
-    Register index = BoundsCheckMem(decoder, type.size(), imm.offset,
-                                    full_index, pinned, kDontForceCheck);
-    if (index == no_reg) return;
 
     uintptr_t offset = imm.offset;
-    pinned.set(index);
-    index = AddMemoryMasking(index, &offset, &pinned);
-    DEBUG_CODE_COMMENT("store to memory");
-    Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
-    LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
-    uint32_t protected_store_pc = 0;
-    LiftoffRegList outer_pinned;
-    if (FLAG_trace_wasm_memory) outer_pinned.set(index);
-    __ Store(addr, index, offset, value, type, outer_pinned,
-             &protected_store_pc, true);
-    if (env_->use_trap_handler) {
-      AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
-                       protected_store_pc);
+    Register index = no_reg;
+
+    auto& index_slot = __ cache_state()->stack_state.back();
+    if (IndexStaticallyInBounds(index_slot, type.size(), &offset)) {
+      __ cache_state()->stack_state.pop_back();
+      DEBUG_CODE_COMMENT("store to memory (constant offset)");
+      Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+      LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
+      __ Store(mem, no_reg, offset, value, type, pinned, nullptr, true);
+    } else {
+      LiftoffRegister full_index = __ PopToRegister(pinned);
+      index = BoundsCheckMem(decoder, type.size(), imm.offset, full_index,
+                             pinned, kDontForceCheck);
+      if (index == no_reg) return;
+
+      pinned.set(index);
+      index = AddMemoryMasking(index, &offset, &pinned);
+      DEBUG_CODE_COMMENT("store to memory");
+      uint32_t protected_store_pc = 0;
+      // Load the memory start address only now to reduce register pressure
+      // (important on ia32).
+      Register mem = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
+      LOAD_INSTANCE_FIELD(mem, MemoryStart, kSystemPointerSize, pinned);
+      LiftoffRegList outer_pinned;
+      if (V8_UNLIKELY(FLAG_trace_wasm_memory)) outer_pinned.set(index);
+      __ Store(mem, index, offset, value, type, outer_pinned,
+               &protected_store_pc, true);
+      if (env_->use_trap_handler) {
+        AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
+                         protected_store_pc);
+      }
     }
-    if (FLAG_trace_wasm_memory) {
+
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       TraceMemoryOperation(true, type.mem_rep(), index, offset,
                            decoder->position());
     }
@@ -2940,7 +2998,7 @@
       AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapMemOutOfBounds,
                        protected_store_pc);
     }
-    if (FLAG_trace_wasm_memory) {
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       TraceMemoryOperation(true, type.mem_rep(), index, offset,
                            decoder->position());
     }
@@ -4156,9 +4214,9 @@
     Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
     LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
     LiftoffRegList outer_pinned;
-    if (FLAG_trace_wasm_memory) outer_pinned.set(index);
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) outer_pinned.set(index);
     __ AtomicStore(addr, index, offset, value, type, outer_pinned);
-    if (FLAG_trace_wasm_memory) {
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       TraceMemoryOperation(true, type.mem_rep(), index, offset,
                            decoder->position());
     }
@@ -4184,7 +4242,7 @@
     __ AtomicLoad(value, addr, index, offset, type, pinned);
     __ PushRegister(kind, value);
 
-    if (FLAG_trace_wasm_memory) {
+    if (V8_UNLIKELY(FLAG_trace_wasm_memory)) {
       TraceMemoryOperation(false, type.mem_type().representation(), index,
                            offset, decoder->position());
     }
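
The repeated FLAG_trace_wasm_memory -> V8_UNLIKELY(FLAG_trace_wasm_memory) edits in this file are a branch-layout hint, not a behavior change: the tracing flag is almost never set, and the macro (defined via __builtin_expect on GCC/Clang, stated here as an assumption) lets the compiler keep the tracing code off the hot path. Roughly:

// Rough model of the hint; the real V8_UNLIKELY lives in V8's v8config.h.
#if defined(__GNUC__) || defined(__clang__)
#define UNLIKELY(cond) (__builtin_expect(!!(cond), 0))
#else
#define UNLIKELY(cond) (cond)
#endif

extern bool flag_trace;  // stands in for FLAG_trace_wasm_memory
void Trace();

void MaybeTrace() {
  if (UNLIKELY(flag_trace)) Trace();  // cold path, laid out out of line
}
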
6 changes: 5 additions & 1 deletion deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -389,7 +389,11 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
 void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
                             Register offset_reg, uintptr_t offset_imm,
                             LoadType type, LiftoffRegList pinned,
-                            uint32_t* protected_load_pc, bool is_load_mem) {
+                            uint32_t* protected_load_pc, bool is_load_mem,
+                            bool i64_offset) {
+  if (offset_reg != no_reg && !i64_offset) {
+    AssertZeroExtended(offset_reg);
+  }
   Operand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
   if (protected_load_pc) *protected_load_pc = pc_offset();
   switch (type.value()) {
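
On x64 no extra instruction is needed for an i32 index: 32-bit operations implicitly zero-extend into the full 64-bit register, so the new code only asserts the invariant in debug builds instead of re-extending. A tiny sketch of that architectural guarantee (standalone C++, illustrative):

// Writing a 32-bit register on x64 clears bits 63..32, so a value produced
// by a 32-bit operation is zero-extended by construction.
#include <cassert>
#include <cstdint>

uint64_t ProducedBy32BitOp(uint32_t x) {
  return x;  // compiles to a plain 32-bit mov; the upper half is zero
}

int main() {
  assert((ProducedBy32BitOp(0xFFFFFFFFu) >> 32) == 0);
  return 0;
}
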
64 changes: 64 additions & 0 deletions deps/v8/test/mjsunit/regress/wasm/regress-11809.js
@@ -0,0 +1,64 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --enable-testing-opcode-in-wasm --nowasm-tier-up
+// Flags: --wasm-tier-mask-for-testing=2
+
+load("test/mjsunit/wasm/wasm-module-builder.js");
+
+function InstanceMaker(offset) {
+  var builder = new WasmModuleBuilder();
+  builder.addMemory(1, 1, false /* exported */);
+
+  var sig_index = builder.addType(makeSig(
+      [kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32, kWasmI32,
+       kWasmI32],
+      [kWasmI32]));
+  var sig_three = builder.addType(makeSig(
+      [kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64, kWasmI64,
+       kWasmI64],
+      []));
+
+  var zero = builder.addFunction("zero", kSig_i_i);
+  var one = builder.addFunction("one", sig_index);
+  var two = builder.addFunction("two", kSig_v_i);
+  var three = builder.addFunction("three", sig_three).addBody([]);
+
+  zero.addBody([kExprLocalGet, 0, kExprI32LoadMem, 0, offset]);
+
+  one.addBody([
+      kExprLocalGet, 7,
+      kExprCallFunction, zero.index]);
+
+  two.addBody([
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprI64Const, 0x81, 0x80, 0x80, 0x80, 0x10,
+      kExprCallFunction, three.index,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprI32Const, 0,
+      kExprCallFunction, one.index,
+      kExprDrop,
+    ]).exportFunc();
+
+  return builder.instantiate({});
+}
+
+var instance = InstanceMaker(0);
+instance.exports.two();
+
+// Regression test for crbug.com/1224882.
+var instance_with_offset = InstanceMaker(4);
+instance_with_offset.exports.two();