diff --git a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
index 172812a3802d3..31482206238ae 100644
--- a/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
+++ b/llvm/lib/Frontend/OpenMP/OMPIRBuilder.cpp
@@ -7836,7 +7836,7 @@ OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc,
          "OMP Atomic expects a pointer to target memory");
   Type *XElemTy = X.ElemTy;
   assert((XElemTy->isFloatingPointTy() || XElemTy->isIntegerTy() ||
-          XElemTy->isPointerTy()) &&
+          XElemTy->isPointerTy() || XElemTy->isStructTy()) &&
          "OMP atomic read expected a scalar type");
 
   Value *XRead = nullptr;
@@ -7846,6 +7846,21 @@ OpenMPIRBuilder::createAtomicRead(const LocationDescription &Loc,
         Builder.CreateLoad(XElemTy, X.Var, X.IsVolatile, "omp.atomic.read");
     XLD->setAtomic(AO);
     XRead = cast<Value>(XLD);
+  } else if (XElemTy->isStructTy()) {
+    // FIXME: Add checks to ensure __atomic_load is emitted iff the
+    // target does not support `atomicrmw` of the size of the struct
+    LoadInst *OldVal = Builder.CreateLoad(XElemTy, X.Var, "omp.atomic.read");
+    OldVal->setAtomic(AO);
+    const DataLayout &LoadDL = OldVal->getModule()->getDataLayout();
+    // Size of the value being loaded (the struct itself), NOT of the pointer
+    // operand's type — getTypeStoreSize on a pointer type yields the pointer
+    // width, which only coincidentally matches for 8-byte structs on 64-bit.
+    unsigned LoadSize = LoadDL.getTypeStoreSize(XElemTy);
+    OpenMPIRBuilder::AtomicInfo atomicInfo(
+        &Builder, XElemTy, LoadSize * 8, LoadSize * 8, OldVal->getAlign(),
+        OldVal->getAlign(), true /* UseLibcall */, X.Var);
+    auto AtomicLoadRes = atomicInfo.EmitAtomicLoadLibcall(AO);
+    XRead = AtomicLoadRes.first;
+    OldVal->eraseFromParent();
   } else {
     // We need to perform atomic op as integer
     IntegerType *IntCastTy =
diff --git a/mlir/test/Target/LLVMIR/openmp-llvm.mlir b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
index c7ddbda8e8a92..49f9f3562c78b 100644
--- a/mlir/test/Target/LLVMIR/openmp-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
@@ -1525,6 +1525,28 @@ llvm.func @_QPomp_atomic_capture_complex() {
+
+// -----
+
+// CHECK-LABEL: define void @omp_atomic_read_complex() {
+llvm.func @omp_atomic_read_complex(){
+
+// CHECK: %[[a:.*]] = alloca { float, float }, i64 1, align 8
+// CHECK: %[[b:.*]] = alloca { float, float }, i64 1, align 8
+// CHECK: %[[ATOMIC_TEMP_LOAD:.*]] = alloca { float, float }, align 8
+// CHECK: call void @__atomic_load(i64 8, ptr %[[b]], ptr %[[ATOMIC_TEMP_LOAD]], i32 0)
+// CHECK: %[[LOADED_VAL:.*]] = load { float, float }, ptr %[[ATOMIC_TEMP_LOAD]], align 8
+// CHECK: store { float, float } %[[LOADED_VAL]], ptr %[[a]], align 4
+// CHECK: ret void
+// CHECK: }
+
+  %0 = llvm.mlir.constant(1 : i64) : i64
+  %1 = llvm.alloca %0 x !llvm.struct<(f32, f32)> {bindc_name = "ib"} : (i64) -> !llvm.ptr
+  %2 = llvm.mlir.constant(1 : i64) : i64
+  %3 = llvm.alloca %2 x !llvm.struct<(f32, f32)> {bindc_name = "ia"} : (i64) -> !llvm.ptr
+  omp.atomic.read %1 = %3 : !llvm.ptr, !llvm.struct<(f32, f32)>
+  llvm.return
+}
 
 // -----
 
 // Checking an order-dependent operation when the order is `expr binop x`
 // CHECK-LABEL: @omp_atomic_update_ordering
 // CHECK-SAME: (ptr %[[x:.*]], i32 %[[expr:.*]])