diff --git a/clang/include/clang/Basic/Builtins.td b/clang/include/clang/Basic/Builtins.td
index 468c16050e2bf..403cb509d6b91 100644
--- a/clang/include/clang/Basic/Builtins.td
+++ b/clang/include/clang/Basic/Builtins.td
@@ -1977,16 +1977,16 @@ def AtomicNandFetch : AtomicBuiltin {
   let Prototype = "void(...)";
 }
 
-def AtomicTestAndSet : Builtin {
+def AtomicTestAndSet : AtomicBuiltin {
   let Spellings = ["__atomic_test_and_set"];
-  let Attributes = [NoThrow];
-  let Prototype = "bool(void volatile*, int)";
+  let Attributes = [NoThrow, CustomTypeChecking];
+  let Prototype = "bool(...)";
 }
 
-def AtomicClear : Builtin {
+def AtomicClear : AtomicBuiltin {
   let Spellings = ["__atomic_clear"];
-  let Attributes = [NoThrow];
-  let Prototype = "void(void volatile*, int)";
+  let Attributes = [NoThrow, CustomTypeChecking];
+  let Prototype = "void(...)";
 }
 
 def AtomicThreadFence : Builtin {
diff --git a/clang/lib/AST/Expr.cpp b/clang/lib/AST/Expr.cpp
index ba66d36278567..b6a86d82473d3 100644
--- a/clang/lib/AST/Expr.cpp
+++ b/clang/lib/AST/Expr.cpp
@@ -5070,6 +5070,8 @@ unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
   case AO__opencl_atomic_init:
   case AO__c11_atomic_load:
   case AO__atomic_load_n:
+  case AO__atomic_test_and_set:
+  case AO__atomic_clear:
     return 2;
 
   case AO__scoped_atomic_load_n:
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index f6cb2ad421e90..3adb2a7ad207f 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -723,6 +723,24 @@ static void EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, Address Dest,
   case AtomicExpr::AO__scoped_atomic_fetch_nand:
     Op = llvm::AtomicRMWInst::Nand;
     break;
+
+  case AtomicExpr::AO__atomic_test_and_set: {
+    llvm::AtomicRMWInst *RMWI =
+        CGF.emitAtomicRMWInst(llvm::AtomicRMWInst::Xchg, Ptr,
+                              CGF.Builder.getInt8(1), Order, Scope, E);
+    RMWI->setVolatile(E->isVolatile());
+    llvm::Value *Result = CGF.Builder.CreateIsNotNull(RMWI, "tobool");
+    CGF.Builder.CreateStore(Result, Dest);
+    return;
+  }
+
+  case AtomicExpr::AO__atomic_clear: {
+    llvm::StoreInst *Store =
+        CGF.Builder.CreateStore(CGF.Builder.getInt8(0), Ptr);
+    Store->setAtomic(Order, Scope);
+    Store->setVolatile(E->isVolatile());
+    return;
+  }
   }
 
   llvm::Value *LoadVal1 = CGF.Builder.CreateLoad(Val1);
@@ -878,6 +896,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   case AtomicExpr::AO__c11_atomic_load:
   case AtomicExpr::AO__opencl_atomic_load:
   case AtomicExpr::AO__hip_atomic_load:
+  case AtomicExpr::AO__atomic_test_and_set:
+  case AtomicExpr::AO__atomic_clear:
    break;
 
   case AtomicExpr::AO__atomic_load:
@@ -1200,6 +1220,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
   case AtomicExpr::AO__opencl_atomic_fetch_max:
   case AtomicExpr::AO__scoped_atomic_fetch_max:
   case AtomicExpr::AO__scoped_atomic_max_fetch:
+  case AtomicExpr::AO__atomic_test_and_set:
+  case AtomicExpr::AO__atomic_clear:
     llvm_unreachable("Integral atomic operations always become atomicrmw!");
   }
 
@@ -1239,7 +1261,8 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E) {
                  E->getOp() == AtomicExpr::AO__atomic_store ||
                  E->getOp() == AtomicExpr::AO__atomic_store_n ||
                  E->getOp() == AtomicExpr::AO__scoped_atomic_store ||
-                 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n;
+                 E->getOp() == AtomicExpr::AO__scoped_atomic_store_n ||
+                 E->getOp() == AtomicExpr::AO__atomic_clear;
   bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
                 E->getOp() == AtomicExpr::AO__opencl_atomic_load ||
                 E->getOp() == AtomicExpr::AO__hip_atomic_load ||
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index dcea32969fb99..2d2ad6e51116a 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -5139,147 +5139,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
                         ReturnValueSlot(), Args);
   }
 
-  case Builtin::BI__atomic_test_and_set: {
-    // Look at the argument type to determine whether this is a volatile
-    // operation. The parameter type is always volatile.
-    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
-    bool Volatile =
-        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
-
-    Address Ptr =
-        EmitPointerWithAlignment(E->getArg(0)).withElementType(Int8Ty);
-
-    Value *NewVal = Builder.getInt8(1);
-    Value *Order = EmitScalarExpr(E->getArg(1));
-    if (isa<llvm::ConstantInt>(Order)) {
-      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
-      AtomicRMWInst *Result = nullptr;
-      switch (ord) {
-      case 0: // memory_order_relaxed
-      default: // invalid order
-        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
-                                         llvm::AtomicOrdering::Monotonic);
-        break;
-      case 1: // memory_order_consume
-      case 2: // memory_order_acquire
-        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
-                                         llvm::AtomicOrdering::Acquire);
-        break;
-      case 3: // memory_order_release
-        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
-                                         llvm::AtomicOrdering::Release);
-        break;
-      case 4: // memory_order_acq_rel
-
-        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
-                                         llvm::AtomicOrdering::AcquireRelease);
-        break;
-      case 5: // memory_order_seq_cst
-        Result = Builder.CreateAtomicRMW(
-            llvm::AtomicRMWInst::Xchg, Ptr, NewVal,
-            llvm::AtomicOrdering::SequentiallyConsistent);
-        break;
-      }
-      Result->setVolatile(Volatile);
-      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
-    }
-
-    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
-
-    llvm::BasicBlock *BBs[5] = {
-      createBasicBlock("monotonic", CurFn),
-      createBasicBlock("acquire", CurFn),
-      createBasicBlock("release", CurFn),
-      createBasicBlock("acqrel", CurFn),
-      createBasicBlock("seqcst", CurFn)
-    };
-    llvm::AtomicOrdering Orders[5] = {
-        llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Acquire,
-        llvm::AtomicOrdering::Release, llvm::AtomicOrdering::AcquireRelease,
-        llvm::AtomicOrdering::SequentiallyConsistent};
-
-    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
-    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
-
-    Builder.SetInsertPoint(ContBB);
-    PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
-
-    for (unsigned i = 0; i < 5; ++i) {
-      Builder.SetInsertPoint(BBs[i]);
-      AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
-                                                   Ptr, NewVal, Orders[i]);
-      RMW->setVolatile(Volatile);
-      Result->addIncoming(RMW, BBs[i]);
-      Builder.CreateBr(ContBB);
-    }
-
-    SI->addCase(Builder.getInt32(0), BBs[0]);
-    SI->addCase(Builder.getInt32(1), BBs[1]);
-    SI->addCase(Builder.getInt32(2), BBs[1]);
-    SI->addCase(Builder.getInt32(3), BBs[2]);
-    SI->addCase(Builder.getInt32(4), BBs[3]);
-    SI->addCase(Builder.getInt32(5), BBs[4]);
-
-    Builder.SetInsertPoint(ContBB);
-    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
-  }
-
-  case Builtin::BI__atomic_clear: {
-    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
-    bool Volatile =
-        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
-
-    Address Ptr = EmitPointerWithAlignment(E->getArg(0));
-    Ptr = Ptr.withElementType(Int8Ty);
-    Value *NewVal = Builder.getInt8(0);
-    Value *Order = EmitScalarExpr(E->getArg(1));
-    if (isa<llvm::ConstantInt>(Order)) {
-      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
-      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
-      switch (ord) {
-      case 0: // memory_order_relaxed
-      default: // invalid order
-        Store->setOrdering(llvm::AtomicOrdering::Monotonic);
-        break;
-      case 3: // memory_order_release
-        Store->setOrdering(llvm::AtomicOrdering::Release);
-        break;
-      case 5: // memory_order_seq_cst
-        Store->setOrdering(llvm::AtomicOrdering::SequentiallyConsistent);
-        break;
-      }
-      return RValue::get(nullptr);
-    }
-
-    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
-
-    llvm::BasicBlock *BBs[3] = {
-      createBasicBlock("monotonic", CurFn),
-      createBasicBlock("release", CurFn),
-      createBasicBlock("seqcst", CurFn)
-    };
-    llvm::AtomicOrdering Orders[3] = {
-        llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Release,
-        llvm::AtomicOrdering::SequentiallyConsistent};
-
-    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
-    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
-
-    for (unsigned i = 0; i < 3; ++i) {
-      Builder.SetInsertPoint(BBs[i]);
-      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
-      Store->setOrdering(Orders[i]);
-      Builder.CreateBr(ContBB);
-    }
-
-    SI->addCase(Builder.getInt32(0), BBs[0]);
-    SI->addCase(Builder.getInt32(3), BBs[1]);
-    SI->addCase(Builder.getInt32(5), BBs[2]);
-
-    Builder.SetInsertPoint(ContBB);
-    return RValue::get(nullptr);
-  }
-
   case Builtin::BI__atomic_thread_fence:
   case Builtin::BI__atomic_signal_fence:
   case Builtin::BI__c11_atomic_thread_fence:
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 28dcfaac2e84f..66899c1089b2f 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -3634,6 +3634,7 @@ static bool isValidOrderingForOp(int64_t Ordering, AtomicExpr::AtomicOp Op) {
   case AtomicExpr::AO__atomic_store_n:
   case AtomicExpr::AO__scoped_atomic_store:
   case AtomicExpr::AO__scoped_atomic_store_n:
+  case AtomicExpr::AO__atomic_clear:
     return OrderingCABI != llvm::AtomicOrderingCABI::consume &&
            OrderingCABI != llvm::AtomicOrderingCABI::acquire &&
            OrderingCABI != llvm::AtomicOrderingCABI::acq_rel;
@@ -3686,12 +3687,18 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
     C11CmpXchg,
 
     // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
-    GNUCmpXchg
+    GNUCmpXchg,
+
+    // bool __atomic_test_and_set(A *, int)
+    TestAndSetByte,
+
+    // void __atomic_clear(A *, int)
+    ClearByte,
   } Form = Init;
 
-  const unsigned NumForm = GNUCmpXchg + 1;
-  const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 3, 4, 5, 6 };
-  const unsigned NumVals[] = { 1, 0, 1, 1, 1, 1, 2, 2, 3 };
+  const unsigned NumForm = ClearByte + 1;
+  const unsigned NumArgs[] = {2, 2, 3, 3, 3, 3, 4, 5, 6, 2, 2};
+  const unsigned NumVals[] = {1, 0, 1, 1, 1, 1, 2, 2, 3, 0, 0};
   // where:
   //   C is an appropriate type,
   //   A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
@@ -3852,6 +3859,14 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
   case AtomicExpr::AO__scoped_atomic_compare_exchange_n:
     Form = GNUCmpXchg;
     break;
+
+  case AtomicExpr::AO__atomic_test_and_set:
+    Form = TestAndSetByte;
+    break;
+
+  case AtomicExpr::AO__atomic_clear:
+    Form = ClearByte;
+    break;
   }
 
   unsigned AdjustedNumArgs = NumArgs[Form];
@@ -3911,14 +3926,28 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
     }
   }
 
-  // Pointer to object of size zero is not allowed.
-  if (RequireCompleteType(Ptr->getBeginLoc(), AtomTy,
-                          diag::err_incomplete_type))
-    return ExprError();
-  if (Context.getTypeInfoInChars(AtomTy).Width.isZero()) {
-    Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
-        << Ptr->getType() << 1 << Ptr->getSourceRange();
-    return ExprError();
+  if (Form != TestAndSetByte && Form != ClearByte) {
+    // Pointer to object of size zero is not allowed.
+    if (RequireCompleteType(Ptr->getBeginLoc(), AtomTy,
+                            diag::err_incomplete_type))
+      return ExprError();
+
+    if (Context.getTypeInfoInChars(AtomTy).Width.isZero()) {
+      Diag(ExprRange.getBegin(), diag::err_atomic_builtin_must_be_pointer)
+          << Ptr->getType() << 1 << Ptr->getSourceRange();
+      return ExprError();
+    }
+  } else {
+    // The __atomic_clear and __atomic_test_and_set intrinsics accept any
+    // non-const pointer type, including void* and pointers to incomplete
+    // structs, but only access the first byte.
+    AtomTy = Context.CharTy;
+    AtomTy = AtomTy.withCVRQualifiers(
+        pointerType->getPointeeType().getCVRQualifiers());
+    QualType PointerQT = Context.getPointerType(AtomTy);
+    pointerType = PointerQT->getAs<PointerType>();
+    Ptr = ImpCastExprToType(Ptr, PointerQT, CK_BitCast).get();
+    ValType = AtomTy;
   }
 
   // For an arithmetic operation, the implied arithmetic must be well-formed.
@@ -3997,10 +4026,10 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
   ValType.removeLocalVolatile();
   ValType.removeLocalConst();
   QualType ResultType = ValType;
-  if (Form == Copy || Form == LoadCopy || Form == GNUXchg ||
-      Form == Init)
+  if (Form == Copy || Form == LoadCopy || Form == GNUXchg || Form == Init ||
+      Form == ClearByte)
     ResultType = Context.VoidTy;
-  else if (Form == C11CmpXchg || Form == GNUCmpXchg)
+  else if (Form == C11CmpXchg || Form == GNUCmpXchg || Form == TestAndSetByte)
     ResultType = Context.BoolTy;
 
   // The type of a parameter passed 'by value'. In the GNU atomics, such
@@ -4045,6 +4074,10 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
       APIOrderedArgs.push_back(Args[1]); // Order
       APIOrderedArgs.push_back(Args[3]); // OrderFail
       break;
+    case TestAndSetByte:
+    case ClearByte:
+      APIOrderedArgs.push_back(Args[1]); // Order
+      break;
     }
   } else
     APIOrderedArgs.append(Args.begin(), Args.end());
@@ -4130,6 +4163,8 @@ ExprResult Sema::BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
     SubExprs.push_back(APIOrderedArgs[1]); // Val1
     break;
   case Load:
+  case TestAndSetByte:
+  case ClearByte:
     SubExprs.push_back(APIOrderedArgs[1]); // Order
     break;
   case LoadCopy:
diff --git a/clang/test/CodeGen/atomic-test-and-set.c b/clang/test/CodeGen/atomic-test-and-set.c
new file mode 100644
index 0000000000000..39d4cef16b21d
--- /dev/null
+++ b/clang/test/CodeGen/atomic-test-and-set.c
@@ -0,0 +1,345 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
+// RUN: %clang_cc1 %s -emit-llvm -o - -triple=aarch64-none-elf | FileCheck %s
+// REQUIRES: aarch64-registered-target
+
+#include <stdatomic.h>
+
+// CHECK-LABEL: define dso_local void @clear_relaxed(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] monotonic, align 1
+// CHECK-NEXT: ret void
+//
+void clear_relaxed(char *ptr) {
+  __atomic_clear(ptr, memory_order_relaxed);
+}
+
+// CHECK-LABEL: define dso_local void @clear_seq_cst(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] seq_cst, align 1
+// CHECK-NEXT: ret void
+//
+void clear_seq_cst(char *ptr) {
+  __atomic_clear(ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: define dso_local void @clear_release(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] release, align 1
+// CHECK-NEXT: ret void
+//
+void clear_release(char *ptr) {
+  __atomic_clear(ptr, memory_order_release);
+}
+
+// CHECK-LABEL: define dso_local void @clear_dynamic(
+// CHECK-SAME: ptr noundef [[PTR:%.*]], i32 noundef [[ORDER:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ORDER_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: store i32 [[ORDER]], ptr [[ORDER_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ORDER_ADDR]], align 4
+// CHECK-NEXT: switch i32 [[TMP1]], label %[[MONOTONIC:.*]] [
+// CHECK-NEXT: i32 3, label %[[RELEASE:.*]]
+// CHECK-NEXT: i32 5, label %[[SEQCST:.*]]
+// CHECK-NEXT: ]
+// CHECK: [[MONOTONIC]]:
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] monotonic, align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE:.*]]
+// CHECK: [[RELEASE]]:
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] release, align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[SEQCST]]:
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] seq_cst, align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[ATOMIC_CONTINUE]]:
+// CHECK-NEXT: ret void
+//
+void clear_dynamic(char *ptr, int order) {
+  __atomic_clear(ptr, order);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_relaxed(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
+// CHECK-NEXT: ret void
+//
+void test_and_set_relaxed(char *ptr) {
+  __atomic_test_and_set(ptr, memory_order_relaxed);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_consume(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
+// CHECK-NEXT: ret void
+//
+void test_and_set_consume(char *ptr) {
+  __atomic_test_and_set(ptr, memory_order_consume);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_acquire(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
+// CHECK-NEXT: ret void
+//
+void test_and_set_acquire(char *ptr) {
+  __atomic_test_and_set(ptr, memory_order_acquire);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_release(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 release, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
+// CHECK-NEXT: ret void
+//
+void test_and_set_release(char *ptr) {
+  __atomic_test_and_set(ptr, memory_order_release);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_acq_rel(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acq_rel, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
+// CHECK-NEXT: ret void
+//
+void test_and_set_acq_rel(char *ptr) {
+  __atomic_test_and_set(ptr, memory_order_acq_rel);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_seq_cst(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 seq_cst, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
+// CHECK-NEXT: ret void
+//
+void test_and_set_seq_cst(char *ptr) {
+  __atomic_test_and_set(ptr, memory_order_seq_cst);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_dynamic(
+// CHECK-SAME: ptr noundef [[PTR:%.*]], i32 noundef [[ORDER:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ORDER_ADDR:%.*]] = alloca i32, align 4
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: store i32 [[ORDER]], ptr [[ORDER_ADDR]], align 4
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ORDER_ADDR]], align 4
+// CHECK-NEXT: switch i32 [[TMP1]], label %[[MONOTONIC:.*]] [
+// CHECK-NEXT: i32 1, label %[[ACQUIRE:.*]]
+// CHECK-NEXT: i32 2, label %[[ACQUIRE]]
+// CHECK-NEXT: i32 3, label %[[RELEASE:.*]]
+// CHECK-NEXT: i32 4, label %[[ACQREL:.*]]
+// CHECK-NEXT: i32 5, label %[[SEQCST:.*]]
+// CHECK-NEXT: ]
+// CHECK: [[MONOTONIC]]:
+// CHECK-NEXT: [[TMP2:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP2]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE:.*]]
+// CHECK: [[ACQUIRE]]:
+// CHECK-NEXT: [[TMP3:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acquire, align 1
+// CHECK-NEXT: [[TOBOOL1:%.*]] = icmp ne i8 [[TMP3]], 0
+// CHECK-NEXT: store i1 [[TOBOOL1]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[RELEASE]]:
+// CHECK-NEXT: [[TMP4:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 release, align 1
+// CHECK-NEXT: [[TOBOOL2:%.*]] = icmp ne i8 [[TMP4]], 0
+// CHECK-NEXT: store i1 [[TOBOOL2]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[ACQREL]]:
+// CHECK-NEXT: [[TMP5:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 acq_rel, align 1
+// CHECK-NEXT: [[TOBOOL3:%.*]] = icmp ne i8 [[TMP5]], 0
+// CHECK-NEXT: store i1 [[TOBOOL3]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[SEQCST]]:
+// CHECK-NEXT: [[TMP6:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 seq_cst, align 1
+// CHECK-NEXT: [[TOBOOL4:%.*]] = icmp ne i8 [[TMP6]], 0
+// CHECK-NEXT: store i1 [[TOBOOL4]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: br label %[[ATOMIC_CONTINUE]]
+// CHECK: [[ATOMIC_CONTINUE]]:
+// CHECK-NEXT: [[TMP7:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP7]] to i1
+// CHECK-NEXT: ret void
+//
+void test_and_set_dynamic(char *ptr, int order) {
+  __atomic_test_and_set(ptr, order);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_array(
+// CHECK-SAME: ) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[X:%.*]] = alloca [10 x i32], align 4
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
+// CHECK-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x i32], ptr [[X]], i64 0, i64 0
+// CHECK-NEXT: [[TMP0:%.*]] = atomicrmw volatile xchg ptr [[ARRAYDECAY]], i8 1 seq_cst, align 4
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP0]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP1]] to i1
+// CHECK-NEXT: ret void
+//
+void test_and_set_array() {
+  volatile int x[10];
+  __atomic_test_and_set(x, memory_order_seq_cst);
+}
+
+// These intrinsics accept any pointer type, including void and incomplete
+// structs, and always access the first byte regardless of the actual type
+// size.
+
+struct incomplete;
+
+// CHECK-LABEL: define dso_local void @clear_int(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] monotonic, align 4
+// CHECK-NEXT: ret void
+//
+void clear_int(int *ptr) {
+  __atomic_clear(ptr, memory_order_relaxed);
+}
+// CHECK-LABEL: define dso_local void @clear_void(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] monotonic, align 1
+// CHECK-NEXT: ret void
+//
+void clear_void(void *ptr) {
+  __atomic_clear(ptr, memory_order_relaxed);
+}
+// CHECK-LABEL: define dso_local void @clear_incomplete(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: store atomic i8 0, ptr [[TMP0]] monotonic, align 1
+// CHECK-NEXT: ret void
+//
+void clear_incomplete(struct incomplete *ptr) {
+  __atomic_clear(ptr, memory_order_relaxed);
+}
+
+// CHECK-LABEL: define dso_local void @test_and_set_int(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 4
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
+// CHECK-NEXT: ret void
+//
+void test_and_set_int(int *ptr) {
+  __atomic_test_and_set(ptr, memory_order_relaxed);
+}
+// CHECK-LABEL: define dso_local void @test_and_set_void(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
+// CHECK-NEXT: ret void
+//
+void test_and_set_void(void *ptr) {
+  __atomic_test_and_set(ptr, memory_order_relaxed);
+}
+// CHECK-LABEL: define dso_local void @test_and_set_incomplete(
+// CHECK-SAME: ptr noundef [[PTR:%.*]]) #[[ATTR0]] {
+// CHECK-NEXT: [[ENTRY:.*:]]
+// CHECK-NEXT: [[PTR_ADDR:%.*]] = alloca ptr, align 8
+// CHECK-NEXT: [[ATOMIC_TEMP:%.*]] = alloca i8, align 1
+// CHECK-NEXT: store ptr [[PTR]], ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = atomicrmw xchg ptr [[TMP0]], i8 1 monotonic, align 1
+// CHECK-NEXT: [[TOBOOL:%.*]] = icmp ne i8 [[TMP1]], 0
+// CHECK-NEXT: store i1 [[TOBOOL]], ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[TMP2:%.*]] = load i8, ptr [[ATOMIC_TEMP]], align 1
+// CHECK-NEXT: [[LOADEDV:%.*]] = trunc i8 [[TMP2]] to i1
+// CHECK-NEXT: ret void
+//
+void test_and_set_incomplete(struct incomplete *ptr) {
+  __atomic_test_and_set(ptr, memory_order_relaxed);
+}
diff --git a/clang/test/Sema/atomic-ops.c b/clang/test/Sema/atomic-ops.c
index 2405f804d0da5..725a12060d4e0 100644
--- a/clang/test/Sema/atomic-ops.c
+++ b/clang/test/Sema/atomic-ops.c
@@ -284,11 +284,29 @@ void f(_Atomic(int) *i, const _Atomic(int) *ci,
   const volatile int flag_k = 0;
   volatile int flag = 0;
-  (void)(int)__atomic_test_and_set(&flag_k, memory_order_seq_cst); // expected-warning {{passing 'const volatile int *' to parameter of type 'volatile void *'}}
+  (void)(int)__atomic_test_and_set(&flag_k, memory_order_seq_cst); // expected-error {{address argument to atomic operation must be a pointer to non-const type ('const volatile int *' invalid)}}
   (void)(int)__atomic_test_and_set(&flag, memory_order_seq_cst);
-  __atomic_clear(&flag_k, memory_order_seq_cst); // expected-warning {{passing 'const volatile int *' to parameter of type 'volatile void *'}}
+  __atomic_clear(&flag_k, memory_order_seq_cst); // expected-error {{address argument to atomic operation must be a pointer to non-const type ('const volatile int *' invalid)}}
   __atomic_clear(&flag, memory_order_seq_cst);
   (int)__atomic_clear(&flag, memory_order_seq_cst); // expected-error {{operand of type 'void'}}
+  __atomic_clear(0x8000, memory_order_seq_cst); // expected-error {{address argument to atomic builtin must be a pointer ('int' invalid)}}
+  __atomic_clear(&flag, memory_order_consume); // expected-warning {{memory order argument to atomic operation is invalid}}
+  __atomic_clear(&flag, memory_order_acquire); // expected-warning {{memory order argument to atomic operation is invalid}}
+  __atomic_clear(&flag, memory_order_acq_rel); // expected-warning {{memory order argument to atomic operation is invalid}}
+  _Bool lock;
+  __atomic_test_and_set(lock, memory_order_acquire); // expected-error {{address argument to atomic builtin must be a pointer}}
+  __atomic_clear(lock, memory_order_release); // expected-error {{address argument to atomic builtin must be a pointer}}
+
+  // These intrinsics accept any non-const pointer type (including
+  // pointer-to-incomplete), and access the first byte.
+  __atomic_test_and_set((void*)0x8000, memory_order_seq_cst);
+  __atomic_test_and_set((char*)0x8000, memory_order_seq_cst);
+  __atomic_test_and_set((int*)0x8000, memory_order_seq_cst);
+  __atomic_test_and_set((struct incomplete*)0x8000, memory_order_seq_cst);
+  __atomic_clear((void*)0x8000, memory_order_seq_cst);
+  __atomic_clear((char*)0x8000, memory_order_seq_cst);
+  __atomic_clear((int*)0x8000, memory_order_seq_cst);
+  __atomic_clear((struct incomplete*)0x8000, memory_order_seq_cst);
 
   __c11_atomic_init(ci, 0); // expected-error {{address argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
   __c11_atomic_store(ci, 0, memory_order_release); // expected-error {{address argument to atomic operation must be a pointer to non-const _Atomic type ('const _Atomic(int) *' invalid)}}
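
For readers unfamiliar with these GNU builtins, here is a minimal, hypothetical usage sketch (not part of the patch; the spinlock_t alias and the spin_lock/spin_unlock names are illustrative). It relies only on the semantics this patch pins down: __atomic_test_and_set lowers to an atomicrmw xchg of i8 1 on the first byte and returns whether the byte was already set, and __atomic_clear lowers to an atomic store of i8 0, which is why the new Sema check rejects consume/acquire/acq_rel orderings for the latter.

    // Hypothetical spinlock built on the two builtins this patch reworks.
    // Any non-const pointer works; only the first byte of *lock is touched.
    typedef volatile char spinlock_t;

    static void spin_lock(spinlock_t *lock) {
      // __atomic_test_and_set returns the previous value: true means
      // another thread holds the lock, so keep spinning until it clears.
      while (__atomic_test_and_set(lock, __ATOMIC_ACQUIRE))
        ;
    }

    static void spin_unlock(spinlock_t *lock) {
      // A plain atomic release store of zero; per the Sema change above,
      // only relaxed/release/seq_cst orderings are accepted here.
      __atomic_clear(lock, __ATOMIC_RELEASE);
    }

Note that, as the clear_int and test_and_set_int CHECK lines above show, the access stays a single byte even through an int *, but it inherits the pointee type's alignment (align 4).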