@@ -52,6 +52,29 @@ type Funclet = ();
 // TODO: remove this variable.
 static mut RETURN_VALUE_COUNT: usize = 0;
 
+enum ExtremumOperation {
+    Max,
+    Min,
+}
+
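+// NOTE: a local stand-in for Clone, presumably needed because the AtomicOrdering
+// imported from rustc does not derive Clone in this version.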
+trait EnumClone {
+    fn clone(&self) -> Self;
+}
+
+impl EnumClone for AtomicOrdering {
+    fn clone(&self) -> Self {
+        match *self {
+            AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
+            AtomicOrdering::Unordered => AtomicOrdering::Unordered,
+            AtomicOrdering::Monotonic => AtomicOrdering::Monotonic,
+            AtomicOrdering::Acquire => AtomicOrdering::Acquire,
+            AtomicOrdering::Release => AtomicOrdering::Release,
+            AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease,
+            AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent,
+        }
+    }
+}
+
 pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
     pub cx: &'a CodegenCx<'gcc, 'tcx>,
     pub block: Option<Block<'gcc>>,
@@ -67,6 +90,80 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         }
     }
 
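+    // NOTE: GCC has no __atomic_fetch_max/min builtins, so atomic max/min is emitted
+    // as an atomic load followed by a compare-and-swap retry loop.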
+    fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+        let size = self.cx.int_width(src.get_type()) / 8;
+
+        let func = self.current_func();
+
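+        // NOTE: this ordering doubles as the failure ordering of the compare-exchange
+        // below, and the __atomic builtins forbid Release/AcquireRelease on failure.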
+        let load_ordering =
+            match order {
+                // TODO: does this make sense?
+                AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
+                _ => order.clone(),
+            };
+        let previous_value = self.atomic_load(dst, load_ordering.clone(), Size::from_bytes(size));
+        let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
+        let return_value = func.new_local(None, previous_value.get_type(), "return_value");
+        self.llbb().add_assignment(None, previous_var, previous_value);
+        self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
+
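+        // Loop until the compare-exchange succeeds or the stored value is already the
+        // extremum (see the conditional at the end of the while block).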
+        let while_block = func.new_block("while");
+        let after_block = func.new_block("after_while");
+        self.llbb().end_with_jump(None, while_block);
+
+        // NOTE: since jumps were added and compare_exchange doesn't expect this, the current blocks in the
+        // state need to be updated.
+        self.block = Some(while_block);
+        *self.cx.current_block.borrow_mut() = Some(while_block);
+
+        let comparison_operator =
+            match operation {
+                ExtremumOperation::Max => ComparisonOp::LessThan,
+                ExtremumOperation::Min => ComparisonOp::GreaterThan,
+            };
+
+        let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
+        let compare_exchange = self.compare_exchange(dst, previous_var.to_rvalue(), src, order, load_ordering, false);
+        let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
+        let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
+
+        while_block.end_with_conditional(None, cond, while_block, after_block);
+
+        // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
+        // state need to be updated.
+        self.block = Some(after_block);
+        *self.cx.current_block.borrow_mut() = Some(after_block);
+
+        return_value.to_rvalue()
+    }
+
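+    // NOTE: wraps GCC's __atomic_compare_exchange_N builtin:
+    // bool __atomic_compare_exchange_N(type *ptr, type *expected, type desired,
+    //                                  bool weak, int success_order, int failure_order).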
+    fn compare_exchange(&self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
+        let size = self.cx.int_width(src.get_type());
+        let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
+        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+        let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
+        let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
+
+        let void_ptr_type = self.context.new_type::<*mut ()>();
+        let volatile_void_ptr_type = void_ptr_type.make_volatile();
+        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
+        let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
+        self.llbb().add_assignment(None, expected, cmp);
+        let expected = self.context.new_cast(None, expected.get_address(None), void_ptr_type);
+
+        // NOTE: not sure why, but we need to cast to the signed type.
+        let new_src_type =
+            if size == 64 {
+                // TODO: use sized types (uint64_t, …) when libgccjit supports them.
+                self.cx.long_type
+            }
+            else {
+                src.get_type().to_signed(&self.cx)
+            };
+        let src = self.context.new_cast(None, src, new_src_type);
+        self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
+    }
+
     pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
         self.llbb().add_assignment(None, lvalue, value);
     }
@@ -1397,30 +1494,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 
     // Atomic Operations
     fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
-        let size = self.cx.int_width(src.get_type());
-        let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
-        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
-        let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
-        let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
-
-        let void_ptr_type = self.context.new_type::<*mut ()>();
-        let volatile_void_ptr_type = void_ptr_type.make_volatile();
-        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
-        let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
-        self.llbb().add_assignment(None, expected, cmp);
-        let expected = self.context.new_cast(None, expected.get_address(None), void_ptr_type);
-
-        // NOTE: not sure why, but we need to cast to the signed type.
-        let new_src_type =
-            if size == 64 {
-                // TODO: use sized types (uint64_t, …) when libgccjit supports them.
-                self.cx.long_type
-            }
-            else {
-                src.get_type().to_signed(&self.cx)
-            };
-        let src = self.context.new_cast(None, src, new_src_type);
-        let success = self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order]);
+        let success = self.compare_exchange(dst, cmp, src, order, failure_order, weak);
 
         let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
         let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
@@ -1446,10 +1520,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
             AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
             AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
-            AtomicRmwBinOp::AtomicMax => unimplemented!(),
-            AtomicRmwBinOp::AtomicMin => unimplemented!(),
-            AtomicRmwBinOp::AtomicUMax => unimplemented!(),
-            AtomicRmwBinOp::AtomicUMin => unimplemented!(),
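+            // NOTE: Max/Min are reused for UMax/UMin; the signedness of the comparison
+            // presumably follows from the GCC types of the operands.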
+            AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+            AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+            AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+            AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
         };
 