Commit 5603ecf

Implement atomic max/min
1 parent f571309 commit 5603ecf

File tree

  Cargo.lock
  gcc-test-backend/src/main.rs
  src/builder.rs
  src/type_.rs

4 files changed: +190 -43 lines changed

Cargo.lock

Lines changed: 2 additions & 2 deletions
Some generated files are not rendered by default.

gcc-test-backend/src/main.rs

Lines changed: 70 additions & 11 deletions
@@ -2,9 +2,9 @@
 #![feature(asm, backtrace, core_intrinsics, global_asm, naked_functions)]
 
 //use backtrace::Backtrace;
-use std::backtrace::Backtrace;
+//use std::backtrace::Backtrace;
 
-global_asm!("
+/*global_asm!("
 .global add_asm
 add_asm:
     mov %rdi, %rax
@@ -16,7 +16,7 @@ global_asm!("
 .global toto
 toto:
     ret"
-);
+);*/
 
 /*global_asm!("
 .att_syntax
@@ -79,8 +79,8 @@ toto:
 .popsection");*/
 
 extern "C" {
-    fn add_asm(a: i64, b: i64) -> i64;
-    fn toto();
+    //fn add_asm(a: i64, b: i64) -> i64;
+    //fn toto();
 }
 
 //#![feature(intrinsics)]
@@ -129,7 +129,7 @@ pub fn example_c() -> [T; N] {
     pub fn atomic_xchg<T: Copy>(dst: *mut T, src: T) -> T;
 }*/
 
-fn rem(num: f32, other: f32) -> f32 {
+/*fn rem(num: f32, other: f32) -> f32 {
     num % other
 }
 
@@ -189,7 +189,7 @@ fn thread_func(_arg: *mut c_void) -> *mut c_void {
     });
 
     std::ptr::null_mut()
-}
+}*/
 
 /*fn main() {
     FOO.with(|foo| {
@@ -249,9 +249,68 @@ use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
 fn main() {
     //println!("Global panic count: {}", GLOBAL_PANIC_COUNT.load(Ordering::Relaxed));
 
-    let mutex = std::sync::Mutex::new(());
+    /*let x = AtomicUsize::new(0xf731);
+    assert_eq!(x.fetch_min(0x137f, Ordering::SeqCst), 0xf731);
+    assert_eq!(x.load(Ordering::SeqCst), 0x137f);
+    assert_eq!(x.fetch_min(0xf731, Ordering::SeqCst), 0x137f);
+    assert_eq!(x.load(Ordering::SeqCst), 0x137f);
+
+    let x = AtomicUsize::new(0x137f);
+    assert_eq!(x.fetch_max(0xf731, Ordering::SeqCst), 0x137f);
+    assert_eq!(x.load(Ordering::SeqCst), 0xf731);
+    assert_eq!(x.fetch_max(0x137f, Ordering::SeqCst), 0xf731);
+    assert_eq!(x.load(Ordering::SeqCst), 0xf731);
+
+    let b: [i32; 0] = [];
+    assert_eq!(b.binary_search(&5), Err(0));
+
+    let b = [4];
+    assert_eq!(b.binary_search(&3), Err(0));
+    assert_eq!(b.binary_search(&4), Ok(0));
+    assert_eq!(b.binary_search(&5), Err(1));
+
+    let b = [1, 2, 4, 6, 8, 9];
+    assert_eq!(b.binary_search(&5), Err(3));
+    assert_eq!(b.binary_search(&6), Ok(3));
+    assert_eq!(b.binary_search(&7), Err(4));
+    assert_eq!(b.binary_search(&8), Ok(4));
+
+    let b = [1, 2, 4, 5, 6, 8];
+    assert_eq!(b.binary_search(&9), Err(6));
+
+    let b = [1, 2, 4, 6, 7, 8, 9];
+    assert_eq!(b.binary_search(&6), Ok(3));
+    assert_eq!(b.binary_search(&5), Err(3));
+    assert_eq!(b.binary_search(&8), Ok(5));
+
+    let b = [1, 2, 4, 5, 6, 8, 9];
+    assert_eq!(b.binary_search(&7), Err(5));
+    assert_eq!(b.binary_search(&0), Err(0));
+
+    let b = [1, 3, 3, 3, 7];
+    assert_eq!(b.binary_search(&0), Err(0));
+    assert_eq!(b.binary_search(&1), Ok(0));
+    assert_eq!(b.binary_search(&2), Err(1));
+    assert!(match b.binary_search(&3) {
+        Ok(1..=3) => true,
+        _ => false,
+    });
+    assert!(match b.binary_search(&3) {
+        Ok(1..=3) => true,
+        _ => false,
+    });
+    assert_eq!(b.binary_search(&4), Err(4));
+    assert_eq!(b.binary_search(&5), Err(4));
+    assert_eq!(b.binary_search(&6), Err(4));
+    assert_eq!(b.binary_search(&7), Ok(4));
+    assert_eq!(b.binary_search(&8), Err(5));*/
+
+    let b = [(); usize::MAX];
+    assert_eq!(b.binary_search(&()), Ok(usize::MAX / 2));
+
+    /*let mutex = std::sync::Mutex::new(());
     println!("Poisoned: {}", mutex.is_poisoned());
-    let _guard = mutex.lock().unwrap();
+    let _guard = mutex.lock().unwrap();*/
 
     //unsafe { toto() };
     //assert_eq!(unsafe { add_asm(40, 2) }, 42);
@@ -419,7 +478,7 @@ fn main() {
         println!("bits of {}: {:?}", value, &bits[0..popcnt]);
     }*/
 
-    let mut bits = [0u8; 64];
+    /*let mut bits = [0u8; 64];
    for value in 0..=1024u64 {
        let popcnt;
        unsafe {
@@ -441,7 +500,7 @@ fn main() {
            );
        }
        println!("bits of {}: {:?}", value, &bits[0..popcnt]);
-    }
+    }*/
 
    /*let bt = Backtrace::new();
    println!("{:?}", bt);*/

src/builder.rs

Lines changed: 102 additions & 28 deletions
@@ -52,6 +52,29 @@ type Funclet = ();
 // TODO: remove this variable.
 static mut RETURN_VALUE_COUNT: usize = 0;
 
+enum ExtremumOperation {
+    Max,
+    Min,
+}
+
+trait EnumClone {
+    fn clone(&self) -> Self;
+}
+
+impl EnumClone for AtomicOrdering {
+    fn clone(&self) -> Self {
+        match *self {
+            AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic,
+            AtomicOrdering::Unordered => AtomicOrdering::Unordered,
+            AtomicOrdering::Monotonic => AtomicOrdering::Monotonic,
+            AtomicOrdering::Acquire => AtomicOrdering::Acquire,
+            AtomicOrdering::Release => AtomicOrdering::Release,
+            AtomicOrdering::AcquireRelease => AtomicOrdering::AcquireRelease,
+            AtomicOrdering::SequentiallyConsistent => AtomicOrdering::SequentiallyConsistent,
+        }
+    }
+}
+
 pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
     pub cx: &'a CodegenCx<'gcc, 'tcx>,
     pub block: Option<Block<'gcc>>,
@@ -67,6 +90,80 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         }
     }
 
+    fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+        let size = self.cx.int_width(src.get_type()) / 8;
+
+        let func = self.current_func();
+
+        let load_ordering =
+            match order {
+                // TODO: does this make sense?
+                AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
+                _ => order.clone(),
+            };
+        let previous_value = self.atomic_load(dst, load_ordering.clone(), Size::from_bytes(size));
+        let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
+        let return_value = func.new_local(None, previous_value.get_type(), "return_value");
+        self.llbb().add_assignment(None, previous_var, previous_value);
+        self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
+
+        let while_block = func.new_block("while");
+        let after_block = func.new_block("after_while");
+        self.llbb().end_with_jump(None, while_block);
+
+        // NOTE: since jumps were added and compare_exchange doesn't expect this, the current blocks in the
+        // state need to be updated.
+        self.block = Some(while_block);
+        *self.cx.current_block.borrow_mut() = Some(while_block);
+
+        let comparison_operator =
+            match operation {
+                ExtremumOperation::Max => ComparisonOp::LessThan,
+                ExtremumOperation::Min => ComparisonOp::GreaterThan,
+            };
+
+        let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
+        let compare_exchange = self.compare_exchange(dst, previous_var.to_rvalue(), src, order, load_ordering, false);
+        let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
+        let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
+
+        while_block.end_with_conditional(None, cond, while_block, after_block);
+
+        // NOTE: since jumps were added in a place rustc does not expect, the current blocks in the
+        // state need to be updated.
+        self.block = Some(after_block);
+        *self.cx.current_block.borrow_mut() = Some(after_block);
+
+        return_value.to_rvalue()
+    }
+
+    fn compare_exchange(&self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
+        let size = self.cx.int_width(src.get_type());
+        let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
+        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
+        let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
+        let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
+
+        let void_ptr_type = self.context.new_type::<*mut ()>();
+        let volatile_void_ptr_type = void_ptr_type.make_volatile();
+        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
+        let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
+        self.llbb().add_assignment(None, expected, cmp);
+        let expected = self.context.new_cast(None, expected.get_address(None), void_ptr_type);
+
+        // NOTE: not sure why, but we need to cast to the signed type.
+        let new_src_type =
+            if size == 64 {
+                // TODO: use sized types (uint64_t, …) when libgccjit supports them.
+                self.cx.long_type
+            }
+            else {
+                src.get_type().to_signed(&self.cx)
+            };
+        let src = self.context.new_cast(None, src, new_src_type);
+        self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
+    }
+
     pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
         self.llbb().add_assignment(None, lvalue, value);
     }
@@ -1397,30 +1494,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 
     // Atomic Operations
     fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
-        let size = self.cx.int_width(src.get_type());
-        let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size / 8));
-        let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
-        let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
-        let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
-
-        let void_ptr_type = self.context.new_type::<*mut ()>();
-        let volatile_void_ptr_type = void_ptr_type.make_volatile();
-        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
-        let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
-        self.llbb().add_assignment(None, expected, cmp);
-        let expected = self.context.new_cast(None, expected.get_address(None), void_ptr_type);
-
-        // NOTE: not sure why, but we need to cast to the signed type.
-        let new_src_type =
-            if size == 64 {
-                // TODO: use sized types (uint64_t, …) when libgccjit supports them.
-                self.cx.long_type
-            }
-            else {
-                src.get_type().to_signed(&self.cx)
-            };
-        let src = self.context.new_cast(None, src, new_src_type);
-        let success = self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order]);
+        let success = self.compare_exchange(dst, cmp, src, order, failure_order, weak);
 
         let pair_type = self.cx.type_struct(&[src.get_type(), self.bool_type], false);
         let result = self.current_func().new_local(None, pair_type, "atomic_cmpxchg_result");
@@ -1446,10 +1520,10 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
             AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
             AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
-            AtomicRmwBinOp::AtomicMax => unimplemented!(),
-            AtomicRmwBinOp::AtomicMin => unimplemented!(),
-            AtomicRmwBinOp::AtomicUMax => unimplemented!(),
-            AtomicRmwBinOp::AtomicUMin => unimplemented!(),
+            AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+            AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
+            AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
+            AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
         };
 
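GCC provides __atomic_fetch_add/sub/and/or/xor/nand builtins, but nothing equivalent for max and min, so the new atomic_extremum helper lowers AtomicMax/AtomicMin (and their unsigned variants) to a compare-and-swap loop: load the current value, then keep retrying the compare-exchange as long as the stored value still compares worse than the operand. The sketch below shows the same technique in ordinary Rust for the Max case, with AtomicUsize and a hard-coded SeqCst ordering standing in for the backend's RValues and AtomicOrdering; Min only flips the comparison.

use std::sync::atomic::{AtomicUsize, Ordering};

// Sketch of the CAS loop that atomic max lowers to (for min, flip `prev >= val`).
fn fetch_max_via_cas(x: &AtomicUsize, val: usize) -> usize {
    let mut prev = x.load(Ordering::SeqCst);
    loop {
        if prev >= val {
            // The stored value is already the maximum; nothing to write.
            return prev;
        }
        match x.compare_exchange_weak(prev, val, Ordering::SeqCst, Ordering::SeqCst) {
            Ok(old) => return old,        // we installed `val`; return what was there before
            Err(actual) => prev = actual, // another thread raced us; retry with its value
        }
    }
}

One detail from the builder code worth noting: when the requested ordering is Release or AcquireRelease, the initial atomic_load is done with Acquire instead (see the load_ordering match and its TODO), since Release is not a valid ordering for a plain load.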

src/type_.rs

Lines changed: 16 additions & 2 deletions
@@ -1,3 +1,5 @@
+use std::convert::TryInto;
+
 use gccjit::{RValue, Struct, Type};
 use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods};
 use rustc_codegen_ssa::common::TypeKind;
@@ -237,8 +239,20 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         self.context.new_opaque_struct_type(None, name)
     }
 
-    pub fn type_array(&self, ty: Type<'gcc>, len: u64) -> Type<'gcc> {
-        self.context.new_array_type(None, ty, len as i32)
+    pub fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> {
+        if let Some(struct_type) = ty.is_struct() {
+            if struct_type.get_field_count() == 0 {
+                // NOTE: since gccjit only supports i32 for the array size and libcore's tests use a
+                // size of usize::MAX in test_binary_search, we work around this by setting the size to
+                // zero for ZSTs.
+                // FIXME: fix gccjit API.
+                len = 0;
+            }
+        }
+
+        let len: i32 = len.try_into().expect("array len");
+
+        self.context.new_array_type(None, ty, len)
     }
 }
 
244258

0 commit comments
