 use option::*;
 use cast;
-use util;
 use ops::Drop;
 use rt::kill::BlockedTask;
 use kinds::Send;
 use rt::sched::Scheduler;
 use rt::local::Local;
-use unstable::atomics::{AtomicUint, AtomicOption, Acquire, SeqCst};
+use rt::select::{Select, SelectPort};
+use unstable::atomics::{AtomicUint, AtomicOption, Acquire, Release, SeqCst};
 use unstable::sync::UnsafeAtomicRcBox;
 use util::Void;
 use comm::{GenericChan, GenericSmartChan, GenericPort, Peekable};
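
The Select and SelectPort traits imported above live in rt::select, which this
commit does not touch. As a reading aid, here is a hedged reconstruction of
their shape, inferred only from the impl blocks below; the real definitions
may differ in details such as doc comments or default methods:

    // Sketch of rt::select, assuming only what the impls in this file need.
    pub trait Select {
        // True if data is already available; never blocks.
        fn optimistic_check(&mut self) -> bool;
        // Register the descheduled task; true means a sender rendezvoused
        // during registration and the task is already re-enqueued.
        fn block_on(&mut self, sched: &mut Scheduler, task: BlockedTask) -> bool;
        // Reclaim the registered handle after wakeup; true means data
        // arrived here while we were blocked.
        fn unblock_from(&mut self) -> bool;
    }

    pub trait SelectPort<T> {
        // Consume the port and take the payload; call only once data is
        // known to be ready.
        fn recv_ready(self) -> Option<T>;
    }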
@@ -76,6 +76,7 @@ pub fn oneshot<T: Send>() -> (PortOne<T>, ChanOne<T>) {
 }
 
 impl<T> ChanOne<T> {
+    #[inline]
     fn packet(&self) -> *mut Packet<T> {
         unsafe {
             let p: *mut ~Packet<T> = cast::transmute(&self.void_packet);
@@ -141,7 +142,6 @@ impl<T> ChanOne<T> {
     }
 }
 
-
 impl<T> PortOne<T> {
     fn packet(&self) -> *mut Packet<T> {
         unsafe {
@@ -162,46 +162,115 @@ impl<T> PortOne<T> {
 
     pub fn try_recv(self) -> Option<T> {
         let mut this = self;
-        let packet = this.packet();
 
         // Optimistic check. If data was sent already, we don't even need to block.
         // No release barrier needed here; we're not handing off our task pointer yet.
-        if unsafe { (*packet).state.load(Acquire) } != STATE_ONE {
+        if !this.optimistic_check() {
             // No data available yet.
             // Switch to the scheduler to put the ~Task into the Packet state.
             let sched = Local::take::<Scheduler>();
             do sched.deschedule_running_task_and_then |sched, task| {
-                unsafe {
-                    // Atomically swap the task pointer into the Packet state, issuing
-                    // an acquire barrier to prevent reordering of the subsequent read
-                    // of the payload. Also issues a release barrier to prevent
-                    // reordering of any previous writes to the task structure.
-                    let task_as_state = task.cast_to_uint();
-                    let oldstate = (*packet).state.swap(task_as_state, SeqCst);
-                    match oldstate {
-                        STATE_BOTH => {
-                            // Data has not been sent. Now we're blocked.
-                            rtdebug!("non-rendezvous recv");
-                            sched.metrics.non_rendezvous_recvs += 1;
-                        }
-                        STATE_ONE => {
-                            rtdebug!("rendezvous recv");
-                            sched.metrics.rendezvous_recvs += 1;
-
-                            // Channel is closed. Switch back and check the data.
-                            // NB: We have to drop back into the scheduler event loop here
-                            // instead of switching immediately back or we could end up
-                            // triggering infinite recursion on the scheduler's stack.
-                            let recvr = BlockedTask::cast_from_uint(task_as_state);
-                            sched.enqueue_blocked_task(recvr);
+                this.block_on(sched, task);
+            }
+        }
+
+        // Task resumes.
+        this.recv_ready()
+    }
+}
+
+impl<T> Select for PortOne<T> {
+    #[inline]
+    fn optimistic_check(&mut self) -> bool {
+        unsafe { (*self.packet()).state.load(Acquire) == STATE_ONE }
+    }
+
+    fn block_on(&mut self, sched: &mut Scheduler, task: BlockedTask) -> bool {
+        unsafe {
+            // Atomically swap the task pointer into the Packet state, issuing
+            // an acquire barrier to prevent reordering of the subsequent read
+            // of the payload. Also issues a release barrier to prevent
+            // reordering of any previous writes to the task structure.
+            let task_as_state = task.cast_to_uint();
+            let oldstate = (*self.packet()).state.swap(task_as_state, SeqCst);
+            match oldstate {
+                STATE_BOTH => {
+                    // Data has not been sent. Now we're blocked.
+                    rtdebug!("non-rendezvous recv");
+                    sched.metrics.non_rendezvous_recvs += 1;
+                    false
+                }
+                STATE_ONE => {
+                    // Re-record that we are the only owner of the packet.
+                    // Release barrier needed in case the task gets reawoken
+                    // on a different core (this is analogous to writing a
+                    // payload; a barrier in enqueueing the task protects it).
+                    // NB(#8132). This *must* occur before the enqueue below.
+                    // FIXME(#6842, #8130) This is usually only needed for the
+                    // assertion in recv_ready, except in the case of select().
+                    // This won't actually ever have cacheline contention, but
+                    // maybe should be optimized out with a cfg(test) anyway?
+                    (*self.packet()).state.store(STATE_ONE, Release);
+
+                    rtdebug!("rendezvous recv");
+                    sched.metrics.rendezvous_recvs += 1;
+
+                    // Channel is closed. Switch back and check the data.
+                    // NB: We have to drop back into the scheduler event loop here
+                    // instead of switching immediately back or we could end up
+                    // triggering infinite recursion on the scheduler's stack.
+                    let recvr = BlockedTask::cast_from_uint(task_as_state);
+                    sched.enqueue_blocked_task(recvr);
+                    true
+                }
+                _ => rtabort!("can't block_on; a task is already blocked")
+            }
+        }
+    }
+
+    // This is the only select trait function that's not also used in recv.
+    fn unblock_from(&mut self) -> bool {
+        let packet = self.packet();
+        unsafe {
+            // In case the data is available, the acquire barrier here matches
+            // the release barrier the sender used to release the payload.
+            match (*packet).state.load(Acquire) {
+                // Impossible. We removed STATE_BOTH when blocking on it, and
+                // no self-respecting sender would put it back.
+                STATE_BOTH => rtabort!("refcount already 2 in unblock_from"),
+                // Here, a sender already tried to wake us up. Perhaps they
+                // even succeeded! Data is available.
+                STATE_ONE => true,
+                // Still registered as blocked. Need to "unblock" the pointer.
+                task_as_state => {
+                    // In the window between the load and the CAS, a sender
+                    // might take the pointer and set the refcount to ONE. If
+                    // that happens, we shouldn't clobber that with BOTH!
+                    // Acquire barrier again for the same reason as above.
+                    match (*packet).state.compare_and_swap(task_as_state, STATE_BOTH,
+                                                           Acquire) {
+                        STATE_BOTH => rtabort!("refcount became 2 in unblock_from"),
+                        STATE_ONE => true, // Lost the race. Data available.
+                        same_ptr => {
+                            // We successfully unblocked our task pointer.
+                            assert!(task_as_state == same_ptr);
+                            let handle = BlockedTask::cast_from_uint(task_as_state);
+                            // Because we are already awake, the handle we
+                            // gave to this port shall already be empty.
+                            handle.assert_already_awake();
+                            false
                         }
-                        _ => util::unreachable()
                     }
                 }
             }
         }
+    }
+}
 
-        // Task resumes.
+impl<T> SelectPort<T> for PortOne<T> {
+    fn recv_ready(self) -> Option<T> {
+        let mut this = self;
+        let packet = this.packet();
 
         // No further memory barrier is needed here to access the
         // payload. Some scenarios:
@@ -213,8 +282,11 @@ impl<T> PortOne<T> {
         // 3) We encountered STATE_BOTH above and blocked, but the receiving task (this task)
         //    is pinned to some other scheduler, so the sending task had to give us to
         //    a different scheduler for resuming. That send synchronized memory.
-
         unsafe {
+            // See corresponding store() above in block_on for rationale.
+            // FIXME(#8130) This can happen only in test builds.
+            assert!((*packet).state.load(Acquire) == STATE_ONE);
+
             let payload = (*packet).payload.take();
 
             // The sender has closed up shop. Drop the packet.
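
Together these methods split the old try_recv body into the phases a
multi-port wait needs: a non-blocking poll (optimistic_check), a blocking
registration (block_on), and a post-wakeup cleanup (unblock_from) whose return
value says whether data landed on this port. Below is a minimal sketch of the
two-port driver they anticipate; select2_sketch is hypothetical and elides the
slow path, and the real select() in rt::select is not part of this diff:

    // Returns true to receive from `a`, false to receive from `b`.
    fn select2_sketch<A: Select, B: Select>(a: &mut A, b: &mut B) -> bool {
        // Fast path: either port may already hold a payload.
        if a.optimistic_check() { return true; }
        if b.optimistic_check() { return false; }
        // Slow path (elided): deschedule the running task, block_on() both
        // ports with handles to it, and after wakeup unblock_from() both;
        // the port whose unblock_from() returns true holds the payload, and
        // recv_ready() may then take it without blocking.
        fail!("slow path elided in this sketch")
    }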
@@ -234,7 +306,7 @@ impl<T> Peekable<T> for PortOne<T> {
             match oldstate {
                 STATE_BOTH => false,
                 STATE_ONE => (*packet).payload.is_some(),
-                _ => util::unreachable()
+                _ => rtabort!("peeked on a blocked task")
             }
         }
     }
@@ -368,6 +440,36 @@ impl<T> Peekable<T> for Port<T> {
     }
 }
 
+impl<T> Select for Port<T> {
+    #[inline]
+    fn optimistic_check(&mut self) -> bool {
+        do self.next.with_mut_ref |pone| { pone.optimistic_check() }
+    }
+
+    #[inline]
+    fn block_on(&mut self, sched: &mut Scheduler, task: BlockedTask) -> bool {
+        let task = Cell::new(task);
+        do self.next.with_mut_ref |pone| { pone.block_on(sched, task.take()) }
+    }
+
+    #[inline]
+    fn unblock_from(&mut self) -> bool {
+        do self.next.with_mut_ref |pone| { pone.unblock_from() }
+    }
+}
+
+impl<T> SelectPort<(T, Port<T>)> for Port<T> {
+    fn recv_ready(self) -> Option<(T, Port<T>)> {
+        match self.next.take().recv_ready() {
+            Some(StreamPayload { val, next }) => {
+                self.next.put_back(next);
+                Some((val, self))
+            }
+            None => None
+        }
+    }
+}
+
 pub struct SharedChan<T> {
     // Just like Chan, but a shared AtomicOption instead of Cell
     priv next: UnsafeAtomicRcBox<AtomicOption<StreamChanOne<T>>>
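
A design note on the stream impls above: recv_ready() on Port<T> consumes the
port but hands it back next to the value, so a stream that wins one round of
waiting stays selectable in the next. A minimal sketch under that contract;
drain_ready is hypothetical, and it leans on optimistic_check() being safe to
call at any time, while recv_ready() must only run once data is known ready:

    // Take every value already buffered on the stream, without blocking.
    // Returns None for the port if the sender closed the stream.
    fn drain_ready<T: Send>(port: Port<T>) -> (~[T], Option<Port<T>>) {
        let mut port = port;
        let mut vals = ~[];
        while port.optimistic_check() {
            match port.recv_ready() {
                Some((val, rest)) => { vals.push(val); port = rest; }
                None => return (vals, None)  // Closed without sending.
            }
        }
        (vals, Some(port))
    }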