
 /* Notes made in the collector */
 #define HIT_PENDING		0x01	/* A front op was still pending */
-#define SOME_EMPTY		0x02	/* One or more streams are empty */
-#define ALL_EMPTY		0x04	/* All streams are empty */
-#define MAYBE_DISCONTIG		0x08	/* A front op may be discontiguous (rounded to PAGE_SIZE) */
-#define NEED_REASSESS		0x10	/* Need to loop round and reassess */
-#define REASSESS_DISCONTIG	0x20	/* Reassess discontiguity if contiguity advances */
-#define MADE_PROGRESS		0x40	/* Made progress cleaning up a stream or the folio set */
-#define BUFFERED		0x80	/* The pagecache needs cleaning up */
-#define NEED_RETRY		0x100	/* A front op requests retrying */
-#define SAW_FAILURE		0x200	/* One or more streams hit a permanent failure */
+#define NEED_REASSESS		0x02	/* Need to loop round and reassess */
+#define MADE_PROGRESS		0x04	/* Made progress cleaning up a stream or the folio set */
+#define BUFFERED		0x08	/* The pagecache needs cleaning up */
+#define NEED_RETRY		0x10	/* A front op requests retrying */
+#define SAW_FAILURE		0x20	/* One or more streams hit a permanent failure */

 /*
  * Successful completion of write of a folio to the server and/or cache. Note
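
With SOME_EMPTY, ALL_EMPTY, MAYBE_DISCONTIG and REASSESS_DISCONTIG removed, the surviving flags are renumbered so the mask stays dense. For orientation, a minimal sketch of how the collector consumes these notes; the flag names are from this file, but the condensed control flow around them is an assumption pieced together from the hunks below:

	unsigned int notes = BUFFERED;	/* e.g. a NETFS_WRITEBACK request */

	/* ...the per-stream collection loop ORs in bits as it goes... */
	notes |= MADE_PROGRESS;		/* cleaned up a stream or folio */

	if (notes & NEED_RETRY)
		goto need_retry;	/* a front op asked to be retried */
	if (notes & NEED_REASSESS)
		goto reassess_streams;	/* go round the loop again */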
@@ -85,10 +81,10 @@ int netfs_folio_written_back(struct folio *folio)
  * Unlock any folios we've finished with.
  */
 static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,
-					  unsigned long long collected_to,
 					  unsigned int *notes)
 {
 	struct folio_queue *folioq = wreq->buffer;
+	unsigned long long collected_to = wreq->collected_to;
 	unsigned int slot = wreq->buffer_head_slot;

 	if (slot >= folioq_nr_slots(folioq)) {
@@ -117,12 +113,6 @@ static void netfs_writeback_unlock_folios(struct netfs_io_request *wreq,

 	trace_netfs_collect_folio(wreq, folio, fend, collected_to);

-	if (fpos + fsize > wreq->contiguity) {
-		trace_netfs_collect_contig(wreq, fpos + fsize,
-					   netfs_contig_trace_unlock);
-		wreq->contiguity = fpos + fsize;
-	}
-
 	/* Unlock any folio we've transferred all of. */
 	if (collected_to < fend)
 		break;
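
Dropping the collected_to parameter goes hand in hand with the removal of wreq->contiguity: the caller no longer has to compute min(wreq->collected_to, wreq->contiguity), so the function can just read wreq->collected_to itself. The new call site, visible in the final hunk of this patch, reduces to:

	if (wreq->cleaned_to < wreq->collected_to)
		netfs_writeback_unlock_folios(wreq, &notes);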
@@ -380,7 +370,7 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 {
 	struct netfs_io_subrequest *front, *remove;
 	struct netfs_io_stream *stream;
-	unsigned long long collected_to;
+	unsigned long long collected_to, issued_to;
 	unsigned int notes;
 	int s;

@@ -389,28 +379,21 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 	trace_netfs_rreq(wreq, netfs_rreq_trace_collect);

 reassess_streams:
+	issued_to = atomic64_read(&wreq->issued_to);
 	smp_rmb();
 	collected_to = ULLONG_MAX;
-	if (wreq->origin == NETFS_WRITEBACK)
-		notes = ALL_EMPTY | BUFFERED | MAYBE_DISCONTIG;
-	else if (wreq->origin == NETFS_WRITETHROUGH)
-		notes = ALL_EMPTY | BUFFERED;
+	if (wreq->origin == NETFS_WRITEBACK ||
+	    wreq->origin == NETFS_WRITETHROUGH)
+		notes = BUFFERED;
 	else
-		notes = ALL_EMPTY;
+		notes = 0;

 	/* Remove completed subrequests from the front of the streams and
 	 * advance the completion point on each stream. We stop when we hit
 	 * something that's in progress. The issuer thread may be adding stuff
 	 * to the tail whilst we're doing this.
-	 *
-	 * We must not, however, merge in discontiguities that span whole
-	 * folios that aren't under writeback. This is made more complicated
-	 * by the folios in the gap being of unpredictable sizes - if they even
-	 * exist - but we don't want to look them up.
 	 */
 	for (s = 0; s < NR_IO_STREAMS; s++) {
-		loff_t rstart, rend;
-
 		stream = &wreq->io_streams[s];
 		/* Read active flag before list pointers */
 		if (!smp_load_acquire(&stream->active))
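
The smp_load_acquire() on stream->active pairs with a release store on the issuer side: the issuer must publish the stream's list pointers before flipping the flag, otherwise the collector could walk an uninitialised list. A sketch of the assumed issuer-side ordering (the release half is not part of this diff, so treat the exact form as an assumption):

	/* Issuer side (assumed pairing for the acquire above): make the
	 * subrequest list visible before advertising the stream. */
	list_add_tail(&subreq->rreq_link, &stream->subrequests);
	smp_store_release(&stream->active, true);

Likewise, issued_to is now sampled once at the top of reassess_streams, ahead of the smp_rmb(), so the whole reassessment pass works against a single snapshot of how far issuance has got.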
@@ -422,26 +405,10 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 		//_debug("sreq [%x] %llx %zx/%zx",
 		//       front->debug_index, front->start, front->transferred, front->len);

-		/* Stall if there may be a discontinuity. */
-		rstart = round_down(front->start, PAGE_SIZE);
-		if (rstart > wreq->contiguity) {
-			if (wreq->contiguity > stream->collected_to) {
-				trace_netfs_collect_gap(wreq, stream,
-							wreq->contiguity, 'D');
-				stream->collected_to = wreq->contiguity;
-			}
-			notes |= REASSESS_DISCONTIG;
-			break;
+		if (stream->collected_to < front->start) {
+			trace_netfs_collect_gap(wreq, stream, issued_to, 'F');
+			stream->collected_to = front->start;
 		}
-		rend = round_up(front->start + front->len, PAGE_SIZE);
-		if (rend > wreq->contiguity) {
-			trace_netfs_collect_contig(wreq, rend,
-						   netfs_contig_trace_collect);
-			wreq->contiguity = rend;
-			if (notes & REASSESS_DISCONTIG)
-				notes |= NEED_REASSESS;
-		}
-		notes &= ~MAYBE_DISCONTIG;

 		/* Stall if the front is still undergoing I/O. */
 		if (test_bit(NETFS_SREQ_IN_PROGRESS, &front->flags)) {
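
The PAGE_SIZE round_down/round_up dance and the request-wide wreq->contiguity cursor disappear here. Because the issuer now records its progress in wreq->issued_to, any hole below a stream's front subrequest is known to have been deliberately skipped during issuing, so the collector can advance collected_to straight to front->start. A hypothetical example with made-up offsets: if a cache stream holds subrequests for [0, 4K) and [16K, 20K) because the folios in between didn't need caching, then after the first subrequest completes, collected_to sits at 4K while front->start is 16K; the 'F' gap trace fires and collected_to jumps to 16K with no rounding and no extra reassessment pass.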
@@ -483,26 +450,20 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 		front = list_first_entry_or_null(&stream->subrequests,
 						 struct netfs_io_subrequest, rreq_link);
 		stream->front = front;
-		if (!front) {
-			unsigned long long jump_to = atomic64_read(&wreq->issued_to);
-
-			if (stream->collected_to < jump_to) {
-				trace_netfs_collect_gap(wreq, stream, jump_to, 'A');
-				stream->collected_to = jump_to;
-			}
-		}
-
 		spin_unlock_bh(&wreq->lock);
 		netfs_put_subrequest(remove, false,
 				     notes & SAW_FAILURE ?
 				     netfs_sreq_trace_put_cancel :
 				     netfs_sreq_trace_put_done);
 	}

-	if (front)
-		notes &= ~ALL_EMPTY;
-	else
-		notes |= SOME_EMPTY;
+	/* If we have an empty stream, we need to jump it forward
+	 * otherwise the collection point will never advance.
+	 */
+	if (!front && issued_to > stream->collected_to) {
+		trace_netfs_collect_gap(wreq, stream, issued_to, 'E');
+		stream->collected_to = issued_to;
+	}

 	if (stream->collected_to < collected_to)
 		collected_to = stream->collected_to;
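
The empty-stream jump moves inside the main loop and keys off issued_to rather than a cross-stream scan. Why it matters: the request's overall collection point is the minimum over all active streams, computed just above, so one permanently empty stream would pin it forever. Illustrative values (assumed, not from the patch):

	/* upload stream:  collected_to = 1MiB (all subreqs done)
	 * cache stream:   collected_to = 0    (empty: nothing to cache)
	 * => request collected_to = min(...) = 0 without the 'E' jump,
	 *    and the folios above 0 would never be unlocked. */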
@@ -511,36 +472,6 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)
 	if (collected_to != ULLONG_MAX && collected_to > wreq->collected_to)
 		wreq->collected_to = collected_to;

-	/* If we have an empty stream, we need to jump it forward over any gap
-	 * otherwise the collection point will never advance.
-	 *
-	 * Note that the issuer always adds to the stream with the lowest
-	 * so-far submitted start, so if we see two consecutive subreqs in one
-	 * stream with nothing between them in another stream, then the second
-	 * stream has a gap that can be jumped.
-	 */
-	if (notes & SOME_EMPTY) {
-		unsigned long long jump_to = wreq->start + READ_ONCE(wreq->submitted);
-
-		for (s = 0; s < NR_IO_STREAMS; s++) {
-			stream = &wreq->io_streams[s];
-			if (stream->active &&
-			    stream->front &&
-			    stream->front->start < jump_to)
-				jump_to = stream->front->start;
-		}
-
-		for (s = 0; s < NR_IO_STREAMS; s++) {
-			stream = &wreq->io_streams[s];
-			if (stream->active &&
-			    !stream->front &&
-			    stream->collected_to < jump_to) {
-				trace_netfs_collect_gap(wreq, stream, jump_to, 'B');
-				stream->collected_to = jump_to;
-			}
-		}
-	}
-
 	for (s = 0; s < NR_IO_STREAMS; s++) {
 		stream = &wreq->io_streams[s];
 		if (stream->active)
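
This removed block was the old way of unsticking empty streams: infer a safe jump point from the fronts of the other streams, relying on the issuer always adding to the stream with the lowest so-far submitted start. With wreq->issued_to recording issuance directly, the per-stream 'E' jump in the previous hunk covers this case, and the SOME_EMPTY/ALL_EMPTY bookkeeping that drove the two-pass scan goes away with it.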
@@ -551,43 +482,14 @@ static void netfs_collect_write_results(struct netfs_io_request *wreq)

 	/* Unlock any folios that we have now finished with. */
 	if (notes & BUFFERED) {
-		unsigned long long clean_to = min(wreq->collected_to, wreq->contiguity);
-
-		if (wreq->cleaned_to < clean_to)
-			netfs_writeback_unlock_folios(wreq, clean_to, &notes);
+		if (wreq->cleaned_to < wreq->collected_to)
+			netfs_writeback_unlock_folios(wreq, &notes);
 	} else {
 		wreq->cleaned_to = wreq->collected_to;
 	}

 	// TODO: Discard encryption buffers

-	/* If all streams are discontiguous with the last folio we cleared, we
-	 * may need to skip a set of folios.
-	 */
-	if ((notes & (MAYBE_DISCONTIG | ALL_EMPTY)) == MAYBE_DISCONTIG) {
-		unsigned long long jump_to = ULLONG_MAX;
-
-		for (s = 0; s < NR_IO_STREAMS; s++) {
-			stream = &wreq->io_streams[s];
-			if (stream->active && stream->front &&
-			    stream->front->start < jump_to)
-				jump_to = stream->front->start;
-		}
-
-		trace_netfs_collect_contig(wreq, jump_to, netfs_contig_trace_jump);
-		wreq->contiguity = jump_to;
-		wreq->cleaned_to = jump_to;
-		wreq->collected_to = jump_to;
-		for (s = 0; s < NR_IO_STREAMS; s++) {
-			stream = &wreq->io_streams[s];
-			if (stream->collected_to < jump_to)
-				stream->collected_to = jump_to;
-		}
-		//cond_resched();
-		notes |= MADE_PROGRESS;
-		goto reassess_streams;
-	}
-
 	if (notes & NEED_RETRY)
 		goto need_retry;
 	if ((notes & MADE_PROGRESS) && test_bit(NETFS_RREQ_PAUSE, &wreq->flags)) {