@@ -63,16 +63,71 @@ get_next_stk_size(rust_scheduler *sched, rust_task *task,
63
63
sz = std::max (sz, next);
64
64
65
65
LOG (task, mem, " next stack size: %" PRIdPTR, sz);
66
+ I (sched, requested <= sz);
66
67
return sz;
67
68
}
68
69
69
70
// Task stack segments. Heap allocated and chained together.
70
71
72
// Registers a (new or reused) stack segment with valgrind and sets up its
// memory annotations: the usable region is marked undefined-but-addressable,
// and the first STACK_NOACCESS_SIZE bytes become a no-access guard zone.
// Must be paired with unconfig_valgrind_stack() before the segment is freed
// or recycled.
static void
config_valgrind_stack(stk_seg *stk) {
    // Tell valgrind this memory range will be used as a stack, so it does
    // not warn about the stack pointer moving into it.  stk->end is the
    // one-past-the-end address of the data area (set by the caller).
    stk->valgrind_id =
        VALGRIND_STACK_REGISTER(&stk->data[0],
                                stk->end);
#ifndef NVALGRIND
    // Establish that the stack is accessible.  This must be done when reusing
    // old stack segments, since the act of popping the stack previously
    // caused valgrind to consider the whole thing inaccessible.
    size_t sz = stk->end - (uintptr_t)&stk->data[0];
    VALGRIND_MAKE_MEM_UNDEFINED(stk->data, sz);

    // Establish some guard bytes so valgrind will tell
    // us if we run off the end of the stack.
    // NOTE(review): this assumes the stack grows downward toward data[0],
    // so the low-address bytes act as the red-zone guard — confirm against
    // the platform's stack-growth direction.
    VALGRIND_MAKE_MEM_NOACCESS(stk->data, STACK_NOACCESS_SIZE);
#endif
}
89
+
90
// Reverses config_valgrind_stack(): lifts the guard-zone poisoning and
// deregisters the segment from valgrind's stack tracking.  Must run before
// the segment is freed (or stashed for reuse), otherwise valgrind keeps a
// stale stack registration for the memory.
static void
unconfig_valgrind_stack(stk_seg *stk) {
#ifndef NVALGRIND
    // Make the guard bytes accessible again, but undefined, so a later
    // free()/reuse of this memory does not trip the no-access annotation.
    VALGRIND_MAKE_MEM_UNDEFINED(stk->data, STACK_NOACCESS_SIZE);
#endif
    VALGRIND_STACK_DEREGISTER(stk->valgrind_id);
}
98
+
99
// Releases a stack segment back through the owning task's allocator,
// logging the segment address first.  Callers are expected to have already
// called unconfig_valgrind_stack() on segments that were registered —
// NOTE(review): this helper does not do that itself; verify at call sites.
static void
free_stk(rust_task *task, stk_seg *stk) {
    LOGPTR(task->sched, "freeing stk segment", (uintptr_t)stk);
    task->free(stk);
}
104
+
71
105
static stk_seg*
72
106
new_stk (rust_scheduler *sched, rust_task *task, size_t requested_sz)
73
107
{
108
+ LOG (task, mem, " creating new stack for task %" PRIxPTR, task);
109
+
74
110
// The minimum stack size, in bytes, of a Rust stack, excluding red zone
75
111
size_t min_sz = get_min_stk_size (sched->min_stack_size );
112
+
113
+ // Try to reuse an existing stack segment
114
+ if (task->stk != NULL && task->stk ->prev != NULL ) {
115
+ size_t prev_sz = (size_t )(task->stk ->prev ->end
116
+ - (uintptr_t )&task->stk ->prev ->data [0 ]
117
+ - RED_ZONE_SIZE);
118
+ if (min_sz <= prev_sz) {
119
+ LOG (task, mem, " reusing existing stack" );
120
+ task->stk = task->stk ->prev ;
121
+ A (sched, task->stk ->prev == NULL , " Bogus stack ptr" );
122
+ config_valgrind_stack (task->stk );
123
+ return task->stk ;
124
+ } else {
125
+ LOG (task, mem, " existing stack is not big enough" );
126
+ free_stk (task, task->stk ->prev );
127
+ task->stk ->prev = NULL ;
128
+ }
129
+ }
130
+
76
131
// The size of the current stack segment, excluding red zone
77
132
size_t current_sz = 0 ;
78
133
if (task->stk != NULL ) {
@@ -88,16 +143,13 @@ new_stk(rust_scheduler *sched, rust_task *task, size_t requested_sz)
88
143
stk_seg *stk = (stk_seg *)task->malloc (sz, " stack" );
89
144
LOGPTR (task->sched , " new stk" , (uintptr_t )stk);
90
145
memset (stk, 0 , sizeof (stk_seg));
146
+ stk->prev = NULL ;
91
147
stk->next = task->stk ;
92
148
stk->end = (uintptr_t ) &stk->data [rust_stk_sz + RED_ZONE_SIZE];
93
149
LOGPTR (task->sched , " stk end" , stk->end );
94
- stk->valgrind_id =
95
- VALGRIND_STACK_REGISTER (&stk->data [0 ],
96
- &stk->data [rust_stk_sz + RED_ZONE_SIZE]);
97
- #ifndef NVALGRIND
98
- VALGRIND_MAKE_MEM_NOACCESS (stk->data , STACK_NOACCESS_SIZE);
99
- #endif
150
+
100
151
task->stk = stk;
152
+ config_valgrind_stack (task->stk );
101
153
return stk;
102
154
}
103
155
@@ -108,12 +160,27 @@ del_stk(rust_task *task, stk_seg *stk)
108
160
109
161
task->stk = stk->next ;
110
162
111
- #ifndef NVALGRIND
112
- VALGRIND_MAKE_MEM_DEFINED (stk->data , STACK_NOACCESS_SIZE);
113
- #endif
114
- VALGRIND_STACK_DEREGISTER (stk->valgrind_id );
115
- LOGPTR (task->sched , " freeing stk segment" , (uintptr_t )stk);
116
- task->free (stk);
163
+ bool delete_stack = false ;
164
+ if (task->stk != NULL ) {
165
+ // Don't actually delete this stack. Save it to reuse later,
166
+ // preventing the pathological case where we repeatedly reallocate
167
+ // the stack for the next frame.
168
+ task->stk ->prev = stk;
169
+ } else {
170
+ // This is the last stack, delete it.
171
+ delete_stack = true ;
172
+ }
173
+
174
+ // Delete the previous previous stack
175
+ if (stk->prev != NULL ) {
176
+ free_stk (task, stk->prev );
177
+ stk->prev = NULL ;
178
+ }
179
+
180
+ unconfig_valgrind_stack (stk);
181
+ if (delete_stack) {
182
+ free_stk (task, stk);
183
+ }
117
184
}
118
185
119
186
// Tasks
0 commit comments