Commit f12766a

Be more careful about the order in which we read the next field
during task annihilation, since it is easy to tread on freed memory.
1 parent 0623fb5

File tree: 1 file changed, +25 -6 lines


src/libcore/cleanup.rs

Lines changed: 25 additions & 6 deletions
@@ -126,22 +126,29 @@ struct AnnihilateStats {
     n_bytes_freed: uint
 }

-unsafe fn each_live_alloc(f: &fn(box: *mut BoxRepr, uniq: bool) -> bool) {
+unsafe fn each_live_alloc(read_next_before: bool,
+                          f: &fn(box: *mut BoxRepr, uniq: bool) -> bool) {
+    //! Walks the internal list of allocations
+
     use managed;

     let task: *Task = transmute(rustrt::rust_get_task());
     let box = (*task).boxed_region.live_allocs;
     let mut box: *mut BoxRepr = transmute(copy box);
     while box != mut_null() {
-        let next = transmute(copy (*box).header.next);
+        let next_before = transmute(copy (*box).header.next);
         let uniq =
             (*box).header.ref_count == managed::raw::RC_MANAGED_UNIQUE;

         if ! f(box, uniq) {
             break
         }

-        box = next
+        if read_next_before {
+            box = next_before;
+        } else {
+            box = transmute(copy (*box).header.next);
+        }
     }
 }

@@ -173,7 +180,10 @@ pub unsafe fn annihilate() {
     };

     // Pass 1: Make all boxes immortal.
-    for each_live_alloc |box, uniq| {
+    //
+    // In this pass, nothing gets freed, so it does not matter whether
+    // we read the next field before or after the callback.
+    for each_live_alloc(true) |box, uniq| {
         stats.n_total_boxes += 1;
         if uniq {
             debug_ptr("Managed-uniq: ", &*box);
@@ -185,7 +195,11 @@ pub unsafe fn annihilate() {
     }

     // Pass 2: Drop all boxes.
-    for each_live_alloc |box, uniq| {
+    //
+    // In this pass, unique-managed boxes may get freed, but not
+    // managed boxes, so we must read the `next` field *after* the
+    // callback, as the original value may have been freed.
+    for each_live_alloc(false) |box, uniq| {
         if !uniq {
             debug_ptr("Invoking tydesc/glue on: ", &*box);
             let tydesc: *TypeDesc = transmute(copy (*box).header.type_desc);
@@ -198,7 +212,12 @@ pub unsafe fn annihilate() {
     }

     // Pass 3: Free all boxes.
-    for each_live_alloc |box, uniq| {
+    //
+    // In this pass, managed boxes may get freed (but not
+    // unique-managed boxes, though I think that none of those are
+    // left), so we must read the `next` field before, since it will
+    // not be valid after.
+    for each_live_alloc(true) |box, uniq| {
         if !uniq {
             debug_ptr("About to free: ", &*box);
             stats.n_bytes_freed +=
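
The ordering rule the patch encodes generalizes to any walk over an intrusive list whose nodes the visitor may free: if the callback can free the current node, the only safe pointer to follow afterwards is the snapshot of `next` taken before the call; if the callback may instead free the node that snapshot points at, the field has to be re-read after the call. Below is a minimal, self-contained sketch of that pattern in modern Rust syntax. The `Node` type, `each_node`, and the driver in `main` are hypothetical illustrations, not the libcore code from this commit.

// Sketch of the read-next-before vs. read-next-after pattern.
// Hypothetical types; not libcore's BoxRepr or each_live_alloc.
struct Node {
    next: *mut Node,
    value: u32,
}

unsafe fn each_node(head: *mut Node,
                    read_next_before: bool,
                    f: &mut dyn FnMut(*mut Node) -> bool) {
    let mut node = head;
    while !node.is_null() {
        // Snapshot taken while `*node` is certainly still live.
        let next_before = (*node).next;
        if !f(node) {
            break;
        }
        node = if read_next_before {
            // The callback may have freed `*node`; follow the snapshot.
            next_before
        } else {
            // `*node` is still live, but the snapshot may now dangle,
            // so read the field again.
            (*node).next
        };
    }
}

fn main() {
    // Build a two-element list: a -> b -> null.
    let b = Box::into_raw(Box::new(Node { next: std::ptr::null_mut(), value: 2 }));
    let a = Box::into_raw(Box::new(Node { next: b, value: 1 }));

    // This callback frees the node it is handed, so the walk must use the
    // snapshot (read_next_before = true), analogous to pass 3 in the patch.
    let mut free_node = |n: *mut Node| {
        unsafe {
            println!("freeing node {}", (*n).value);
            drop(Box::from_raw(n));
        }
        true
    };

    unsafe { each_node(a, true, &mut free_node) };
}

In the patch itself, passes 1 and 3 pass `true` (pass 1 arbitrarily, since nothing is freed there), while pass 2 passes `false` because dropping a managed box may free the unique-managed box that the saved `next` pointer referred to.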
