diff --git a/mk/rt.mk b/mk/rt.mk
index d25c866ec16f9..7ada4c0069520 100644
--- a/mk/rt.mk
+++ b/mk/rt.mk
@@ -10,7 +10,7 @@ RUNTIME_CS := rt/sync/timer.cpp \
               rt/rust_run_program.cpp \
               rt/rust_crate_cache.cpp \
               rt/rust_comm.cpp \
-              rt/rust_dom.cpp \
+              rt/rust_scheduler.cpp \
               rt/rust_task.cpp \
               rt/rust_task_list.cpp \
               rt/rust_proxy.cpp \
@@ -40,7 +40,7 @@ RUNTIME_HDR := rt/globals.h \
               rt/rust_util.h \
               rt/rust_chan.h \
               rt/rust_port.h \
-              rt/rust_dom.h \
+              rt/rust_scheduler.h \
               rt/rust_task.h \
               rt/rust_task_list.h \
               rt/rust_proxy.h \
diff --git a/src/comp/back/upcall.rs b/src/comp/back/upcall.rs
index 1a5a9a8d25461..5fad90ce031db 100644
--- a/src/comp/back/upcall.rs
+++ b/src/comp/back/upcall.rs
@@ -55,8 +55,6 @@ type upcalls =
      ValueRef get_type_desc,
      ValueRef new_task,
      ValueRef start_task,
-     ValueRef new_thread,
-     ValueRef start_thread,
      ValueRef ivec_resize,
      ValueRef ivec_spill);

@@ -102,7 +100,7 @@ fn declare_upcalls(type_names tn, ModuleRef llmod) -> @upcalls {
          mark=d("mark", [T_ptr(T_i8())], T_int()),
          new_str=d("new_str", [T_ptr(T_i8()), T_size_t()],
                    T_ptr(T_str())),
-         dup_str=d("dup_str", [T_ptr(T_str())],
+         dup_str=d("dup_str", [T_taskptr(tn), T_ptr(T_str())],
                    T_ptr(T_str())),
          new_vec=d("new_vec", [T_size_t(), T_ptr(T_tydesc(tn))],
                    T_opaque_vec_ptr()),
@@ -118,10 +116,6 @@ fn declare_upcalls(type_names tn, ModuleRef llmod) -> @upcalls {
          start_task=d("start_task", [T_taskptr(tn), T_int(), T_int(),
                       T_size_t()], T_taskptr(tn)),
-         new_thread=d("new_thread", [T_ptr(T_i8())], T_taskptr(tn)),
-         start_thread=d("start_thread",
-                        [T_taskptr(tn), T_int(), T_int(), T_int(),
-                         T_size_t()], T_taskptr(tn)),
          ivec_resize=d("ivec_resize", [T_ptr(T_opaque_ivec()), T_int()],
                        T_void()),
          ivec_spill=d("ivec_spill", [T_ptr(T_opaque_ivec()), T_int()],
diff --git a/src/comp/middle/trans.rs b/src/comp/middle/trans.rs
index aed4d3b753e27..4cc7a19787b5d 100644
--- a/src/comp/middle/trans.rs
+++ b/src/comp/middle/trans.rs
@@ -6512,7 +6512,7 @@ fn deep_copy(&@block_ctxt bcx, ValueRef v, ty::t t, ValueRef target_task)
     else if(ty::type_is_str(tcx, t)) {
         ret rslt(bcx,
                  bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.dup_str,
-                                [bcx.fcx.lltaskptr, v]));
+                                [bcx.fcx.lltaskptr, target_task, v]));
     }
     else if(ty::type_is_chan(tcx, t)) {
         // If this is a channel, we need to clone it.
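Note on the dup_str change above: deep_copy already knows the destination task, and the extra T_taskptr argument lets the runtime allocate the duplicated string on the task that will own the copy. A minimal sketch of what the matching runtime upcall could look like; the body and the use of `target` are illustrative assumptions, since this patch only shows the declaration and call-site change:

    // Hypothetical runtime counterpart of the new dup_str declaration:
    // the target task is passed alongside the implicit calling task.
    extern "C" CDECL rust_str *
    upcall_dup_str(rust_task *task, rust_task *target, rust_str *str) {
        size_t fill = str->fill;
        size_t alloc = next_power_of_two(sizeof(rust_str) + fill);
        // Allocate from the *target* task's heap so the copy lives with
        // its new owner rather than with the sender.
        void *mem = target->malloc(alloc, memory_region::LOCAL);
        if (!mem) {
            task->fail(1);
            return NULL;
        }
        // rust_str's constructor takes the owning scheduler, matching the
        // other rust_str call sites in this patch.
        return new (mem) rust_str(target->sched, alloc, fill, &str->data[0]);
    }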
diff --git a/src/rt/circular_buffer.cpp b/src/rt/circular_buffer.cpp
index ab98dfe34c4cd..8c0067ff002e2 100644
--- a/src/rt/circular_buffer.cpp
+++ b/src/rt/circular_buffer.cpp
@@ -4,34 +4,35 @@
 #include "rust_internal.h"

-circular_buffer::circular_buffer(rust_dom *dom, size_t unit_sz) :
-    dom(dom),
+circular_buffer::circular_buffer(rust_task *task, size_t unit_sz) :
+    sched(task->sched),
+    task(task),
     unit_sz(unit_sz),
     _buffer_sz(initial_size()),
     _next(0),
     _unread(0),
-    _buffer((uint8_t *)dom->malloc(_buffer_sz)) {
+    _buffer((uint8_t *)task->malloc(_buffer_sz)) {

-    A(dom, unit_sz, "Unit size must be larger than zero.");
+    A(sched, unit_sz, "Unit size must be larger than zero.");

-    DLOG(dom, mem, "new circular_buffer(buffer_sz=%d, unread=%d)"
+    DLOG(sched, mem, "new circular_buffer(buffer_sz=%d, unread=%d)"
         "-> circular_buffer=0x%" PRIxPTR,
         _buffer_sz, _unread, this);

-    A(dom, _buffer, "Failed to allocate buffer.");
+    A(sched, _buffer, "Failed to allocate buffer.");
 }

 circular_buffer::~circular_buffer() {
-    DLOG(dom, mem, "~circular_buffer 0x%" PRIxPTR, this);
-    I(dom, _buffer);
-    W(dom, _unread == 0,
+    DLOG(sched, mem, "~circular_buffer 0x%" PRIxPTR, this);
+    I(sched, _buffer);
+    W(sched, _unread == 0,
      "freeing circular_buffer with %d unread bytes", _unread);
-    dom->free(_buffer);
+    task->free(_buffer);
 }

 size_t
 circular_buffer::initial_size() {
-    I(dom, unit_sz > 0);
+    I(sched, unit_sz > 0);
     return INITIAL_CIRCULAR_BUFFER_SIZE_IN_UNITS * unit_sz;
 }

@@ -40,8 +41,8 @@ circular_buffer::initial_size() {
  */
 void
 circular_buffer::transfer(void *dst) {
-    I(dom, dst);
-    I(dom, _unread <= _buffer_sz);
+    I(sched, dst);
+    I(sched, _unread <= _buffer_sz);

     uint8_t *ptr = (uint8_t *) dst;

@@ -53,13 +54,13 @@ circular_buffer::transfer(void *dst) {
     } else {
         head_sz = _buffer_sz - _next;
     }
-    I(dom, _next + head_sz <= _buffer_sz);
+    I(sched, _next + head_sz <= _buffer_sz);
     memcpy(ptr, _buffer + _next, head_sz);

     // Then copy any other items from the beginning of the buffer
-    I(dom, _unread >= head_sz);
+    I(sched, _unread >= head_sz);
     size_t tail_sz = _unread - head_sz;
-    I(dom, head_sz + tail_sz <= _buffer_sz);
+    I(sched, head_sz + tail_sz <= _buffer_sz);
     memcpy(ptr + head_sz, _buffer, tail_sz);
 }

@@ -69,37 +70,37 @@ circular_buffer::transfer(void *dst) {
  */
 void
 circular_buffer::enqueue(void *src) {
-    I(dom, src);
-    I(dom, _unread <= _buffer_sz);
-    I(dom, _buffer);
+    I(sched, src);
+    I(sched, _unread <= _buffer_sz);
+    I(sched, _buffer);

     // Grow if necessary.
     if (_unread == _buffer_sz) {
         grow();
     }

-    DLOG(dom, mem, "circular_buffer enqueue "
+    DLOG(sched, mem, "circular_buffer enqueue "
         "unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
         _unread, _next, _buffer_sz, unit_sz);
-    I(dom, _unread < _buffer_sz);
-    I(dom, _unread + unit_sz <= _buffer_sz);
+    I(sched, _unread < _buffer_sz);
+    I(sched, _unread + unit_sz <= _buffer_sz);

     // Copy data
     size_t dst_idx = _next + _unread;
-    I(dom, dst_idx >= _buffer_sz || dst_idx + unit_sz <= _buffer_sz);
+    I(sched, dst_idx >= _buffer_sz || dst_idx + unit_sz <= _buffer_sz);
     if (dst_idx >= _buffer_sz) {
         dst_idx -= _buffer_sz;

-        I(dom, _next >= unit_sz);
-        I(dom, dst_idx <= _next - unit_sz);
+        I(sched, _next >= unit_sz);
+        I(sched, dst_idx <= _next - unit_sz);
     }
-    I(dom, dst_idx + unit_sz <= _buffer_sz);
+    I(sched, dst_idx + unit_sz <= _buffer_sz);
     memcpy(&_buffer[dst_idx], src, unit_sz);
     _unread += unit_sz;
-    DLOG(dom, mem, "circular_buffer pushed data at index: %d", dst_idx);
+    DLOG(sched, mem, "circular_buffer pushed data at index: %d", dst_idx);
 }

 /**
@@ -109,21 +110,21 @@ circular_buffer::enqueue(void *src) {
  */
 void
 circular_buffer::dequeue(void *dst) {
-    I(dom, unit_sz > 0);
-    I(dom, _unread >= unit_sz);
-    I(dom, _unread <= _buffer_sz);
-    I(dom, _buffer);
+    I(sched, unit_sz > 0);
+    I(sched, _unread >= unit_sz);
+    I(sched, _unread <= _buffer_sz);
+    I(sched, _buffer);

-    DLOG(dom, mem,
+    DLOG(sched, mem,
         "circular_buffer dequeue "
         "unread: %d, next: %d, buffer_sz: %d, unit_sz: %d",
         _unread, _next, _buffer_sz, unit_sz);

-    I(dom, _next + unit_sz <= _buffer_sz);
+    I(sched, _next + unit_sz <= _buffer_sz);
     if (dst != NULL) {
         memcpy(dst, &_buffer[_next], unit_sz);
     }
-    DLOG(dom, mem, "shifted data from index %d", _next);
+    DLOG(sched, mem, "shifted data from index %d", _next);
     _unread -= unit_sz;
     _next += unit_sz;
     if (_next == _buffer_sz) {
@@ -139,11 +140,11 @@ circular_buffer::dequeue(void *dst) {
 void
 circular_buffer::grow() {
     size_t new_buffer_sz = _buffer_sz * 2;
-    I(dom, new_buffer_sz <= MAX_CIRCULAR_BUFFER_SIZE);
-    DLOG(dom, mem, "circular_buffer is growing to %d bytes", new_buffer_sz);
-    void *new_buffer = dom->malloc(new_buffer_sz);
+    I(sched, new_buffer_sz <= MAX_CIRCULAR_BUFFER_SIZE);
+    DLOG(sched, mem, "circular_buffer is growing to %d bytes", new_buffer_sz);
+    void *new_buffer = task->malloc(new_buffer_sz);
     transfer(new_buffer);
-    dom->free(_buffer);
+    task->free(_buffer);
     _buffer = (uint8_t *)new_buffer;
     _next = 0;
     _buffer_sz = new_buffer_sz;
@@ -152,11 +153,11 @@ circular_buffer::grow() {
 void
 circular_buffer::shrink() {
     size_t new_buffer_sz = _buffer_sz / 2;
-    I(dom, initial_size() <= new_buffer_sz);
-    DLOG(dom, mem, "circular_buffer is shrinking to %d bytes", new_buffer_sz);
-    void *new_buffer = dom->malloc(new_buffer_sz);
+    I(sched, initial_size() <= new_buffer_sz);
+    DLOG(sched, mem, "circular_buffer is shrinking to %d bytes", new_buffer_sz);
+    void *new_buffer = task->malloc(new_buffer_sz);
     transfer(new_buffer);
-    dom->free(_buffer);
+    task->free(_buffer);
     _buffer = (uint8_t *)new_buffer;
     _next = 0;
     _buffer_sz = new_buffer_sz;
diff --git a/src/rt/circular_buffer.h b/src/rt/circular_buffer.h
index cdd0b03b09107..eb1e49494ea69 100644
--- a/src/rt/circular_buffer.h
+++ b/src/rt/circular_buffer.h
@@ -6,15 +6,17 @@
 #define CIRCULAR_BUFFER_H

 class
-circular_buffer : public dom_owned<circular_buffer> {
+circular_buffer : public task_owned<circular_buffer> {
     static const size_t INITIAL_CIRCULAR_BUFFER_SIZE_IN_UNITS = 8;
     static const size_t MAX_CIRCULAR_BUFFER_SIZE = 1 << 24;

+    rust_scheduler *sched;
+
 public:
-    rust_dom *dom;
+    rust_task *task;

     // Size of the data unit in bytes.
     const size_t unit_sz;
-    circular_buffer(rust_dom *dom, size_t unit_sz);
+    circular_buffer(rust_task *task, size_t unit_sz);
     ~circular_buffer();
     void transfer(void *dst);
     void enqueue(void *src);
diff --git a/src/rt/memory.h b/src/rt/memory.h
index 9196e28dd83b2..d5e5a6eb1a7cc 100644
--- a/src/rt/memory.h
+++ b/src/rt/memory.h
@@ -1,3 +1,4 @@
+// -*- c++ -*-
 #ifndef MEMORY_H
 #define MEMORY_H

@@ -5,50 +6,54 @@
 inline void *operator new(size_t size, void *mem) {
     return mem;
 }

-inline void *operator new(size_t size, rust_dom *dom) {
-    return dom->malloc(size, memory_region::LOCAL);
+inline void *operator new(size_t size, rust_kernel *kernel) {
+    return kernel->malloc(size);
 }

-inline void *operator new[](size_t size, rust_dom *dom) {
-    return dom->malloc(size, memory_region::LOCAL);
+inline void *operator new(size_t size, rust_task *task) {
+    return task->malloc(size, memory_region::LOCAL);
 }

-inline void *operator new(size_t size, rust_dom &dom) {
-    return dom.malloc(size, memory_region::LOCAL);
+inline void *operator new[](size_t size, rust_task *task) {
+    return task->malloc(size, memory_region::LOCAL);
 }

-inline void *operator new[](size_t size, rust_dom &dom) {
-    return dom.malloc(size, memory_region::LOCAL);
+inline void *operator new(size_t size, rust_task &task) {
+    return task.malloc(size, memory_region::LOCAL);
 }

-inline void *operator new(size_t size, rust_dom *dom,
+inline void *operator new[](size_t size, rust_task &task) {
+    return task.malloc(size, memory_region::LOCAL);
+}
+
+inline void *operator new(size_t size, rust_task *task,
                           memory_region::memory_region_type type) {
-    return dom->malloc(size, type);
+    return task->malloc(size, type);
 }

-inline void *operator new[](size_t size, rust_dom *dom,
+inline void *operator new[](size_t size, rust_task *task,
                             memory_region::memory_region_type type) {
-    return dom->malloc(size, type);
+    return task->malloc(size, type);
 }

-inline void *operator new(size_t size, rust_dom &dom,
+inline void *operator new(size_t size, rust_task &task,
                           memory_region::memory_region_type type) {
-    return dom.malloc(size, type);
+    return task.malloc(size, type);
 }

-inline void *operator new[](size_t size, rust_dom &dom,
+inline void *operator new[](size_t size, rust_task &task,
                             memory_region::memory_region_type type) {
-    return dom.malloc(size, type);
+    return task.malloc(size, type);
 }

-inline void operator delete(void *mem, rust_dom *dom) {
-    dom->free(mem, memory_region::LOCAL);
+inline void operator delete(void *mem, rust_task *task) {
+    task->free(mem, memory_region::LOCAL);
     return;
 }

-inline void operator delete(void *mem, rust_dom *dom,
+inline void operator delete(void *mem, rust_task *task,
                             memory_region::memory_region_type type) {
-    dom->free(mem, type);
+    task->free(mem, type);
     return;
 }
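With the memory.h overloads above, placement allocation is parameterized by rust_task or rust_kernel instead of rust_dom. A short usage sketch, using call shapes that appear verbatim later in this patch (rust_port.cpp and rust.cpp):

    // Task-local placement: storage comes from task->malloc(size, LOCAL),
    // and task_owned::operator delete returns it via task->free().
    rust_chan *chan = new (task) rust_chan(task, port, unit_sz);

    // Kernel placement: storage comes from kernel->malloc(size).
    command_line_args *args =
        new (kernel) command_line_args(sched->root_task, argc, argv);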
diff --git a/src/rt/rust.cpp b/src/rt/rust.cpp
index 093656b77d2ab..02ab54fd2e191 100644
--- a/src/rt/rust.cpp
+++ b/src/rt/rust.cpp
@@ -1,19 +1,21 @@
 #include "rust_internal.h"

 struct
-command_line_args : public dom_owned<command_line_args>
+command_line_args : public kernel_owned<command_line_args>
 {
-    rust_dom *dom;
+    rust_kernel *kernel;
+    rust_task *task;
     int argc;
     char **argv;

     // vec[str] passed to rust_task::start.
     rust_vec *args;

-    command_line_args(rust_dom *dom,
+    command_line_args(rust_task *task,
                       int sys_argc,
                       char **sys_argv)
-        : dom(dom),
+        : kernel(task->kernel),
+          task(task),
           argc(sys_argc),
           argv(sys_argv),
           args(NULL)
@@ -21,29 +23,29 @@ command_line_args : public dom_owned<command_line_args>
 #if defined(__WIN32__)
         LPCWSTR cmdline = GetCommandLineW();
         LPWSTR *wargv = CommandLineToArgvW(cmdline, &argc);
-        dom->win32_require("CommandLineToArgvW", wargv != NULL);
-        argv = (char **) dom->malloc(sizeof(char*) * argc);
+        kernel->win32_require("CommandLineToArgvW", wargv != NULL);
+        argv = (char **) kernel->malloc(sizeof(char*) * argc);
         for (int i = 0; i < argc; ++i) {
             int n_chars = WideCharToMultiByte(CP_UTF8, 0, wargv[i], -1,
                                               NULL, 0, NULL, NULL);
-            dom->win32_require("WideCharToMultiByte(0)", n_chars != 0);
-            argv[i] = (char *) dom->malloc(n_chars);
+            kernel->win32_require("WideCharToMultiByte(0)", n_chars != 0);
+            argv[i] = (char *) kernel->malloc(n_chars);
             n_chars = WideCharToMultiByte(CP_UTF8, 0, wargv[i], -1,
                                           argv[i], n_chars, NULL, NULL);
-            dom->win32_require("WideCharToMultiByte(1)", n_chars != 0);
+            kernel->win32_require("WideCharToMultiByte(1)", n_chars != 0);
         }
         LocalFree(wargv);
 #endif
         size_t vec_fill = sizeof(rust_str *) * argc;
         size_t vec_alloc = next_power_of_two(sizeof(rust_vec) + vec_fill);
-        void *mem = dom->malloc(vec_alloc);
-        args = new (mem) rust_vec(dom, vec_alloc, 0, NULL);
+        void *mem = kernel->malloc(vec_alloc);
+        args = new (mem) rust_vec(task->sched, vec_alloc, 0, NULL);
         rust_str **strs = (rust_str**) &args->data[0];
         for (int i = 0; i < argc; ++i) {
             size_t str_fill = strlen(argv[i]) + 1;
             size_t str_alloc = next_power_of_two(sizeof(rust_str) + str_fill);
-            mem = dom->malloc(str_alloc);
-            strs[i] = new (mem) rust_str(dom, str_alloc, str_fill,
+            mem = kernel->malloc(str_alloc);
+            strs[i] = new (mem) rust_str(task->sched, str_alloc, str_fill,
                                          (uint8_t const *)argv[i]);
         }
         args->fill = vec_fill;
@@ -58,15 +60,15 @@ command_line_args : public dom_owned<command_line_args>
         // Drop the args we've had pinned here.
         rust_str **strs = (rust_str**) &args->data[0];
         for (int i = 0; i < argc; ++i)
-            dom->free(strs[i]);
-        dom->free(args);
+            kernel->free(strs[i]);
+        kernel->free(args);
     }

 #ifdef __WIN32__
        for (int i = 0; i < argc; ++i) {
-            dom->free(argv[i]);
+            kernel->free(argv[i]);
        }
-        dom->free(argv);
+        kernel->free(argv);
 #endif
     }
 };

@@ -96,26 +98,24 @@ rust_start(uintptr_t main_fn, int argc, char **argv, void* crate_map) {
     rust_srv *srv = new rust_srv();
     rust_kernel *kernel = new rust_kernel(srv);
     kernel->start();
-    rust_handle<rust_dom> *handle = kernel->create_domain("main");
-    rust_dom *dom = handle->referent();
-    command_line_args *args = new (dom) command_line_args(dom, argc, argv);
+    rust_scheduler *sched = kernel->get_scheduler();
+    command_line_args *args
+        = new (kernel) command_line_args(sched->root_task, argc, argv);

-    DLOG(dom, dom, "startup: %d args in 0x%" PRIxPTR,
+    DLOG(sched, dom, "startup: %d args in 0x%" PRIxPTR,
         args->argc, (uintptr_t)args->args);
     for (int i = 0; i < args->argc; i++) {
-        DLOG(dom, dom, "startup: arg[%d] = '%s'", i, args->argv[i]);
+        DLOG(sched, dom, "startup: arg[%d] = '%s'", i, args->argv[i]);
     }

-    dom->root_task->start(main_fn, (uintptr_t)args->args);
+    sched->root_task->start(main_fn, (uintptr_t)args->args);

     int num_threads = get_num_threads();
-    DLOG(dom, dom, "Using %d worker threads.", num_threads);
+    DLOG(sched, dom, "Using %d worker threads.", num_threads);

-    int ret = dom->start_main_loops(num_threads);
+    int ret = kernel->start_task_threads(num_threads);
     delete args;
-    kernel->destroy_domain(dom);
-    kernel->join_all_domains();
     delete kernel;
     delete srv;
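Taken together, the rust.cpp changes collapse the old create_domain / join_all_domains / destroy_domain lifecycle into one scheduler owned by the kernel. A condensed sketch of the embedding sequence after this patch (argument plumbing elided; this just restates rust_start's new flow in isolation):

    rust_srv *srv = new rust_srv();
    rust_kernel *kernel = new rust_kernel(srv); // ctor creates the "main" scheduler
    kernel->start();
    rust_scheduler *sched = kernel->get_scheduler();
    sched->root_task->start(main_fn, (uintptr_t)args->args);
    int ret = kernel->start_task_threads(get_num_threads());
    delete kernel;                              // ~rust_kernel() destroys the scheduler
    delete srv;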
diff --git a/src/rt/rust_builtin.cpp b/src/rt/rust_builtin.cpp
index 27fe45e42d7a1..a7325bc066d1b 100644
--- a/src/rt/rust_builtin.cpp
+++ b/src/rt/rust_builtin.cpp
@@ -9,7 +9,7 @@ extern "C" CDECL rust_str*
 last_os_error(rust_task *task) {
-    rust_dom *dom = task->dom;
+    rust_scheduler *sched = task->sched;
     LOG(task, task, "last_os_error()");

 #if defined(__WIN32__)
@@ -42,12 +42,12 @@ last_os_error(rust_task *task) {
 #endif
     size_t fill = strlen(buf) + 1;
     size_t alloc = next_power_of_two(sizeof(rust_str) + fill);
-    void *mem = dom->malloc(alloc, memory_region::LOCAL);
+    void *mem = task->malloc(alloc, memory_region::LOCAL);
     if (!mem) {
         task->fail(1);
         return NULL;
     }
-    rust_str *st = new (mem) rust_str(dom, alloc, fill, (const uint8_t *)buf);
+    rust_str *st = new (mem) rust_str(sched, alloc, fill, (const uint8_t *)buf);

 #ifdef __WIN32__
     LocalFree((HLOCAL)buf);
@@ -57,7 +57,7 @@ last_os_error(rust_task *task) {

 extern "C" CDECL rust_str *
 rust_getcwd(rust_task *task) {
-    rust_dom *dom = task->dom;
+    rust_scheduler *sched = task->sched;
     LOG(task, task, "rust_getcwd()");

     char cbuf[BUF_BYTES];
@@ -73,14 +73,14 @@ rust_getcwd(rust_task *task) {

     size_t fill = strlen(cbuf) + 1;
     size_t alloc = next_power_of_two(sizeof(rust_str) + fill);
-    void *mem = dom->malloc(alloc, memory_region::LOCAL);
+    void *mem = task->malloc(alloc, memory_region::LOCAL);
     if (!mem) {
         task->fail(1);
         return NULL;
     }

     rust_str *st;
-    st = new (mem) rust_str(dom, alloc, fill, (const uint8_t *)cbuf);
+    st = new (mem) rust_str(sched, alloc, fill, (const uint8_t *)cbuf);
     return st;
 }

@@ -124,7 +124,7 @@ unsupervise(rust_task *task) {
 extern "C" CDECL rust_vec*
 vec_alloc(rust_task *task, type_desc *t, type_desc *elem_t, size_t n_elts)
 {
-    rust_dom *dom = task->dom;
+    rust_scheduler *sched = task->sched;
     LOG(task, mem, "vec_alloc %" PRIdPTR " elements of size %" PRIdPTR,
         n_elts, elem_t->size);
     size_t fill = n_elts * elem_t->size;
@@ -134,7 +134,7 @@ vec_alloc(rust_task *task, type_desc *t, type_desc *elem_t, size_t n_elts)
         task->fail(4);
         return NULL;
     }
-    rust_vec *vec = new (mem) rust_vec(dom, alloc, 0, NULL);
+    rust_vec *vec = new (mem) rust_vec(sched, alloc, 0, NULL);
     return vec;
 }

@@ -198,11 +198,11 @@ vec_alloc_with_data(rust_task *task,
                     size_t elt_size,
                     void *d)
 {
-    rust_dom *dom = task->dom;
+    rust_scheduler *sched = task->sched;
     size_t alloc = next_power_of_two(sizeof(rust_vec) + (n_elts * elt_size));
-    void *mem = dom->malloc(alloc, memory_region::LOCAL);
+    void *mem = task->malloc(alloc, memory_region::LOCAL);
     if (!mem) return NULL;
-    return new (mem) rust_vec(dom, alloc, fill * elt_size, (uint8_t*)d);
+    return new (mem) rust_vec(sched, alloc, fill * elt_size, (uint8_t*)d);
 }

 extern "C" CDECL rust_vec*
@@ -355,13 +355,13 @@ str_from_buf(rust_task *task, char *buf, unsigned int len) {

 extern "C" CDECL void *
 rand_new(rust_task *task)
 {
-    rust_dom *dom = task->dom;
+    rust_scheduler *sched = task->sched;
     randctx *rctx = (randctx *) task->malloc(sizeof(randctx));
     if (!rctx) {
         task->fail(1);
         return NULL;
     }
-    isaac_init(dom, rctx);
+    isaac_init(sched, rctx);
     return rctx;
 }

@@ -391,16 +391,16 @@ task_yield(rust_task *task) {
 extern "C" CDECL void
 task_join(rust_task *task, rust_task *join_task) {
-    task->dom->scheduler_lock.lock();
+    task->kernel->scheduler_lock.lock();
     // If the other task is already dying, we don't have to wait for it.
     if (join_task->dead() == false) {
         join_task->tasks_waiting_to_join.push(task);
         task->block(join_task, "joining local task");
-        task->dom->scheduler_lock.unlock();
+        task->kernel->scheduler_lock.unlock();
         task->yield(2);
     }
     else {
-        task->dom->scheduler_lock.unlock();
+        task->kernel->scheduler_lock.unlock();
     }
 }

diff --git a/src/rt/rust_chan.cpp b/src/rt/rust_chan.cpp
index cc03c227acda9..778fb6b16fdf2 100644
--- a/src/rt/rust_chan.cpp
+++ b/src/rt/rust_chan.cpp
@@ -9,7 +9,7 @@ rust_chan::rust_chan(rust_task *task,
                      size_t unit_sz)
     : task(task),
       port(port),
-      buffer(task->dom, unit_sz) {
+      buffer(task, unit_sz) {
     ++task->ref_count;
     if (port) {
         associate(port);
@@ -22,7 +22,7 @@ rust_chan::rust_chan(rust_task *task,
 rust_chan::~rust_chan() {
     LOG(task, comm, "del rust_chan(task=0x%" PRIxPTR ")", (uintptr_t) this);

-    A(task->dom, is_associated() == false,
+    A(task->sched, is_associated() == false,
       "Channel must be disassociated before being freed.");
     --task->ref_count;
 }

@@ -49,7 +49,7 @@ bool rust_chan::is_associated() {
 * Unlink this channel from its associated port.
 */
 void rust_chan::disassociate() {
-    A(task->dom, is_associated(), "Channel must be associated with a port.");
+    A(task->sched, is_associated(), "Channel must be associated with a port.");

     if (port->is_proxy() == false) {
         LOG(task, task,
@@ -69,14 +69,14 @@ void rust_chan::disassociate() {
 void rust_chan::send(void *sptr) {
     buffer.enqueue(sptr);

-    rust_dom *dom = task->dom;
+    rust_scheduler *sched = task->sched;
     if (!is_associated()) {
-        W(dom, is_associated(),
+        W(sched, is_associated(),
          "rust_chan::transmit with no associated port.");
         return;
     }

-    A(dom, !buffer.is_empty(),
+    A(sched, !buffer.is_empty(),
      "rust_chan::transmit with nothing to send.");

     if (port->is_proxy()) {
@@ -86,7 +86,7 @@ void rust_chan::send(void *sptr) {
     } else {
         rust_port *target_port = port->referent();
         if (target_port->task->blocked_on(target_port)) {
-            DLOG(dom, comm, "dequeued in rendezvous_ptr");
+            DLOG(sched, comm, "dequeued in rendezvous_ptr");
             buffer.dequeue(target_port->task->rendezvous_ptr);
             target_port->task->rendezvous_ptr = 0;
             target_port->task->wakeup(target_port);
diff --git a/src/rt/rust_crate_cache.cpp b/src/rt/rust_crate_cache.cpp
index 1f66e0e0084e1..7d3822d253a36 100644
--- a/src/rt/rust_crate_cache.cpp
+++ b/src/rt/rust_crate_cache.cpp
@@ -7,16 +7,16 @@ rust_crate_cache::get_type_desc(size_t size,
                                 size_t n_descs,
                                 type_desc const **descs) {
-    I(dom, n_descs > 1);
+    I(sched, n_descs > 1);
     type_desc *td = NULL;
     size_t keysz = n_descs * sizeof(type_desc*);
     HASH_FIND(hh, this->type_descs, descs, keysz, td);
     if (td) {
-        DLOG(dom, cache, "rust_crate_cache::get_type_desc hit");
+        DLOG(sched, cache, "rust_crate_cache::get_type_desc hit");
         return td;
     }
-    DLOG(dom, cache, "rust_crate_cache::get_type_desc miss");
-    td = (type_desc*) dom->malloc(sizeof(type_desc) + keysz);
+    DLOG(sched, cache, "rust_crate_cache::get_type_desc miss");
+    td = (type_desc*) sched->kernel->malloc(sizeof(type_desc) + keysz);
     if (!td)
         return NULL;
     // By convention, desc 0 is the root descriptor.
@@ -27,7 +27,7 @@ rust_crate_cache::get_type_desc(size_t size,
     td->size = size;
     td->align = align;
     for (size_t i = 0; i < n_descs; ++i) {
-        DLOG(dom, cache,
+        DLOG(sched, cache,
             "rust_crate_cache::descs[%" PRIdPTR "] = 0x%" PRIxPTR,
             i, descs[i]);
         td->descs[i] = descs[i];
@@ -38,22 +38,22 @@ rust_crate_cache::get_type_desc(size_t size,
     return td;
 }

-rust_crate_cache::rust_crate_cache(rust_dom *dom)
+rust_crate_cache::rust_crate_cache(rust_scheduler *sched)
     : type_descs(NULL),
-      dom(dom),
+      sched(sched),
       idx(0)
 {
 }

 void
 rust_crate_cache::flush() {
-    DLOG(dom, cache, "rust_crate_cache::flush()");
+    DLOG(sched, cache, "rust_crate_cache::flush()");

     while (type_descs) {
         type_desc *d = type_descs;
         HASH_DEL(type_descs, d);
-        DLOG(dom, mem, "rust_crate_cache::flush() tydesc %" PRIxPTR, d);
-        dom->free(d);
+        DLOG(sched, mem, "rust_crate_cache::flush() tydesc %" PRIxPTR, d);
+        sched->kernel->free(d);
     }
 }

diff --git a/src/rt/rust_internal.h b/src/rt/rust_internal.h
index 5db1216c1021e..f10e55d40a09d 100644
--- a/src/rt/rust_internal.h
+++ b/src/rt/rust_internal.h
@@ -50,7 +50,7 @@ extern "C" {
 #include "sync/lock_and_signal.h"
 #include "sync/lock_free_queue.h"

-struct rust_dom;
+struct rust_scheduler;
 struct rust_task;
 class rust_log;
 class rust_port;
@@ -115,15 +115,9 @@ template <typename T> struct rc_base {
     ~rc_base();
 };

-template <typename T> struct dom_owned {
-    void operator delete(void *ptr) {
-        ((T *)ptr)->dom->free(ptr);
-    }
-};
-
 template <typename T> struct task_owned {
     void operator delete(void *ptr) {
-        ((T *)ptr)->task->dom->free(ptr);
+        ((T *)ptr)->task->free(ptr);
     }
 };

@@ -148,14 +142,14 @@ struct rust_cond { };

 // Helper class used regularly elsewhere.
-template <typename T> class ptr_vec : public dom_owned<ptr_vec<T> > {
+template <typename T> class ptr_vec : public task_owned<ptr_vec<T> > {
     static const size_t INIT_SIZE = 8;
-    rust_dom *dom;
+    rust_task *task;
     size_t alloc;
     size_t fill;
     T **data;
 public:
-    ptr_vec(rust_dom *dom);
+    ptr_vec(rust_task *task);
     ~ptr_vec();

     size_t length() {
@@ -180,8 +174,7 @@ template <typename T> class ptr_vec : public dom_owned<ptr_vec<T> > {
 #include "rust_proxy.h"
 #include "rust_kernel.h"
 #include "rust_message.h"
-#include "rust_dom.h"
-#include "memory.h"
+#include "rust_scheduler.h"

 struct rust_timer {
     // FIXME: This will probably eventually need replacement
@@ -190,7 +183,7 @@ struct rust_timer {
     // For now it's just the most basic "thread that can interrupt
     // its associated domain-thread" device, so that we have
     // *some* form of task-preemption.
-    rust_dom *dom;
+    rust_scheduler *sched;
     uintptr_t exit_flag;

 #if defined(__WIN32__)
@@ -200,7 +193,7 @@ struct rust_timer {
     pthread_t thread;
 #endif

-    rust_timer(rust_dom *dom);
+    rust_timer(rust_scheduler *sched);
     ~rust_timer();
 };

@@ -250,35 +243,11 @@ rust_alarm

 typedef ptr_vec<rust_task> rust_wait_queue;

-struct stk_seg {
-    unsigned int valgrind_id;
-    uintptr_t limit;
-    uint8_t data[];
-};
-
-struct frame_glue_fns {
-    uintptr_t mark_glue_off;
-    uintptr_t drop_glue_off;
-    uintptr_t reloc_glue_off;
-};
-
-struct gc_alloc {
-    gc_alloc *prev;
-    gc_alloc *next;
-    uintptr_t ctrl_word;
-    uint8_t data[];
-    bool mark() {
-        if (ctrl_word & 1)
-            return false;
-        ctrl_word |= 1;
-        return true;
-    }
-};
-
 #include "circular_buffer.h"
 #include "rust_task.h"
 #include "rust_chan.h"
 #include "rust_port.h"
+#include "memory.h"

 #include "test/rust_test_harness.h"
 #include "test/rust_test_util.h"
diff --git a/src/rt/rust_kernel.cpp b/src/rt/rust_kernel.cpp
index f72da483c3502..f3ebfd4f4b951 100644
--- a/src/rt/rust_kernel.cpp
+++ b/src/rt/rust_kernel.cpp
@@ -11,58 +11,57 @@ rust_kernel::rust_kernel(rust_srv *srv) :
     _region(&srv->local_region),
     _log(srv, NULL),
     _srv(srv),
-    _interrupt_kernel_loop(FALSE) {
-    // Nop.
+    _interrupt_kernel_loop(FALSE)
+{
+    sched = create_scheduler("main");
 }

-rust_handle<rust_dom> *
-rust_kernel::create_domain(const char *name) {
+rust_scheduler *
+rust_kernel::create_scheduler(const char *name) {
     _kernel_lock.lock();
     rust_message_queue *message_queue =
         new (this) rust_message_queue(_srv, this);
     rust_srv *srv = _srv->clone();
-    rust_dom *dom =
-        new (this) rust_dom(this, message_queue, srv, name);
-    rust_handle<rust_dom> *handle = internal_get_dom_handle(dom);
+    rust_scheduler *sched =
+        new (this) rust_scheduler(this, message_queue, srv, name);
+    rust_handle<rust_scheduler> *handle = internal_get_sched_handle(sched);
     message_queue->associate(handle);
-    domains.append(dom);
     message_queues.append(message_queue);
-    KLOG("created domain: " PTR ", name: %s, index: %d, domains %d",
-         dom, name, dom->list_index, domains.length());
+    KLOG("created scheduler: " PTR ", name: %s, index: %d",
+         sched, name, sched->list_index);
     _kernel_lock.signal_all();
     _kernel_lock.unlock();
-    return handle;
+    return sched;
 }

 void
-rust_kernel::destroy_domain(rust_dom *dom) {
+rust_kernel::destroy_scheduler() {
     _kernel_lock.lock();
-    KLOG("deleting domain: " PTR ", name: %s, index: %d, domains %d",
-         dom, dom->name, dom->list_index, domains.length());
-    domains.remove(dom);
-    dom->message_queue->disassociate();
-    rust_srv *srv = dom->srv;
-    delete dom;
+    KLOG("deleting scheduler: " PTR ", name: %s, index: %d",
+         sched, sched->name, sched->list_index);
+    sched->message_queue->disassociate();
+    rust_srv *srv = sched->srv;
+    delete sched;
     delete srv;
     _kernel_lock.signal_all();
     _kernel_lock.unlock();
 }

-rust_handle<rust_dom> *
-rust_kernel::internal_get_dom_handle(rust_dom *dom) {
-    rust_handle<rust_dom> *handle = NULL;
-    if (_dom_handles.get(dom, &handle) == false) {
+rust_handle<rust_scheduler> *
+rust_kernel::internal_get_sched_handle(rust_scheduler *sched) {
+    rust_handle<rust_scheduler> *handle = NULL;
+    if (_sched_handles.get(sched, &handle) == false) {
         handle =
-            new (this) rust_handle<rust_dom>(this, dom->message_queue, dom);
-        _dom_handles.put(dom, handle);
+            new (this) rust_handle<rust_scheduler>(this, sched->message_queue, sched);
+        _sched_handles.put(sched, handle);
     }
     return handle;
 }

-rust_handle<rust_dom> *
-rust_kernel::get_dom_handle(rust_dom *dom) {
+rust_handle<rust_scheduler> *
+rust_kernel::get_sched_handle(rust_scheduler *sched) {
     _kernel_lock.lock();
-    rust_handle<rust_dom> *handle = internal_get_dom_handle(dom);
+    rust_handle<rust_scheduler> *handle =
+        internal_get_sched_handle(sched);
     _kernel_lock.unlock();
     return handle;
 }

@@ -73,7 +72,7 @@ rust_kernel::get_task_handle(rust_task *task) {
     rust_handle<rust_task> *handle = NULL;
     if (_task_handles.get(task, &handle) == false) {
         handle =
-            new (this) rust_handle<rust_task>(this, task->dom->message_queue,
+            new (this) rust_handle<rust_task>(this, task->sched->message_queue,
                                               task);
         _task_handles.put(task, handle);
     }
@@ -88,7 +87,7 @@ rust_kernel::get_port_handle(rust_port *port) {
     if (_port_handles.get(port, &handle) == false) {
         handle =
             new (this) rust_handle<rust_port>(this,
-                                              port->task->dom->message_queue,
+                                              port->task->sched->message_queue,
                                               port);
         _port_handles.put(port, handle);
     }
@@ -97,21 +96,8 @@ rust_kernel::get_port_handle(rust_port *port) {
 }

 void
-rust_kernel::join_all_domains() {
-    _kernel_lock.lock();
-    while (domains.length() > 0) {
-        _kernel_lock.wait();
-    }
-    _kernel_lock.unlock();
-    KLOG("joined domains");
-}
-
-void
-rust_kernel::log_all_domain_state() {
-    KLOG("log_all_domain_state: %d domains", domains.length());
-    for (uint32_t i = 0; i < domains.length(); i++) {
-        domains[i]->log_state();
-    }
+rust_kernel::log_all_scheduler_state() {
+    sched->log_state();
 }

 /**
@@ -172,9 +158,7 @@ rust_kernel::terminate_kernel_loop() {
 }

 rust_kernel::~rust_kernel() {
-    K(_srv, domains.length() == 0,
-      "Kernel has %d live domain(s), join all domains before killing "
-      "the kernel.", domains.length());
+    destroy_scheduler();

     terminate_kernel_loop();

@@ -190,8 +174,8 @@ rust_kernel::~rust_kernel() {
     KLOG("..task handles freed");
     free_handles(_port_handles);
     KLOG("..port handles freed");
-    free_handles(_dom_handles);
-    KLOG("..dom handles freed");
+    free_handles(_sched_handles);
+    KLOG("..sched handles freed");

     KLOG("freeing queues");

@@ -239,6 +223,56 @@ rust_kernel::signal_kernel_lock() {
     _kernel_lock.unlock();
 }

+int rust_kernel::start_task_threads(int num_threads)
+{
+    rust_task_thread *thread = NULL;
+
+    // -1, because this thread will also be a thread.
+    for(int i = 0; i < num_threads - 1; ++i) {
+        thread = new rust_task_thread(i + 1, this);
+        thread->start();
+        threads.push(thread);
+    }
+
+    sched->start_main_loop(0);
+
+    while(threads.pop(&thread)) {
+        thread->join();
+        delete thread;
+    }
+
+    return sched->rval;
+}
+
+#ifdef __WIN32__
+void
+rust_kernel::win32_require(LPCTSTR fn, BOOL ok) {
+    if (!ok) {
+        LPTSTR buf;
+        DWORD err = GetLastError();
+        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+                      FORMAT_MESSAGE_FROM_SYSTEM |
+                      FORMAT_MESSAGE_IGNORE_INSERTS,
+                      NULL, err,
+                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+                      (LPTSTR) &buf, 0, NULL );
+        DLOG_ERR(sched, dom, "%s failed with error %ld: %s", fn, err, buf);
+        LocalFree((HLOCAL)buf);
+        I(sched, ok);
+    }
+}
+#endif
+
+rust_task_thread::rust_task_thread(int id, rust_kernel *owner)
+    : id(id), owner(owner)
+{
+}
+
+void rust_task_thread::run()
+{
+    owner->sched->start_main_loop(id);
+}
+
 //
 // Local Variables:
 // mode: C++
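start_task_threads() is the kernel-side replacement for rust_dom::start_main_loops(): the thread pool moves to the kernel, but every thread still drives the same single scheduler under a distinct loop id. A sketch of the resulting layout for a hypothetical call with four threads:

    int rval = kernel->start_task_threads(4);
    // calling thread:      sched->start_main_loop(0)
    // rust_task_thread 1:  owner->sched->start_main_loop(1), via run()
    // rust_task_thread 2:  owner->sched->start_main_loop(2)
    // rust_task_thread 3:  owner->sched->start_main_loop(3)
    // The three workers are join()ed and deleted, then sched->rval is returned.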
Most of the data structures * in this class are synchronized since they are accessed from multiple @@ -49,7 +52,7 @@ class rust_kernel : public rust_thread { */ hash_map *> _task_handles; hash_map *> _port_handles; - hash_map *> _dom_handles; + hash_map *> _sched_handles; template void free_handles(hash_map* > &map); @@ -62,14 +65,17 @@ class rust_kernel : public rust_thread { void terminate_kernel_loop(); void pump_message_queues(); - rust_handle *internal_get_dom_handle(rust_dom *dom); + rust_handle * + internal_get_sched_handle(rust_scheduler *sched); -public: + array_list threads; - /** - * List of domains that are currently executing. - */ - indexed_list domains; + rust_scheduler *create_scheduler(const char *name); + void destroy_scheduler(); + +public: + rust_scheduler *sched; + lock_and_signal scheduler_lock; /** * Message queues are kernel objects and are associated with domains. @@ -80,15 +86,12 @@ class rust_kernel : public rust_thread { */ indexed_list message_queues; - rust_handle *get_dom_handle(rust_dom *dom); + rust_handle *get_sched_handle(rust_scheduler *sched); rust_handle *get_task_handle(rust_task *task); rust_handle *get_port_handle(rust_port *port); rust_kernel(rust_srv *srv); - rust_handle *create_domain(const char *name); - void destroy_domain(rust_dom *dom); - bool is_deadlocked(); void signal_kernel_lock(); @@ -101,25 +104,31 @@ class rust_kernel : public rust_thread { void notify_message_enqueued(rust_message_queue *queue, rust_message *message); - /** - * Blocks until all domains have terminated. - */ - void join_all_domains(); - - void log_all_domain_state(); + void log_all_scheduler_state(); void log(uint32_t level, char const *fmt, ...); virtual ~rust_kernel(); void *malloc(size_t size); void free(void *mem); -}; -inline void *operator new(size_t size, rust_kernel *kernel) { - return kernel->malloc(size); -} + // FIXME: this should go away + inline rust_scheduler *get_scheduler() const { return sched; } + + int start_task_threads(int num_threads); -inline void *operator new(size_t size, rust_kernel &kernel) { - return kernel.malloc(size); -} +#ifdef __WIN32__ + void win32_require(LPCTSTR fn, BOOL ok); +#endif +}; + +class rust_task_thread : public rust_thread { + int id; + rust_kernel *owner; + +public: + rust_task_thread(int id, rust_kernel *owner); + + virtual void run(); +}; #endif /* RUST_KERNEL_H */ diff --git a/src/rt/rust_log.cpp b/src/rt/rust_log.cpp index 01e92e1f23dda..a2ab77b1b5513 100644 --- a/src/rt/rust_log.cpp +++ b/src/rt/rust_log.cpp @@ -23,9 +23,9 @@ static const char * _foreground_colors[] = { "[37m", static lock_and_signal _log_lock; static uint32_t _last_thread_id; -rust_log::rust_log(rust_srv *srv, rust_dom *dom) : +rust_log::rust_log(rust_srv *srv, rust_scheduler *sched) : _srv(srv), - _dom(dom), + _sched(sched), _use_colors(getenv("RUST_COLOR_LOG")) { } @@ -104,12 +104,12 @@ rust_log::trace_ln(rust_task *task, uint32_t level, char *message) { uint32_t thread_id = hash((uint32_t) pthread_self()); #endif char prefix[BUF_BYTES] = ""; - if (_dom && _dom->name) { + if (_sched && _sched->name) { append_string(prefix, "%04" PRIxPTR ":%.10s:", - thread_id, _dom->name); + thread_id, _sched->name); } else { append_string(prefix, "%04" PRIxPTR ":0x%08" PRIxPTR ":", - thread_id, (uintptr_t) _dom); + thread_id, (uintptr_t) _sched); } if (task) { if (task->name) { diff --git a/src/rt/rust_log.h b/src/rt/rust_log.h index 334b4d9477573..ce0d8f593efce 100644 --- a/src/rt/rust_log.h +++ b/src/rt/rust_log.h @@ -1,3 +1,4 @@ +// -*- c++ -*- #ifndef 
RUST_LOG_H #define RUST_LOG_H @@ -5,30 +6,30 @@ const uint32_t log_err = 0; const uint32_t log_note = 1; #define LOG(task, field, ...) \ - DLOG_LVL(log_note, task, task->dom, field, __VA_ARGS__) + DLOG_LVL(log_note, task, task->sched, field, __VA_ARGS__) #define LOG_ERR(task, field, ...) \ - DLOG_LVL(log_err, task, task->dom, field, __VA_ARGS__) -#define DLOG(dom, field, ...) \ - DLOG_LVL(log_note, NULL, dom, field, __VA_ARGS__) -#define DLOG_ERR(dom, field, ...) \ - DLOG_LVL(log_err, NULL, dom, field, __VA_ARGS__) -#define LOGPTR(dom, msg, ptrval) \ - DLOG_LVL(log_note, NULL, dom, mem, "%s 0x%" PRIxPTR, msg, ptrval) -#define DLOG_LVL(lvl, task, dom, field, ...) \ + DLOG_LVL(log_err, task, task->sched, field, __VA_ARGS__) +#define DLOG(sched, field, ...) \ + DLOG_LVL(log_note, NULL, sched, field, __VA_ARGS__) +#define DLOG_ERR(sched, field, ...) \ + DLOG_LVL(log_err, NULL, sched, field, __VA_ARGS__) +#define LOGPTR(sched, msg, ptrval) \ + DLOG_LVL(log_note, NULL, sched, mem, "%s 0x%" PRIxPTR, msg, ptrval) +#define DLOG_LVL(lvl, task, sched, field, ...) \ do { \ - rust_dom* _d_ = dom; \ + rust_scheduler* _d_ = sched; \ if (log_rt_##field >= lvl && _d_->log_lvl >= lvl) { \ _d_->log(task, lvl, __VA_ARGS__); \ } \ } while (0) -struct rust_dom; +struct rust_scheduler; struct rust_task; class rust_log { public: - rust_log(rust_srv *srv, rust_dom *dom); + rust_log(rust_srv *srv, rust_scheduler *sched); virtual ~rust_log(); enum ansi_color { @@ -53,7 +54,7 @@ class rust_log { private: rust_srv *_srv; - rust_dom *_dom; + rust_scheduler *_sched; bool _use_labels; bool _use_colors; void trace_ln(rust_task *task, char *message); diff --git a/src/rt/rust_message.cpp b/src/rt/rust_message.cpp index 19a1d8a367e4e..716299e64fe19 100644 --- a/src/rt/rust_message.cpp +++ b/src/rt/rust_message.cpp @@ -109,9 +109,10 @@ void data_message::kernel_process() { } -rust_message_queue::rust_message_queue(rust_srv *srv, rust_kernel *kernel) : - region (srv, true), kernel(kernel), - dom_handle(NULL) { +rust_message_queue::rust_message_queue(rust_srv *srv, rust_kernel *kernel) + : region(srv, true), + kernel(kernel), + sched_handle(NULL) { // Nop. } diff --git a/src/rt/rust_message.h b/src/rt/rust_message.h index e9ce94bfa9e1f..6b95c9ffec784 100644 --- a/src/rt/rust_message.h +++ b/src/rt/rust_message.h @@ -93,26 +93,26 @@ class rust_message_queue : public lock_free_queue, public: memory_region region; rust_kernel *kernel; - rust_handle *dom_handle; + rust_handle *sched_handle; int32_t list_index; rust_message_queue(rust_srv *srv, rust_kernel *kernel); - void associate(rust_handle *dom_handle) { - this->dom_handle = dom_handle; + void associate(rust_handle *sched_handle) { + this->sched_handle = sched_handle; } /** * The Rust domain relinquishes control to the Rust kernel. */ void disassociate() { - this->dom_handle = NULL; + this->sched_handle = NULL; } /** * Checks if a Rust domain is responsible for draining the message queue. 
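The macro rename is mechanical at call sites: anything that passed a rust_dom* to the logging macros now passes a rust_scheduler*, and the task-level macros reach it through task->sched. Hypothetical call sites for illustration (these exact lines are not part of the patch; the assertion macros I/A/W elsewhere in the runtime follow the same first-argument change):

    DLOG(task->sched, mem, "allocated %d bytes", (int)sz); // was DLOG(task->dom, ...)
    LOG(task, comm, "sending on port 0x%" PRIxPTR, (uintptr_t)port);
    LOGPTR(task->sched, "new stk", (uintptr_t)stk);        // as in rust_task.cpp below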
diff --git a/src/rt/rust_message.cpp b/src/rt/rust_message.cpp
index 19a1d8a367e4e..716299e64fe19 100644
--- a/src/rt/rust_message.cpp
+++ b/src/rt/rust_message.cpp
@@ -109,9 +109,10 @@ void data_message::kernel_process() {
 }

-rust_message_queue::rust_message_queue(rust_srv *srv, rust_kernel *kernel) :
-    region (srv, true), kernel(kernel),
-    dom_handle(NULL) {
+rust_message_queue::rust_message_queue(rust_srv *srv, rust_kernel *kernel)
+    : region(srv, true),
+      kernel(kernel),
+      sched_handle(NULL) {
     // Nop.
 }

diff --git a/src/rt/rust_message.h b/src/rt/rust_message.h
index e9ce94bfa9e1f..6b95c9ffec784 100644
--- a/src/rt/rust_message.h
+++ b/src/rt/rust_message.h
@@ -93,26 +93,26 @@ class rust_message_queue : public lock_free_queue,
 public:
     memory_region region;
     rust_kernel *kernel;
-    rust_handle<rust_dom> *dom_handle;
+    rust_handle<rust_scheduler> *sched_handle;
     int32_t list_index;

     rust_message_queue(rust_srv *srv, rust_kernel *kernel);

-    void associate(rust_handle<rust_dom> *dom_handle) {
-        this->dom_handle = dom_handle;
+    void associate(rust_handle<rust_scheduler> *sched_handle) {
+        this->sched_handle = sched_handle;
     }

     /**
     * The Rust domain relinquishes control to the Rust kernel.
     */
    void disassociate() {
-        this->dom_handle = NULL;
+        this->sched_handle = NULL;
    }

    /**
     * Checks if a Rust domain is responsible for draining the message queue.
     */
    bool is_associated() {
-        return this->dom_handle != NULL;
+        return this->sched_handle != NULL;
    }

    void enqueue(rust_message* message) {
diff --git a/src/rt/rust_port.cpp b/src/rt/rust_port.cpp
index 57d0b21683650..a2bd3b34c38dc 100644
--- a/src/rt/rust_port.cpp
+++ b/src/rt/rust_port.cpp
@@ -3,14 +3,14 @@
 rust_port::rust_port(rust_task *task, size_t unit_sz)
     : maybe_proxy<rust_port>(this),
       task(task),
-      unit_sz(unit_sz), writers(task->dom), chans(task->dom) {
+      unit_sz(unit_sz), writers(task), chans(task) {

     LOG(task, comm,
         "new rust_port(task=0x%" PRIxPTR ", unit_sz=%d) -> port=0x%" PRIxPTR,
         (uintptr_t)task, unit_sz, (uintptr_t)this);

     // Allocate a remote channel, for remote channel data.
-    remote_channel = new (task->dom) rust_chan(task, this, unit_sz);
+    remote_channel = new (task) rust_chan(task, this, unit_sz);
 }

 rust_port::~rust_port() {
diff --git a/src/rt/rust_dom.cpp b/src/rt/rust_scheduler.cpp
similarity index 66%
rename from src/rt/rust_dom.cpp
rename to src/rt/rust_scheduler.cpp
index c9c6b56c09ee0..4ada1ae56973d 100644
--- a/src/rt/rust_dom.cpp
+++ b/src/rt/rust_scheduler.cpp
@@ -3,15 +3,13 @@
 #include "rust_internal.h"
 #include "globals.h"

-rust_dom::rust_dom(rust_kernel *kernel,
+rust_scheduler::rust_scheduler(rust_kernel *kernel,
     rust_message_queue *message_queue, rust_srv *srv,
     const char *name) :
     interrupt_flag(0),
     _log(srv, this),
     log_lvl(log_note),
     srv(srv),
-    local_region(&srv->local_region),
-    synchronized_region(&srv->synchronized_region),
     name(name),
     newborn_tasks(this, "newborn"),
     running_tasks(this, "running"),
@@ -34,8 +32,9 @@ rust_dom::rust_dom(rust_kernel *kernel,
     root_task = create_task(NULL, name);
 }

-rust_dom::~rust_dom() {
-    DLOG(this, dom, "~rust_dom %s @0x%" PRIxPTR, name, (uintptr_t)this);
+rust_scheduler::~rust_scheduler() {
+    DLOG(this, dom, "~rust_scheduler %s @0x%" PRIxPTR, name, (uintptr_t)this);
+
     newborn_tasks.delete_all();
     running_tasks.delete_all();
     blocked_tasks.delete_all();
@@ -46,19 +45,19 @@ rust_dom::~rust_dom() {
 }

 void
-rust_dom::activate(rust_task *task) {
+rust_scheduler::activate(rust_task *task) {
     context ctx;

     task->ctx.next = &ctx;
     DLOG(this, task, "descheduling...");
-    scheduler_lock.unlock();
+    kernel->scheduler_lock.unlock();
     task->ctx.swap(ctx);
-    scheduler_lock.lock();
+    kernel->scheduler_lock.lock();
     DLOG(this, task, "task has returned");
 }

 void
-rust_dom::log(rust_task* task, uint32_t level, char const *fmt, ...) {
+rust_scheduler::log(rust_task* task, uint32_t level, char const *fmt, ...) {
     char buf[BUF_BYTES];
     va_list args;
     va_start(args, fmt);
@@ -68,97 +67,15 @@ rust_dom::log(rust_task* task, uint32_t level, char const *fmt, ...)
 {
 }

 void
-rust_dom::fail() {
+rust_scheduler::fail() {
     log(NULL, log_err, "domain %s @0x%" PRIxPTR " root task failed",
         name, this);
     I(this, rval == 0);
     rval = 1;
 }

-void *
-rust_dom::malloc(size_t size) {
-    return malloc(size, memory_region::LOCAL);
-}
-
-void *
-rust_dom::malloc(size_t size, memory_region::memory_region_type type) {
-    if (type == memory_region::LOCAL) {
-        return local_region.malloc(size);
-    } else if (type == memory_region::SYNCHRONIZED) {
-        return synchronized_region.malloc(size);
-    }
-    I(this, false);
-    return NULL;
-}
-
-void *
-rust_dom::calloc(size_t size) {
-    return calloc(size, memory_region::LOCAL);
-}
-
-void *
-rust_dom::calloc(size_t size, memory_region::memory_region_type type) {
-    if (type == memory_region::LOCAL) {
-        return local_region.calloc(size);
-    } else if (type == memory_region::SYNCHRONIZED) {
-        return synchronized_region.calloc(size);
-    }
-    return NULL;
-}
-
-void *
-rust_dom::realloc(void *mem, size_t size) {
-    return realloc(mem, size, memory_region::LOCAL);
-}
-
-void *
-rust_dom::realloc(void *mem, size_t size,
-    memory_region::memory_region_type type) {
-    if (type == memory_region::LOCAL) {
-        return local_region.realloc(mem, size);
-    } else if (type == memory_region::SYNCHRONIZED) {
-        return synchronized_region.realloc(mem, size);
-    }
-    return NULL;
-}
-
-void
-rust_dom::free(void *mem) {
-    free(mem, memory_region::LOCAL);
-}
-
-void
-rust_dom::free(void *mem, memory_region::memory_region_type type) {
-    DLOG(this, mem, "rust_dom::free(0x%" PRIxPTR ")", mem);
-    if (type == memory_region::LOCAL) {
-        local_region.free(mem);
-    } else if (type == memory_region::SYNCHRONIZED) {
-        synchronized_region.free(mem);
-    }
-    return;
-}
-
-#ifdef __WIN32__
-void
-rust_dom::win32_require(LPCTSTR fn, BOOL ok) {
-    if (!ok) {
-        LPTSTR buf;
-        DWORD err = GetLastError();
-        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
-                      FORMAT_MESSAGE_FROM_SYSTEM |
-                      FORMAT_MESSAGE_IGNORE_INSERTS,
-                      NULL, err,
-                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
-                      (LPTSTR) &buf, 0, NULL );
-        DLOG_ERR(this, dom, "%s failed with error %ld: %s", fn, err, buf);
-        LocalFree((HLOCAL)buf);
-        I(this, ok);
-    }
-}
-#endif
-
 size_t
-rust_dom::number_of_live_tasks() {
+rust_scheduler::number_of_live_tasks() {
     return running_tasks.length() + blocked_tasks.length();
 }

@@ -166,8 +83,8 @@ rust_dom::number_of_live_tasks() {
  * Delete any dead tasks.
  */
 void
-rust_dom::reap_dead_tasks() {
-    I(this, scheduler_lock.lock_held_by_current_thread());
+rust_scheduler::reap_dead_tasks() {
+    I(this, kernel->scheduler_lock.lock_held_by_current_thread());
     for (size_t i = 0; i < dead_tasks.length(); ) {
         rust_task *task = dead_tasks[i];
         // Make sure this task isn't still running somewhere else...
@@ -187,7 +104,7 @@ rust_dom::reap_dead_tasks() {
 /**
  * Drains and processes incoming pending messages.
  */
-void rust_dom::drain_incoming_message_queue(bool process) {
+void rust_scheduler::drain_incoming_message_queue(bool process) {
     rust_message *message;
     while (message_queue->dequeue(&message)) {
         DLOG(this, comm, "<== receiving \"%s\" " PTR,
@@ -207,7 +124,7 @@ void rust_dom::drain_incoming_message_queue(bool process) {
  * Returns NULL if no tasks can be scheduled.
  */
 rust_task *
-rust_dom::schedule_task() {
+rust_scheduler::schedule_task() {
     I(this, this);
     // FIXME: in the face of failing tasks, this is not always right.
     // I(this, n_live_tasks() > 0);
@@ -225,7 +142,7 @@ rust_dom::schedule_task() {
 }

 void
-rust_dom::log_state() {
+rust_scheduler::log_state() {
     if (log_rt_task < log_note) return;

     if (!running_tasks.is_empty()) {
@@ -265,8 +182,8 @@ rust_dom::log_state() {
  * drop to zero.
  */
 int
-rust_dom::start_main_loop(int id) {
-    scheduler_lock.lock();
+rust_scheduler::start_main_loop(int id) {
+    kernel->scheduler_lock.lock();

     // Make sure someone is watching, to pull us out of infinite loops.
     //
@@ -296,9 +213,9 @@ rust_dom::start_main_loop(int id) {
             DLOG(this, task,
                  "all tasks are blocked, scheduler id %d yielding ...",
                  id);
-            scheduler_lock.unlock();
+            kernel->scheduler_lock.unlock();
             sync::sleep(100);
-            scheduler_lock.lock();
+            kernel->scheduler_lock.lock();
             DLOG(this, task,
                  "scheduler resuming ...");
             continue;
@@ -349,9 +266,9 @@ rust_dom::start_main_loop(int id) {
                 "scheduler yielding ...",
                 dead_tasks.length());
            log_state();
-           scheduler_lock.unlock();
+           kernel->scheduler_lock.unlock();
            sync::yield();
-           scheduler_lock.lock();
+           kernel->scheduler_lock.lock();
        } else {
            drain_incoming_message_queue(true);
        }
@@ -360,58 +277,25 @@ rust_dom::start_main_loop(int id) {
     DLOG(this, dom, "finished main-loop %d (dom.rval = %d)", id, rval);

-    scheduler_lock.unlock();
-    return rval;
-}
-
-int rust_dom::start_main_loops(int num_threads)
-{
-    dom_worker *worker = NULL;
-
-    // -1, because this thread will also be a worker.
-    for(int i = 0; i < num_threads - 1; ++i) {
-        worker = new dom_worker(i + 1, this);
-        worker->start();
-        threads.push(worker);
-    }
-
-    start_main_loop(0);
-
-    while(threads.pop(&worker)) {
-        worker->join();
-        delete worker;
-    }
-
+    kernel->scheduler_lock.unlock();
     return rval;
 }

 rust_crate_cache *
-rust_dom::get_cache() {
+rust_scheduler::get_cache() {
     return &cache;
 }

 rust_task *
-rust_dom::create_task(rust_task *spawner, const char *name) {
-    //scheduler_lock.lock();
+rust_scheduler::create_task(rust_task *spawner, const char *name) {
     rust_task *task =
-        new (this) rust_task (this, &newborn_tasks, spawner, name);
+        new (this->kernel) rust_task (this, &newborn_tasks, spawner, name);
     DLOG(this, task, "created task: " PTR ", spawner: %s, name: %s",
         task, spawner ? spawner->name : "null", name);
     newborn_tasks.append(task);
-    //scheduler_lock.unlock();
     return task;
 }

-rust_dom::dom_worker::dom_worker(int id, rust_dom *owner)
-    : id(id), owner(owner)
-{
-}
-
-void rust_dom::dom_worker::run()
-{
-    owner->start_main_loop(id);
-}
-
 //
 // Local Variables:
 // mode: C++
diff --git a/src/rt/rust_dom.h b/src/rt/rust_scheduler.h
similarity index 63%
rename from src/rt/rust_dom.h
rename to src/rt/rust_scheduler.h
index 7f9fa7a2901ed..d3e2df224c782 100644
--- a/src/rt/rust_dom.h
+++ b/src/rt/rust_scheduler.h
@@ -1,7 +1,7 @@
-#ifndef RUST_DOM_H
-#define RUST_DOM_H
+#ifndef RUST_SCHEDULER_H
+#define RUST_SCHEDULER_H

-struct rust_dom;
+struct rust_scheduler;

 class
 rust_crate_cache
@@ -18,15 +18,15 @@ rust_crate_cache

 public:

-    rust_dom *dom;
+    rust_scheduler *sched;
     size_t idx;

-    rust_crate_cache(rust_dom *dom);
+    rust_crate_cache(rust_scheduler *sched);
     ~rust_crate_cache();
     void flush();
 };

-struct rust_dom : public kernel_owned<rust_dom>, rc_base<rust_dom>
+struct rust_scheduler : public kernel_owned<rust_scheduler>, rc_base<rust_scheduler>
 {
     // Fields known to the compiler:
     uintptr_t interrupt_flag;
@@ -35,8 +35,6 @@ struct rust_dom : public kernel_owned<rust_dom>, rc_base<rust_dom>
     rust_log _log;
     uint32_t log_lvl;
     rust_srv *srv;
-    memory_region local_region;
-    memory_region synchronized_region;
     const char *const name;

     rust_task_list newborn_tasks;
@@ -66,30 +64,17 @@ struct rust_dom : public kernel_owned<rust_dom>, rc_base<rust_dom>

     // Only a pointer to 'name' is kept, so it must live as long as this
     // domain.
-    rust_dom(rust_kernel *kernel,
+    rust_scheduler(rust_kernel *kernel,
         rust_message_queue *message_queue, rust_srv *srv,
         const char *name);
-    ~rust_dom();
+    ~rust_scheduler();

     void activate(rust_task *task);
     void log(rust_task *task, uint32_t level, char const *fmt, ...);
     rust_log & get_log();
     void fail();
-    void *malloc(size_t size);
-    void *malloc(size_t size, memory_region::memory_region_type type);
-    void *calloc(size_t size);
-    void *calloc(size_t size, memory_region::memory_region_type type);
-    void *realloc(void *mem, size_t size);
-    void *realloc(void *mem, size_t size,
-        memory_region::memory_region_type type);
-    void free(void *mem);
-    void free(void *mem, memory_region::memory_region_type type);

     void drain_incoming_message_queue(bool process);

-#ifdef __WIN32__
-    void win32_require(LPCTSTR fn, BOOL ok);
-#endif
-
     rust_crate_cache *get_cache();
     size_t number_of_live_tasks();

@@ -97,28 +82,14 @@ struct rust_dom : public kernel_owned<rust_dom>, rc_base<rust_dom>
     rust_task *schedule_task();

     int start_main_loop(int id);
-    int start_main_loops(int num_threads);

     void log_state();

     rust_task *create_task(rust_task *spawner, const char *name);
-
-    class dom_worker : public rust_thread {
-        int id;
-        rust_dom *owner;
-
-    public:
-        dom_worker(int id, rust_dom *owner);
-
-        virtual void run();
-    };
-
-    lock_and_signal scheduler_lock;
-    array_list<dom_worker *> threads;
 };

 inline rust_log &
-rust_dom::get_log() {
+rust_scheduler::get_log() {
     return _log;
 }

@@ -133,4 +104,4 @@ rust_dom::get_log() {
 // End:
 //

-#endif /* RUST_DOM_H */
+#endif /* RUST_SCHEDULER_H */
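The allocator plumbing deleted from rust_dom above does not disappear; it moves onto rust_task (see the rust_task.cpp and rust_task.h hunks below), so a region-typed allocation that used to be dom->malloc(sz, type) becomes task->malloc(sz, type). A hedged sketch of the new call shape:

    // Per-task regions after this patch; both are borrowed from sched->srv.
    void *p = task->malloc(128, memory_region::LOCAL);
    if (p != NULL) {
        // SYNCHRONIZED would route through synchronized_region instead.
        task->free(p, memory_region::LOCAL);
    }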
static stk_seg* -new_stk(rust_dom *dom, size_t minsz) +new_stk(rust_task *task, size_t minsz) { if (minsz < min_stk_bytes) minsz = min_stk_bytes; size_t sz = sizeof(stk_seg) + minsz; - stk_seg *stk = (stk_seg *)dom->malloc(sz); - LOGPTR(dom, "new stk", (uintptr_t)stk); + stk_seg *stk = (stk_seg *)task->malloc(sz); + LOGPTR(task->sched, "new stk", (uintptr_t)stk); memset(stk, 0, sizeof(stk_seg)); stk->limit = (uintptr_t) &stk->data[minsz]; - LOGPTR(dom, "stk limit", stk->limit); + LOGPTR(task->sched, "stk limit", stk->limit); stk->valgrind_id = VALGRIND_STACK_REGISTER(&stk->data[0], &stk->data[minsz]); @@ -37,11 +37,11 @@ new_stk(rust_dom *dom, size_t minsz) } static void -del_stk(rust_dom *dom, stk_seg *stk) +del_stk(rust_task *task, stk_seg *stk) { VALGRIND_STACK_DEREGISTER(stk->valgrind_id); - LOGPTR(dom, "freeing stk segment", (uintptr_t)stk); - dom->free(stk); + LOGPTR(task->sched, "freeing stk segment", (uintptr_t)stk); + task->free(stk); } // Tasks @@ -52,15 +52,16 @@ del_stk(rust_dom *dom, stk_seg *stk) size_t const n_callee_saves = 4; size_t const callee_save_fp = 0; -rust_task::rust_task(rust_dom *dom, rust_task_list *state, +rust_task::rust_task(rust_scheduler *sched, rust_task_list *state, rust_task *spawner, const char *name) : maybe_proxy(this), - stk(new_stk(dom, 0)), + stk(NULL), runtime_sp(0), - rust_sp(stk->limit), + rust_sp(0), gc_alloc_chain(0), - dom(dom), + sched(sched), cache(NULL), + kernel(sched->kernel), name(name), state(state), cond(NULL), @@ -68,12 +69,16 @@ rust_task::rust_task(rust_dom *dom, rust_task_list *state, supervisor(spawner), list_index(-1), rendezvous_ptr(0), - alarm(this), handle(NULL), - active(false) + active(false), + local_region(&sched->srv->local_region), + synchronized_region(&sched->srv->synchronized_region) { - LOGPTR(dom, "new task", (uintptr_t)this); - DLOG(dom, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this); + LOGPTR(sched, "new task", (uintptr_t)this); + DLOG(sched, task, "sizeof(task) = %d (0x%x)", sizeof *this, sizeof *this); + + stk = new_stk(this, 0); + rust_sp = stk->limit; if (spawner == NULL) { ref_count = 0; @@ -82,35 +87,15 @@ rust_task::rust_task(rust_dom *dom, rust_task_list *state, rust_task::~rust_task() { - DLOG(dom, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d", + DLOG(sched, task, "~rust_task %s @0x%" PRIxPTR ", refcnt=%d", name, (uintptr_t)this, ref_count); - /* - for (uintptr_t fp = get_fp(); fp; fp = get_previous_fp(fp)) { - frame_glue_fns *glue_fns = get_frame_glue_fns(fp); - DLOG(dom, task, - "~rust_task, frame fp=0x%" PRIxPTR ", glue_fns=0x%" PRIxPTR, - fp, glue_fns); - if (glue_fns) { - DLOG(dom, task, - "~rust_task, mark_glue=0x%" PRIxPTR, - glue_fns->mark_glue); - DLOG(dom, task, - "~rust_task, drop_glue=0x%" PRIxPTR, - glue_fns->drop_glue); - DLOG(dom, task, - "~rust_task, reloc_glue=0x%" PRIxPTR, - glue_fns->reloc_glue); - } - } - */ - /* FIXME: tighten this up, there are some more assertions that hold at task-lifecycle events. */ - I(dom, ref_count == 0 || - (ref_count == 1 && this == dom->root_task)); + I(sched, ref_count == 0 || + (ref_count == 1 && this == sched->root_task)); - del_stk(dom, stk); + del_stk(this, stk); } extern "C" void rust_new_exit_task_glue(); @@ -134,7 +119,7 @@ void task_start_wrapper(spawn_args *a) LOG(task, task, "task exited with value %d", rval); { - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); // FIXME: the old exit glue does some magical argument copying // stuff. This is probably still needed. 
@@ -142,7 +127,7 @@ void task_start_wrapper(spawn_args *a) // This is duplicated from upcall_exit, which is probably dead code by // now. LOG(task, task, "task ref_count: %d", task->ref_count); - A(task->dom, task->ref_count >= 0, + A(task->sched, task->ref_count >= 0, "Task ref_count should not be negative on exit!"); task->die(); task->notify_tasks_waiting_to_join(); @@ -155,12 +140,12 @@ void rust_task::start(uintptr_t spawnee_fn, uintptr_t args) { - LOGPTR(dom, "from spawnee", spawnee_fn); - - I(dom, stk->data != NULL); - I(dom, !dom->scheduler_lock.lock_held_by_current_thread()); + LOGPTR(sched, "from spawnee", spawnee_fn); - scoped_lock with(dom->scheduler_lock); + I(sched, stk->data != NULL); + I(sched, !kernel->scheduler_lock.lock_held_by_current_thread()); + + scoped_lock with(kernel->scheduler_lock); char *sp = (char *)rust_sp; @@ -177,7 +162,7 @@ rust_task::start(uintptr_t spawnee_fn, ctx.call((void *)task_start_wrapper, a, sp); yield_timer.reset(0); - transition(&dom->newborn_tasks, &dom->running_tasks); + transition(&sched->newborn_tasks, &sched->running_tasks); } void @@ -222,8 +207,8 @@ rust_task::kill() { // Unblock the task so it can unwind. unblock(); - if (this == dom->root_task) - dom->fail(); + if (this == sched->root_task) + sched->fail(); LOG(this, task, "preparing to unwind task: 0x%" PRIxPTR, this); // run_on_resume(rust_unwind_glue); @@ -232,15 +217,15 @@ rust_task::kill() { void rust_task::fail(size_t nargs) { // See note in ::kill() regarding who should call this. - DLOG(dom, task, "task %s @0x%" PRIxPTR " failing", name, this); + DLOG(sched, task, "task %s @0x%" PRIxPTR " failing", name, this); backtrace(); // Unblock the task so it can unwind. unblock(); - if (this == dom->root_task) - dom->fail(); + if (this == sched->root_task) + sched->fail(); // run_after_return(nargs, rust_unwind_glue); if (supervisor) { - DLOG(dom, task, + DLOG(sched, task, "task %s @0x%" PRIxPTR " propagating failure to supervisor %s @0x%" PRIxPTR, name, this, supervisor->name, supervisor); @@ -254,14 +239,14 @@ void rust_task::gc(size_t nargs) { // FIXME: not presently implemented; was broken by rustc. 
- DLOG(dom, task, + DLOG(sched, task, "task %s @0x%" PRIxPTR " garbage collecting", name, this); } void rust_task::unsupervise() { - DLOG(dom, task, + DLOG(sched, task, "task %s @0x%" PRIxPTR " disconnecting from supervisor %s @0x%" PRIxPTR, name, this, supervisor->name, supervisor); @@ -297,13 +282,13 @@ rust_task::get_frame_glue_fns(uintptr_t fp) { bool rust_task::running() { - return state == &dom->running_tasks; + return state == &sched->running_tasks; } bool rust_task::blocked() { - return state == &dom->blocked_tasks; + return state == &sched->blocked_tasks; } bool @@ -315,13 +300,13 @@ rust_task::blocked_on(rust_cond *on) bool rust_task::dead() { - return state == &dom->dead_tasks; + return state == &sched->dead_tasks; } void rust_task::link_gc(gc_alloc *gcm) { - I(dom, gcm->prev == NULL); - I(dom, gcm->next == NULL); + I(sched, gcm->prev == NULL); + I(sched, gcm->next == NULL); gcm->prev = NULL; gcm->next = gc_alloc_chain; gc_alloc_chain = gcm; @@ -351,12 +336,12 @@ rust_task::malloc(size_t sz, type_desc *td) if (td) { sz += sizeof(gc_alloc); } - void *mem = dom->malloc(sz); + void *mem = malloc(sz, memory_region::LOCAL); if (!mem) return mem; if (td) { gc_alloc *gcm = (gc_alloc*) mem; - DLOG(dom, task, "task %s @0x%" PRIxPTR + DLOG(sched, task, "task %s @0x%" PRIxPTR " allocated %d GC bytes = 0x%" PRIxPTR, name, (uintptr_t)this, sz, gcm); memset((void*) gcm, 0, sizeof(gc_alloc)); @@ -378,8 +363,8 @@ rust_task::realloc(void *data, size_t sz, bool is_gc) gc_alloc *gcm = (gc_alloc*)(((char *)data) - sizeof(gc_alloc)); unlink_gc(gcm); sz += sizeof(gc_alloc); - gcm = (gc_alloc*) dom->realloc((void*)gcm, sz); - DLOG(dom, task, "task %s @0x%" PRIxPTR + gcm = (gc_alloc*) realloc((void*)gcm, sz, memory_region::LOCAL); + DLOG(sched, task, "task %s @0x%" PRIxPTR " reallocated %d GC bytes = 0x%" PRIxPTR, name, (uintptr_t)this, sz, gcm); if (!gcm) @@ -387,7 +372,7 @@ rust_task::realloc(void *data, size_t sz, bool is_gc) link_gc(gcm); data = (void*) &(gcm->data); } else { - data = dom->realloc(data, sz); + data = realloc(data, sz, memory_region::LOCAL); } return data; } @@ -401,22 +386,22 @@ rust_task::free(void *p, bool is_gc) if (is_gc) { gc_alloc *gcm = (gc_alloc*)(((char *)p) - sizeof(gc_alloc)); unlink_gc(gcm); - DLOG(dom, mem, + DLOG(sched, mem, "task %s @0x%" PRIxPTR " freeing GC memory = 0x%" PRIxPTR, name, (uintptr_t)this, gcm); - dom->free(gcm); + free(gcm, memory_region::LOCAL); } else { - dom->free(p); + free(p, memory_region::LOCAL); } } void rust_task::transition(rust_task_list *src, rust_task_list *dst) { - I(dom, dom->scheduler_lock.lock_held_by_current_thread()); - DLOG(dom, task, + I(sched, kernel->scheduler_lock.lock_held_by_current_thread()); + DLOG(sched, task, "task %s " PTR " state change '%s' -> '%s' while in '%s'", name, (uintptr_t)this, src->name, dst->name, state->name); - I(dom, state == src); + I(sched, state == src); src->remove(this); dst->append(this); state = dst; @@ -426,30 +411,30 @@ void rust_task::block(rust_cond *on, const char* name) { LOG(this, task, "Blocking on 0x%" PRIxPTR ", cond: 0x%" PRIxPTR, (uintptr_t) on, (uintptr_t) cond); - A(dom, cond == NULL, "Cannot block an already blocked task."); - A(dom, on != NULL, "Cannot block on a NULL object."); + A(sched, cond == NULL, "Cannot block an already blocked task."); + A(sched, on != NULL, "Cannot block on a NULL object."); - transition(&dom->running_tasks, &dom->blocked_tasks); + transition(&sched->running_tasks, &sched->blocked_tasks); cond = on; cond_name = name; } void rust_task::wakeup(rust_cond 
*from) { - A(dom, cond != NULL, "Cannot wake up unblocked task."); + A(sched, cond != NULL, "Cannot wake up unblocked task."); LOG(this, task, "Blocked on 0x%" PRIxPTR " woken up on 0x%" PRIxPTR, (uintptr_t) cond, (uintptr_t) from); - A(dom, cond == from, "Cannot wake up blocked task on wrong condition."); + A(sched, cond == from, "Cannot wake up blocked task on wrong condition."); - transition(&dom->blocked_tasks, &dom->running_tasks); - I(dom, cond == from); + transition(&sched->blocked_tasks, &sched->running_tasks); + I(sched, cond == from); cond = NULL; cond_name = "none"; } void rust_task::die() { - transition(&dom->running_tasks, &dom->dead_tasks); + transition(&sched->running_tasks, &sched->dead_tasks); } void @@ -462,8 +447,8 @@ rust_crate_cache * rust_task::get_crate_cache() { if (!cache) { - DLOG(dom, task, "fetching cache for current crate"); - cache = dom->get_cache(); + DLOG(sched, task, "fetching cache for current crate"); + cache = sched->get_cache(); } return cache; } @@ -481,7 +466,7 @@ rust_task::backtrace() { rust_handle<rust_task> * rust_task::get_handle() { if (handle == NULL) { - handle = dom->kernel->get_task_handle(this); + handle = sched->kernel->get_task_handle(this); } return handle; } @@ -491,6 +476,54 @@ bool rust_task::can_schedule() { return yield_timer.has_timed_out() && !active; } +void * +rust_task::malloc(size_t size, memory_region::memory_region_type type) { + if (type == memory_region::LOCAL) { + return local_region.malloc(size); + } else if (type == memory_region::SYNCHRONIZED) { + return synchronized_region.malloc(size); + } + I(sched, false); + return NULL; +} + +void * +rust_task::calloc(size_t size) { + return calloc(size, memory_region::LOCAL); +} + +void * +rust_task::calloc(size_t size, memory_region::memory_region_type type) { + if (type == memory_region::LOCAL) { + return local_region.calloc(size); + } else if (type == memory_region::SYNCHRONIZED) { + return synchronized_region.calloc(size); + } + return NULL; +} + +void * +rust_task::realloc(void *mem, size_t size, + memory_region::memory_region_type type) { + if (type == memory_region::LOCAL) { + return local_region.realloc(mem, size); + } else if (type == memory_region::SYNCHRONIZED) { + return synchronized_region.realloc(mem, size); + } + return NULL; +} + +void +rust_task::free(void *mem, memory_region::memory_region_type type) { + DLOG(sched, mem, "rust_task::free(0x%" PRIxPTR ")", mem); + if (type == memory_region::LOCAL) { + local_region.free(mem); + } else if (type == memory_region::SYNCHRONIZED) { + synchronized_region.free(mem); + } + return; +} + // // Local Variables: // mode: C++ diff --git a/src/rt/rust_task.h b/src/rt/rust_task.h index 3f9a0660300c6..54287df441439 100644 --- a/src/rt/rust_task.h +++ b/src/rt/rust_task.h @@ -9,19 +9,45 @@ #include "context.h" +struct stk_seg { + unsigned int valgrind_id; + uintptr_t limit; + uint8_t data[]; +}; + +struct frame_glue_fns { + uintptr_t mark_glue_off; + uintptr_t drop_glue_off; + uintptr_t reloc_glue_off; +}; + +struct gc_alloc { + gc_alloc *prev; + gc_alloc *next; + uintptr_t ctrl_word; + uint8_t data[]; + bool mark() { + if (ctrl_word & 1) + return false; + ctrl_word |= 1; + return true; + } +}; + struct rust_task : public maybe_proxy<rust_task>, - public dom_owned<rust_task> + public kernel_owned<rust_task> { // Fields known to the compiler. stk_seg *stk; uintptr_t runtime_sp; // Runtime sp while task running. uintptr_t rust_sp; // Saved sp when not running. gc_alloc *gc_alloc_chain; // Linked list of GC allocations.
- rust_dom *dom; + rust_scheduler *sched; rust_crate_cache *cache; // Fields known only to the runtime. + rust_kernel *kernel; const char *const name; rust_task_list *state; rust_cond *cond; @@ -45,8 +71,6 @@ rust_task : public maybe_proxy<rust_task>, // List of tasks waiting for this task to finish. array_list<maybe_proxy<rust_task> *> tasks_waiting_to_join; - rust_alarm alarm; - rust_handle<rust_task> *handle; context ctx; @@ -55,8 +79,11 @@ rust_task : public maybe_proxy<rust_task>, // or is about to run this task. volatile bool active; + memory_region local_region; + memory_region synchronized_region; + // Only a pointer to 'name' is kept, so it must live as long as this task. - rust_task(rust_dom *dom, + rust_task(rust_scheduler *sched, rust_task_list *state, rust_task *spawner, const char *name); @@ -84,8 +111,8 @@ rust_task : public maybe_proxy<rust_task>, void die(); void unblock(); - void check_active() { I(dom, dom->curr_task == this); } - void check_suspended() { I(dom, dom->curr_task != this); } + void check_active() { I(sched, sched->curr_task == this); } + void check_suspended() { I(sched, sched->curr_task != this); } // Print a backtrace, if the "bt" logging option is on. void backtrace(); @@ -117,6 +144,13 @@ rust_task : public maybe_proxy<rust_task>, rust_crate_cache * get_crate_cache(); bool can_schedule(); + + void *malloc(size_t size, memory_region::memory_region_type type); + void *calloc(size_t size); + void *calloc(size_t size, memory_region::memory_region_type type); + void *realloc(void *mem, size_t size, + memory_region::memory_region_type type); + void free(void *mem, memory_region::memory_region_type type); }; // diff --git a/src/rt/rust_task_list.cpp b/src/rt/rust_task_list.cpp index bb1224afa20bf..81441de35f572 100644 --- a/src/rt/rust_task_list.cpp +++ b/src/rt/rust_task_list.cpp @@ -1,16 +1,16 @@ #include "rust_internal.h" -rust_task_list::rust_task_list (rust_dom *dom, const char* name) : - dom(dom), name(name) { +rust_task_list::rust_task_list (rust_scheduler *sched, const char* name) : + sched(sched), name(name) { // Nop; } void rust_task_list::delete_all() { - DLOG(dom, task, "deleting all %s tasks", name); + DLOG(sched, task, "deleting all %s tasks", name); while (is_empty() == false) { rust_task *task = pop_value(); - DLOG(dom, task, "deleting task " PTR, task); + DLOG(sched, task, "deleting task " PTR, task); delete task; } } diff --git a/src/rt/rust_task_list.h b/src/rt/rust_task_list.h index b1fba75029d45..0991b32eed85f 100644 --- a/src/rt/rust_task_list.h +++ b/src/rt/rust_task_list.h @@ -1,3 +1,4 @@ +// -*- c++ -*- #ifndef RUST_TASK_LIST_H #define RUST_TASK_LIST_H @@ -5,11 +6,11 @@ * Used to indicate the state of a rust task. */ class rust_task_list : public indexed_list<rust_task>, - public dom_owned<rust_task_list> { + public kernel_owned<rust_task_list> { public: - rust_dom *dom; + rust_scheduler *sched; const char* name; - rust_task_list (rust_dom *dom, const char* name); + rust_task_list (rust_scheduler *sched, const char* name); void delete_all(); }; diff --git a/src/rt/rust_timer.cpp b/src/rt/rust_timer.cpp index 79cb1615bbc3f..2b1c33aa6b9e4 100644 --- a/src/rt/rust_timer.cpp +++ b/src/rt/rust_timer.cpp @@ -29,8 +29,8 @@ static void * timer_loop(void *ptr) { // We were handed the rust_timer that owns us.
rust_timer *timer = (rust_timer *)ptr; - rust_dom *dom = timer->dom; - DLOG(dom, timer, "in timer 0x%" PRIxPTR, (uintptr_t)timer); + rust_scheduler *sched = timer->sched; + DLOG(sched, timer, "in timer 0x%" PRIxPTR, (uintptr_t)timer); size_t ms = TIME_SLICE_IN_MS; while (!timer->exit_flag) { @@ -39,10 +39,10 @@ timer_loop(void *ptr) { #else usleep(ms * 1000); #endif - DLOG(dom, timer, "timer 0x%" PRIxPTR - " interrupting domain 0x%" PRIxPTR, (uintptr_t) timer, - (uintptr_t) dom); - dom->interrupt_flag = 1; + DLOG(sched, timer, "timer 0x%" PRIxPTR + " interrupting scheduler 0x%" PRIxPTR, (uintptr_t) timer, + (uintptr_t) sched); + sched->interrupt_flag = 1; } #if defined(__WIN32__) ExitThread(0); @@ -52,12 +52,12 @@ timer_loop(void *ptr) { return 0; } -rust_timer::rust_timer(rust_dom *dom) : - dom(dom), exit_flag(0) { - DLOG(dom, timer, "creating timer for domain 0x%" PRIxPTR, dom); +rust_timer::rust_timer(rust_scheduler *sched) : + sched(sched), exit_flag(0) { + DLOG(sched, timer, "creating timer for scheduler 0x%" PRIxPTR, sched); #if defined(__WIN32__) thread = CreateThread(NULL, 0, timer_loop, this, 0, NULL); - dom->win32_require("CreateThread", thread != NULL); + sched->kernel->win32_require("CreateThread", thread != NULL); if (RUNNING_ON_VALGRIND) Sleep(10); #else @@ -70,8 +70,9 @@ rust_timer::rust_timer(rust_dom *dom) : rust_timer::~rust_timer() { exit_flag = 1; #if defined(__WIN32__) - dom->win32_require("WaitForSingleObject", - WaitForSingleObject(thread, INFINITE) == WAIT_OBJECT_0); + sched->kernel->win32_require("WaitForSingleObject", + WaitForSingleObject(thread, INFINITE) == + WAIT_OBJECT_0); #else pthread_join(thread, NULL); #endif diff --git a/src/rt/rust_upcall.cpp b/src/rt/rust_upcall.cpp index 0947c44b2c960..f6257470b297a 100644 --- a/src/rt/rust_upcall.cpp +++ b/src/rt/rust_upcall.cpp @@ -23,7 +23,7 @@ str_buf(rust_task *task, rust_str *s); extern "C" void upcall_grow_task(rust_task *task, size_t n_frame_bytes) { - I(task->dom, false); + I(task->sched, false); LOG_UPCALL_ENTRY(task); task->grow(n_frame_bytes); } @@ -31,62 +31,61 @@ upcall_grow_task(rust_task *task, size_t n_frame_bytes) { extern "C" CDECL void upcall_log_int(rust_task *task, uint32_t level, int32_t i) { LOG_UPCALL_ENTRY(task); - if (task->dom->log_lvl >= level) - task->dom->log(task, level, "rust: %" PRId32 " (0x%" PRIx32 ")", + if (task->sched->log_lvl >= level) + task->sched->log(task, level, "rust: %" PRId32 " (0x%" PRIx32 ")", i, i); } extern "C" CDECL void upcall_log_float(rust_task *task, uint32_t level, float f) { LOG_UPCALL_ENTRY(task); - if (task->dom->log_lvl >= level) - task->dom->log(task, level, "rust: %12.12f", f); + if (task->sched->log_lvl >= level) + task->sched->log(task, level, "rust: %12.12f", f); } extern "C" CDECL void upcall_log_double(rust_task *task, uint32_t level, double *f) { LOG_UPCALL_ENTRY(task); - if (task->dom->log_lvl >= level) - task->dom->log(task, level, "rust: %12.12f", *f); + if (task->sched->log_lvl >= level) + task->sched->log(task, level, "rust: %12.12f", *f); } extern "C" CDECL void upcall_log_str(rust_task *task, uint32_t level, rust_str *str) { LOG_UPCALL_ENTRY(task); - if (task->dom->log_lvl >= level) { + if (task->sched->log_lvl >= level) { const char *c = str_buf(task, str); - task->dom->log(task, level, "rust: %s", c); + task->sched->log(task, level, "rust: %s", c); } } extern "C" CDECL void upcall_trace_word(rust_task *task, uintptr_t i) { LOG_UPCALL_ENTRY(task); - task->dom->log(task, 2, "trace: 0x%" PRIxPTR "", i, i, (char) i); + task->sched->log(task, 2,
"trace: 0x%" PRIxPTR "", i, i, (char) i); } extern "C" CDECL void upcall_trace_str(rust_task *task, char const *c) { LOG_UPCALL_ENTRY(task); - task->dom->log(task, 2, "trace: %s", c); + task->sched->log(task, 2, "trace: %s", c); } extern "C" CDECL rust_port* upcall_new_port(rust_task *task, size_t unit_sz) { LOG_UPCALL_ENTRY(task); - rust_dom *dom = task->dom; - scoped_lock with(dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); LOG(task, comm, "upcall_new_port(task=0x%" PRIxPTR " (%s), unit_sz=%d)", (uintptr_t) task, task->name, unit_sz); - return new (dom) rust_port(task, unit_sz); + return new (task) rust_port(task, unit_sz); } extern "C" CDECL void upcall_del_port(rust_task *task, rust_port *port) { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); LOG(task, comm, "upcall del_port(0x%" PRIxPTR ")", (uintptr_t) port); - I(task->dom, !port->ref_count); + I(task->sched, !port->ref_count); delete port; } @@ -96,12 +95,12 @@ upcall_del_port(rust_task *task, rust_port *port) { extern "C" CDECL rust_chan* upcall_new_chan(rust_task *task, rust_port *port) { LOG_UPCALL_ENTRY(task); - rust_dom *dom = task->dom; + rust_scheduler *sched = task->sched; LOG(task, comm, "upcall_new_chan(" "task=0x%" PRIxPTR " (%s), port=0x%" PRIxPTR ")", (uintptr_t) task, task->name, port); - I(dom, port); - return new (dom) rust_chan(task, port, port->unit_sz); + I(sched, port); + return new (task) rust_chan(task, port, port->unit_sz); } /** @@ -124,11 +123,11 @@ upcall_flush_chan(rust_task *task, rust_chan *chan) { extern "C" CDECL void upcall_del_chan(rust_task *task, rust_chan *chan) { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); LOG(task, comm, "upcall del_chan(0x%" PRIxPTR ")", (uintptr_t) chan); - A(task->dom, chan->ref_count == 0, + A(task->sched, chan->ref_count == 0, "Channel's ref count should be zero."); if (chan->is_associated()) { @@ -166,7 +165,7 @@ extern "C" CDECL rust_chan * upcall_clone_chan(rust_task *task, maybe_proxy *target, rust_chan *chan) { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); size_t unit_sz = chan->buffer.unit_sz; maybe_proxy *port = chan->port; rust_task *target_task = NULL; @@ -175,13 +174,13 @@ upcall_clone_chan(rust_task *task, maybe_proxy *target, target_task = target->referent(); } else { rust_handle *handle = - task->dom->kernel->get_port_handle(port->as_referent()); + task->sched->kernel->get_port_handle(port->as_referent()); maybe_proxy *proxy = new rust_proxy (handle); LOG(task, mem, "new proxy: " PTR, proxy); port = proxy; target_task = target->as_proxy()->handle()->referent(); } - return new (target_task->dom) rust_chan(target_task, port, unit_sz); + return new (target_task) rust_chan(target_task, port, unit_sz); } extern "C" CDECL void @@ -208,7 +207,7 @@ upcall_sleep(rust_task *task, size_t time_in_us) { extern "C" CDECL void upcall_send(rust_task *task, rust_chan *chan, void *sptr) { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); chan->send(sptr); LOG(task, comm, "=== sent data ===>"); } @@ -217,7 +216,7 @@ extern "C" CDECL void upcall_recv(rust_task *task, uintptr_t *dptr, rust_port *port) { { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); LOG(task, comm, "port: 0x%" PRIxPTR ", 
dptr: 0x%" PRIxPTR ", size: 0x%" PRIxPTR ", chan_no: %d", @@ -255,7 +254,7 @@ upcall_fail(rust_task *task, extern "C" CDECL void upcall_kill(rust_task *task, maybe_proxy<rust_task> *target) { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); if (target->is_proxy()) { notify_message:: send(notify_message::KILL, "kill", task->get_handle(), @@ -274,9 +273,9 @@ extern "C" CDECL void upcall_exit(rust_task *task) { { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); LOG(task, task, "task ref_count: %d", task->ref_count); - A(task->dom, task->ref_count >= 0, + A(task->sched, task->ref_count >= 0, "Task ref_count should not be negative on exit!"); task->die(); task->notify_tasks_waiting_to_join(); @@ -287,7 +286,7 @@ upcall_exit(rust_task *task) { extern "C" CDECL uintptr_t upcall_malloc(rust_task *task, size_t nbytes, type_desc *td) { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); LOG(task, mem, "upcall malloc(%" PRIdPTR ", 0x%" PRIxPTR ")" @@ -308,9 +307,9 @@ upcall_malloc(rust_task *task, size_t nbytes, type_desc *td) { extern "C" CDECL void upcall_free(rust_task *task, void* ptr, uintptr_t is_gc) { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); - rust_dom *dom = task->dom; - DLOG(dom, mem, + scoped_lock with(task->kernel->scheduler_lock); + rust_scheduler *sched = task->sched; + DLOG(sched, mem, "upcall free(0x%" PRIxPTR ", is_gc=%" PRIdPTR ")", (uintptr_t)ptr, is_gc); task->free(ptr, (bool) is_gc); @@ -319,13 +318,13 @@ upcall_free(rust_task *task, void* ptr, uintptr_t is_gc) { extern "C" CDECL uintptr_t upcall_mark(rust_task *task, void* ptr) { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); - rust_dom *dom = task->dom; + rust_scheduler *sched = task->sched; if (ptr) { gc_alloc *gcm = (gc_alloc*) (((char*)ptr) - sizeof(gc_alloc)); uintptr_t marked = (uintptr_t) gcm->mark(); - DLOG(dom, gc, "upcall mark(0x%" PRIxPTR ") = %" PRIdPTR, + DLOG(sched, gc, "upcall mark(0x%" PRIxPTR ") = %" PRIdPTR, (uintptr_t)gcm, marked); return marked; } @@ -333,14 +332,14 @@ upcall_mark(rust_task *task, void* ptr) { } rust_str *make_str(rust_task *task, char const *s, size_t fill) { - rust_dom *dom = task->dom; + rust_scheduler *sched = task->sched; size_t alloc = next_power_of_two(sizeof(rust_str) + fill); void *mem = task->malloc(alloc); if (!mem) { task->fail(3); return NULL; } - rust_str *st = new (mem) rust_str(dom, alloc, fill, (uint8_t const *) s); + rust_str *st = new (mem) rust_str(sched, alloc, fill, (uint8_t const *) s); LOG(task, mem, "upcall new_str('%s', %" PRIdPTR ") = 0x%" PRIxPTR, s, fill, st); @@ -350,32 +349,32 @@ rust_str *make_str(rust_task *task, char const *s, size_t fill) { extern "C" CDECL rust_str * upcall_new_str(rust_task *task, char const *s, size_t fill) { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); return make_str(task, s, fill); } extern "C" CDECL rust_str * -upcall_dup_str(rust_task *task, rust_str *str) { +upcall_dup_str(rust_task *task, rust_task *target, rust_str *str) { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); - return make_str(task, (char const *)str->data, str->fill); + return make_str(target, (char const *)str->data,
str->fill); } extern "C" CDECL rust_vec * upcall_new_vec(rust_task *task, size_t fill, type_desc *td) { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); - rust_dom *dom = task->dom; - DLOG(dom, mem, "upcall new_vec(%" PRIdPTR ")", fill); + scoped_lock with(task->kernel->scheduler_lock); + rust_scheduler *sched = task->sched; + DLOG(sched, mem, "upcall new_vec(%" PRIdPTR ")", fill); size_t alloc = next_power_of_two(sizeof(rust_vec) + fill); void *mem = task->malloc(alloc, td); if (!mem) { task->fail(3); return NULL; } - rust_vec *v = new (mem) rust_vec(dom, alloc, 0, NULL); + rust_vec *v = new (mem) rust_vec(sched, alloc, 0, NULL); LOG(task, mem, "upcall new_vec(%" PRIdPTR ") = 0x%" PRIxPTR, fill, v); return v; @@ -388,7 +387,7 @@ vec_grow(rust_task *task, uintptr_t *need_copy, type_desc *td) { - rust_dom *dom = task->dom; + rust_scheduler *sched = task->sched; LOG(task, mem, "vec_grow(0x%" PRIxPTR ", %" PRIdPTR "), rc=%" PRIdPTR " alloc=%" PRIdPTR ", fill=%" PRIdPTR @@ -439,10 +438,10 @@ vec_grow(rust_task *task, if (v->ref_count != CONST_REFCOUNT) v->deref(); - v = new (mem) rust_vec(dom, alloc, 0, NULL); + v = new (mem) rust_vec(sched, alloc, 0, NULL); *need_copy = 1; } - I(dom, sizeof(rust_vec) + v->fill <= v->alloc); + I(sched, sizeof(rust_vec) + v->fill <= v->alloc); return v; } @@ -471,7 +470,7 @@ upcall_vec_append(rust_task *task, type_desc *t, type_desc *elem_t, rust_vec **dst_ptr, rust_vec *src, bool skip_null) { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); rust_vec *dst = *dst_ptr; uintptr_t need_copy; size_t n_src_bytes = skip_null ? src->fill - 1 : src->fill; @@ -507,7 +506,7 @@ upcall_get_type_desc(rust_task *task, size_t n_descs, type_desc const **descs) { LOG_UPCALL_ENTRY(task); - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); LOG(task, cache, "upcall get_type_desc with size=%" PRIdPTR ", align=%" PRIdPTR ", %" PRIdPTR " descs", size, align, n_descs); @@ -521,9 +520,9 @@ extern "C" CDECL rust_task * upcall_new_task(rust_task *spawner, rust_vec *name) { // name is a rust string structure. LOG_UPCALL_ENTRY(spawner); - scoped_lock with(spawner->dom->scheduler_lock); - rust_dom *dom = spawner->dom; - rust_task *task = dom->create_task(spawner, (const char *)name->data); + scoped_lock with(spawner->kernel->scheduler_lock); + rust_scheduler *sched = spawner->sched; + rust_task *task = sched->create_task(spawner, (const char *)name->data); return task; } @@ -535,8 +534,8 @@ upcall_start_task(rust_task *spawner, size_t args_sz) { LOG_UPCALL_ENTRY(spawner); - rust_dom *dom = spawner->dom; - DLOG(dom, task, + rust_scheduler *sched = spawner->sched; + DLOG(sched, task, "upcall start_task(task %s @0x%" PRIxPTR ", spawnee 0x%" PRIxPTR ")", task->name, task, @@ -556,86 +555,6 @@ upcall_start_task(rust_task *spawner, return task; } -/** - * Called whenever a new domain is created. 
- */ -extern "C" CDECL maybe_proxy<rust_task> * -upcall_new_thread(rust_task *task, const char *name) { - I(task->dom, false); - LOG_UPCALL_ENTRY(task); - rust_dom *parent_dom = task->dom; - rust_kernel *kernel = parent_dom->kernel; - rust_handle<rust_dom> *child_dom_handle = - kernel->create_domain(name); - rust_handle<rust_task> *child_task_handle = - kernel->get_task_handle(child_dom_handle->referent()->root_task); - LOG(task, mem, "child name: %s, child_dom_handle: " PTR - ", child_task_handle: " PTR, - name, child_dom_handle, child_task_handle); - rust_proxy<rust_task> *child_task_proxy = - new rust_proxy<rust_task> (child_task_handle); - return child_task_proxy; -} - -#if 0 /* FIXME: this code will be re-enabled once we have multithreading. */ - -#if defined(__WIN32__) -static DWORD WINAPI rust_thread_start(void *ptr) -#elif defined(__GNUC__) -static void *rust_thread_start(void *ptr) -#else -#error "Platform not supported" -#endif -{ - // We were handed the domain we are supposed to run. - rust_dom *dom = (rust_dom *) ptr; - - // Start a new rust main loop for this thread. - dom->start_main_loop(); - - // Destroy the domain. - dom->kernel->destroy_domain(dom); - - return 0; -} - -#endif - -/** - * Called after a new domain is created. Here we create a new thread and - * and start the domain main loop. - */ -extern "C" CDECL maybe_proxy<rust_task> * -upcall_start_thread(rust_task *task, - rust_proxy<rust_task> *child_task_proxy, - uintptr_t spawnee_fn, - size_t callsz) { - I(task->dom, false); - LOG_UPCALL_ENTRY(task); -#if 0 - rust_dom *parenet_dom = task->dom; - rust_handle<rust_task> *child_task_handle = child_task_proxy->handle(); - LOG(task, task, - "spawnee_fn " PTR - ", callsz %" PRIdPTR ")", - spawnee_fn, callsz); - rust_task *child_task = child_task_handle->referent(); - child_task->start(spawnee_fn, - task->rust_sp, callsz); -#if defined(__WIN32__) - HANDLE thread; - thread = CreateThread(NULL, 0, rust_thread_start, child_task->dom, 0, - NULL); - parenet_dom->win32_require("CreateThread", thread != NULL); -#else - pthread_t thread; - pthread_create(&thread, &parenet_dom->attr, rust_thread_start, - (void *) child_task->dom); -#endif -#endif // 0 - return child_task_proxy; -} - /** * Resizes an interior vector that has been spilled to the heap. */ @@ -643,8 +562,8 @@ extern "C" CDECL void upcall_ivec_resize(rust_task *task, rust_ivec *v, size_t newsz) { - scoped_lock with(task->dom->scheduler_lock); - I(task->dom, !v->fill); + scoped_lock with(task->kernel->scheduler_lock); + I(task->sched, !v->fill); size_t new_alloc = next_power_of_two(newsz); rust_ivec_heap *new_heap_part = (rust_ivec_heap *) @@ -662,7 +581,7 @@ extern "C" CDECL void upcall_ivec_spill(rust_task *task, rust_ivec *v, size_t newsz) { - scoped_lock with(task->dom->scheduler_lock); + scoped_lock with(task->kernel->scheduler_lock); size_t new_alloc = next_power_of_two(newsz); rust_ivec_heap *heap_part = (rust_ivec_heap *) diff --git a/src/rt/rust_util.h b/src/rt/rust_util.h index fe3c245996654..984cd978ec284 100644 --- a/src/rt/rust_util.h +++ b/src/rt/rust_util.h @@ -1,6 +1,8 @@ #ifndef RUST_UTIL_H #define RUST_UTIL_H +#include "rust_task.h" + // Reference counted objects template <typename T> @@ -17,30 +19,30 @@ rc_base<T>::~rc_base() // Utility type: pointer-vector.
template <typename T> -ptr_vec<T>::ptr_vec(rust_dom *dom) : - dom(dom), +ptr_vec<T>::ptr_vec(rust_task *task) : + task(task), alloc(INIT_SIZE), fill(0), - data(new (dom) T*[alloc]) + data(new (task) T*[alloc]) { - I(dom, data); - DLOG(dom, mem, "new ptr_vec(data=0x%" PRIxPTR ") -> 0x%" PRIxPTR, + I(task->sched, data); + DLOG(task->sched, mem, "new ptr_vec(data=0x%" PRIxPTR ") -> 0x%" PRIxPTR, (uintptr_t)data, (uintptr_t)this); } template <typename T> ptr_vec<T>::~ptr_vec() { - I(dom, data); - DLOG(dom, mem, "~ptr_vec 0x%" PRIxPTR ", data=0x%" PRIxPTR, + I(task->sched, data); + DLOG(task->sched, mem, "~ptr_vec 0x%" PRIxPTR ", data=0x%" PRIxPTR, (uintptr_t)this, (uintptr_t)data); - I(dom, fill == 0); - dom->free(data); + I(task->sched, fill == 0); + task->free(data); } template <typename T> T *& ptr_vec<T>::operator[](size_t offset) { - I(dom, data[offset]->idx == offset); + I(task->sched, data[offset]->idx == offset); return data[offset]; } @@ -48,14 +50,14 @@ template <typename T> void ptr_vec<T>::push(T *p) { - I(dom, data); - I(dom, fill <= alloc); + I(task->sched, data); + I(task->sched, fill <= alloc); if (fill == alloc) { alloc *= 2; - data = (T **)dom->realloc(data, alloc * sizeof(T*)); - I(dom, data); + data = (T **)task->realloc(data, alloc * sizeof(T*)); + I(task->sched, data); } - I(dom, fill < alloc); + I(task->sched, fill < alloc); p->idx = fill; data[fill++] = p; } @@ -78,13 +80,13 @@ template <typename T> void ptr_vec<T>::trim(size_t sz) { - I(dom, data); + I(task->sched, data); if (sz <= (alloc / 4) && (alloc / 2) >= INIT_SIZE) { alloc /= 2; - I(dom, alloc >= fill); - data = (T **)dom->realloc(data, alloc * sizeof(T*)); - I(dom, data); + I(task->sched, alloc >= fill); + data = (T **)task->realloc(data, alloc * sizeof(T*)); + I(task->sched, data); } } @@ -93,9 +95,9 @@ void ptr_vec<T>::swap_delete(T *item) { /* Swap the endpoint into i and decr fill. */ - I(dom, data); - I(dom, fill > 0); - I(dom, item->idx < fill); + I(task->sched, data); + I(task->sched, fill > 0); + I(task->sched, item->idx < fill); fill--; if (fill > 0) { T *subst = data[fill]; @@ -125,22 +127,22 @@ next_power_of_two(size_t s) // Initialization helper for ISAAC RNG static inline void -isaac_init(rust_dom *dom, randctx *rctx) +isaac_init(rust_scheduler *sched, randctx *rctx) { memset(rctx, 0, sizeof(randctx)); #ifdef __WIN32__ { HCRYPTPROV hProv; - dom->win32_require + sched->kernel->win32_require (_T("CryptAcquireContext"), CryptAcquireContext(&hProv, NULL, NULL, PROV_RSA_FULL, CRYPT_VERIFYCONTEXT|CRYPT_SILENT)); - dom->win32_require + sched->kernel->win32_require (_T("CryptGenRandom"), CryptGenRandom(hProv, sizeof(rctx->randrsl), (BYTE*)(&rctx->randrsl))); - dom->win32_require + sched->kernel->win32_require (_T("CryptReleaseContext"), CryptReleaseContext(hProv, 0)); } @@ -154,10 +156,11 @@ isaac_init(rust_dom *dom, randctx *rctx) } } else { int fd = open("/dev/urandom", O_RDONLY); - I(dom, fd > 0); - I(dom, read(fd, (void*) &rctx->randrsl, sizeof(rctx->randrsl)) + I(sched, fd > 0); + I(sched, + read(fd, (void*) &rctx->randrsl, sizeof(rctx->randrsl)) == sizeof(rctx->randrsl)); - I(dom, close(fd) == 0); + I(sched, close(fd) == 0); } #endif randinit(rctx, 1); @@ -172,9 +175,10 @@ rust_vec : public rc_base<rust_vec> { size_t fill; size_t pad; // Pad to align data[0] to 16 bytes.
uint8_t data[]; - rust_vec(rust_dom *dom, size_t alloc, size_t fill, uint8_t const *d) : - alloc(alloc), - fill(fill) + rust_vec(rust_scheduler *sched, size_t alloc, size_t fill, + uint8_t const *d) + : alloc(alloc), + fill(fill) { if (d) memcpy(&data[0], d, fill); diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in index 17b4ba9817d25..f3212bbeb000d 100644 --- a/src/rt/rustrt.def.in +++ b/src/rt/rustrt.def.in @@ -67,13 +67,11 @@ upcall_new_chan upcall_new_port upcall_new_str upcall_new_task -upcall_new_thread upcall_new_vec upcall_recv upcall_send upcall_sleep upcall_start_task -upcall_start_thread upcall_trace_str upcall_trace_word upcall_vec_append diff --git a/src/rt/test/rust_test_runtime.cpp b/src/rt/test/rust_test_runtime.cpp index cf82818c4ff0a..acb3557b8c1ce 100644 --- a/src/rt/test/rust_test_runtime.cpp +++ b/src/rt/test/rust_test_runtime.cpp @@ -11,12 +11,11 @@ rust_test_runtime::~rust_test_runtime() { void rust_domain_test::worker::run() { - rust_handle<rust_dom> *handle = kernel->create_domain("test"); + rust_scheduler *handle = kernel->get_scheduler(); for (int i = 0; i < TASKS; i++) { - handle->referent()->create_task(NULL, "child"); + handle->create_task(NULL, "child"); } sync::random_sleep(1000); - kernel->destroy_domain(handle->_referent); } bool @@ -37,7 +36,6 @@ rust_domain_test::run() { // sleep below. sync::sleep(100); - kernel.join_all_domains(); return true; } @@ -47,12 +45,9 @@ void task_entry() { void rust_task_test::worker::run() { - rust_handle<rust_dom> *handle = - kernel->create_domain("test"); - rust_dom *domain = handle->referent(); - domain->root_task->start((uintptr_t)&task_entry, (uintptr_t)NULL); - domain->start_main_loop(0); - kernel->destroy_domain(domain); + rust_scheduler *scheduler = kernel->get_scheduler(); + scheduler->root_task->start((uintptr_t)&task_entry, (uintptr_t)NULL); + scheduler->start_main_loop(0); } bool @@ -68,6 +63,5 @@ rust_task_test::run() { } sync::random_sleep(1000); - kernel.join_all_domains(); return true; } diff --git a/src/test/run-pass/child-outlives-parent.rs b/src/test/run-pass/child-outlives-parent.rs index 988172ba6ad6c..4bf03ea243a9e 100644 --- a/src/test/run-pass/child-outlives-parent.rs +++ b/src/test/run-pass/child-outlives-parent.rs @@ -1,8 +1,5 @@ - - - // xfail-stage0 // Reported as issue #126, child leaks the string. fn child2(str s) { } -fn main() { auto x = spawn child2("hi"); } \ No newline at end of file +fn main() { auto x = spawn child2("hi"); } diff --git a/src/test/run-pass/task-life-0.rs b/src/test/run-pass/task-life-0.rs index 324572344a4dc..74d966c780bc8 100644 --- a/src/test/run-pass/task-life-0.rs +++ b/src/test/run-pass/task-life-0.rs @@ -5,4 +5,4 @@ fn main() -> () { fn child(str s) { -} \ No newline at end of file +}
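Reviewer note on the allocation and string changes in this patch: rust_task now owns a local_region and a synchronized_region and dispatches on a memory_region type instead of allocating through the (former) rust_dom, and upcall_dup_str takes an explicit target task so the duplicated string is produced by make_str on the receiving task rather than the caller. Below is a minimal, standalone sketch of the region-dispatch pattern only; memory_region and task_model here are simplified stand-ins invented for illustration (the runtime's memory_region also does logging, accounting, and locking for the synchronized case), not the runtime's actual classes.

#include <cassert>
#include <cstddef>
#include <cstdio>
#include <cstdlib>

// Simplified stand-in for the runtime's memory_region: just enough
// surface to show the LOCAL vs. SYNCHRONIZED dispatch.
class memory_region {
public:
    enum memory_region_type { LOCAL, SYNCHRONIZED };
    void *malloc(size_t size) { return ::malloc(size); }
    void free(void *mem) { ::free(mem); }
};

// Hypothetical task_model mirroring the shape of rust_task's new
// allocators: one task-private region, plus one intended to be shared
// across scheduler threads.
struct task_model {
    memory_region local_region;
    memory_region synchronized_region;

    // Select a region by type; fail loudly on anything unrecognized,
    // in the spirit of rust_task::malloc's I(sched, false).
    void *malloc(size_t size, memory_region::memory_region_type type) {
        if (type == memory_region::LOCAL)
            return local_region.malloc(size);
        if (type == memory_region::SYNCHRONIZED)
            return synchronized_region.malloc(size);
        assert(false && "unknown memory region type");
        return NULL;
    }

    // Frees must name the region the allocation came from.
    void free(void *mem, memory_region::memory_region_type type) {
        if (type == memory_region::LOCAL)
            local_region.free(mem);
        else
            synchronized_region.free(mem);
    }
};

int main() {
    task_model t;
    void *p = t.malloc(64, memory_region::LOCAL);
    printf("task-local allocation at %p\n", p);
    t.free(p, memory_region::LOCAL);
    return 0;
}

The design point the sketch is meant to surface: the caller, not the allocator, decides whether an allocation is task-private or shared, which is what lets upcalls like dup_str hand an allocation to a different task's region by calling through the target task.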