 #include <algorithm>
 #include <arpa/inet.h>
 
+#include <time.h>
+
 using api::FastlyResult;
 using fastly::FastlyAPIError;
+using host_api::MonotonicClock;
 using host_api::Result;
 
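+// Sentinel handle for timer tasks; it is never passed to the host (see the
+// asserts in AsyncTask::select below).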
+#define NEVER_HANDLE 0xFFFFFFFE
+
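+// A millisecond and a second expressed in nanoseconds.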
+#define MILLISECS_IN_NANOSECS 1000000
+#define SECS_IN_NANOSECS 1000000000
+
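+// Sleep until the given monotonic time, re-checking the clock in case
+// nanosleep returns early.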
+void sleep_until(uint64_t time_ns, uint64_t now) {
+  while (time_ns > now) {
+    uint64_t duration = time_ns - now;
+    timespec req{.tv_sec = static_cast<time_t>(duration / SECS_IN_NANOSECS),
+                 .tv_nsec = static_cast<long>(duration % SECS_IN_NANOSECS)};
+    timespec rem;
+    nanosleep(&req, &rem);
+    now = MonotonicClock::now();
+  }
+}
+
 size_t api::AsyncTask::select(std::vector<api::AsyncTask *> *tasks) {
   size_t tasks_len = tasks->size();
-  fastly_compute_at_edge_async_io_handle_t *handles =
-      new fastly_compute_at_edge_async_io_handle_t[tasks_len];
-  for (int i = 0; i < tasks_len; i++) {
-    handles[i] = tasks->at(i)->id();
+  std::vector<fastly_compute_at_edge_async_io_handle_t> handles;
+  handles.reserve(tasks_len);
+  uint64_t now = 0;
+  uint64_t soonest_deadline = 0;
+  size_t soonest_deadline_idx = -1;
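+  // Partition the tasks: timer tasks (deadline > 0) track the soonest
+  // deadline, while I/O tasks contribute their host handles.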
+  for (size_t idx = 0; idx < tasks_len; ++idx) {
+    auto *task = tasks->at(idx);
+    uint64_t deadline = task->deadline();
+    // Check for already-expired task deadlines before performing the select
+    // host call.
+    if (deadline > 0) {
+      MOZ_ASSERT(task->id() == NEVER_HANDLE);
+      if (now == 0) {
+        now = MonotonicClock::now();
+        MOZ_ASSERT(now > 0);
+      }
+      if (deadline <= now) {
+        return idx;
+      }
+      if (soonest_deadline == 0 || deadline < soonest_deadline) {
+        soonest_deadline = deadline;
+        soonest_deadline_idx = idx;
+      }
+    } else {
+      uint32_t handle = task->id();
+      // Timer task handles are skipped and never passed to the host.
+      MOZ_ASSERT(handle != NEVER_HANDLE);
+      handles.push_back(handle);
+    }
   }
-  fastly_world_list_handle_t hs{.ptr = handles, .len = tasks_len};
+
+  // When there are no async I/O tasks at all, just sleep until the soonest
+  // deadline.
+  if (handles.size() == 0) {
+    MOZ_ASSERT(soonest_deadline > 0);
+    sleep_until(soonest_deadline, now);
+    return soonest_deadline_idx;
+  }
+
+  fastly_world_list_handle_t hs{.ptr = handles.data(), .len = handles.size()};
   fastly_world_option_u32_t ret;
   fastly_compute_at_edge_types_error_t err = 0;
-  if (!fastly_compute_at_edge_async_io_select(&hs, 0, &ret, &err)) {
-    abort();
-  } else if (ret.is_some) {
-    return ret.val;
-  } else {
-    abort();
+
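+  // The select timeout is the soonest deadline in milliseconds; with no timer
+  // tasks, soonest_deadline and now are both zero, passing a timeout of 0 as
+  // the previous code did.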
+  while (true) {
+    if (!fastly_compute_at_edge_async_io_select(
+            &hs, (soonest_deadline - now) / MILLISECS_IN_NANOSECS, &ret, &err)) {
+      abort();
+    } else if (ret.is_some) {
+      // The host returns an index into the handle list, i.e. the task list
+      // with timer tasks filtered out, so map it back to a task index by
+      // counting non-timer tasks.
+      size_t task_idx = 0;
+      for (size_t idx = 0; idx < tasks_len; ++idx) {
+        if (tasks->at(idx)->id() != NEVER_HANDLE) {
+          if (ret.val == task_idx) {
+            return idx;
+          }
+          task_idx++;
+        }
+      }
+      abort();
+    } else {
+      // The no-value case means the select timed out, so soonest_deadline_idx
+      // must be set.
+      MOZ_ASSERT(soonest_deadline > 0);
+      MOZ_ASSERT(soonest_deadline_idx != -1);
+      // Verify that the deadline has actually elapsed, and if not, make the
+      // host call again.
+      now = MonotonicClock::now();
+      if (soonest_deadline > now) {
+        continue;
+      }
+      return soonest_deadline_idx;
+    }
   }
 }
 
@@ -96,11 +172,11 @@ Result<uint32_t> Random::get_u32() {
   return res;
 }
 
-uint64_t MonotonicClock::now() { return 0; }
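+// JS_Now() is in microseconds; scale to nanoseconds.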
+uint64_t MonotonicClock::now() { return JS_Now() * 1000; }
 
-uint64_t MonotonicClock::resolution() { return 1000000; }
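+// 1000ns resolution, matching the microsecond precision of JS_Now().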
+uint64_t MonotonicClock::resolution() { return 1000; }
 
-int32_t MonotonicClock::subscribe(const uint64_t when, const bool absolute) { return 0; }
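+// Timer subscriptions return the sentinel handle; their deadlines are driven
+// by AsyncTask::select rather than by a host subscription.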
+int32_t MonotonicClock::subscribe(const uint64_t when, const bool absolute) { return NEVER_HANDLE; }
 
 void MonotonicClock::unsubscribe(const int32_t handle_id) {}
 