 import sphinx_gallery.gen_rst
 import multiprocessing

-# Save the original function
-def isolated_call(func, args, kwargs, result_queue):
+# Monkey patching sphinx gallery to run each example in an isolated process so
+# that we don't need to worry about examples changing global state
+def call_fn(func, args, kwargs, result_queue):
     try:
         result = func(*args, **kwargs)
         result_queue.put((True, result))
     except Exception as e:
         result_queue.put((False, str(e)))

-def make_isolated_version(func):
+def call_in_subprocess(func):
     def wrapper(*args, **kwargs):
-        result_queue = multiprocessing.Queue()
-        p = multiprocessing.Process(
-            target=isolated_call,
-            args=(func, args, kwargs, result_queue)
-        )
-        p.start()
-        p.join()
-        success, result = result_queue.get()
-        if success:
-            return result
-        else:
-            raise RuntimeError(f"Error in isolated process: {result}")
+        pool = multiprocessing.Pool(processes=1)
+        p = pool.apply_async(func, args, kwargs)
+        pool.close()
+        pool.join()
+        return p.get()
     return wrapper

 # Monkey-patch
-sphinx_gallery.gen_rst.generate_file_rst = make_isolated_version(sphinx_gallery.gen_rst.generate_file_rst)
+sphinx_gallery.gen_rst.generate_file_rst = call_in_subprocess(sphinx_gallery.gen_rst.generate_file_rst)

 try:
     import torchvision
@@ -128,18 +122,19 @@ def wrapper(*args, **kwargs):
 # -- Sphinx-gallery configuration --------------------------------------------

 def reset_seeds(gallery_conf, fname):
-    torch.cuda.empty_cache()
-    torch.backends.cudnn.deterministic = True
-    torch.backends.cudnn.benchmark = False
-    torch._dynamo.reset()
-    torch._inductor.config.force_disable_caches = True
-    torch.manual_seed(42)
-    torch.set_default_device(None)
-    random.seed(10)
-    numpy.random.seed(10)
-    torch.set_grad_enabled(True)
-
-    gc.collect()
+    pass
+    # torch.cuda.empty_cache()
+    # torch.backends.cudnn.deterministic = True
+    # torch.backends.cudnn.benchmark = False
+    # torch._dynamo.reset()
+    # torch._inductor.config.force_disable_caches = True
+    # torch.manual_seed(42)
+    # torch.set_default_device(None)
+    # random.seed(10)
+    # numpy.random.seed(10)
+    # torch.set_grad_enabled(True)
+
+    # gc.collect()

 sphinx_gallery_conf = {
     'examples_dirs': ['beginner_source', 'intermediate_source',
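
Because each example now executes in its own worker process, global state it touches no longer leaks into the parent Sphinx build, which is presumably why the per-example `reset_seeds` bookkeeping can be reduced to a no-op. The standalone check below (not part of conf.py; it only assumes `torch` is importable) illustrates that isolation:

import multiprocessing

import torch


def mutate_torch_state():
    # Change global PyTorch state inside the worker process.
    torch.manual_seed(0)
    torch.set_default_dtype(torch.float64)
    return torch.get_default_dtype()


if __name__ == "__main__":
    with multiprocessing.Pool(processes=1) as pool:
        child_dtype = pool.apply(mutate_torch_state)
    print(child_dtype)                # torch.float64, as set in the worker
    print(torch.get_default_dtype())  # still torch.float32 in the parent
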