From 6e9a5e25fb55f1de72ac6291a413975c931f7e98 Mon Sep 17 00:00:00 2001
From: markstur
Date: Wed, 1 Nov 2023 10:30:46 -0700
Subject: [PATCH] Remove broken links in tuning guide

A couple of "for more details" links to external sites are no longer
finding details. Better to remove them.

Signed-off-by: markstur
---
 recipes_source/recipes/tuning_guide.py | 5 +----
 1 file changed, 1 insertion(+), 4 deletions(-)

diff --git a/recipes_source/recipes/tuning_guide.py b/recipes_source/recipes/tuning_guide.py
index dd615714a24..39fa667bc1a 100644
--- a/recipes_source/recipes/tuning_guide.py
+++ b/recipes_source/recipes/tuning_guide.py
@@ -193,15 +193,12 @@ def fused_gelu(x):
 #
 # numactl --cpunodebind=N --membind=N python
 
-###############################################################################
-# More detailed descriptions can be found `here `_.
-
 ###############################################################################
 # Utilize OpenMP
 # ~~~~~~~~~~~~~~
 # OpenMP is utilized to bring better performance for parallel computation tasks.
 # ``OMP_NUM_THREADS`` is the easiest switch that can be used to accelerate computations. It determines number of threads used for OpenMP computations.
-# CPU affinity setting controls how workloads are distributed over multiple cores. It affects communication overhead, cache line invalidation overhead, or page thrashing, thus proper setting of CPU affinity brings performance benefits. ``GOMP_CPU_AFFINITY`` or ``KMP_AFFINITY`` determines how to bind OpenMP* threads to physical processing units. Detailed information can be found `here `_.
+# CPU affinity setting controls how workloads are distributed over multiple cores. It affects communication overhead, cache line invalidation overhead, or page thrashing, thus proper setting of CPU affinity brings performance benefits. ``GOMP_CPU_AFFINITY`` or ``KMP_AFFINITY`` determines how to bind OpenMP* threads to physical processing units.
 
 ###############################################################################
 # With the following command, PyTorch run the task on N OpenMP threads.