Skip to content

Commit 3c7c494

Browse files
authored
Merge pull request #10598 from luzpaz/typos/ompi-mca
Fix typos in ompi/mca subdirectory
2 parents 96dbb4b + 96d1c5b commit 3c7c494

File tree

184 files changed

+421
-421
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

184 files changed

+421
-421
lines changed

ompi/mca/bml/r2/bml_r2.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -220,7 +220,7 @@ static int mca_bml_r2_endpoint_add_btl (struct ompi_proc_t *proc, mca_bml_base_e
220220

221221
if ((btl_flags & (MCA_BTL_FLAGS_PUT | MCA_BTL_FLAGS_GET | MCA_BTL_FLAGS_SEND)) == 0) {
222222
/* If no protocol specified, we have 2 choices: we ignore the BTL
223-
* as we don't know which protocl to use, or we suppose that all
223+
* as we don't know which protocol to use, or we suppose that all
224224
* BTLs support the send protocol. This is really a btl error as
225225
* these flags should have been sanitized by the btl. */
226226
btl_flags |= MCA_BTL_FLAGS_SEND;
@@ -634,7 +634,7 @@ static int mca_bml_r2_del_procs(size_t nprocs,
634634
}
635635

636636
/* The reference stored in btl_eager and btl_rdma will automatically
637-
* dissapear once the btl_array destructor is called. Thus, there is
637+
* disappear once the btl_array destructor is called. Thus, there is
638638
* no need for extra cleaning here.
639639
*/
640640
}
@@ -916,7 +916,7 @@ static int mca_bml_r2_register( mca_btl_base_tag_t tag,
916916
{
917917
mca_btl_base_active_message_trigger[tag].cbfunc = cbfunc;
918918
mca_btl_base_active_message_trigger[tag].cbdata = data;
919-
/* Give an oportunity to the BTLs to do something special
919+
/* Give an opportunity to the BTLs to do something special
920920
* for each registration.
921921
*/
922922
{

ompi/mca/coll/adapt/coll_adapt_context.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ struct ompi_coll_adapt_constant_bcast_context_s {
3232
opal_mutex_t *mutex;
3333
int *recv_array;
3434
int *send_array;
35-
/* Length of the fragment array, which is the number of recevied segments */
35+
/* Length of the fragment array, which is the number of received segments */
3636
int num_recv_segs;
3737
/* Number of segments that are finishing receiving */
3838
int num_recv_fini;
@@ -81,7 +81,7 @@ struct ompi_coll_adapt_constant_reduce_context_s {
8181
int ireduce_tag;
8282
/* How many sends are posted but not finished */
8383
int32_t ongoing_send;
84-
/* Length of the fragment array, which is the number of recevied segments */
84+
/* Length of the fragment array, which is the number of received segments */
8585
int32_t num_recv_segs;
8686
/* Number of sent segments */
8787
int32_t num_sent_segs;

ompi/mca/coll/adapt/coll_adapt_ibcast.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -414,7 +414,7 @@ int ompi_coll_adapt_ibcast_generic(void *buff, int count, struct ompi_datatype_t
414414
num_segs = (count + seg_count - 1) / seg_count;
415415
real_seg_size = (ptrdiff_t) seg_count *extent;
416416

417-
/* Set memory for recv_array and send_array, created on heap becasue they are needed to be accessed by other functions (callback functions) */
417+
/* Set memory for recv_array and send_array, created on heap because they are needed to be accessed by other functions (callback functions) */
418418
if (num_segs != 0) {
419419
recv_array = (int *) malloc(sizeof(int) * num_segs);
420420
}
@@ -485,7 +485,7 @@ int ompi_coll_adapt_ibcast_generic(void *buff, int count, struct ompi_datatype_t
485485
context->frag_id = i;
486486
/* The id of the peer in children_list */
487487
context->child_id = j;
488-
/* Actural rank of the peer */
488+
/* Actual rank of the peer */
489489
context->peer = tree->tree_next[j];
490490
context->con = con;
491491
OBJ_RETAIN(con);

ompi/mca/coll/adapt/coll_adapt_ireduce.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -741,7 +741,7 @@ int ompi_coll_adapt_ireduce_generic(const void *sbuf, void *rbuf, int count,
741741
(ompi_coll_adapt_reduce_context_t *)opal_free_list_wait(mca_coll_adapt_component.adapt_ireduce_context_free_list);
742742
context->buff = (char *) sbuf + (ptrdiff_t) seg_index * (ptrdiff_t) segment_increment;
743743
context->seg_index = seg_index;
744-
/* Actural rank of the peer */
744+
/* Actual rank of the peer */
745745
context->peer = tree->tree_prev;
746746
context->con = con;
747747
context->inbuf = NULL;

ompi/mca/coll/base/coll_base_allgather.c

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@
5252
* Example on 6 nodes:
5353
* Initialization: everyone has its own buffer at location 0 in rbuf
5454
* This means if user specified MPI_IN_PLACE for sendbuf
55-
* we must copy our block from recvbuf to begining!
55+
* we must copy our block from recvbuf to beginning!
5656
* # 0 1 2 3 4 5
5757
* [0] [1] [2] [3] [4] [5]
5858
* Step 0: send message to (rank - 2^0), receive message from (rank + 2^0)
@@ -124,7 +124,7 @@ int ompi_coll_base_allgather_intra_bruck(const void *sbuf, int scount,
124124
/* Communication step:
125125
At every step i, rank r:
126126
- doubles the distance
127-
- sends message which starts at begining of rbuf and has size
127+
- sends message which starts at beginning of rbuf and has size
128128
(blockcount * rcount) to rank (r - distance)
129129
- receives message of size blockcount * rcount from rank (r + distance)
130130
at location (rbuf + distance * rcount * rext)
@@ -159,10 +159,10 @@ int ompi_coll_base_allgather_intra_bruck(const void *sbuf, int scount,
159159
/* Finalization step:
160160
On all nodes except 0, data needs to be shifted locally:
161161
- create temporary shift buffer,
162-
see discussion in coll_basic_reduce.c about the size and begining
162+
see discussion in coll_basic_reduce.c about the size and beginning
163163
of temporary buffer.
164164
- copy blocks [0 .. (size - rank - 1)] from rbuf to shift buffer
165-
- move blocks [(size - rank) .. size] from rbuf to begining of rbuf
165+
- move blocks [(size - rank) .. size] from rbuf to beginning of rbuf
166166
- copy blocks from shift buffer starting at block [rank] in rbuf.
167167
*/
168168
if (0 != rank) {
@@ -182,7 +182,7 @@ int ompi_coll_base_allgather_intra_bruck(const void *sbuf, int scount,
182182
shift_buf, rbuf);
183183
if (err < 0) { line = __LINE__; free(free_buf); goto err_hndl; }
184184

185-
/* 2. move blocks [(size - rank) .. size] from rbuf to the begining of rbuf */
185+
/* 2. move blocks [(size - rank) .. size] from rbuf to the beginning of rbuf */
186186
tmpsend = (char*) rbuf + (ptrdiff_t)(size - rank) * (ptrdiff_t)rcount * rext;
187187
err = ompi_datatype_copy_content_same_ddt(rdtype, (ptrdiff_t)rank * (ptrdiff_t)rcount,
188188
rbuf, tmpsend);
@@ -532,7 +532,7 @@ int ompi_coll_base_allgather_intra_ring(const void *sbuf, int scount,
532532
[(r - i - 1 + size) % size]
533533
- sends message to rank [(r + 1) % size] containing data from rank
534534
[(r - i + size) % size]
535-
- sends message which starts at begining of rbuf and has size
535+
- sends message which starts at beginning of rbuf and has size
536536
*/
537537
sendto = (rank + 1) % size;
538538
recvfrom = (rank - 1 + size) % size;
@@ -685,7 +685,7 @@ ompi_coll_base_allgather_intra_neighborexchange(const void *sbuf, int scount,
685685
- Rest of the steps:
686686
update recv_data_from according to offset, and
687687
exchange two blocks with appropriate neighbor.
688-
the send location becomes previous receve location.
688+
the send location becomes previous receive location.
689689
*/
690690
tmprecv = (char*)rbuf + (ptrdiff_t)neighbor[0] * (ptrdiff_t)rcount * rext;
691691
tmpsend = (char*)rbuf + (ptrdiff_t)rank * (ptrdiff_t)rcount * rext;

ompi/mca/coll/base/coll_base_allgatherv.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -406,7 +406,7 @@ int ompi_coll_base_allgatherv_intra_ring(const void *sbuf, int scount,
406406
[(r - i - 1 + size) % size]
407407
- sends message to rank [(r + 1) % size] containing data from rank
408408
[(r - i + size) % size]
409-
- sends message which starts at begining of rbuf and has size
409+
- sends message which starts at beginning of rbuf and has size
410410
*/
411411
sendto = (rank + 1) % size;
412412
recvfrom = (rank - 1 + size) % size;
@@ -563,7 +563,7 @@ ompi_coll_base_allgatherv_intra_neighborexchange(const void *sbuf, int scount,
563563
- Rest of the steps:
564564
update recv_data_from according to offset, and
565565
exchange two blocks with appropriate neighbor.
566-
the send location becomes previous receve location.
566+
the send location becomes previous receive location.
567567
Note, we need to create indexed datatype to send and receive these
568568
blocks properly.
569569
*/

ompi/mca/coll/base/coll_base_allreduce.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -424,7 +424,7 @@ ompi_coll_base_allreduce_intra_ring(const void *sbuf, void *rbuf, int count,
424424
- wait on block (r + 1)
425425
- compute on block (r + 1)
426426
- send block (r + 1) to rank (r + 1)
427-
Note that we must be careful when computing the begining of buffers and
427+
Note that we must be careful when computing the beginning of buffers and
428428
for send operations and computation we must compute the exact block size.
429429
*/
430430
send_to = (rank + 1) % size;
@@ -717,7 +717,7 @@ ompi_coll_base_allreduce_intra_ring_segmented(const void *sbuf, void *rbuf, int
717717
- wait on block (r + 1)
718718
- compute on block (r + 1)
719719
- send block (r + 1) to rank (r + 1)
720-
Note that we must be careful when computing the begining of buffers and
720+
Note that we must be careful when computing the beginning of buffers and
721721
for send operations and computation we must compute the exact block size.
722722
*/
723723
send_to = (rank + 1) % size;
@@ -1122,7 +1122,7 @@ int ompi_coll_base_allreduce_intra_redscat_allgather(
11221122

11231123
for (int mask = 1; mask < nprocs_pof2; mask <<= 1) {
11241124
/*
1125-
* On each iteration: rindex[step] = sindex[step] -- begining of the
1125+
* On each iteration: rindex[step] = sindex[step] -- beginning of the
11261126
* current window. Length of the current window is stored in wsize.
11271127
*/
11281128
int vdest = vrank ^ mask;

ompi/mca/coll/base/coll_base_alltoallv.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,7 @@ ompi_coll_base_alltoallv_intra_pairwise(const void *sbuf, const int *scounts, co
196196
ompi_datatype_type_extent(sdtype, &sext);
197197
ompi_datatype_type_extent(rdtype, &rext);
198198

199-
/* Perform pairwise exchange starting from 1 since local exhange is done */
199+
/* Perform pairwise exchange starting from 1 since local exchange is done */
200200
for (step = 0; step < size; step++) {
201201

202202
/* Determine sender and receiver for this step. */

ompi/mca/coll/base/coll_base_barrier.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -83,7 +83,7 @@ ompi_coll_base_sendrecv_zero( int dest, int stag,
8383
if( MPI_ERR_PROC_FAILED_PENDING == rc ) {
8484
rc = MPI_ERR_PROC_FAILED;
8585
}
86-
} else /* this 'else' intentionaly spills outside the ifdef */
86+
} else /* this 'else' intentionally spills outside the ifdef */
8787
#endif /* OPAL_ENABLE_FT_MPI */
8888
ompi_request_free(&req);
8989
}
@@ -95,7 +95,7 @@ ompi_coll_base_sendrecv_zero( int dest, int stag,
9595
}
9696

9797
/*
98-
* Barrier is ment to be a synchronous operation, as some BTLs can mark
98+
* Barrier is meant to be a synchronous operation, as some BTLs can mark
9999
* a request done before it's passed to the NIC and progress might not be made
100100
* elsewhere we cannot allow a process to exit the barrier until its last
101101
* [round of] sends are completed.
@@ -110,7 +110,7 @@ ompi_coll_base_sendrecv_zero( int dest, int stag,
110110
/*
111111
* Simple double ring version of barrier
112112
*
113-
* synchronous gurantee made by last ring of sends are synchronous
113+
* synchronous guarantee made by last ring of sends are synchronous
114114
*
115115
*/
116116
int ompi_coll_base_barrier_intra_doublering(struct ompi_communicator_t *comm,

ompi/mca/coll/base/coll_base_bcast.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -369,7 +369,7 @@ ompi_coll_base_bcast_intra_split_bintree ( void* buffer,
369369
int err=0, line, rank, size, segindex, i, lr, pair;
370370
uint32_t counts[2];
371371
int segcount[2]; /* Number of elements sent with each segment */
372-
int num_segments[2]; /* Number of segmenets */
372+
int num_segments[2]; /* Number of segments */
373373
int sendcount[2]; /* the same like segcount, except for the last segment */
374374
size_t realsegsize[2], type_size;
375375
char *tmpbuf[2];
@@ -507,7 +507,7 @@ ompi_coll_base_bcast_intra_split_bintree ( void* buffer,
507507
if (err != MPI_SUCCESS) { line = __LINE__; goto error_hndl; }
508508
} /* end of for each child */
509509

510-
/* upate the base request */
510+
/* update the base request */
511511
base_req = new_req;
512512
/* go to the next buffer (ie. the one corresponding to the next recv) */
513513
tmpbuf[lr] += realsegsize[lr];

ompi/mca/coll/base/coll_base_comm_select.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -453,7 +453,7 @@ static opal_list_t *check_components(opal_list_t * components,
453453
/* For all valid component reorder them not on their provided priorities but on
454454
* the order requested in the info key. As at this point the coll_include is
455455
* already ordered backward we can simply append the components.
456-
* Note that the last element in selectable will have the highest priorty.
456+
* Note that the last element in selectable will have the highest priority.
457457
*/
458458
for (int idx = count_include-1; idx >= 0; --idx) {
459459
mca_coll_base_avail_coll_t *item;

ompi/mca/coll/base/coll_base_functions.h

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -519,7 +519,7 @@ OMPI_DECLSPEC OBJ_CLASS_DECLARATION(mca_coll_base_comm_t);
519519

520520
/**
521521
* Free all requests in an array. As these requests are usually used during
522-
* collective communications, and as on a succesful collective they are
522+
* collective communications, and as on a successful collective they are
523523
* expected to be released during the corresponding wait, the array should
524524
* generally be empty. However, this function might be used on error conditions
525525
* where it will allow a correct cleanup.
@@ -544,7 +544,7 @@ static inline void ompi_coll_base_free_reqs(ompi_request_t **reqs, int count)
544544
* free, as this is the best that can be done in this case. */
545545
ompi_request_cancel(reqs[i]);
546546
ompi_request_wait(&reqs[i], MPI_STATUS_IGNORE);
547-
} else /* this 'else' intentionaly spills outside the ifdef */
547+
} else /* this 'else' intentionally spills outside the ifdef */
548548
#endif /* OPAL_ENABLE_FT_MPI */
549549
ompi_request_free(&reqs[i]);
550550
}

ompi/mca/coll/base/coll_base_reduce.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -112,7 +112,7 @@ int ompi_coll_base_reduce_generic( const void* sendbuf, void* recvbuf, int origi
112112
}
113113

114114
/* If this is a non-commutative operation we must copy
115-
sendbuf to the accumbuf, in order to simplfy the loops */
115+
sendbuf to the accumbuf, in order to simplify the loops */
116116

117117
if (!ompi_op_is_commute(op) && MPI_IN_PLACE != sendbuf) {
118118
ompi_datatype_copy_content_same_ddt(datatype, original_count,
@@ -250,14 +250,14 @@ int ompi_coll_base_reduce_generic( const void* sendbuf, void* recvbuf, int origi
250250
the number of segments we have two options:
251251
- send all segments using blocking send to the parent, or
252252
- avoid overflooding the parent nodes by limiting the number of
253-
outstanding requests to max_oustanding_reqs.
253+
outstanding requests to max_outstanding_reqs.
254254
TODO/POSSIBLE IMPROVEMENT: If there is a way to determine the eager size
255255
for the current communication, synchronization should be used only
256256
when the message/segment size is smaller than the eager size.
257257
*/
258258
else {
259259

260-
/* If the number of segments is less than a maximum number of oustanding
260+
/* If the number of segments is less than a maximum number of outstanding
261261
requests or there is no limit on the maximum number of outstanding
262262
requests, we send data to the parent using blocking send */
263263
if ((0 == max_outstanding_reqs) ||
@@ -965,7 +965,7 @@ int ompi_coll_base_reduce_intra_redscat_gather(
965965

966966
for (int mask = 1; mask < nprocs_pof2; mask <<= 1) {
967967
/*
968-
* On each iteration: rindex[step] = sindex[step] -- begining of the
968+
* On each iteration: rindex[step] = sindex[step] -- beginning of the
969969
* current window. Length of the current window is stored in wsize.
970970
*/
971971
int vdest = vrank ^ mask;

ompi/mca/coll/base/coll_base_reduce_scatter.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -545,7 +545,7 @@ ompi_coll_base_reduce_scatter_intra_ring( const void *sbuf, void *rbuf, const in
545545
- wait on block (r)
546546
- compute on block (r)
547547
- copy block (r) to rbuf
548-
Note that we must be careful when computing the begining of buffers and
548+
Note that we must be careful when computing the beginning of buffers and
549549
for send operations and computation we must compute the exact block size.
550550
*/
551551
send_to = (rank + 1) % size;

ompi/mca/coll/base/coll_base_util.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ int ompi_coll_base_sendrecv_actual( const void* sendbuf, size_t scount,
9494
if( MPI_ERR_PROC_FAILED_PENDING == err ) {
9595
err = MPI_ERR_PROC_FAILED;
9696
}
97-
} else /* this 'else' intentionaly spills outside the ifdef */
97+
} else /* this 'else' intentionally spills outside the ifdef */
9898
#endif /* OPAL_ENABLE_FT_MPI */
9999
ompi_request_free(&req);
100100
}
@@ -130,7 +130,7 @@ int ompi_rounddown(int num, int factor)
130130
/**
131131
* Release all objects and arrays stored into the nbc_request.
132132
* The release_arrays are temporary memory to stored the values
133-
* converted from Fortran, and should dissapear in same time as the
133+
* converted from Fortran, and should disappear in same time as the
134134
* request itself.
135135
*/
136136
static void

ompi/mca/coll/base/coll_base_util.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,7 @@ int ompi_coll_base_file_getnext_string(FILE *fptr, int *fileline, char** val);
196196
*/
197197
int ompi_coll_base_file_peek_next_char_is(FILE *fptr, int *fileline, int expected);
198198

199-
/* Miscelaneous function */
199+
/* Miscellaneous function */
200200
const char* mca_coll_base_colltype_to_str(int collid);
201201
int mca_coll_base_name_to_colltype(const char* name);
202202

ompi/mca/coll/basic/coll_basic_allgather.c

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -60,10 +60,10 @@ mca_coll_basic_allgather_inter(const void *sbuf, int scount,
6060
rsize = ompi_comm_remote_size(comm);
6161

6262
/* Algorithm:
63-
* - a gather to the root in remote group (simultaniously executed,
64-
* thats why we cannot use coll_gather).
63+
* - a gather to the root in remote group (simultaneously executed,
64+
* that's why we cannot use coll_gather).
6565
* - exchange the temp-results between two roots
66-
* - inter-bcast (again simultanious).
66+
* - inter-bcast (again simultaneous).
6767
*/
6868

6969
/* Step one: gather operations: */
@@ -106,7 +106,7 @@ mca_coll_basic_allgather_inter(const void *sbuf, int scount,
106106
err = ompi_request_wait_all(rsize + 1, reqs, MPI_STATUSES_IGNORE);
107107
if (OMPI_SUCCESS != err) { line = __LINE__; goto exit; }
108108

109-
/* Step 2: exchange the resuts between the root processes */
109+
/* Step 2: exchange the results between the root processes */
110110
span = opal_datatype_span(&sdtype->super, (int64_t)scount * (int64_t)size, &gap);
111111
tmpbuf_free = (char *) malloc(span);
112112
if (NULL == tmpbuf_free) { line = __LINE__; err = OMPI_ERR_OUT_OF_RESOURCE; goto exit; }

ompi/mca/coll/basic/coll_basic_allreduce.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ mca_coll_basic_allreduce_inter(const void *sbuf, void *rbuf, int count,
9797
* and which one enters coll_reduce with providing
9898
* MPI_PROC_NULL as root argument etc.) Here,
9999
* we execute the data exchange for both groups
100-
* simultaniously. */
100+
* simultaneously. */
101101
/*****************************************************************/
102102
if (rank == root) {
103103
err = ompi_datatype_type_extent(dtype, &extent);

ompi/mca/coll/basic/coll_basic_reduce_scatter.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -57,7 +57,7 @@
5757
*
5858
* NOTE: that the recursive halving algorithm should be faster than
5959
* the reduce/scatter for all message sizes. However, the memory
60-
* usage for the recusive halving is msg_size + 2 * comm_size greater
60+
* usage for the recursive halving is msg_size + 2 * comm_size greater
6161
* for the recursive halving, so I've limited where the recursive
6262
* halving is used to be nice to the app memory wise. There are much
6363
* better algorithms for large messages with commutative operations,

0 commit comments

Comments
 (0)