
Commit 869b73d

mark crypto intrinsics as safe
1 parent 5ea51fc commit 869b73d

File tree

1 file changed: 28 additions, 40 deletions


crates/core_arch/src/arm_shared/crypto.rs

Lines changed: 28 additions & 40 deletions
@@ -108,8 +108,8 @@ use stdarch_test::assert_instr;
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
-    vaeseq_u8_(data, key)
+pub fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
+    unsafe { vaeseq_u8_(data, key) }
 }
 
 /// AES single round decryption.
@@ -127,8 +127,8 @@ pub unsafe fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
-    vaesdq_u8_(data, key)
+pub fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
+    unsafe { vaesdq_u8_(data, key) }
 }
 
 /// AES mix columns.
@@ -146,8 +146,8 @@ pub unsafe fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t {
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
-    vaesmcq_u8_(data)
+pub fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
+    unsafe { vaesmcq_u8_(data) }
 }
 
 /// AES inverse mix columns.
@@ -165,8 +165,8 @@ pub unsafe fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t {
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
-    vaesimcq_u8_(data)
+pub fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
+    unsafe { vaesimcq_u8_(data) }
 }
 
 /// SHA1 fixed rotate.
@@ -184,8 +184,8 @@ pub unsafe fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t {
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
-    vsha1h_u32_(hash_e)
+pub fn vsha1h_u32(hash_e: u32) -> u32 {
+    unsafe { vsha1h_u32_(hash_e) }
 }
 
 /// SHA1 hash update accelerator, choose.
@@ -203,8 +203,8 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
-    vsha1cq_u32_(hash_abcd, hash_e, wk)
+pub fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
+    unsafe { vsha1cq_u32_(hash_abcd, hash_e, wk) }
 }
 
 /// SHA1 hash update accelerator, majority.
@@ -222,8 +222,8 @@ pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
-    vsha1mq_u32_(hash_abcd, hash_e, wk)
+pub fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
+    unsafe { vsha1mq_u32_(hash_abcd, hash_e, wk) }
 }
 
 /// SHA1 hash update accelerator, parity.
@@ -241,8 +241,8 @@ pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
-    vsha1pq_u32_(hash_abcd, hash_e, wk)
+pub fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
+    unsafe { vsha1pq_u32_(hash_abcd, hash_e, wk) }
 }
 
 /// SHA1 schedule update accelerator, first part.
@@ -260,8 +260,8 @@ pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) ->
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t {
-    vsha1su0q_u32_(w0_3, w4_7, w8_11)
+pub fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t {
+    unsafe { vsha1su0q_u32_(w0_3, w4_7, w8_11) }
 }
 
 /// SHA1 schedule update accelerator, second part.
@@ -279,8 +279,8 @@ pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t {
-    vsha1su1q_u32_(tw0_3, w12_15)
+pub fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t {
+    unsafe { vsha1su1q_u32_(tw0_3, w12_15) }
 }
 
 /// SHA256 hash update accelerator.
@@ -298,12 +298,8 @@ pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vsha256hq_u32(
-    hash_abcd: uint32x4_t,
-    hash_efgh: uint32x4_t,
-    wk: uint32x4_t,
-) -> uint32x4_t {
-    vsha256hq_u32_(hash_abcd, hash_efgh, wk)
+pub fn vsha256hq_u32(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t) -> uint32x4_t {
+    unsafe { vsha256hq_u32_(hash_abcd, hash_efgh, wk) }
 }
 
 /// SHA256 hash update accelerator, upper part.
@@ -321,12 +317,8 @@ pub unsafe fn vsha256hq_u32(
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vsha256h2q_u32(
-    hash_efgh: uint32x4_t,
-    hash_abcd: uint32x4_t,
-    wk: uint32x4_t,
-) -> uint32x4_t {
-    vsha256h2q_u32_(hash_efgh, hash_abcd, wk)
+pub fn vsha256h2q_u32(hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t) -> uint32x4_t {
+    unsafe { vsha256h2q_u32_(hash_efgh, hash_abcd, wk) }
 }
 
 /// SHA256 schedule update accelerator, first part.
@@ -344,8 +336,8 @@ pub unsafe fn vsha256h2q_u32(
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t {
-    vsha256su0q_u32_(w0_3, w4_7)
+pub fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t {
+    unsafe { vsha256su0q_u32_(w0_3, w4_7) }
 }
 
 /// SHA256 schedule update accelerator, second part.
@@ -363,12 +355,8 @@ pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t
     not(target_arch = "arm"),
     stable(feature = "aarch64_neon_crypto_intrinsics", since = "1.72.0")
 )]
-pub unsafe fn vsha256su1q_u32(
-    tw0_3: uint32x4_t,
-    w8_11: uint32x4_t,
-    w12_15: uint32x4_t,
-) -> uint32x4_t {
-    vsha256su1q_u32_(tw0_3, w8_11, w12_15)
+pub fn vsha256su1q_u32(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t {
+    unsafe { vsha256su1q_u32_(tw0_3, w8_11, w12_15) }
 }
 
 #[cfg(test)]
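
The pattern throughout the diff is the same: each intrinsic changes from `pub unsafe fn` to a plain `pub fn` whose body wraps the underlying call in an `unsafe` block, while keeping its `#[target_feature]` gate. A minimal usage sketch is below; it is not part of this commit, the helper names `aes_round` and `aes_round_checked` are hypothetical, and it assumes aarch64 with a toolchain that accepts safe `#[target_feature]` functions (Rust 1.86+).

```rust
// Sketch only: illustrative helpers, not part of stdarch.
#[cfg(target_arch = "aarch64")]
mod sketch {
    use std::arch::aarch64::{uint8x16_t, vaeseq_u8, vaesmcq_u8};

    // With the intrinsics now safe fns gated on `#[target_feature]`, a
    // function that enables the same "aes" feature can call them without
    // an `unsafe` block.
    #[target_feature(enable = "aes")]
    fn aes_round(state: uint8x16_t, round_key: uint8x16_t) -> uint8x16_t {
        vaesmcq_u8(vaeseq_u8(state, round_key))
    }

    pub fn aes_round_checked(state: uint8x16_t, round_key: uint8x16_t) -> Option<uint8x16_t> {
        if std::arch::is_aarch64_feature_detected!("aes") {
            // Entering the feature-enabled function from a context without
            // the feature still requires `unsafe`, justified here by the
            // runtime detection above.
            Some(unsafe { aes_round(state, round_key) })
        } else {
            None
        }
    }
}
```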
