
Commit 4907dac

don't promote large fields to higher alignments if that would affect niche placement
1 parent faf2da3 commit 4907dac

File tree

2 files changed, 42 insertions(+), 13 deletions(-)

compiler/rustc_abi/src/layout.rs
tests/ui/structs-enums/type-sizes.rs

compiler/rustc_abi/src/layout.rs

Lines changed: 24 additions & 13 deletions

@@ -772,19 +772,6 @@ fn univariant(
     if optimize {
         let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
         let optimizing = &mut inverse_memory_index.raw[..end];
-        let effective_field_align = |layout: Layout<'_>| {
-            if let Some(pack) = pack {
-                // return the packed alignment in bytes
-                layout.align().abi.min(pack).bytes()
-            } else {
-                // returns log2(effective-align).
-                // This is ok since `pack` applies to all fields equally.
-                // The calculation assumes that size is an integer multiple of align, except for ZSTs.
-                //
-                // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
-                layout.align().abi.bytes().max(layout.size().bytes()).trailing_zeros() as u64
-            }
-        };

         // If `-Z randomize-layout` was enabled for the type definition we can shuffle
         // the field ordering to try and catch some code making assumptions about layouts
@@ -801,6 +788,30 @@ fn univariant(
             }
             // Otherwise we just leave things alone and actually optimize the type's fields
         } else {
+            let max_field_align = fields.iter().map(|f| f.align().abi.bytes()).max().unwrap_or(1);
+            let any_niche = fields.iter().any(|f| f.largest_niche().is_some());
+            let effective_field_align = |layout: Layout<'_>| {
+                if let Some(pack) = pack {
+                    // return the packed alignment in bytes
+                    layout.align().abi.min(pack).bytes()
+                } else {
+                    // returns log2(effective-align).
+                    // This is ok since `pack` applies to all fields equally.
+                    // The calculation assumes that size is an integer multiple of align, except for ZSTs.
+                    //
+                    // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
+                    let align = layout.align().abi.bytes();
+                    let size = layout.size().bytes();
+                    let size_as_align = align.max(size).trailing_zeros();
+                    let size_as_align = if any_niche {
+                        max_field_align.trailing_zeros().min(size_as_align)
+                    } else {
+                        size_as_align
+                    };
+                    size_as_align as u64
+                }
+            };
+
             match kind {
                 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                     optimizing.sort_by_key(|&x| {
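To make the new grouping rule concrete, here is a minimal standalone sketch of the non-packed branch of `effective_field_align`. The function name and plain-integer signature are invented for illustration and are not rustc's API; only the arithmetic mirrors the diff above: a field's size is treated as a pseudo-alignment (so e.g. [u8; 4] groups with align-4 fields), but when any field has a niche the result is capped at the largest true field alignment, so a large byte array can no longer out-rank the niche field.

// Sketch only: the name and integer-only signature are invented for illustration;
// rustc's real closure works on Layout values (see the diff above).
fn effective_align_log2(size: u64, align: u64, max_field_align: u64, any_niche: bool) -> u32 {
    // Treat the size as a pseudo-alignment so e.g. [u8; 4] groups with align-4 fields.
    let size_as_align = align.max(size).trailing_zeros();
    if any_niche {
        // With a niche present, never promote past the largest true field alignment.
        max_field_align.trailing_zeros().min(size_as_align)
    } else {
        size_as_align
    }
}

fn main() {
    // [u8; 8]: size 8, align 1; the largest real field alignment in the struct is 4 (u32/char).
    assert_eq!(effective_align_log2(8, 1, 4, true), 2);  // capped at log2(4) = 2
    assert_eq!(effective_align_log2(8, 1, 4, false), 3); // without the cap: log2(8) = 3
}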

tests/ui/structs-enums/type-sizes.rs

Lines changed: 18 additions & 0 deletions

@@ -186,6 +186,18 @@ struct Reorder2 {
     ary: [u8; 6],
 }

+// We want the niche in the front, which means we can't treat the array as quasi-aligned more than
+// 4 bytes even though we also want to place it at an 8-aligned offset where possible.
+// So the ideal layout would look like: (char, u32, [u8; 8], u8)
+// The current layout algorithm does (char, [u8; 8], u32, u8)
+#[repr(align(8))]
+struct ReorderWithNiche {
+    a: u32,
+    b: char,
+    c: u8,
+    ary: [u8; 8]
+}
+
 // standins for std types which we want to be laid out in a reasonable way
 struct RawVecDummy {
     ptr: NonNull<u8>,
@@ -298,4 +310,10 @@ pub fn main() {
     assert!(ptr::from_ref(&b.1).addr() > ptr::from_ref(&b.2).addr());

     assert_eq!(size_of::<Cow<'static, str>>(), size_of::<String>());
+
+    let v = ReorderWithNiche {a: 0, b: ' ', c: 0, ary: [0; 8]};
+    assert!((&v.ary).as_ptr().is_aligned_to(4),
+            "here [u8; 8] should group with _at least_ align-4 fields");
+    assert_eq!(ptr::from_ref(&v), ptr::from_ref(&v.b).cast(),
+            "sort niches to the front where possible");
 }
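The test's two pointer-based assertions can also be read as field offsets. The following sketch restates them with std::mem::offset_of; it assumes exactly the layout the test asserts and is not part of the committed test file.

// Sketch: restates the test's two assertions as field offsets; not part of the test file.
use std::mem::offset_of;

#[allow(dead_code)]
#[repr(align(8))]
struct ReorderWithNiche {
    a: u32,
    b: char,
    c: u8,
    ary: [u8; 8],
}

fn main() {
    // The niche-containing field (char) is sorted to the front of the struct...
    assert_eq!(offset_of!(ReorderWithNiche, b), 0);
    // ...and the [u8; 8] array is still placed at an offset aligned to at least 4.
    assert_eq!(offset_of!(ReorderWithNiche, ary) % 4, 0);
}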
