use std::cmp;

use crate::utils::{is_copy, is_self_ty, snippet, span_lint_and_sugg};
use if_chain::if_chain;
use rustc_ast::attr;
use rustc_errors::Applicability;
use rustc_hir as hir;
use rustc_hir::intravisit::FnKind;
use rustc_hir::{BindingAnnotation, Body, FnDecl, HirId, ItemKind, MutTy, Mutability, Node, PatKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_middle::ty;
use rustc_session::{declare_tool_lint, impl_lint_pass};
use rustc_span::Span;
use rustc_target::abi::LayoutOf;
use rustc_target::spec::abi::Abi;
use rustc_target::spec::Target;

declare_clippy_lint! {
    /// **What it does:** Checks for functions taking arguments by reference, where
    /// the argument type is `Copy` and small enough to be more efficient to always
    /// pass by value.
    ///
    /// **Why is this bad?** In many calling conventions, instances of structs will
    /// be passed through registers if they fit into two or fewer general-purpose
    /// registers.
    ///
    /// **Known problems:** This lint depends on the target's register size. It is
    /// limited to 32-bit to try to reduce portability problems between 32-bit and
    /// 64-bit targets, but if you are compiling for an 8-bit or 16-bit target, the
    /// limit will be different.
    ///
    /// The configuration option `trivial_copy_size_limit` can be set to override
    /// this limit for a project.
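    ///
    /// e.g. in `clippy.toml` (assuming the usual kebab-case key form):
    ///
    /// ```toml
    /// trivial-copy-size-limit = 8
    /// ```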
    ///
    /// This lint attempts to allow passing arguments by reference if a reference
    /// to that argument is returned. This is implemented by comparing the lifetime
    /// of the argument and return value for equality. However, this can cause
    /// false positives in cases involving multiple lifetimes that are bounded by
    /// each other.
    ///
    /// **Example:**
    ///
    /// ```rust
    /// // Bad
    /// fn foo(v: &u32) {}
    /// ```
    ///
    /// ```rust
    /// // Better
    /// fn foo(v: u32) {}
    /// ```
    pub TRIVIALLY_COPY_PASS_BY_REF,
    pedantic,
    "functions taking small copyable arguments by reference"
}

declare_clippy_lint! {
    /// **What it does:** Checks for functions taking arguments by value, where
    /// the argument type is `Copy` and large enough to be worth considering
    /// passing by reference. Does not trigger if the function is exported
    /// (because that might cause API breakage), if the parameter is declared
    /// as mutable, or if the argument is `self`.
    ///
    /// **Why is this bad?** Arguments passed by value might result in an unnecessary
    /// shallow copy, taking up more space on the stack and requiring a call to
    /// `memcpy`, which can be expensive.
    ///
    /// **Example:**
    ///
    /// ```rust
    /// #[derive(Clone, Copy)]
    /// struct TooLarge([u8; 2048]);
    ///
    /// // Bad
    /// fn foo(v: TooLarge) {}
    /// ```
    ///
    /// ```rust
    /// #[derive(Clone, Copy)]
    /// struct TooLarge([u8; 2048]);
    ///
    /// // Good
    /// fn foo(v: &TooLarge) {}
    /// ```
    pub LARGE_TYPES_PASSED_BY_VALUE,
    pedantic,
    "functions taking large arguments by value"
}

#[derive(Copy, Clone)]
pub struct PassByRefOrValue {
    ref_min_size: u64,
    value_max_size: u64,
}

impl<'tcx> PassByRefOrValue {
    pub fn new(ref_min_size: Option<u64>, value_max_size: u64, target: &Target) -> Self {
        let ref_min_size = ref_min_size.unwrap_or_else(|| {
            let bit_width = u64::from(target.pointer_width);
            // Cap the calculated bit width at 32-bits to reduce
            // portability problems between 32 and 64-bit targets
            let bit_width = cmp::min(bit_width, 32);
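            // e.g. on a 64-bit target: min(64, 32) = 32 bits = 4 bytes, giving a
            // default limit of 8 bytes below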
            #[allow(clippy::integer_division)]
            let byte_width = bit_width / 8;
            // Use a limit of 2 times the register byte width
            byte_width * 2
        });

        Self {
            ref_min_size,
            value_max_size,
        }
    }

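    /// Checks each argument of the function signature: small `Copy` arguments
    /// taken by reference trigger `TRIVIALLY_COPY_PASS_BY_REF`, and large `Copy`
    /// arguments taken by value trigger `LARGE_TYPES_PASSED_BY_VALUE`.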
    fn check_poly_fn(&mut self, cx: &LateContext<'tcx>, hir_id: HirId, decl: &FnDecl<'_>, span: Option<Span>) {
        let fn_def_id = cx.tcx.hir().local_def_id(hir_id);

        let fn_sig = cx.tcx.fn_sig(fn_def_id);
        let fn_sig = cx.tcx.erase_late_bound_regions(&fn_sig);

        let fn_body = cx.enclosing_body.map(|id| cx.tcx.hir().body(id));

        for (index, (input, &ty)) in decl.inputs.iter().zip(fn_sig.inputs()).enumerate() {
            // All spans generated from a proc-macro invocation are the same...
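            // ...so if an argument's span matches the function's span, the function
            // came from a proc-macro and is skipped entirely.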
            match span {
                Some(s) if s == input.span => return,
                _ => (),
            }

            match ty.kind() {
                ty::Ref(input_lt, ty, Mutability::Not) => {
                    // Use lifetimes to determine if we're returning a reference to the
                    // argument. In that case we can't switch to pass-by-value as the
                    // argument will not live long enough.
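                    // A returned ADT can also borrow the argument through one of its
                    // lifetime parameters, so those regions are collected as well.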
                    let output_lts = match *fn_sig.output().kind() {
                        ty::Ref(output_lt, _, _) => vec![output_lt],
                        ty::Adt(_, substs) => substs.regions().collect(),
                        _ => vec![],
                    };

                    if_chain! {
                        if !output_lts.contains(&input_lt);
                        if is_copy(cx, ty);
                        if let Some(size) = cx.layout_of(ty).ok().map(|l| l.size.bytes());
                        if size <= self.ref_min_size;
                        if let hir::TyKind::Rptr(_, MutTy { ty: ref decl_ty, .. }) = input.kind;
                        then {
                            let value_type = if is_self_ty(decl_ty) {
                                "self".into()
                            } else {
                                snippet(cx, decl_ty.span, "_").into()
                            };
                            span_lint_and_sugg(
                                cx,
                                TRIVIALLY_COPY_PASS_BY_REF,
                                input.span,
                                &format!("this argument ({} byte) is passed by reference, but would be more efficient if passed by value (limit: {} byte)", size, self.ref_min_size),
                                "consider passing by value instead",
                                value_type,
                                Applicability::Unspecified,
                            );
                        }
                    }
                },

                ty::Adt(_, _) | ty::Array(_, _) | ty::Tuple(_) => {
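                    // Only aggregate types (ADTs, arrays and tuples) are considered
                    // for the large-by-value check.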
                    // If the function has a body and the parameter is declared `mut`, skip it
                    if let Some(param) = fn_body.and_then(|body| body.params.get(index)) {
                        match param.pat.kind {
                            PatKind::Binding(BindingAnnotation::Unannotated, _, _, _) => {},
                            _ => continue,
                        }
                    }

                    if_chain! {
                        if !cx.access_levels.is_exported(hir_id);
                        if is_copy(cx, ty);
                        if !is_self_ty(input);
                        if let Some(size) = cx.layout_of(ty).ok().map(|l| l.size.bytes());
                        if size > self.value_max_size;
                        then {
                            span_lint_and_sugg(
                                cx,
                                LARGE_TYPES_PASSED_BY_VALUE,
                                input.span,
                                &format!("this argument ({} byte) is passed by value, but might be more efficient if passed by reference (limit: {} byte)", size, self.value_max_size),
                                "consider passing by reference instead",
                                format!("&{}", snippet(cx, input.span, "_")),
                                Applicability::MaybeIncorrect,
                            );
                        }
                    }
                },

                _ => {},
            }
        }
    }
}

impl_lint_pass!(PassByRefOrValue => [TRIVIALLY_COPY_PASS_BY_REF, LARGE_TYPES_PASSED_BY_VALUE]);

impl<'tcx> LateLintPass<'tcx> for PassByRefOrValue {
    fn check_trait_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::TraitItem<'_>) {
        if item.span.from_expansion() {
            return;
        }

        if let hir::TraitItemKind::Fn(method_sig, _) = &item.kind {
            self.check_poly_fn(cx, item.hir_id, &*method_sig.decl, None);
        }
    }

    fn check_fn(
        &mut self,
        cx: &LateContext<'tcx>,
        kind: FnKind<'tcx>,
        decl: &'tcx FnDecl<'_>,
        _body: &'tcx Body<'_>,
        span: Span,
        hir_id: HirId,
    ) {
        if span.from_expansion() {
            return;
        }

        match kind {
            FnKind::ItemFn(.., header, _, attrs) => {
                if header.abi != Abi::Rust {
                    return;
                }
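                // Skip `#[proc_macro_derive]` entry points and functions marked
                // `#[inline(always)]`.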
                for a in attrs {
                    if let Some(meta_items) = a.meta_item_list() {
                        if a.has_name(sym!(proc_macro_derive))
                            || (a.has_name(sym!(inline)) && attr::list_contains_name(&meta_items, sym!(always)))
                        {
                            return;
                        }
                    }
                }
            },
            FnKind::Method(..) => (),
            FnKind::Closure(..) => return,
        }

        // Exclude non-inherent impls
        if let Some(Node::Item(item)) = cx.tcx.hir().find(cx.tcx.hir().get_parent_node(hir_id)) {
            if matches!(
                item.kind,
                ItemKind::Impl { of_trait: Some(_), .. } | ItemKind::Trait(..)
            ) {
                return;
            }
        }

        self.check_poly_fn(cx, hir_id, decl, Some(span));
    }
}