// Copyright 2023 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef PARTITION_ALLOC_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_
#define PARTITION_ALLOC_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_

#include <cstddef>
#include <type_traits>

#include "build/build_config.h"
#include "partition_alloc/partition_address_space.h"
#include "partition_alloc/partition_alloc_base/compiler_specific.h"
#include "partition_alloc/partition_alloc_base/component_export.h"
#include "partition_alloc/partition_alloc_base/cxx20_is_constant_evaluated.h"
#include "partition_alloc/partition_alloc_buildflags.h"
#include "partition_alloc/partition_alloc_config.h"
#include "partition_alloc/partition_alloc_constants.h"
#include "partition_alloc/partition_alloc_forward.h"
#include "partition_alloc/pointers/instance_tracer.h"
#include "partition_alloc/tagging.h"

#if !BUILDFLAG(HAS_64_BIT_POINTERS)
#include "partition_alloc/address_pool_manager_bitmap.h"
#endif

#if !BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
#error "Included under wrong build option"
#endif

namespace base::internal {

#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
PA_COMPONENT_EXPORT(RAW_PTR)
void CheckThatAddressIsntWithinFirstPartitionPage(uintptr_t address);
#endif

// Note that `RawPtrBackupRefImpl` itself is not thread-safe. If multiple
// threads modify the same raw_ptr object without synchronization, a data
// race will occur.
template <bool AllowDangling = false, bool DisableBRP = false>
struct RawPtrBackupRefImpl {
  // These are needed for correctness, or else we may end up manipulating the
  // ref-count where we shouldn't, thus compromising BRP's integrity. Unlike
  // the first two, kMustZeroOnDestruct wouldn't be needed if raw_ptr were
  // always used correctly, but we've already caught cases where a value is
  // written after destruction.
  static constexpr bool kMustZeroOnConstruct = true;
  static constexpr bool kMustZeroOnMove = true;
  static constexpr bool kMustZeroOnDestruct = true;

 private:
  PA_ALWAYS_INLINE static bool UseBrp(uintptr_t address) {
    // BRP is temporarily disabled for pointers annotated with DisableBRP.
    if constexpr (DisableBRP) {
      return false;
    }
    return partition_alloc::IsManagedByPartitionAllocBRPPool(address);
  }

  PA_ALWAYS_INLINE static bool IsSupportedAndNotNull(uintptr_t address) {
    // There are many situations where the compiler can prove that
    // `ReleaseWrappedPtr` is called on a value that is always nullptr, but
    // the way `IsManagedByPartitionAllocBRPPool` is written, the compiler
    // can't prove that nullptr is not managed by PartitionAlloc; and so it
    // has to emit a useless check and dead code. To avoid that without
    // making the runtime check slower, tell the compiler to skip
    // `IsManagedByPartitionAllocBRPPool` when it can statically determine
    // that `address` is nullptr.
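    // For example (a hypothetical caller, for illustration only):
    //
    //   raw_ptr<T> ptr;  // Wraps nullptr, so `address` folds to 0 at
    //   ptr = nullptr;   // compile time; the pool lookup below is elided
    //                    // and `ReleaseWrappedPtr` compiles down to nothing.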
#if PA_HAS_BUILTIN(__builtin_constant_p)
    if (__builtin_constant_p(address == 0) && (address == 0)) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
      PA_BASE_CHECK(
          !partition_alloc::IsManagedByPartitionAllocBRPPool(address));
#endif  // BUILDFLAG(PA_DCHECK_IS_ON) ||
        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
      return false;
    }
#endif  // PA_HAS_BUILTIN(__builtin_constant_p)

    // This covers the nullptr case, as address 0 is never in any
    // PartitionAlloc pool.
    bool use_brp = UseBrp(address);

    // There may be pointers immediately past the allocation, e.g.
    //   {
    //     // Assume this allocation happens outside of PartitionAlloc.
    //     raw_ptr<T> ptr = new T[20];
    //     for (size_t i = 0; i < 20; i++) { ptr++; }
    //   }
    //
    // Such pointers are *not* at risk of accidentally falling into the BRP
    // pool, because:
    // 1) On 64-bit systems, the BRP pool is preceded by a forbidden region.
    // 2) On 32-bit systems, the guard pages and metadata of super pages in
    //    the BRP pool aren't considered to be part of that pool.
    //
    // This allows us to make a stronger assertion: if
    // IsManagedByPartitionAllocBRPPool returns true for a valid pointer, it
    // must be at least a partition page away from the beginning of a super
    // page.
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    if (use_brp) {
      CheckThatAddressIsntWithinFirstPartitionPage(address);
    }
#endif

    return use_brp;
  }

#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
  // The Out-Of-Bounds (OOB) poison bit is set when the pointer has
  // overflowed by one byte.
#if defined(ARCH_CPU_X86_64)
  // Bit 63 is the only pointer bit that will work as the poison bit across
  // both LAM48 and LAM57. It also works when all unused linear address bits
  // are checked for canonicality.
  static constexpr uintptr_t OOB_POISON_BIT = static_cast<uintptr_t>(1) << 63;
#else
  // Avoid ARM's Top-Byte Ignore.
  static constexpr uintptr_t OOB_POISON_BIT = static_cast<uintptr_t>(1) << 55;
#endif

  template <typename T>
  PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) {
    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) &
                                ~OOB_POISON_BIT);
  }

  template <typename T>
  PA_ALWAYS_INLINE static bool IsPtrOOB(T* ptr) {
    return (reinterpret_cast<uintptr_t>(ptr) & OOB_POISON_BIT) ==
           OOB_POISON_BIT;
  }

  template <typename T>
  PA_ALWAYS_INLINE static T* PoisonOOBPtr(T* ptr) {
    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(ptr) |
                                OOB_POISON_BIT);
  }
#else   // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
  template <typename T>
  PA_ALWAYS_INLINE static T* UnpoisonPtr(T* ptr) {
    return ptr;
  }
#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)

 public:
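  // A minimal sketch of how a `raw_ptr` is expected to drive this impl
  // (hypothetical calling sequence, for illustration only):
  //
  //   T* wrapped = WrapRawPtr(raw);  // Ref-count++, iff `raw` is in the
  //                                  // BRP pool.
  //   *SafelyUnwrapPtrForDereference(wrapped);  // Checked dereference.
  //   ReleaseWrappedPtr(wrapped);    // Ref-count--, iff in the BRP pool.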
  // Wraps a pointer.
  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* WrapRawPtr(T* ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return ptr;
    }
    uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(ptr));
    if (IsSupportedAndNotNull(address)) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
      PA_BASE_CHECK(ptr != nullptr);
#endif
      AcquireInternal(address);
    } else {
#if !BUILDFLAG(HAS_64_BIT_POINTERS)
#if PA_HAS_BUILTIN(__builtin_constant_p)
      // Similarly to `IsSupportedAndNotNull` above, elide the
      // `BanSuperPageFromBRPPool` call if the compiler can prove that
      // `address` is zero, since PA won't be able to map anything at that
      // address anyway.
      bool known_constant_zero =
          __builtin_constant_p(address == 0) && (address == 0);
#else   // PA_HAS_BUILTIN(__builtin_constant_p)
      bool known_constant_zero = false;
#endif  // PA_HAS_BUILTIN(__builtin_constant_p)

      if (!known_constant_zero) {
        partition_alloc::internal::AddressPoolManagerBitmap::
            BanSuperPageFromBRPPool(address);
      }
#endif  // !BUILDFLAG(HAS_64_BIT_POINTERS)
    }

    return ptr;
  }

  // Notifies the allocator when a wrapped pointer is being removed or
  // replaced.
  template <typename T>
  PA_ALWAYS_INLINE static constexpr void ReleaseWrappedPtr(T* wrapped_ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return;
    }
    uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(wrapped_ptr));
    if (IsSupportedAndNotNull(address)) {
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
      PA_BASE_CHECK(wrapped_ptr != nullptr);
#endif
      ReleaseInternal(address);
    }

    // We are unable to counteract BanSuperPageFromBRPPool(), called from
    // WrapRawPtr(). We only use one bit per super-page, and thus can't tell
    // if there's more than one associated raw_ptr<T> at a given time. The
    // risk of exhausting the entire address space is minuscule, so we took
    // the perf gain of a single relaxed store (in the above-mentioned
    // function) over the two much more expensive CAS operations we'd need in
    // order to un-ban a super-page.
  }

  // Unwraps the pointer, while asserting that memory hasn't been freed. The
  // function is allowed to crash on nullptr.
  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForDereference(
      T* wrapped_ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr;
    }
#if BUILDFLAG(PA_DCHECK_IS_ON) || BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
    PA_BASE_CHECK(!IsPtrOOB(wrapped_ptr));
#endif
    uintptr_t address = partition_alloc::UntagPtr(wrapped_ptr);
    if (IsSupportedAndNotNull(address)) {
      PA_BASE_CHECK(wrapped_ptr != nullptr);
      PA_BASE_CHECK(IsPointeeAlive(address));  // Detects use-after-free.
    }
#endif  // BUILDFLAG(PA_DCHECK_IS_ON) ||
        // BUILDFLAG(ENABLE_BACKUP_REF_PTR_SLOW_CHECKS)
    return wrapped_ptr;
  }
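  // For example (a hypothetical sequence, for illustration only; the checks
  // above fire only in DCHECK/slow-check builds):
  //
  //   raw_ptr<T> ptr = new T;  // Allocated from a BRP-enabled partition.
  //   delete ptr.get();        // Slot is quarantined; ref-count is nonzero.
  //   *ptr;                    // `IsPointeeAlive()` fails, so we crash here
  //                            // instead of silently using freed memory.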
  // Unwraps the pointer, while asserting that memory hasn't been freed. The
  // function must handle nullptr gracefully.
  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* SafelyUnwrapPtrForExtraction(
      T* wrapped_ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr;
    }
    T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
    // Some code uses invalid pointer values as indicators, so those values
    // must be passed through unchanged during extraction. The following
    // check passes invalid values through if they do not fall within the BRP
    // pool after being unpoisoned.
    if (!IsSupportedAndNotNull(partition_alloc::UntagPtr(unpoisoned_ptr))) {
      return wrapped_ptr;
    }
    // Poison-based OOB checks do not extend to extracted pointers. The
    // alternative of retaining poison on extracted pointers could introduce
    // new OOB conditions, e.g., in code that extracts an end-of-allocation
    // pointer for use in a loop termination condition. The poison bit would
    // make that pointer appear to reference a very high address.
#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
    return unpoisoned_ptr;
  }

  // Unwraps the pointer, without asserting whether memory was freed or not.
  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForComparison(
      T* wrapped_ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr;
    }
    // This may be used for unwrapping an end-of-allocation pointer to be
    // used as an endpoint in an iterative algorithm, so this removes the OOB
    // poison bit.
    return UnpoisonPtr(wrapped_ptr);
  }

  // Upcasts the wrapped pointer.
  template <typename To, typename From>
  PA_ALWAYS_INLINE static constexpr To* Upcast(From* wrapped_ptr) {
    static_assert(std::is_convertible_v<From*, To*>,
                  "From must be convertible to To.");
    // Note, this cast may change the address if upcasting to a base that
    // lies in the middle of the derived object.
    return wrapped_ptr;
  }
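  // For example (illustrative types, not part of this file):
  //
  //   struct A { int a; };
  //   struct B { int b; };
  //   struct C : A, B {};  // The `B` subobject sits at a nonzero offset.
  //
  //   C c;
  //   B* b = Upcast<B>(&c);  // `b` != (void*)&c; the cast adjusted the
  //                          // address to point at the embedded `B`.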
  // Verify the pointer stayed in the same slot, and return the poisoned
  // version of `new_ptr` if OOB poisoning is enabled and `new_ptr` points
  // one past the end of the allocation.
  template <typename T>
  PA_ALWAYS_INLINE static T* VerifyAndPoisonPointerAfterAdvanceOrRetreat(
      T* unpoisoned_ptr,
      T* new_ptr) {
    // First check that the new address didn't migrate in/out of the BRP
    // pool, and that it lands within the same allocation. An
    // end-of-allocation address is ok, too, and may lead to the pointer
    // being poisoned if the relevant feature is enabled. These checks add a
    // non-trivial cost, but they're cheaper and more secure than the
    // previous implementation, which rewrapped the pointer (wrapped the new
    // pointer and unwrapped the old one).
    //
    // Note, the value of these checks goes beyond OOB protection. They're
    // important for the integrity of the BRP algorithm. Without them, an
    // attacker could make the pointer point to another allocation and cause
    // its ref-count to go to 0 upon this pointer's destruction, even though
    // there may be another pointer still pointing to it, thus making it lose
    // BRP protection prematurely.
    //
    // Note 2, if we ever need to restore the "before allocation" mode, we'd
    // run into a problem on 32-bit systems: the end-of-allocation address
    // could fall outside of PartitionAlloc's pools if this is the last slot
    // of the super page, thus pointing to the guard page. This means the
    // ref-count won't be decreased when the pointer is released (a leak).
    // This problem doesn't exist in the modes that put extras after the
    // allocation, because the end-of-allocation address belongs to the same
    // slot.
    const uintptr_t before_addr = partition_alloc::UntagPtr(unpoisoned_ptr);
    const uintptr_t after_addr = partition_alloc::UntagPtr(new_ptr);
    // TODO(bartekn): Consider adding support for non-BRP pools too (without
    // removing the cross-pool migration check).
    if (IsSupportedAndNotNull(before_addr)) {
      constexpr size_t size = sizeof(T);
      [[maybe_unused]] const bool is_end =
          CheckPointerWithinSameAlloc(before_addr, after_addr, size);
#if BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
      if (is_end) {
        new_ptr = PoisonOOBPtr(new_ptr);
      }
#endif  // BUILDFLAG(BACKUP_REF_PTR_POISON_OOB_PTR)
    } else {
      // Check that the new address didn't migrate into the BRP pool, as that
      // would result in more pointers pointing to an allocation than its
      // ref-count reflects.
      PA_BASE_CHECK(!IsSupportedAndNotNull(after_addr));
    }
    return new_ptr;
  }

  // Advance the wrapped pointer by `delta_elems`.
  // `is_in_pointer_modification` means that the result will be written back
  // into the pointer being advanced (as opposed to being used to create a
  // new pointer).
  template <
      typename T,
      typename Z,
      typename =
          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
  PA_ALWAYS_INLINE static constexpr T*
  Advance(T* wrapped_ptr, Z delta_elems, bool is_in_pointer_modification) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr + delta_elems;
    }
    T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
    // When modifying the pointer in place, we have to make sure it doesn't
    // migrate to a different slot, or else ref-count integrity is at risk.
    // This isn't needed if the result will be assigned to a new pointer, as
    // that pointer will do the ref-counting properly. Do it anyway if extra
    // OOB checks are enabled.
    if (BUILDFLAG(BACKUP_REF_PTR_EXTRA_OOB_CHECKS) ||
        is_in_pointer_modification) {
      return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
          unpoisoned_ptr, unpoisoned_ptr + delta_elems);
    }
    return unpoisoned_ptr + delta_elems;
  }
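  // For example (illustrative only):
  //
  //   raw_ptr<T> p = ...;
  //   p += 2;                // `is_in_pointer_modification` is true: `p`
  //                          // must stay within the same slot, so the
  //                          // verification above runs.
  //   raw_ptr<T> q = p + 2;  // false: `q`'s own wrapping ref-counts
  //                          // whatever slot the result lands in.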
  // Retreat the wrapped pointer by `delta_elems`.
  // `is_in_pointer_modification` means that the result will be written back
  // into the pointer being retreated (as opposed to being used to create a
  // new pointer).
  template <
      typename T,
      typename Z,
      typename =
          std::enable_if_t<partition_alloc::internal::is_offset_type<Z>, void>>
  PA_ALWAYS_INLINE static constexpr T*
  Retreat(T* wrapped_ptr, Z delta_elems, bool is_in_pointer_modification) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr - delta_elems;
    }
    T* unpoisoned_ptr = UnpoisonPtr(wrapped_ptr);
    // When modifying the pointer in place, we have to make sure it doesn't
    // migrate to a different slot, or else ref-count integrity is at risk.
    // This isn't needed if the result will be assigned to a new pointer, as
    // that pointer will do the ref-counting properly. Do it anyway if extra
    // OOB checks are enabled.
    if (BUILDFLAG(BACKUP_REF_PTR_EXTRA_OOB_CHECKS) ||
        is_in_pointer_modification) {
      return VerifyAndPoisonPointerAfterAdvanceOrRetreat(
          unpoisoned_ptr, unpoisoned_ptr - delta_elems);
    }
    return unpoisoned_ptr - delta_elems;
  }

  template <typename T>
  PA_ALWAYS_INLINE static constexpr ptrdiff_t GetDeltaElems(T* wrapped_ptr1,
                                                            T* wrapped_ptr2) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr1 - wrapped_ptr2;
    }

    T* unpoisoned_ptr1 = UnpoisonPtr(wrapped_ptr1);
    T* unpoisoned_ptr2 = UnpoisonPtr(wrapped_ptr2);
#if BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
    uintptr_t address1 = partition_alloc::UntagPtr(unpoisoned_ptr1);
    uintptr_t address2 = partition_alloc::UntagPtr(unpoisoned_ptr2);
    // Ensure that both pointers are within the same slot, and pool!
    // TODO(bartekn): Consider adding support for non-BRP pool too.
    if (IsSupportedAndNotNull(address1)) {
      PA_BASE_CHECK(IsSupportedAndNotNull(address2));
      PA_BASE_CHECK(partition_alloc::internal::IsPtrWithinSameAlloc(
                        address2, address1, sizeof(T)) !=
                    partition_alloc::internal::PtrPosWithinAlloc::kFarOOB);
    } else {
      PA_BASE_CHECK(!IsSupportedAndNotNull(address2));
    }
#endif  // BUILDFLAG(ENABLE_POINTER_SUBTRACTION_CHECK)
    return unpoisoned_ptr1 - unpoisoned_ptr2;
  }

  // Returns a copy of a wrapped pointer, without asserting whether memory
  // was freed or not. This method increments the reference count of the
  // allocation slot.
  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* Duplicate(T* wrapped_ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr;
    }
    return WrapRawPtr(wrapped_ptr);
  }

  // Reports the current wrapped pointer if the pointee isn't alive anymore.
  template <typename T>
  PA_ALWAYS_INLINE static void ReportIfDangling(T* wrapped_ptr) {
    ReportIfDanglingInternal(partition_alloc::UntagPtr(wrapped_ptr));
  }

  // `WrapRawPtrForDuplication` and `UnsafelyUnwrapPtrForDuplication` are
  // used to create a new raw_ptr<T> from another raw_ptr<T> of a different
  // flavor.
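  // For example (illustrative only; `SomeOtherTraits` is a hypothetical
  // flavor, not a real trait name):
  //
  //   raw_ptr<T> a = ...;
  //   raw_ptr<T, SomeOtherTraits> b = a;  // `a`'s impl unwraps via
  //   // UnsafelyUnwrapPtrForDuplication(), then `b`'s impl re-wraps via
  //   // WrapRawPtrForDuplication(), keeping the ref-count consistent.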
  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* WrapRawPtrForDuplication(T* ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return ptr;
    } else {
      return WrapRawPtr(ptr);
    }
  }

  template <typename T>
  PA_ALWAYS_INLINE static constexpr T* UnsafelyUnwrapPtrForDuplication(
      T* wrapped_ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return wrapped_ptr;
    } else {
      return UnpoisonPtr(wrapped_ptr);
    }
  }

#if BUILDFLAG(ENABLE_BACKUP_REF_PTR_INSTANCE_TRACER)
  template <typename T>
  static constexpr void Trace(uint64_t owner_id, T* wrapped_ptr) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return;
    }

    uintptr_t address = partition_alloc::UntagPtr(UnpoisonPtr(wrapped_ptr));

    if (!IsSupportedAndNotNull(address)) {
      return;
    }

    InstanceTracer::Trace(owner_id, AllowDangling, address);
  }

  static constexpr void Untrace(uint64_t owner_id) {
    if (partition_alloc::internal::base::is_constant_evaluated()) {
      return;
    }

    InstanceTracer::Untrace(owner_id);
  }
#else
  // In theory, these shouldn't be needed. In practice, the optimizer is
  // unable to tell that things like `IsSupportedAndNotNull()` are
  // side-effect free.
  template <typename T>
  static constexpr void Trace(uint64_t owner_id, T* wrapped_ptr) {}
  static constexpr void Untrace(uint64_t owner_id) {}
#endif

  // This is for accounting only, used by unit tests.
  PA_ALWAYS_INLINE static constexpr void IncrementSwapCountForTest() {}
  PA_ALWAYS_INLINE static constexpr void IncrementLessCountForTest() {}

 private:
  // We've evaluated several strategies (inline nothing, various parts, or
  // everything in |Wrap()| and |Release()|) using the Speedometer2 benchmark
  // to measure performance. The best results were obtained when only the
  // lightweight |IsManagedByPartitionAllocBRPPool()| check was inlined.
  // Therefore, we've extracted the rest into the functions below and marked
  // them as PA_NOINLINE to prevent unintended LTO effects.
  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void AcquireInternal(
      uintptr_t address);
  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReleaseInternal(
      uintptr_t address);
  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) bool IsPointeeAlive(
      uintptr_t address);
  PA_NOINLINE static PA_COMPONENT_EXPORT(RAW_PTR) void ReportIfDanglingInternal(
      uintptr_t address);

  // CHECKs that `before_addr` and `after_addr` are in the same allocation,
  // for a given `type_size`.
  // If BACKUP_REF_PTR_POISON_OOB_PTR is enabled, returns whether
  // `after_addr` points at the end of the allocation.
  // If BACKUP_REF_PTR_POISON_OOB_PTR is disabled, returns false.
  PA_NOINLINE static PA_COMPONENT_EXPORT(
      RAW_PTR) bool CheckPointerWithinSameAlloc(uintptr_t before_addr,
                                                uintptr_t after_addr,
                                                size_t type_size);
};

}  // namespace base::internal

#endif  // PARTITION_ALLOC_POINTERS_RAW_PTR_BACKUP_REF_IMPL_H_