diff --git a/Source/JavaScriptCore/heap/BlockDirectory.cpp b/Source/JavaScriptCore/heap/BlockDirectory.cpp index f9df6f0cbf3c9..d60ed0c161f19 100644 --- a/Source/JavaScriptCore/heap/BlockDirectory.cpp +++ b/Source/JavaScriptCore/heap/BlockDirectory.cpp @@ -306,6 +306,17 @@ void BlockDirectory::shrink() }); } +// FIXME: rdar://139998916 +MarkedBlock::Handle* BlockDirectory::findMarkedBlockHandleDebug(MarkedBlock* block) +{ + for (size_t index = 0; index < m_blocks.size(); ++index) { + MarkedBlock::Handle* handle = m_blocks[index]; + if (&handle->block() == block) + return handle; + } + return nullptr; +} + void BlockDirectory::assertNoUnswept() { if (!ASSERT_ENABLED) diff --git a/Source/JavaScriptCore/heap/BlockDirectory.h b/Source/JavaScriptCore/heap/BlockDirectory.h index 88389ad13f156..452668ba2d330 100644 --- a/Source/JavaScriptCore/heap/BlockDirectory.h +++ b/Source/JavaScriptCore/heap/BlockDirectory.h @@ -130,8 +130,11 @@ class BlockDirectory { void setNextDirectoryInAlignedMemoryAllocator(BlockDirectory* directory) { m_nextDirectoryInAlignedMemoryAllocator = directory; } MarkedBlock::Handle* findEmptyBlockToSteal(); - + MarkedBlock::Handle* findBlockToSweep(); + + // FIXME: rdar://139998916 + MarkedBlock::Handle* findMarkedBlockHandleDebug(MarkedBlock*); Subspace* subspace() const { return m_subspace; } MarkedSpace& markedSpace() const; diff --git a/Source/JavaScriptCore/heap/CellContainerInlines.h b/Source/JavaScriptCore/heap/CellContainerInlines.h index ef48a6d2b216f..3cb498cddeb2d 100644 --- a/Source/JavaScriptCore/heap/CellContainerInlines.h +++ b/Source/JavaScriptCore/heap/CellContainerInlines.h @@ -90,7 +90,7 @@ inline WeakSet& CellContainer::weakSet() const inline void CellContainer::aboutToMark(HeapVersion markingVersion) { if (!isPreciseAllocation()) - markedBlock().aboutToMark(markingVersion); + markedBlock().aboutToMark(markingVersion, nullptr); } inline bool CellContainer::areMarksStale() const diff --git a/Source/JavaScriptCore/heap/HeapInlines.h 
b/Source/JavaScriptCore/heap/HeapInlines.h index 4d767a564d5fd..97bf38d6082fd 100644 --- a/Source/JavaScriptCore/heap/HeapInlines.h +++ b/Source/JavaScriptCore/heap/HeapInlines.h @@ -83,7 +83,7 @@ ALWAYS_INLINE bool Heap::testAndSetMarked(HeapVersion markingVersion, const void if (cell->isPreciseAllocation()) return cell->preciseAllocation().testAndSetMarked(); MarkedBlock& block = cell->markedBlock(); - Dependency dependency = block.aboutToMark(markingVersion); + Dependency dependency = block.aboutToMark(markingVersion, cell); return block.testAndSetMarked(cell, dependency); } diff --git a/Source/JavaScriptCore/heap/MarkedBlock.cpp b/Source/JavaScriptCore/heap/MarkedBlock.cpp index ee68dc760cdf5..bd027fce1db9c 100644 --- a/Source/JavaScriptCore/heap/MarkedBlock.cpp +++ b/Source/JavaScriptCore/heap/MarkedBlock.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2022 Apple Inc. All rights reserved. + * Copyright (C) 2011-2024 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,8 +31,13 @@ #include "JSCJSValueInlines.h" #include "MarkedBlockInlines.h" #include "SweepingScope.h" +#include "VMInspector.h" #include +#if PLATFORM(COCOA) +#include +#endif + namespace JSC { namespace MarkedBlockInternal { static constexpr bool verbose = false; @@ -83,17 +88,17 @@ MarkedBlock::Handle::~Handle() MarkedBlock::MarkedBlock(VM& vm, Handle& handle) { - new (&footer()) Footer(vm, handle); + new (&header()) Header(vm, handle); if (MarkedBlockInternal::verbose) dataLog(RawPointer(this), ": Allocated.\n"); } MarkedBlock::~MarkedBlock() { - footer().~Footer(); + header().~Header(); } -MarkedBlock::Footer::Footer(VM& vm, Handle& handle) +MarkedBlock::Header::Header(VM& vm, Handle& handle) : m_handle(handle) , m_vm(&vm) , m_markingVersion(MarkedSpace::nullVersion) @@ -101,7 +106,7 @@ MarkedBlock::Footer::Footer(VM& vm, Handle& handle) { } 
-MarkedBlock::Footer::~Footer() +MarkedBlock::Header::~Header() { } @@ -113,7 +118,7 @@ void MarkedBlock::Handle::unsweepWithNoNewlyAllocated() void MarkedBlock::Handle::stopAllocating(const FreeList& freeList) { - Locker locker { blockFooter().m_lock }; + Locker locker { blockHeader().m_lock }; if (MarkedBlockInternal::verbose) dataLog(RawPointer(this), ": MarkedBlock::Handle::stopAllocating!\n"); @@ -135,8 +140,8 @@ void MarkedBlock::Handle::stopAllocating(const FreeList& freeList) // allocated from our free list are not currently marked, so we need another // way to tell what's live vs dead. - blockFooter().m_newlyAllocated.clearAll(); - blockFooter().m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion(); + blockHeader().m_newlyAllocated.clearAll(); + blockHeader().m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion(); forEachCell( [&] (size_t, HeapCell* cell, HeapCell::Kind) -> IterationStatus { @@ -160,19 +165,19 @@ void MarkedBlock::Handle::lastChanceToFinalize() { directory()->setIsAllocated(NoLockingNecessary, this, false); directory()->setIsDestructible(NoLockingNecessary, this, true); - blockFooter().m_marks.clearAll(); + blockHeader().m_marks.clearAll(); block().clearHasAnyMarked(); - blockFooter().m_markingVersion = heap()->objectSpace().markingVersion(); + blockHeader().m_markingVersion = heap()->objectSpace().markingVersion(); m_weakSet.lastChanceToFinalize(); - blockFooter().m_newlyAllocated.clearAll(); - blockFooter().m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion(); + blockHeader().m_newlyAllocated.clearAll(); + blockHeader().m_newlyAllocatedVersion = heap()->objectSpace().newlyAllocatedVersion(); sweep(nullptr); } void MarkedBlock::Handle::resumeAllocating(FreeList& freeList) { { - Locker locker { blockFooter().m_lock }; + Locker locker { blockHeader().m_lock }; if (MarkedBlockInternal::verbose) dataLog(RawPointer(this), ": MarkedBlock::Handle::resumeAllocating!\n"); @@ -193,17 +198,61 @@ 
void MarkedBlock::Handle::resumeAllocating(FreeList& freeList) sweep(&freeList); } -void MarkedBlock::aboutToMarkSlow(HeapVersion markingVersion) +#if ENABLE(MARKEDBLOCK_TEST_DUMP_INFO) + +inline void MarkedBlock::setupTestForDumpInfoAndCrash() +{ + static std::atomic count = 0; + // Option set to 0 disables testing. + if (++count == Options::markedBlockDumpInfoCount()) { + memset(&header(), 0, sizeof(uintptr_t)); + switch (Options::markedBlockDumpInfoCount() & 0xf) { + case 1: // Test null VM pointer. + dataLogLn("Zeroing MarkedBlock::Header::m_vm"); + *const_cast(&header().m_vm) = nullptr; + break; + case 2: // Test non-null invalid VM pointer. + dataLogLn("Corrupting MarkedBlock::Header::m_vm"); + *const_cast(&header().m_vm) = bitwise_cast(0xdeadbeefdeadbeef); + break; + case 3: // Test contiguous and total zero byte counts: start and end zeroed. + dataLogLn("Zeroing start and end of MarkedBlock"); + char* blockMem = bitwise_cast(this); + memset(blockMem, 0, blockSize / 4); + memset(blockMem + 3 * blockSize / 4, 0, blockSize / 4); + break; + case 4: // Test contiguous and total zero byte counts: entire block zeroed. 
+ dataLogLn("Zeroing MarkedBlock"); + char* blockMem = bitwise_cast(this); + memset(blockMem, 0, blockSize); + break; + } + } +} + +#else + +inline void MarkedBlock::setupTestForDumpInfoAndCrash() { } + +#endif // ENABLE(MARKEDBLOCK_TEST_DUMP_INFO) + +void MarkedBlock::aboutToMarkSlow(HeapVersion markingVersion, HeapCell* cell) { ASSERT(vm().heap.objectSpace().isMarking()); - Locker locker { footer().m_lock }; + setupTestForDumpInfoAndCrash(); + + Locker locker { header().m_lock }; if (!areMarksStale(markingVersion)) return; - - BlockDirectory* directory = handle().directory(); - if (handle().directory()->isAllocated(Locker { directory->bitvectorLock() }, &handle()) + MarkedBlock::Handle* handle = header().handlePointerForNullCheck(); + if (UNLIKELY(!handle)) + dumpInfoAndCrashForInvalidHandleV2(locker, cell); + + BlockDirectory* directory = handle->directory(); + + if (handle->directory()->isAllocated(Locker { directory->bitvectorLock() }, handle) || !marksConveyLivenessDuringMarking(markingVersion)) { if (MarkedBlockInternal::verbose) dataLog(RawPointer(this), ": Clearing marks without doing anything else.\n"); @@ -213,12 +262,12 @@ void MarkedBlock::aboutToMarkSlow(HeapVersion markingVersion) // date version! If it does, then we want to leave the newlyAllocated alone, since that // means that we had allocated in this previously empty block but did not fill it up, so // we created a newlyAllocated. - footer().m_marks.clearAll(); + header().m_marks.clearAll(); } else { if (MarkedBlockInternal::verbose) dataLog(RawPointer(this), ": Doing things.\n"); HeapVersion newlyAllocatedVersion = space()->newlyAllocatedVersion(); - if (footer().m_newlyAllocatedVersion == newlyAllocatedVersion) { + if (header().m_newlyAllocatedVersion == newlyAllocatedVersion) { // When do we get here? The block could not have been filled up. The newlyAllocated bits would // have had to be created since the end of the last collection. 
The only things that create // them are aboutToMarkSlow, lastChanceToFinalize, and stopAllocating. If it had been @@ -226,25 +275,25 @@ void MarkedBlock::aboutToMarkSlow(HeapVersion markingVersion) // cannot be lastChanceToFinalize. So it must be stopAllocating. That means that we just // computed the newlyAllocated bits just before the start of an increment. When we are in that // mode, it seems as if newlyAllocated should subsume marks. - ASSERT(footer().m_newlyAllocated.subsumes(footer().m_marks)); - footer().m_marks.clearAll(); + ASSERT(header().m_newlyAllocated.subsumes(header().m_marks)); + header().m_marks.clearAll(); } else { - footer().m_newlyAllocated.setAndClear(footer().m_marks); - footer().m_newlyAllocatedVersion = newlyAllocatedVersion; + header().m_newlyAllocated.setAndClear(header().m_marks); + header().m_newlyAllocatedVersion = newlyAllocatedVersion; } } clearHasAnyMarked(); WTF::storeStoreFence(); - footer().m_markingVersion = markingVersion; + header().m_markingVersion = markingVersion; // This means we're the first ones to mark any object in this block. - directory->setIsMarkingNotEmpty(Locker { directory->bitvectorLock() }, &handle(), true); + directory->setIsMarkingNotEmpty(Locker { directory->bitvectorLock() }, handle, true); } void MarkedBlock::resetAllocated() { - footer().m_newlyAllocated.clearAll(); - footer().m_newlyAllocatedVersion = MarkedSpace::nullVersion; + header().m_newlyAllocated.clearAll(); + header().m_newlyAllocatedVersion = MarkedSpace::nullVersion; } void MarkedBlock::resetMarks() @@ -256,14 +305,14 @@ void MarkedBlock::resetMarks() // version is null, aboutToMarkSlow() will assume that the marks were not stale as of before // beginMarking(). Hence the need to whip the marks into shape. 
if (areMarksStale()) - footer().m_marks.clearAll(); - footer().m_markingVersion = MarkedSpace::nullVersion; + header().m_marks.clearAll(); + header().m_markingVersion = MarkedSpace::nullVersion; } #if ASSERT_ENABLED void MarkedBlock::assertMarksNotStale() { - ASSERT(footer().m_markingVersion == vm().heap.objectSpace().markingVersion()); + ASSERT(header().m_markingVersion == vm().heap.objectSpace().markingVersion()); } #endif // ASSERT_ENABLED @@ -284,7 +333,7 @@ bool MarkedBlock::isMarked(const void* p) void MarkedBlock::Handle::didConsumeFreeList() { - Locker locker { blockFooter().m_lock }; + Locker locker { blockHeader().m_lock }; if (MarkedBlockInternal::verbose) dataLog(RawPointer(this), ": MarkedBlock::Handle::didConsumeFreeList!\n"); ASSERT(isFreeListed()); @@ -294,12 +343,12 @@ void MarkedBlock::Handle::didConsumeFreeList() size_t MarkedBlock::markCount() { - return areMarksStale() ? 0 : footer().m_marks.count(); + return areMarksStale() ? 0 : header().m_marks.count(); } void MarkedBlock::clearHasAnyMarked() { - footer().m_biasedMarkCount = footer().m_markCountBias; + header().m_biasedMarkCount = header().m_markCountBias; } void MarkedBlock::noteMarkedSlow() @@ -325,12 +374,19 @@ void MarkedBlock::Handle::didAddToDirectory(BlockDirectory* directory, unsigned m_index = index; m_directory = directory; - blockFooter().m_subspace = directory->subspace(); + blockHeader().m_subspace = directory->subspace(); size_t cellSize = directory->cellSize(); m_atomsPerCell = (cellSize + atomSize - 1) / atomSize; - m_endAtom = endAtom - m_atomsPerCell + 1; - + + // Discount the payload atoms at the front so that m_startAtom can start on an atom such that + // m_atomsPerCell increments from m_startAtom will get us exactly to endAtom when we have filled + // up the payload region using bump allocation. This simplifies the computation of the + // termination condition for iteration later. 
+ size_t numberOfUnallocatableAtoms = numberOfPayloadAtoms % m_atomsPerCell; + m_startAtom = firstPayloadRegionAtom + numberOfUnallocatableAtoms; + ASSERT(m_startAtom < firstPayloadRegionAtom + m_atomsPerCell); + m_attributes = directory->attributes(); if (!isJSCellKind(m_attributes.cellKind)) @@ -343,7 +399,7 @@ void MarkedBlock::Handle::didAddToDirectory(BlockDirectory* directory, unsigned RELEASE_ASSERT(markCountBias < 0); // This means we haven't marked anything yet. - blockFooter().m_biasedMarkCount = blockFooter().m_markCountBias = static_cast(markCountBias); + blockHeader().m_biasedMarkCount = blockHeader().m_markCountBias = static_cast(markCountBias); } void MarkedBlock::Handle::didRemoveFromDirectory() @@ -353,7 +409,7 @@ void MarkedBlock::Handle::didRemoveFromDirectory() m_index = std::numeric_limits::max(); m_directory = nullptr; - blockFooter().m_subspace = nullptr; + blockHeader().m_subspace = nullptr; } #if ASSERT_ENABLED @@ -406,7 +462,7 @@ void MarkedBlock::Handle::sweep(FreeList* freeList) } if (space()->isMarking()) - blockFooter().m_lock.lock(); + blockHeader().m_lock.lock(); subspace()->didBeginSweepingToFreeList(this); @@ -470,6 +526,105 @@ bool MarkedBlock::Handle::isFreeListedCell(const void* target) const return m_directory->isFreeListedCell(target); } +NO_RETURN_DUE_TO_CRASH NEVER_INLINE void MarkedBlock::dumpInfoAndCrashForInvalidHandleV2(AbstractLocker&, HeapCell* heapCell) +{ + VM* blockVM = header().m_vm; + VM* actualVM = nullptr; + bool isBlockVMValid = false; + bool isBlockInSet = false; + bool isBlockInDirectory = false; + bool foundInBlockVM = false; + size_t contiguousZeroBytesHeadOfBlock = 0; + size_t totalZeroBytesInBlock = 0; + uint64_t cellFirst8Bytes = 0; + + if (heapCell) { + uint64_t* p = bitwise_cast(heapCell); + cellFirst8Bytes = *p; + } + + auto updateCrashLogMsg = [&](int line) { + StringPrintStream out; + out.printf("INVALID HANDLE [%d]: markedBlock=%p; heapCell=%p; cellFirst8Bytes=%#llx; contiguousZeros=%lu; 
totalZeros=%lu; blockVM=%p; actualVM=%p; isBlockVMValid=%d; isBlockInSet=%d; isBlockInDir=%d; foundInBlockVM=%d;", + line, this, heapCell, static_cast(cellFirst8Bytes), contiguousZeroBytesHeadOfBlock, totalZeroBytesInBlock, blockVM, actualVM, isBlockVMValid, isBlockInSet, isBlockInDirectory, foundInBlockVM); + const char* msg = out.toCString().data(); +#if PLATFORM(COCOA) + WTF::setCrashLogMessage(msg); +#endif + dataLogLn(msg); + }; + updateCrashLogMsg(__LINE__); + + char* blockStart = bitwise_cast(this); + bool sawNonZero = false; + for (auto mem = blockStart; mem < blockStart + MarkedBlock::blockSize; mem++) { + // Exclude the MarkedBlock::Header::m_lock from the zero scan since taking the lock writes a non-zero value. + auto isMLockBytes = [blockStart](char* p) { + constexpr size_t lockOffset = offsetOfHeader + OBJECT_OFFSETOF(MarkedBlock::Header, m_lock); + size_t offset = p - blockStart; + return lockOffset <= offset && offset < lockOffset + sizeof(MarkedBlock::Header::m_lock); + }; + bool byteIsZero = !*mem; + if (byteIsZero || isMLockBytes(mem)) { + totalZeroBytesInBlock++; + if (!sawNonZero) + contiguousZeroBytesHeadOfBlock++; + } else + sawNonZero = true; + } + updateCrashLogMsg(__LINE__); + + VMInspector::forEachVM([&](VM& vm) { + if (blockVM == &vm) { + isBlockVMValid = true; + return IterationStatus::Done; + } + return IterationStatus::Continue; + }); + updateCrashLogMsg(__LINE__); + + if (isBlockVMValid) { + MarkedSpace& objectSpace = blockVM->heap.objectSpace(); + isBlockInSet = objectSpace.blocks().set().contains(this); + isBlockInDirectory = !!objectSpace.findMarkedBlockHandleDebug(this); + foundInBlockVM = isBlockInSet || isBlockInDirectory; + updateCrashLogMsg(__LINE__); + } + + if (!foundInBlockVM) { + // Search all VMs to see if this block belongs to any VM. 
+ VMInspector::forEachVM([&](VM& vm) { + MarkedSpace& objectSpace = vm.heap.objectSpace(); + isBlockInSet = objectSpace.blocks().set().contains(this); + isBlockInDirectory = !!objectSpace.findMarkedBlockHandleDebug(this); + // Either of them is true indicates that the block belongs to the VM. + if (isBlockInSet || isBlockInDirectory) { + actualVM = &vm; + updateCrashLogMsg(__LINE__); + return IterationStatus::Done; + } + return IterationStatus::Continue; + }); + } + updateCrashLogMsg(__LINE__); + + uint64_t bitfield = 0xab00ab01ab020000; + if (!isBlockVMValid) + bitfield |= 1 << 7; + if (!isBlockInSet) + bitfield |= 1 << 6; + if (!isBlockInDirectory) + bitfield |= 1 << 5; + if (!foundInBlockVM) + bitfield |= 1 << 4; + + static_assert(MarkedBlock::blockSize < (1ull << 32)); + uint64_t zeroCounts = contiguousZeroBytesHeadOfBlock | (static_cast(totalZeroBytesInBlock) << 32); + + // NB: could save something other than 'this' since it can be derived from heapCell. + CRASH_WITH_INFO(heapCell, cellFirst8Bytes, zeroCounts, bitfield, this, blockVM, actualVM); +} + } // namespace JSC namespace WTF { diff --git a/Source/JavaScriptCore/heap/MarkedBlock.h b/Source/JavaScriptCore/heap/MarkedBlock.h index 8354bb1001914..ff2485dd42701 100644 --- a/Source/JavaScriptCore/heap/MarkedBlock.h +++ b/Source/JavaScriptCore/heap/MarkedBlock.h @@ -63,10 +63,10 @@ class MarkedBlock { friend struct VerifyMarked; public: - class Footer; + class Header; class Handle; private: - friend class Footer; + friend class Header; friend class Handle; public: static constexpr size_t atomSize = 16; // bytes @@ -81,9 +81,9 @@ class MarkedBlock { static constexpr size_t maxNumberOfLowerTierCells = 8; static_assert(maxNumberOfLowerTierCells <= 256); - static_assert(!(MarkedBlock::atomSize & (MarkedBlock::atomSize - 1)), "MarkedBlock::atomSize must be a power of two."); - static_assert(!(MarkedBlock::blockSize & (MarkedBlock::blockSize - 1)), "MarkedBlock::blockSize must be a power of two."); - + 
static_assert(!(atomSize & (atomSize - 1)), "MarkedBlock::atomSize must be a power of two."); + static_assert(!(blockSize & (blockSize - 1)), "MarkedBlock::blockSize must be a power of two."); + struct VoidFunctor { typedef void ReturnType; void returnValue() { } @@ -102,7 +102,7 @@ class MarkedBlock { // https://bugs.webkit.org/show_bug.cgi?id=159644 mutable ReturnType m_count; }; - + class Handle { WTF_MAKE_NONCOPYABLE(Handle); WTF_MAKE_STRUCT_FAST_ALLOCATED_WITH_HEAP_IDENTIFIER(MarkedBlockHandle); @@ -114,7 +114,7 @@ class MarkedBlock { ~Handle(); MarkedBlock& block(); - MarkedBlock::Footer& blockFooter(); + MarkedBlock::Header& blockHeader(); void* cellAlign(void*); @@ -200,8 +200,8 @@ class MarkedBlock { void didAddToDirectory(BlockDirectory*, unsigned index); void didRemoveFromDirectory(); - void* start() const { return &m_block->atoms()[0]; } - void* end() const { return &m_block->atoms()[m_endAtom]; } + void* start() const { return &m_block->atoms()[m_startAtom]; } + void* end() const { return &m_block->atoms()[endAtom]; } void* atomAt(size_t i) const { return &m_block->atoms()[i]; } bool contains(void* p) const { return start() <= p && p < end(); } void* pageStart() const { return &m_block->atoms()[0]; } @@ -229,7 +229,7 @@ class MarkedBlock { void setIsFreeListed(); unsigned m_atomsPerCell { std::numeric_limits::max() }; - unsigned m_endAtom { std::numeric_limits::max() }; // This is a fuzzy end. Always test for < m_endAtom. + unsigned m_startAtom { std::numeric_limits::max() }; // Exact location of the first allocatable atom. 
CellAttributes m_attributes; bool m_isFreeListed { false }; @@ -248,13 +248,17 @@ class MarkedBlock { typedef char Atom[atomSize]; public: - class Footer { + class Header { public: - Footer(VM&, Handle&); - ~Footer(); + Header(VM&, Handle&); + ~Header(); + + static ptrdiff_t offsetOfVM() { return OBJECT_OFFSETOF(Header, m_vm); } + Handle* handlePointerForNullCheck() + { + return WTF::opaque(&m_handle); + } - static ptrdiff_t offsetOfVM() { return OBJECT_OFFSETOF(Footer, m_vm); } - private: friend class LLIntOffsetsExtractor; friend class MarkedBlock; @@ -303,16 +307,28 @@ class MarkedBlock { }; private: - Footer& footer(); - const Footer& footer() const; + Header& header(); + const Header& header() const; public: - static constexpr size_t endAtom = (blockSize - sizeof(Footer)) / atomSize; - static constexpr size_t payloadSize = endAtom * atomSize; - static constexpr size_t footerSize = blockSize - payloadSize; + static constexpr size_t numberOfAtoms = blockSize / atomSize; + static constexpr size_t numberOfPayloadAtoms = (blockSize - sizeof(Header)) / atomSize; + static constexpr size_t payloadSize = numberOfPayloadAtoms * atomSize; + static constexpr size_t headerSize = blockSize - payloadSize; + static_assert(payloadSize == roundUpToMultipleOf(payloadSize)); + static_assert(headerSize == roundUpToMultipleOf(headerSize)); + static_assert(sizeof(Header) <= headerSize); + + static constexpr size_t firstPayloadRegionAtom = headerSize / atomSize; + static constexpr size_t endAtom = blockSize / atomSize; + static constexpr size_t offsetOfHeader = 0; + static constexpr size_t headerAtom = offsetOfHeader / atomSize; + + static_assert(payloadSize == ((blockSize - sizeof(MarkedBlock::Header)) & ~(atomSize - 1)), "Payload size computed the alternate way should give the same result"); + static_assert(firstPayloadRegionAtom * atomSize == headerSize); + static_assert(endAtom * atomSize == blockSize); + static_assert(endAtom - firstPayloadRegionAtom == numberOfPayloadAtoms); - 
static_assert(payloadSize == ((blockSize - sizeof(MarkedBlock::Footer)) & ~(atomSize - 1)), "Payload size computed the alternate way should give the same result"); - static MarkedBlock::Handle* tryCreate(Heap&, AlignedMemoryAllocator*); Handle& handle(); @@ -342,7 +358,7 @@ class MarkedBlock { void clearNewlyAllocated(const void*); const Bitmap& newlyAllocated() const; - HeapVersion newlyAllocatedVersion() const { return footer().m_newlyAllocatedVersion; } + HeapVersion newlyAllocatedVersion() const { return header().m_newlyAllocatedVersion; } inline bool isNewlyAllocatedStale() const; @@ -365,7 +381,7 @@ class MarkedBlock { JS_EXPORT_PRIVATE bool areMarksStale(); bool areMarksStale(HeapVersion markingVersion); - Dependency aboutToMark(HeapVersion markingVersion); + Dependency aboutToMark(HeapVersion markingVersion, HeapCell*); #if ASSERT_ENABLED JS_EXPORT_PRIVATE void assertMarksNotStale(); @@ -376,52 +392,53 @@ class MarkedBlock { void resetMarks(); bool isMarkedRaw(const void* p); - HeapVersion markingVersion() const { return footer().m_markingVersion; } + HeapVersion markingVersion() const { return header().m_markingVersion; } const Bitmap& marks() const; - CountingLock& lock() { return footer().m_lock; } + CountingLock& lock() { return header().m_lock; } - Subspace* subspace() const { return footer().m_subspace; } + Subspace* subspace() const { return header().m_subspace; } void populatePage() const { - *bitwise_cast(&footer()); + *bitwise_cast(&header()); } void setVerifierMemo(void*); template T verifierMemo() const; - static constexpr size_t offsetOfFooter = endAtom * atomSize; - static_assert(offsetOfFooter + sizeof(Footer) <= blockSize); - private: MarkedBlock(VM&, Handle&); ~MarkedBlock(); Atom* atoms(); - JS_EXPORT_PRIVATE void aboutToMarkSlow(HeapVersion markingVersion); + JS_EXPORT_PRIVATE void aboutToMarkSlow(HeapVersion markingVersion, HeapCell*); void clearHasAnyMarked(); void noteMarkedSlow(); inline bool 
marksConveyLivenessDuringMarking(HeapVersion markingVersion); inline bool marksConveyLivenessDuringMarking(HeapVersion myMarkingVersion, HeapVersion markingVersion); + + // FIXME: rdar://139998916 + NO_RETURN_DUE_TO_CRASH NEVER_INLINE void dumpInfoAndCrashForInvalidHandleV2(AbstractLocker&, HeapCell*); + inline void setupTestForDumpInfoAndCrash(); }; -inline MarkedBlock::Footer& MarkedBlock::footer() +inline MarkedBlock::Header& MarkedBlock::header() { - return *bitwise_cast(atoms() + endAtom); + return *bitwise_cast(atoms() + headerAtom); } -inline const MarkedBlock::Footer& MarkedBlock::footer() const +inline const MarkedBlock::Header& MarkedBlock::header() const { - return const_cast(this)->footer(); + return const_cast(this)->header(); } inline MarkedBlock::Handle& MarkedBlock::handle() { - return footer().m_handle; + return header().m_handle; } inline const MarkedBlock::Handle& MarkedBlock::handle() const @@ -434,9 +451,9 @@ inline MarkedBlock& MarkedBlock::Handle::block() return *m_block; } -inline MarkedBlock::Footer& MarkedBlock::Handle::blockFooter() +inline MarkedBlock::Header& MarkedBlock::Handle::blockHeader() { - return block().footer(); + return block().header(); } inline MarkedBlock::Atom* MarkedBlock::atoms() @@ -451,7 +468,7 @@ inline bool MarkedBlock::isAtomAligned(const void* p) inline void* MarkedBlock::Handle::cellAlign(void* p) { - uintptr_t base = reinterpret_cast(block().atoms()); + uintptr_t base = reinterpret_cast(block().atoms() + m_startAtom); uintptr_t bits = reinterpret_cast(p); bits -= base; bits -= bits % cellSize(); @@ -486,7 +503,7 @@ inline VM& MarkedBlock::Handle::vm() const inline VM& MarkedBlock::vm() const { - return *footer().m_vm; + return *header().m_vm; } inline WeakSet& MarkedBlock::Handle::weakSet() @@ -559,21 +576,22 @@ inline size_t MarkedBlock::candidateAtomNumber(const void* p) inline unsigned MarkedBlock::atomNumber(const void* p) { size_t atomNumber = candidateAtomNumber(p); - ASSERT(atomNumber < 
handle().m_endAtom); + ASSERT(atomNumber >= handle().m_startAtom); + ASSERT(atomNumber < endAtom); return atomNumber; } inline bool MarkedBlock::areMarksStale(HeapVersion markingVersion) { - return markingVersion != footer().m_markingVersion; + return markingVersion != header().m_markingVersion; } -inline Dependency MarkedBlock::aboutToMark(HeapVersion markingVersion) +inline Dependency MarkedBlock::aboutToMark(HeapVersion markingVersion, HeapCell* cell) { HeapVersion version; - Dependency dependency = Dependency::loadAndFence(&footer().m_markingVersion, version); + Dependency dependency = Dependency::loadAndFence(&header().m_markingVersion, version); if (UNLIKELY(version != markingVersion)) - aboutToMarkSlow(markingVersion); + aboutToMarkSlow(markingVersion, cell); return dependency; } @@ -584,62 +602,65 @@ inline void MarkedBlock::Handle::assertMarksNotStale() inline bool MarkedBlock::isMarkedRaw(const void* p) { - return footer().m_marks.get(atomNumber(p)); + return header().m_marks.get(atomNumber(p)); } inline bool MarkedBlock::isMarked(HeapVersion markingVersion, const void* p) { HeapVersion version; - Dependency dependency = Dependency::loadAndFence(&footer().m_markingVersion, version); + Dependency dependency = Dependency::loadAndFence(&header().m_markingVersion, version); if (UNLIKELY(version != markingVersion)) return false; - return footer().m_marks.get(atomNumber(p), dependency); + return header().m_marks.get(atomNumber(p), dependency); } inline bool MarkedBlock::isMarked(const void* p, Dependency dependency) { assertMarksNotStale(); - return footer().m_marks.get(atomNumber(p), dependency); + return header().m_marks.get(atomNumber(p), dependency); } inline bool MarkedBlock::testAndSetMarked(const void* p, Dependency dependency) { assertMarksNotStale(); - return footer().m_marks.concurrentTestAndSet(atomNumber(p), dependency); + return header().m_marks.concurrentTestAndSet(atomNumber(p), dependency); } inline const Bitmap& MarkedBlock::marks() const { - 
return footer().m_marks; + return header().m_marks; } inline bool MarkedBlock::isNewlyAllocated(const void* p) { - return footer().m_newlyAllocated.get(atomNumber(p)); + return header().m_newlyAllocated.get(atomNumber(p)); } inline void MarkedBlock::setNewlyAllocated(const void* p) { - footer().m_newlyAllocated.set(atomNumber(p)); + header().m_newlyAllocated.set(atomNumber(p)); } inline void MarkedBlock::clearNewlyAllocated(const void* p) { - footer().m_newlyAllocated.clear(atomNumber(p)); + header().m_newlyAllocated.clear(atomNumber(p)); } inline const Bitmap& MarkedBlock::newlyAllocated() const { - return footer().m_newlyAllocated; + return header().m_newlyAllocated; } inline bool MarkedBlock::isAtom(const void* p) { ASSERT(MarkedBlock::isAtomAligned(p)); size_t atomNumber = candidateAtomNumber(p); - if (atomNumber % handle().m_atomsPerCell) // Filters pointers into cell middles. + + auto& handle = this->handle(); + size_t startAtom = handle.m_startAtom; + if (atomNumber < startAtom || atomNumber >= endAtom) return false; - if (atomNumber >= handle().m_endAtom) // Filters pointers into invalid cells out of the range. + if ((atomNumber - startAtom) % handle.m_atomsPerCell) // Filters pointers into cell middles. 
return false; return true; } @@ -648,7 +669,7 @@ template inline IterationStatus MarkedBlock::Handle::forEachCell(const Functor& functor) { HeapCell::Kind kind = m_attributes.cellKind; - for (size_t i = 0; i < m_endAtom; i += m_atomsPerCell) { + for (size_t i = m_startAtom; i < endAtom; i += m_atomsPerCell) { HeapCell* cell = reinterpret_cast_ptr(&m_block->atoms()[i]); if (functor(i, cell, kind) == IterationStatus::Done) return IterationStatus::Done; @@ -658,28 +679,28 @@ inline IterationStatus MarkedBlock::Handle::forEachCell(const Functor& functor) inline bool MarkedBlock::hasAnyMarked() const { - return footer().m_biasedMarkCount != footer().m_markCountBias; + return header().m_biasedMarkCount != header().m_markCountBias; } inline void MarkedBlock::noteMarked() { // This is racy by design. We don't want to pay the price of an atomic increment! - int16_t biasedMarkCount = footer().m_biasedMarkCount; + int16_t biasedMarkCount = header().m_biasedMarkCount; ++biasedMarkCount; - footer().m_biasedMarkCount = biasedMarkCount; + header().m_biasedMarkCount = biasedMarkCount; if (UNLIKELY(!biasedMarkCount)) noteMarkedSlow(); } inline void MarkedBlock::setVerifierMemo(void* p) { - footer().m_verifierMemo = p; + header().m_verifierMemo = p; } template T MarkedBlock::verifierMemo() const { - return bitwise_cast(footer().m_verifierMemo); + return bitwise_cast(header().m_verifierMemo); } } // namespace JSC diff --git a/Source/JavaScriptCore/heap/MarkedBlockInlines.h b/Source/JavaScriptCore/heap/MarkedBlockInlines.h index eb9fd72192e62..de86af2856c4d 100644 --- a/Source/JavaScriptCore/heap/MarkedBlockInlines.h +++ b/Source/JavaScriptCore/heap/MarkedBlockInlines.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2016-2021 Apple Inc. All rights reserved. + * Copyright (C) 2016-2022 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -42,7 +42,7 @@ inline unsigned MarkedBlock::Handle::cellsPerBlock() inline bool MarkedBlock::isNewlyAllocatedStale() const { - return footer().m_newlyAllocatedVersion != space()->newlyAllocatedVersion(); + return header().m_newlyAllocatedVersion != space()->newlyAllocatedVersion(); } inline bool MarkedBlock::hasAnyNewlyAllocated() @@ -67,7 +67,7 @@ inline MarkedSpace* MarkedBlock::Handle::space() const inline bool MarkedBlock::marksConveyLivenessDuringMarking(HeapVersion markingVersion) { - return marksConveyLivenessDuringMarking(footer().m_markingVersion, markingVersion); + return marksConveyLivenessDuringMarking(header().m_markingVersion, markingVersion); } inline bool MarkedBlock::marksConveyLivenessDuringMarking(HeapVersion myMarkingVersion, HeapVersion markingVersion) @@ -138,41 +138,41 @@ ALWAYS_INLINE bool MarkedBlock::Handle::isLive(HeapVersion markingVersion, HeapV // impact on perf - around 2% on splay if you get it wrong. 
MarkedBlock& block = this->block(); - MarkedBlock::Footer& footer = block.footer(); + MarkedBlock::Header& header = block.header(); - auto count = footer.m_lock.tryOptimisticFencelessRead(); + auto count = header.m_lock.tryOptimisticFencelessRead(); if (count.value) { Dependency fenceBefore = Dependency::fence(count.input); MarkedBlock& fencedBlock = *fenceBefore.consume(&block); - MarkedBlock::Footer& fencedFooter = fencedBlock.footer(); + MarkedBlock::Header& fencedHeader = fencedBlock.header(); MarkedBlock::Handle* fencedThis = fenceBefore.consume(this); ASSERT_UNUSED(fencedThis, !fencedThis->isFreeListed()); - HeapVersion myNewlyAllocatedVersion = fencedFooter.m_newlyAllocatedVersion; + HeapVersion myNewlyAllocatedVersion = fencedHeader.m_newlyAllocatedVersion; if (myNewlyAllocatedVersion == newlyAllocatedVersion) { bool result = fencedBlock.isNewlyAllocated(cell); - if (footer.m_lock.fencelessValidate(count.value, Dependency::fence(result))) + if (header.m_lock.fencelessValidate(count.value, Dependency::fence(result))) return result; } else { - HeapVersion myMarkingVersion = fencedFooter.m_markingVersion; + HeapVersion myMarkingVersion = fencedHeader.m_markingVersion; if (myMarkingVersion != markingVersion && (!isMarking || !fencedBlock.marksConveyLivenessDuringMarking(myMarkingVersion, markingVersion))) { - if (footer.m_lock.fencelessValidate(count.value, Dependency::fence(myMarkingVersion))) + if (header.m_lock.fencelessValidate(count.value, Dependency::fence(myMarkingVersion))) return false; } else { - bool result = fencedFooter.m_marks.get(block.atomNumber(cell)); - if (footer.m_lock.fencelessValidate(count.value, Dependency::fence(result))) + bool result = fencedHeader.m_marks.get(block.atomNumber(cell)); + if (header.m_lock.fencelessValidate(count.value, Dependency::fence(result))) return result; } } } - Locker locker { footer.m_lock }; + Locker locker { header.m_lock }; ASSERT(!isFreeListed()); - HeapVersion myNewlyAllocatedVersion = 
footer.m_newlyAllocatedVersion; + HeapVersion myNewlyAllocatedVersion = header.m_newlyAllocatedVersion; if (myNewlyAllocatedVersion == newlyAllocatedVersion) return block.isNewlyAllocated(cell); @@ -183,7 +183,7 @@ ALWAYS_INLINE bool MarkedBlock::Handle::isLive(HeapVersion markingVersion, HeapV return false; } - return footer.m_marks.get(block.atomNumber(cell)); + return header.m_marks.get(block.atomNumber(cell)); } inline bool MarkedBlock::Handle::isLiveCell(HeapVersion markingVersion, HeapVersion newlyAllocatedVersion, bool isMarking, const void* p) @@ -246,7 +246,7 @@ void MarkedBlock::Handle::specializedSweep(FreeList* freeList, MarkedBlock::Hand SuperSamplerScope superSamplerScope(false); MarkedBlock& block = this->block(); - MarkedBlock::Footer& footer = block.footer(); + MarkedBlock::Header& header = block.header(); if (false) dataLog(RawPointer(this), "/", RawPointer(&block), ": MarkedBlock::Handle::specializedSweep!\n"); @@ -269,26 +269,25 @@ void MarkedBlock::Handle::specializedSweep(FreeList* freeList, MarkedBlock::Hand && newlyAllocatedMode == DoesNotHaveNewlyAllocated) { // This is an incredibly powerful assertion that checks the sanity of our block bits. 
- if (marksMode == MarksNotStale && !footer.m_marks.isEmpty()) { + if (marksMode == MarksNotStale && !header.m_marks.isEmpty()) { WTF::dataFile().atomically( [&] (PrintStream& out) { out.print("Block ", RawPointer(&block), ": marks not empty!\n"); - out.print("Block lock is held: ", footer.m_lock.isHeld(), "\n"); - out.print("Marking version of block: ", footer.m_markingVersion, "\n"); + out.print("Block lock is held: ", header.m_lock.isHeld(), "\n"); + out.print("Marking version of block: ", header.m_markingVersion, "\n"); out.print("Marking version of heap: ", space()->markingVersion(), "\n"); UNREACHABLE_FOR_PLATFORM(); }); } - - char* startOfLastCell = static_cast(cellAlign(block.atoms() + m_endAtom - 1)); - char* payloadEnd = startOfLastCell + cellSize; - char* payloadBegin = bitwise_cast(block.atoms()); - RELEASE_ASSERT(payloadEnd - MarkedBlock::blockSize <= bitwise_cast(&block), payloadBegin, payloadEnd, &block, cellSize, m_endAtom); + + char* payloadEnd = bitwise_cast(block.atoms() + numberOfAtoms); + char* payloadBegin = bitwise_cast(block.atoms() + m_startAtom); + RELEASE_ASSERT(static_cast(payloadEnd - payloadBegin) <= payloadSize, payloadBegin, payloadEnd, &block, cellSize, m_startAtom); if (sweepMode == SweepToFreeList) setIsFreeListed(); if (space()->isMarking()) - footer.m_lock.unlock(); + header.m_lock.unlock(); if (destructionMode != BlockHasNoDestructors) { for (char* cell = payloadBegin; cell < payloadEnd; cell += cellSize) destroy(cell); @@ -326,10 +325,10 @@ void MarkedBlock::Handle::specializedSweep(FreeList* freeList, MarkedBlock::Hand ++count; } }; - for (size_t i = 0; i < m_endAtom; i += m_atomsPerCell) { + for (size_t i = m_startAtom; i < endAtom; i += m_atomsPerCell) { if (emptyMode == NotEmpty - && ((marksMode == MarksNotStale && footer.m_marks.get(i)) - || (newlyAllocatedMode == HasNewlyAllocated && footer.m_newlyAllocated.get(i)))) { + && ((marksMode == MarksNotStale && header.m_marks.get(i)) + || (newlyAllocatedMode == 
HasNewlyAllocated && header.m_newlyAllocated.get(i)))) { isEmpty = false; continue; } @@ -343,10 +342,10 @@ void MarkedBlock::Handle::specializedSweep(FreeList* freeList, MarkedBlock::Hand // We only want to discard the newlyAllocated bits if we're creating a FreeList, // otherwise we would lose information on what's currently alive. if (sweepMode == SweepToFreeList && newlyAllocatedMode == HasNewlyAllocated) - footer.m_newlyAllocatedVersion = MarkedSpace::nullVersion; + header.m_newlyAllocatedVersion = MarkedSpace::nullVersion; if (space()->isMarking()) - footer.m_lock.unlock(); + header.m_lock.unlock(); if (destructionMode == BlockHasDestructorsAndCollectorIsRunning) { for (size_t i : deadCells) @@ -507,7 +506,7 @@ inline IterationStatus MarkedBlock::Handle::forEachLiveCell(const Functor& funct // https://bugs.webkit.org/show_bug.cgi?id=180315 HeapCell::Kind kind = m_attributes.cellKind; - for (size_t i = 0; i < m_endAtom; i += m_atomsPerCell) { + for (size_t i = m_startAtom; i < endAtom; i += m_atomsPerCell) { HeapCell* cell = reinterpret_cast_ptr(&m_block->atoms()[i]); if (!isLive(cell)) continue; @@ -522,7 +521,7 @@ template inline IterationStatus MarkedBlock::Handle::forEachDeadCell(const Functor& functor) { HeapCell::Kind kind = m_attributes.cellKind; - for (size_t i = 0; i < m_endAtom; i += m_atomsPerCell) { + for (size_t i = m_startAtom; i < endAtom; i += m_atomsPerCell) { HeapCell* cell = reinterpret_cast_ptr(&m_block->atoms()[i]); if (isLive(cell)) continue; @@ -542,8 +541,8 @@ inline IterationStatus MarkedBlock::Handle::forEachMarkedCell(const Functor& fun WTF::loadLoadFence(); if (areMarksStale) return IterationStatus::Continue; - for (size_t i = 0; i < m_endAtom; i += m_atomsPerCell) { - if (!block.footer().m_marks.get(i)) + for (size_t i = m_startAtom; i < endAtom; i += m_atomsPerCell) { + if (!block.header().m_marks.get(i)) continue; HeapCell* cell = reinterpret_cast_ptr(&m_block->atoms()[i]); diff --git a/Source/JavaScriptCore/heap/MarkedSpace.cpp 
b/Source/JavaScriptCore/heap/MarkedSpace.cpp index c823d9e94db6f..b3e5c1bc35e05 100644 --- a/Source/JavaScriptCore/heap/MarkedSpace.cpp +++ b/Source/JavaScriptCore/heap/MarkedSpace.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2003-2018 Apple Inc. All rights reserved. + * Copyright (C) 2003-2024 Apple Inc. All rights reserved. * Copyright (C) 2007 Eric Seidel * * This library is free software; you can redistribute it and/or @@ -41,7 +41,7 @@ static Vector sizeClasses() if (UNLIKELY(Options::dumpSizeClasses())) { dataLog("Block size: ", MarkedBlock::blockSize, "\n"); - dataLog("Footer size: ", sizeof(MarkedBlock::Footer), "\n"); + dataLog("Header size: ", sizeof(MarkedBlock::Header), "\n"); } auto add = [&] (size_t sizeClass) { @@ -374,6 +374,21 @@ bool MarkedSpace::isPagedOut() return pagedOutPagesStats.mean() > pagedOutPagesStats.count() * bailoutPercentage; } +// FIXME: rdar://139998916 +MarkedBlock::Handle* MarkedSpace::findMarkedBlockHandleDebug(MarkedBlock* block) +{ + MarkedBlock::Handle* result = nullptr; + forEachDirectory( + [&](BlockDirectory& directory) -> IterationStatus { + if (MarkedBlock::Handle* handle = directory.findMarkedBlockHandleDebug(block)) { + result = handle; + return IterationStatus::Done; + } + return IterationStatus::Continue; + }); + return result; +} + void MarkedSpace::freeBlock(MarkedBlock::Handle* block) { m_capacity -= MarkedBlock::blockSize; diff --git a/Source/JavaScriptCore/heap/MarkedSpace.h b/Source/JavaScriptCore/heap/MarkedSpace.h index 3d45e9e1e29d1..d8f68f1431146 100644 --- a/Source/JavaScriptCore/heap/MarkedSpace.h +++ b/Source/JavaScriptCore/heap/MarkedSpace.h @@ -1,7 +1,7 @@ /* * Copyright (C) 1999-2000 Harri Porten (porten@kde.org) * Copyright (C) 2001 Peter Kelly (pmk@post.com) - * Copyright (C) 2003-2021 Apple Inc. All rights reserved. + * Copyright (C) 2003-2024 Apple Inc. All rights reserved. 
* * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -137,6 +137,9 @@ class MarkedSpace { void didConsumeFreeList(MarkedBlock::Handle*); void didAllocateInBlock(MarkedBlock::Handle*); + // FIXME: rdar://139998916 + MarkedBlock::Handle* findMarkedBlockHandleDebug(MarkedBlock*); + void beginMarking(); void endMarking(); void snapshotUnswept(); diff --git a/Source/JavaScriptCore/heap/SlotVisitorInlines.h b/Source/JavaScriptCore/heap/SlotVisitorInlines.h index 54223a64796af..03416b0bca4d1 100644 --- a/Source/JavaScriptCore/heap/SlotVisitorInlines.h +++ b/Source/JavaScriptCore/heap/SlotVisitorInlines.h @@ -54,7 +54,7 @@ ALWAYS_INLINE void SlotVisitor::appendUnbarriered(JSCell* cell) } } else { MarkedBlock& block = cell->markedBlock(); - dependency = block.aboutToMark(m_markingVersion); + dependency = block.aboutToMark(m_markingVersion, cell); if (LIKELY(block.isMarked(cell, dependency))) { if (LIKELY(!m_heapAnalyzer)) return; @@ -90,7 +90,7 @@ ALWAYS_INLINE void SlotVisitor::appendHiddenUnbarriered(JSCell* cell) return; } else { MarkedBlock& block = cell->markedBlock(); - dependency = block.aboutToMark(m_markingVersion); + dependency = block.aboutToMark(m_markingVersion, cell); if (LIKELY(block.isMarked(cell, dependency))) return; } diff --git a/Source/JavaScriptCore/llint/LLIntThunks.cpp b/Source/JavaScriptCore/llint/LLIntThunks.cpp index eb1272340d370..fdf6ccc9517a0 100644 --- a/Source/JavaScriptCore/llint/LLIntThunks.cpp +++ b/Source/JavaScriptCore/llint/LLIntThunks.cpp @@ -225,7 +225,7 @@ MacroAssemblerCodeRef getHostCallReturnValueThunk() auto preciseAllocationCase = jit.branchTestPtr(CCallHelpers::NonZero, GPRInfo::regT0, CCallHelpers::TrustedImm32(PreciseAllocation::halfAlignment)); jit.andPtr(CCallHelpers::TrustedImmPtr(MarkedBlock::blockMask), GPRInfo::regT0); - jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, MarkedBlock::offsetOfFooter + MarkedBlock::Footer::offsetOfVM()), 
GPRInfo::regT0); + jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, MarkedBlock::offsetOfHeader + MarkedBlock::Header::offsetOfVM()), GPRInfo::regT0); auto loadedCase = jit.jump(); preciseAllocationCase.link(&jit); diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm index 52c5188adffe3..03d4cdaf4cfb1 100644 --- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm +++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm @@ -640,7 +640,7 @@ const NotInitialization = constexpr InitializationMode::NotInitialization const MarkedBlockSize = constexpr MarkedBlock::blockSize const MarkedBlockMask = ~(MarkedBlockSize - 1) -const MarkedBlockFooterOffset = constexpr MarkedBlock::offsetOfFooter +const MarkedBlockHeaderOffset = constexpr MarkedBlock::offsetOfHeader const PreciseAllocationHeaderSize = constexpr (PreciseAllocation::headerSize()) const PreciseAllocationVMOffset = (PreciseAllocation::m_weakSet + WeakSet::m_vm - PreciseAllocationHeaderSize) @@ -1507,7 +1507,7 @@ end macro convertCalleeToVM(callee) btpnz callee, (constexpr PreciseAllocation::halfAlignment), .preciseAllocation andp MarkedBlockMask, callee - loadp MarkedBlockFooterOffset + MarkedBlock::Footer::m_vm[callee], callee + loadp MarkedBlockHeaderOffset + MarkedBlock::Header::m_vm[callee], callee jmp .done .preciseAllocation: loadp PreciseAllocationVMOffset[callee], callee diff --git a/Source/JavaScriptCore/runtime/OptionsList.h b/Source/JavaScriptCore/runtime/OptionsList.h index 88f60336184b9..8ae540b0919cb 100644 --- a/Source/JavaScriptCore/runtime/OptionsList.h +++ b/Source/JavaScriptCore/runtime/OptionsList.h @@ -557,7 +557,7 @@ bool canUseWebAssemblyFastMemory(); v(Bool, useWebAssemblyExceptions, true, Normal, "Allow the new section and instructions from the wasm exception handling spec.") \ v(Bool, useWebAssemblyBranchHints, true, Normal, "Allow the new section from the wasm branch hinting spec.") \ v(Bool, disableConsoleLog, false, 
Normal, "Disable printing of JS console logs.") \ - + v(Unsigned, markedBlockDumpInfoCount, 0, Normal, nullptr) /* FIXME: rdar://139998916 */ \ enum OptionEquivalence { SameOption, diff --git a/Source/JavaScriptCore/tools/Integrity.h b/Source/JavaScriptCore/tools/Integrity.h index 86e59aee3b989..f998a62ce35dc 100644 --- a/Source/JavaScriptCore/tools/Integrity.h +++ b/Source/JavaScriptCore/tools/Integrity.h @@ -173,7 +173,7 @@ ALWAYS_INLINE void auditCell(VM&, JSValue); ALWAYS_INLINE void auditStructureID(StructureID); #if ENABLE(EXTRA_INTEGRITY_CHECKS) && USE(JSVALUE64) -template ALWAYS_INLINE T audit(T value) { return doAudit(value); } +template ALWAYS_INLINE T audit(T value) { return bitwise_cast(doAudit(value)); } #else template ALWAYS_INLINE T audit(T value) { return value; } #endif diff --git a/Source/JavaScriptCore/wasm/WasmIRGeneratorHelpers.h b/Source/JavaScriptCore/wasm/WasmIRGeneratorHelpers.h index 21d5eda6ed4e5..96895fe80684d 100644 --- a/Source/JavaScriptCore/wasm/WasmIRGeneratorHelpers.h +++ b/Source/JavaScriptCore/wasm/WasmIRGeneratorHelpers.h @@ -116,7 +116,7 @@ static inline void emitRethrowImpl(CCallHelpers& jit) { auto preciseAllocationCase = jit.branchTestPtr(CCallHelpers::NonZero, scratch, CCallHelpers::TrustedImm32(PreciseAllocation::halfAlignment)); jit.andPtr(CCallHelpers::TrustedImmPtr(MarkedBlock::blockMask), scratch); - jit.loadPtr(CCallHelpers::Address(scratch, MarkedBlock::offsetOfFooter + MarkedBlock::Footer::offsetOfVM()), scratch); + jit.loadPtr(CCallHelpers::Address(scratch, MarkedBlock::offsetOfHeader + MarkedBlock::Header::offsetOfVM()), scratch); auto loadedCase = jit.jump(); preciseAllocationCase.link(&jit); @@ -144,7 +144,7 @@ static inline void emitThrowImpl(CCallHelpers& jit, unsigned exceptionIndex) { auto preciseAllocationCase = jit.branchTestPtr(CCallHelpers::NonZero, scratch, CCallHelpers::TrustedImm32(PreciseAllocation::halfAlignment)); jit.andPtr(CCallHelpers::TrustedImmPtr(MarkedBlock::blockMask), scratch); - 
jit.loadPtr(CCallHelpers::Address(scratch, MarkedBlock::offsetOfFooter + MarkedBlock::Footer::offsetOfVM()), scratch); + jit.loadPtr(CCallHelpers::Address(scratch, MarkedBlock::offsetOfHeader + MarkedBlock::Header::offsetOfVM()), scratch); auto loadedCase = jit.jump(); preciseAllocationCase.link(&jit); @@ -184,7 +184,7 @@ static inline void emitCatchPrologueShared(B3::Air::Code& code, CCallHelpers& ji // https://bugs.webkit.org/show_bug.cgi?id=231213 auto preciseAllocationCase = jit.branchTestPtr(CCallHelpers::NonZero, GPRInfo::regT0, CCallHelpers::TrustedImm32(PreciseAllocation::halfAlignment)); jit.andPtr(CCallHelpers::TrustedImmPtr(MarkedBlock::blockMask), GPRInfo::regT0); - jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, MarkedBlock::offsetOfFooter + MarkedBlock::Footer::offsetOfVM()), GPRInfo::regT0); + jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, MarkedBlock::offsetOfHeader + MarkedBlock::Header::offsetOfVM()), GPRInfo::regT0); auto loadedCase = jit.jump(); preciseAllocationCase.link(&jit); diff --git a/Source/WTF/wtf/StdLibExtras.h b/Source/WTF/wtf/StdLibExtras.h index bb1f6c4243887..c4c11f842e0d2 100644 --- a/Source/WTF/wtf/StdLibExtras.h +++ b/Source/WTF/wtf/StdLibExtras.h @@ -57,11 +57,22 @@ #define DEFINE_DEBUG_ONLY_GLOBAL(type, name, arguments) #endif // NDEBUG -// OBJECT_OFFSETOF: Like the C++ offsetof macro, but you can use it with classes. +#if COMPILER(CLANG) +// We have to use __builtin_offsetof directly here instead of offsetof because otherwise Clang will drop +// our pragma and we'll still get the warning. 
+#define OBJECT_OFFSETOF(class, field) \ + _Pragma("clang diagnostic push") \ + _Pragma("clang diagnostic ignored \"-Winvalid-offsetof\"") \ + __builtin_offsetof(class, field) \ + _Pragma("clang diagnostic pop") +#elif COMPILER(GCC) +// It would be nice to silence this warning locally like we do on Clang but GCC complains about `error: ‘#pragma’ is not allowed here` +#pragma GCC diagnostic ignored "-Winvalid-offsetof" +#define OBJECT_OFFSETOF(class, field) offsetof(class, field) +#endif + // The magic number 0x4000 is insignificant. We use it to avoid using NULL, since // NULL can cause compiler problems, especially in cases of multiple inheritance. -#define OBJECT_OFFSETOF(class, field) (reinterpret_cast(&(reinterpret_cast(0x4000)->field)) - 0x4000) - #define CAST_OFFSET(from, to) (reinterpret_cast(static_cast((reinterpret_cast(0x4000)))) - 0x4000) // STRINGIZE: Can convert any value to quoted string, even expandable macros