/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jsgcinlines_h
#define jsgcinlines_h

#include "jsgc.h"

#include "gc/Zone.h"

namespace js {

class Shape;

/*
 * This auto class should be used around any code that might cause a mark bit
 * to be set on an object in a dead zone. See AutoMaybeTouchDeadZones for more
 * details.
 */
struct AutoMarkInDeadZone
{
    explicit AutoMarkInDeadZone(JS::Zone* zone)
      : zone(zone),
        scheduled(zone->scheduledForDestruction)
    {
        JSRuntime* rt = zone->runtimeFromMainThread();
        if (rt->gc.manipulatingDeadZones && zone->scheduledForDestruction) {
            rt->gc.objectsMarkedInDeadZones++;
            zone->scheduledForDestruction = false;
        }
    }

    ~AutoMarkInDeadZone() {
        zone->scheduledForDestruction = scheduled;
    }

  private:
    JS::Zone* zone;
    bool scheduled;
};

inline Allocator*
ThreadSafeContext::allocator() const
{
    JS_ASSERT_IF(isJSContext(), &asJSContext()->zone()->allocator == allocator_);
    return allocator_;
}

template <typename T>
inline bool
ThreadSafeContext::isThreadLocal(T thing) const
{
    if (!isForkJoinContext())
        return true;

    if (!IsInsideNursery(thing) &&
        allocator_->arenas.containsArena(runtime_, thing->arenaHeader()))
    {
        // GC should be suppressed in preparation for mutating thread local
        // objects, as we don't want to trip any barriers.
        JS_ASSERT(!thing->zoneFromAnyThread()->needsBarrier());
        JS_ASSERT(!thing->runtimeFromAnyThread()->needsBarrier());

        return true;
    }

    return false;
}

namespace gc {

static inline AllocKind
GetGCObjectKind(const Class* clasp)
{
    if (clasp == FunctionClassPtr)
        return JSFunction::FinalizeKind;
    uint32_t nslots = JSCLASS_RESERVED_SLOTS(clasp);
    if (clasp->flags & JSCLASS_HAS_PRIVATE)
        nslots++;
    return GetGCObjectKind(nslots);
}

#ifdef JSGC_GENERATIONAL
inline bool
ShouldNurseryAllocate(const Nursery& nursery, AllocKind kind, InitialHeap heap)
{
    return nursery.isEnabled() && IsNurseryAllocable(kind) && heap != TenuredHeap;
}
#endif

inline JSGCTraceKind
GetGCThingTraceKind(const void* thing)
{
    JS_ASSERT(thing);
    const Cell* cell = static_cast<const Cell*>(thing);
#ifdef JSGC_GENERATIONAL
    if (IsInsideNursery(cell))
        return JSTRACE_OBJECT;
#endif
    return MapAllocToTraceKind(cell->tenuredGetAllocKind());
}

inline void
GCRuntime::poke()
{
    poked = true;

#ifdef JS_GC_ZEAL
    /* Schedule a GC to happen "soon" after a GC poke. */
    if (zealMode == ZealPokeValue)
        nextScheduled = 1;
#endif
}

class ArenaIter
{
    ArenaHeader* aheader;
    ArenaHeader* remainingHeader;

  public:
    ArenaIter() {
        aheader = nullptr;
        remainingHeader = nullptr;
    }

    ArenaIter(JS::Zone* zone, AllocKind kind) {
        init(zone, kind);
    }

    void init(JS::Zone* zone, AllocKind kind) {
        aheader = zone->allocator.arenas.getFirstArena(kind);
        remainingHeader = zone->allocator.arenas.getFirstArenaToSweep(kind);
        if (!aheader) {
            aheader = remainingHeader;
            remainingHeader = nullptr;
        }
    }

    bool done() const {
        return !aheader;
    }

    ArenaHeader* get() const {
        return aheader;
    }

    void next() {
        JS_ASSERT(!done());
        aheader = aheader->next;
        if (!aheader) {
            aheader = remainingHeader;
            remainingHeader = nullptr;
        }
    }
};
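// Illustrative sketch, not part of the original header: ArenaIter walks the
// arenas holding a given AllocKind in a zone, and ArenaCellIterImpl (below)
// walks the allocated cells within a single arena. Code that needs every cell
// of a kind typically nests the two, which is what ZoneCellIterImpl does
// further down; |zone| and |kind| here are hypothetical.
//
//   for (ArenaIter aiter(zone, kind); !aiter.done(); aiter.next()) {
//       for (ArenaCellIterUnderGC citer(aiter.get()); !citer.done(); citer.next()) {
//           // ... use citer.getCell() ...
//       }
//   }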
class ArenaCellIterImpl
{
    // These three are set in initUnsynchronized().
    size_t firstThingOffset;
    size_t thingSize;
#ifdef DEBUG
    bool isInited;
#endif
    // These three are set in reset() (which is called by init()).
    FreeSpan span;
    uintptr_t thing;
    uintptr_t limit;

    // Upon entry, |thing| may point to any thing (free or used); this advances
    // it to the first used thing, which may be |thing| itself.
    void moveForwardIfFree() {
        JS_ASSERT(!done());
        JS_ASSERT(thing);
        // Note: if |span| is empty, this test will fail, which is what we want
        // -- |span| being empty means that we're past the end of the last free
        // thing, all the remaining things in the arena are used, and we'll
        // never need to move forward.
        if (thing == span.first) {
            thing = span.last + thingSize;
            span = *span.nextSpan();
        }
    }

  public:
    ArenaCellIterImpl() {}

    void initUnsynchronized(ArenaHeader* aheader) {
        AllocKind kind = aheader->getAllocKind();
#ifdef DEBUG
        isInited = true;
#endif
        firstThingOffset = Arena::firstThingOffset(kind);
        thingSize = Arena::thingSize(kind);
        reset(aheader);
    }

    void init(ArenaHeader* aheader) {
#ifdef DEBUG
        AllocKind kind = aheader->getAllocKind();
        JS_ASSERT(aheader->zone->allocator.arenas.isSynchronizedFreeList(kind));
#endif
        initUnsynchronized(aheader);
    }

    // Use this to move from an Arena of a particular kind to another Arena of
    // the same kind.
    void reset(ArenaHeader* aheader) {
        JS_ASSERT(isInited);
        span = aheader->getFirstFreeSpan();
        uintptr_t arenaAddr = aheader->arenaAddress();
        thing = arenaAddr + firstThingOffset;
        limit = arenaAddr + ArenaSize;
        moveForwardIfFree();
    }

    bool done() const {
        return thing == limit;
    }

    Cell* getCell() const {
        JS_ASSERT(!done());
        return reinterpret_cast<Cell*>(thing);
    }

    template <typename T> T* get() const {
        JS_ASSERT(!done());
        return static_cast<T*>(getCell());
    }

    void next() {
        MOZ_ASSERT(!done());
        thing += thingSize;
        if (thing < limit)
            moveForwardIfFree();
    }
};

class ArenaCellIterUnderGC : public ArenaCellIterImpl
{
  public:
    explicit ArenaCellIterUnderGC(ArenaHeader* aheader) {
        JS_ASSERT(aheader->zone->runtimeFromAnyThread()->isHeapBusy());
        init(aheader);
    }
};

class ArenaCellIterUnderFinalize : public ArenaCellIterImpl
{
  public:
    explicit ArenaCellIterUnderFinalize(ArenaHeader* aheader) {
        initUnsynchronized(aheader);
    }
};

class ZoneCellIterImpl
{
    ArenaIter arenaIter;
    ArenaCellIterImpl cellIter;

  protected:
    ZoneCellIterImpl() {}

    void init(JS::Zone* zone, AllocKind kind) {
        JS_ASSERT(zone->allocator.arenas.isSynchronizedFreeList(kind));
        arenaIter.init(zone, kind);
        if (!arenaIter.done())
            cellIter.init(arenaIter.get());
    }

  public:
    bool done() const {
        return arenaIter.done();
    }

    template <typename T> T* get() const {
        JS_ASSERT(!done());
        return cellIter.get<T>();
    }

    Cell* getCell() const {
        JS_ASSERT(!done());
        return cellIter.getCell();
    }

    void next() {
        JS_ASSERT(!done());
        cellIter.next();
        if (cellIter.done()) {
            JS_ASSERT(!arenaIter.done());
            arenaIter.next();
            if (!arenaIter.done())
                cellIter.reset(arenaIter.get());
        }
    }
};

class ZoneCellIterUnderGC : public ZoneCellIterImpl
{
  public:
    ZoneCellIterUnderGC(JS::Zone* zone, AllocKind kind) {
#ifdef JSGC_GENERATIONAL
        JS_ASSERT(zone->runtimeFromAnyThread()->gc.nursery.isEmpty());
#endif
        JS_ASSERT(zone->runtimeFromAnyThread()->isHeapBusy());
        init(zone, kind);
    }
};
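// Illustrative sketch, not part of the original header: during a collection
// (heap busy, nursery empty), GC code can visit every script in a zone with
// ZoneCellIterUnderGC; |zone| is hypothetical.
//
//   for (ZoneCellIterUnderGC iter(zone, FINALIZE_SCRIPT); !iter.done(); iter.next()) {
//       JSScript* script = iter.get<JSScript>();
//       // ... mark or inspect |script| ...
//   }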
/* In debug builds, assert that no allocation occurs. */
class AutoAssertNoAlloc
{
#ifdef JS_DEBUG
    GCRuntime* gc;

  public:
    AutoAssertNoAlloc() : gc(nullptr) {}
    explicit AutoAssertNoAlloc(JSRuntime* rt) : gc(nullptr) {
        disallowAlloc(rt);
    }
    void disallowAlloc(JSRuntime* rt) {
        JS_ASSERT(!gc);
        gc = &rt->gc;
        gc->disallowAlloc();
    }
    ~AutoAssertNoAlloc() {
        if (gc)
            gc->allowAlloc();
    }
#else
  public:
    AutoAssertNoAlloc() {}
    explicit AutoAssertNoAlloc(JSRuntime*) {}
    void disallowAlloc(JSRuntime* rt) {}
#endif
};

class ZoneCellIter : public ZoneCellIterImpl
{
    AutoAssertNoAlloc noAlloc;
    ArenaLists* lists;
    AllocKind kind;

  public:
    ZoneCellIter(JS::Zone* zone, AllocKind kind)
      : lists(&zone->allocator.arenas),
        kind(kind)
    {
        /*
         * We have a single-threaded runtime, so there's no need to protect
         * against other threads iterating or allocating. However, we do have
         * background finalization; we have to wait for this to finish if it's
         * currently active.
         */
        if (IsBackgroundFinalized(kind) &&
            zone->allocator.arenas.needBackgroundFinalizeWait(kind))
        {
            gc::FinishBackgroundFinalize(zone->runtimeFromMainThread());
        }

#ifdef JSGC_GENERATIONAL
        /* Evict the nursery before iterating so we can see all things. */
        JSRuntime* rt = zone->runtimeFromMainThread();
        if (!rt->gc.nursery.isEmpty())
            MinorGC(rt, JS::gcreason::EVICT_NURSERY);
#endif

        if (lists->isSynchronizedFreeList(kind)) {
            lists = nullptr;
        } else {
            JS_ASSERT(!zone->runtimeFromMainThread()->isHeapBusy());
            lists->copyFreeListToArena(kind);
        }

        /* Assert that no GCs can occur while a ZoneCellIter is live. */
        noAlloc.disallowAlloc(zone->runtimeFromMainThread());

        init(zone, kind);
    }

    ~ZoneCellIter() {
        if (lists)
            lists->clearFreeListInArena(kind);
    }
};

class GCZonesIter
{
  private:
    ZonesIter zone;

  public:
    explicit GCZonesIter(JSRuntime* rt) : zone(rt, WithAtoms) {
        if (!zone->isCollecting())
            next();
    }

    bool done() const { return zone.done(); }

    void next() {
        JS_ASSERT(!done());
        do {
            zone.next();
        } while (!zone.done() && !zone->isCollecting());
    }

    JS::Zone* get() const {
        JS_ASSERT(!done());
        return zone;
    }

    operator JS::Zone*() const { return get(); }
    JS::Zone* operator->() const { return get(); }
};

typedef CompartmentsIterT<GCZonesIter> GCCompartmentsIter;

/* Iterates over all zones in the current zone group. */
class GCZoneGroupIter
{
  private:
    JS::Zone* current;

  public:
    explicit GCZoneGroupIter(JSRuntime* rt) {
        JS_ASSERT(rt->isHeapBusy());
        current = rt->gc.currentZoneGroup;
    }

    bool done() const { return !current; }

    void next() {
        JS_ASSERT(!done());
        current = current->nextNodeInGroup();
    }

    JS::Zone* get() const {
        JS_ASSERT(!done());
        return current;
    }

    operator JS::Zone*() const { return get(); }
    JS::Zone* operator->() const { return get(); }
};

typedef CompartmentsIterT<GCZoneGroupIter> GCCompartmentGroupIter;
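// Illustrative sketch, not part of the original header: GCZonesIter and
// GCCompartmentsIter restrict iteration to the zones and compartments taking
// part in the current collection; |rt| is a hypothetical JSRuntime*.
//
//   for (GCZonesIter zone(rt); !zone.done(); zone.next()) {
//       // ... per-zone marking or sweeping work ...
//   }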
#ifdef JSGC_GENERATIONAL
/*
 * Attempt to allocate a new GC thing out of the nursery. If there is not
 * enough room in the nursery or there is an OOM, this method will return
 * nullptr.
 */
template <AllowGC allowGC>
inline JSObject*
TryNewNurseryObject(ThreadSafeContext* cxArg, size_t thingSize, size_t nDynamicSlots)
{
    JSContext* cx = cxArg->asJSContext();

    JS_ASSERT(!IsAtomsCompartment(cx->compartment()));
    JSRuntime* rt = cx->runtime();
    Nursery& nursery = rt->gc.nursery;
    JSObject* obj = nursery.allocateObject(cx, thingSize, nDynamicSlots);
    if (obj)
        return obj;
    if (allowGC && !rt->mainThread.suppressGC) {
        MinorGC(cx, JS::gcreason::OUT_OF_NURSERY);

        /* Exceeding gcMaxBytes while tenuring can disable the Nursery. */
        if (nursery.isEnabled()) {
            JSObject* obj = nursery.allocateObject(cx, thingSize, nDynamicSlots);
            JS_ASSERT(obj);
            return obj;
        }
    }
    return nullptr;
}
#endif /* JSGC_GENERATIONAL */

static inline bool
PossiblyFail()
{
    JS_OOM_POSSIBLY_FAIL();
    return true;
}

template <AllowGC allowGC>
static inline bool
CheckAllocatorState(ThreadSafeContext* cx, AllocKind kind)
{
    if (!cx->isJSContext())
        return true;

    JSContext* ncx = cx->asJSContext();
    JSRuntime* rt = ncx->runtime();
#if defined(JS_GC_ZEAL) || defined(DEBUG)
    JS_ASSERT_IF(rt->isAtomsCompartment(ncx->compartment()),
                 kind == FINALIZE_STRING ||
                 kind == FINALIZE_FAT_INLINE_STRING ||
                 kind == FINALIZE_JITCODE);
    JS_ASSERT(!rt->isHeapBusy());
    JS_ASSERT(rt->gc.isAllocAllowed());
#endif

    // Crash if we perform a GC action when it is not safe.
    if (allowGC && !rt->mainThread.suppressGC)
        JS::AutoAssertOnGC::VerifyIsSafeToGC(rt);

    // For testing out of memory conditions
    if (!PossiblyFail()) {
        js_ReportOutOfMemory(cx);
        return false;
    }

    if (allowGC) {
#ifdef JS_GC_ZEAL
        if (rt->gc.needZealousGC())
            js::gc::RunDebugGC(ncx);
#endif

        if (rt->interrupt) {
            // Invoking the interrupt callback can fail and we can't usefully
            // handle that here. Just check in case we need to collect instead.
            js::gc::GCIfNeeded(ncx);
        }
    }

    return true;
}

template <typename T>
static inline void
CheckIncrementalZoneState(ThreadSafeContext* cx, T* t)
{
#ifdef DEBUG
    if (!cx->isJSContext())
        return;

    Zone* zone = cx->asJSContext()->zone();
    JS_ASSERT_IF(t && zone->wasGCStarted() &&
                 (zone->isGCMarking() || zone->isGCSweeping()),
                 t->arenaHeader()->allocatedDuringIncremental);
#endif
}

/*
 * Allocate a new GC thing. After a successful allocation the caller must
 * fully initialize the thing before calling any function that can potentially
 * trigger GC. This will ensure that GC tracing never sees junk values stored
 * in the partially initialized thing.
 */
template <AllowGC allowGC>
inline JSObject*
AllocateObject(ThreadSafeContext* cx, AllocKind kind, size_t nDynamicSlots, InitialHeap heap)
{
    size_t thingSize = Arena::thingSize(kind);

    JS_ASSERT(thingSize == Arena::thingSize(kind));
    if (!CheckAllocatorState<allowGC>(cx, kind))
        return nullptr;

#ifdef JSGC_GENERATIONAL
    if (cx->hasNursery() && ShouldNurseryAllocate(cx->nursery(), kind, heap)) {
        JSObject* obj = TryNewNurseryObject<allowGC>(cx, thingSize, nDynamicSlots);
        if (obj)
            return obj;
    }
#endif

    HeapSlot* slots = nullptr;
    if (nDynamicSlots) {
        slots = cx->pod_malloc<HeapSlot>(nDynamicSlots);
        if (MOZ_UNLIKELY(!slots))
            return nullptr;
        js::Debug_SetSlotRangeToCrashOnTouch(slots, nDynamicSlots);
    }

    JSObject* obj = static_cast<JSObject*>(cx->allocator()->arenas.allocateFromFreeList(kind, thingSize));
    if (!obj)
        obj = static_cast<JSObject*>(js::gc::ArenaLists::refillFreeList<allowGC>(cx, kind));

    if (obj)
        obj->setInitialSlots(slots);
    else
        js_free(slots);

    CheckIncrementalZoneState(cx, obj);
    return obj;
}

template <typename T, AllowGC allowGC>
inline T*
AllocateNonObject(ThreadSafeContext* cx)
{
    AllocKind kind = MapTypeToFinalizeKind<T>::kind;
    size_t thingSize = sizeof(T);

    JS_ASSERT(thingSize == Arena::thingSize(kind));
    if (!CheckAllocatorState<allowGC>(cx, kind))
        return nullptr;

    T* t = static_cast<T*>(cx->allocator()->arenas.allocateFromFreeList(kind, thingSize));
    if (!t)
        t = static_cast<T*>(js::gc::ArenaLists::refillFreeList<allowGC>(cx, kind));

    CheckIncrementalZoneState(cx, t);
    return t;
}
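// Illustrative sketch, not part of the original header: as the comment above
// AllocateObject requires, a caller must fully initialize the new thing before
// any operation that can trigger GC; |cx| and |kind| are hypothetical.
//
//   JSObject* obj = AllocateObject<CanGC>(cx, kind, 0, DefaultHeap);
//   if (!obj)
//       return nullptr;
//   // ... initialize every field of |obj| before anything that might GC ...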
/*
 * When allocating for initialization from a cached object copy, we will
 * potentially destroy the cache entry we want to copy if we allow GC. On the
 * other hand, since these allocations are extremely common, we don't want to
 * delay GC from these allocation sites. Instead we allow the GC, but still
 * fail the allocation, forcing the non-cached path.
 */
template <AllowGC allowGC>
inline JSObject*
AllocateObjectForCacheHit(JSContext* cx, AllocKind kind, InitialHeap heap)
{
#ifdef JSGC_GENERATIONAL
    if (ShouldNurseryAllocate(cx->nursery(), kind, heap)) {
        size_t thingSize = Arena::thingSize(kind);

        JS_ASSERT(thingSize == Arena::thingSize(kind));
        if (!CheckAllocatorState<NoGC>(cx, kind))
            return nullptr;

        JSObject* obj = TryNewNurseryObject<NoGC>(cx, thingSize, 0);
        if (!obj && allowGC) {
            MinorGC(cx, JS::gcreason::OUT_OF_NURSERY);
            return nullptr;
        }

        return obj;
    }
#endif

    JSObject* obj = AllocateObject<NoGC>(cx, kind, 0, heap);
    if (!obj && allowGC) {
        MaybeGC(cx);
        return nullptr;
    }

    return obj;
}

} /* namespace gc */

template <js::AllowGC allowGC>
inline JSObject*
NewGCObject(js::ThreadSafeContext* cx, js::gc::AllocKind kind, size_t nDynamicSlots,
            js::gc::InitialHeap heap)
{
    JS_ASSERT(kind >= js::gc::FINALIZE_OBJECT0 && kind <= js::gc::FINALIZE_OBJECT_LAST);
    return js::gc::AllocateObject<allowGC>(cx, kind, nDynamicSlots, heap);
}

template <js::AllowGC allowGC>
inline jit::JitCode*
NewJitCode(js::ThreadSafeContext* cx)
{
    return gc::AllocateNonObject<jit::JitCode, allowGC>(cx);
}

inline types::TypeObject*
NewTypeObject(js::ThreadSafeContext* cx)
{
    return gc::AllocateNonObject<types::TypeObject, js::CanGC>(cx);
}

} /* namespace js */

template <js::AllowGC allowGC>
inline JSString*
js_NewGCString(js::ThreadSafeContext* cx)
{
    return js::gc::AllocateNonObject<JSString, allowGC>(cx);
}

template <js::AllowGC allowGC>
inline JSFatInlineString*
js_NewGCFatInlineString(js::ThreadSafeContext* cx)
{
    return js::gc::AllocateNonObject<JSFatInlineString, allowGC>(cx);
}

inline JSExternalString*
js_NewGCExternalString(js::ThreadSafeContext* cx)
{
    return js::gc::AllocateNonObject<JSExternalString, js::CanGC>(cx);
}

inline JSScript*
js_NewGCScript(js::ThreadSafeContext* cx)
{
    return js::gc::AllocateNonObject<JSScript, js::CanGC>(cx);
}

inline js::LazyScript*
js_NewGCLazyScript(js::ThreadSafeContext* cx)
{
    return js::gc::AllocateNonObject<js::LazyScript, js::CanGC>(cx);
}

inline js::Shape*
js_NewGCShape(js::ThreadSafeContext* cx)
{
    return js::gc::AllocateNonObject<js::Shape, js::CanGC>(cx);
}

template <js::AllowGC allowGC>
inline js::BaseShape*
js_NewGCBaseShape(js::ThreadSafeContext* cx)
{
    return js::gc::AllocateNonObject<js::BaseShape, allowGC>(cx);
}

#endif /* jsgcinlines_h */