+
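+ // Finds the next free cell in this block by scanning the mark bitmap:
+ // testAndSet() returning false means the atom was unmarked (dead), so its
+ // slot can be recycled. The old cell's destructor runs here, folding the
+ // sweep into allocation. Returns 0 when the block is exhausted.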
+ inline void* MarkedBlock::allocate()
+ {
+     while (m_nextAtom < m_endAtom) {
+         if (!m_marks.testAndSet(m_nextAtom)) {
+             JSCell* cell = reinterpret_cast<JSCell*>(&atoms()[m_nextAtom]);
+             m_nextAtom += m_atomsPerCell;
+             cell->~JSCell();
+             return cell;
+         }
+         m_nextAtom += m_atomsPerCell;
+     }
+
+     return 0;
+ }
+
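+ // Maps a request size to its size class. Sizes up to preciseCutoff use the
+ // fine-grained preciseStep buckets; anything larger uses the coarser
+ // impreciseStep buckets. The (bytes - 1) / step index rounds the request up
+ // to the smallest bucket that fits it.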
+ inline MarkedSpace::SizeClass& MarkedSpace::sizeClassFor(size_t bytes)
+ {
+     ASSERT(bytes && bytes <= maxCellSize);
+     if (bytes <= preciseCutoff)
+         return m_preciseSizeClasses[(bytes - 1) / preciseStep];
+     return m_impreciseSizeClasses[(bytes - 1) / impreciseStep];
+ }
+
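+ // MarkedSpace's allocation entry point: pick the size class, then allocate
+ // from it.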
+ inline void* MarkedSpace::allocate(size_t bytes)
+ {
+     SizeClass& sizeClass = sizeClassFor(bytes);
+     return allocateFromSizeClass(sizeClass);
+ }
+
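+ // Heap's inline fast path. The asserts verify that the caller holds the
+ // JSLock on the current thread and that no other heap operation is in
+ // flight; m_operationInProgress guards against re-entry during allocation.
+ // If every block in the size class is full, we fall back to
+ // allocateSlowCase (not shown here).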
+ inline void* Heap::allocate(size_t bytes)
+ {
+     ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
+     ASSERT(JSLock::lockCount() > 0);
+     ASSERT(JSLock::currentThreadIsHoldingLock());
+     ASSERT(bytes <= MarkedSpace::maxCellSize);
+     ASSERT(m_operationInProgress == NoOperation);
+
+     m_operationInProgress = Allocation;
+     void* result = m_markedSpace.allocate(bytes);
+     m_operationInProgress = NoOperation;
+     if (result)
+         return result;
+
+     return allocateSlowCase(bytes);
+ }
+
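+ // Both placement operator new overloads take memory from the GC heap rather
+ // than the system allocator. Because the slot may be a recycled cell,
+ // m_structure is cleared so the collector never sees a stale Structure
+ // pointer in a not-yet-initialized cell.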
+ inline void* JSCell::operator new(size_t size, JSGlobalData* globalData)
+ {
+     JSCell* result = static_cast<JSCell*>(globalData->heap.allocate(size));
+     result->m_structure.clear();
+     return result;
+ }
+
+ inline void* JSCell::operator new(size_t size, ExecState* exec)
+ {
+     JSCell* result = static_cast<JSCell*>(exec->heap()->allocate(size));
+     result->m_structure.clear();
+     return result;
+ }
+