From 5b5c8b600528bc31e1bcae2fdcca95ed07427333 Mon Sep 17 00:00:00 2001
From: isaacs
Date: Sat, 7 Jul 2012 14:07:19 -0700
Subject: [PATCH] v8: Upgrade to 3.11.10.14

---
 deps/v8/build/common.gypi         |  7 ++---
 deps/v8/src/heap.cc               | 26 +++++++++++++++---
 deps/v8/src/mark-compact.cc       | 44 ++++++++++++++++++-------------
 deps/v8/src/version.cc            |  2 +-
 deps/v8/test/cctest/test-alloc.cc |  3 ++-
 deps/v8/test/cctest/test-heap.cc  | 39 +++++++++++++++++++++++++++
 deps/v8/tools/merge-to-branch.sh  |  0
 7 files changed, 93 insertions(+), 28 deletions(-)
 mode change 100755 => 100644 deps/v8/tools/merge-to-branch.sh

diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index 2d8dc11b70d..7f084b8c1db 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -239,6 +239,7 @@
         'WIN32',
       ],
       'msvs_configuration_attributes': {
+        'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
         'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
         'CharacterSet': '1',
       },
@@ -270,7 +271,7 @@
       'target_conditions': [
         ['_toolset=="host"', {
           'variables': {
-            'm32flag': ' /dev/null 2>&1) && echo "-m32" || true)',
+            'm32flag': ' /dev/null 2>&1) && echo -n "-m32" || true)',
           },
           'cflags': [ '<(m32flag)' ],
           'ldflags': [ '<(m32flag)' ],
@@ -280,7 +281,7 @@
         }],
         ['_toolset=="target"', {
           'variables': {
-            'm32flag': ' /dev/null 2>&1) && echo "-m32" || true)',
+            'm32flag': ' /dev/null 2>&1) && echo -n "-m32" || true)',
           },
           'cflags': [ '<(m32flag)' ],
           'ldflags': [ '<(m32flag)' ],
@@ -323,7 +324,7 @@
     },
     'conditions': [
      ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
-        'cflags': [ '-Wno-unused-parameter',
+        'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
                     '-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
      }],
     ],
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 172405b72cc..c37c084fa9c 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -5013,7 +5013,11 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
 
 
 bool Heap::IdleNotification(int hint) {
+  // Hints greater than this value indicate that
+  // the embedder is requesting a lot of GC work.
   const int kMaxHint = 1000;
+  // Minimal hint that allows to do full GC.
+  const int kMinHintForFullGC = 100;
   intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
   // The size factor is in range [5..250]. The numbers here are chosen from
   // experiments. If you changes them, make sure to test with
@@ -5081,16 +5085,30 @@
   mark_sweeps_since_idle_round_started_ += new_mark_sweeps;
   ms_count_at_last_idle_notification_ = ms_count_;
 
-  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
+  int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
+                              mark_sweeps_since_idle_round_started_;
+
+  if (remaining_mark_sweeps <= 0) {
     FinishIdleRound();
     return true;
   }
 
   if (incremental_marking()->IsStopped()) {
-    incremental_marking()->Start();
+    // If there are no more than two GCs left in this idle round and we are
+    // allowed to do a full GC, then make those GCs full in order to compact
+    // the code space.
+    // TODO(ulan): Once we enable code compaction for incremental marking,
+    // we can get rid of this special case and always start incremental marking.
+    if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
+      CollectAllGarbage(kReduceMemoryFootprintMask,
+                        "idle notification: finalize idle round");
+    } else {
+      incremental_marking()->Start();
+    }
+  }
+  if (!incremental_marking()->IsStopped()) {
+    AdvanceIdleIncrementalMarking(step_size);
   }
-
-  AdvanceIdleIncrementalMarking(step_size);
 
   return false;
 }
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 878c97413bb..6f2b5596371 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -500,12 +500,10 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
          space->identity() == OLD_DATA_SPACE ||
          space->identity() == CODE_SPACE);
 
+  static const int kMaxMaxEvacuationCandidates = 1000;
   int number_of_pages = space->CountTotalPages();
-
-  const int kMaxMaxEvacuationCandidates = 1000;
-  int max_evacuation_candidates = Min(
-      kMaxMaxEvacuationCandidates,
-      static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1));
+  int max_evacuation_candidates =
+      static_cast<int>(sqrt(static_cast<double>(number_of_pages / 2)) + 1);
 
   if (FLAG_stress_compaction || FLAG_always_compact) {
     max_evacuation_candidates = kMaxMaxEvacuationCandidates;
@@ -535,17 +533,27 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
   intptr_t over_reserved = reserved - space->SizeOfObjects();
   static const intptr_t kFreenessThreshold = 50;
 
-  if (over_reserved >= 2 * space->AreaSize() &&
-      reduce_memory_footprint_) {
-    mode = REDUCE_MEMORY_FOOTPRINT;
+  if (over_reserved >= 2 * space->AreaSize()) {
+    // If reduction of memory footprint was requested, we are aggressive
+    // about choosing pages to free. We expect that half-empty pages
+    // are easier to compact so slightly bump the limit.
+    if (reduce_memory_footprint_) {
+      mode = REDUCE_MEMORY_FOOTPRINT;
+      max_evacuation_candidates += 2;
+    }
 
-    // We expect that empty pages are easier to compact so slightly bump the
-    // limit.
-    max_evacuation_candidates += 2;
+    // If over-usage is very high (more than a third of the space), we
+    // try to free all mostly empty pages. We expect that almost empty
+    // pages are even easier to compact so bump the limit even more.
+    if (over_reserved > reserved / 3) {
+      mode = REDUCE_MEMORY_FOOTPRINT;
+      max_evacuation_candidates *= 2;
+    }
 
-    if (FLAG_trace_fragmentation) {
-      PrintF("Estimated over reserved memory: %.1f MB (setting threshold %d)\n",
+    if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
+      PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d)\n",
             static_cast<double>(over_reserved) / MB,
+            static_cast<double>(reserved) / MB,
             static_cast<int>(kFreenessThreshold));
     }
   }
@@ -554,6 +562,9 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
 
   Candidate candidates[kMaxMaxEvacuationCandidates];
 
+  max_evacuation_candidates =
+      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
+
   int count = 0;
   int fragmentation = 0;
   Candidate* least = NULL;
@@ -3817,11 +3828,6 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
   bool lazy_sweeping_active = false;
   bool unused_page_present = false;
 
-  intptr_t old_space_size = heap()->PromotedSpaceSizeOfObjects();
-  intptr_t space_left =
-      Min(heap()->OldGenPromotionLimit(old_space_size),
-          heap()->OldGenAllocationLimit(old_space_size)) - old_space_size;
-
   while (it.has_next()) {
     Page* p = it.next();
 
@@ -3881,7 +3887,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
         }
         freed_bytes += SweepConservatively(space, p);
         pages_swept++;
-        if (space_left + freed_bytes > newspace_size) {
+        if (freed_bytes > 2 * newspace_size) {
          space->SetPagesToSweep(p->next_page());
          lazy_sweeping_active = true;
         } else {
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 9b9c9beb259..bd350fe2026 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -35,7 +35,7 @@
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     11
 #define BUILD_NUMBER      10
-#define PATCH_LEVEL       12
+#define PATCH_LEVEL       14
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/test-alloc.cc
index e195d14923e..a8e504fd443 100644
--- a/deps/v8/test/cctest/test-alloc.cc
+++ b/deps/v8/test/cctest/test-alloc.cc
@@ -34,7 +34,8 @@
 using namespace v8::internal;
 
 
-static inline void SimulateFullSpace(PagedSpace* space) {
+// Also used in test-heap.cc test cases.
+void SimulateFullSpace(PagedSpace* space) {
   int old_linear_size = static_cast<int>(space->limit() - space->top());
   space->Free(space->top(), old_linear_size);
   space->SetTop(space->limit(), space->limit());
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 275bd3d0fd6..f8f20ab3cc3 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -1898,3 +1898,42 @@ TEST(Regress2143b) {
   CHECK(root->IsJSObject());
   CHECK(root->map()->IsMap());
 }
+
+
+// Implemented in the test-alloc.cc test suite.
+void SimulateFullSpace(PagedSpace* space);
+
+
+TEST(ReleaseOverReservedPages) {
+  i::FLAG_trace_gc = true;
+  InitializeVM();
+  v8::HandleScope scope;
+  static const int number_of_test_pages = 20;
+
+  // Prepare many pages with low live-bytes count.
+  PagedSpace* old_pointer_space = HEAP->old_pointer_space();
+  CHECK_EQ(1, old_pointer_space->CountTotalPages());
+  for (int i = 0; i < number_of_test_pages; i++) {
+    AlwaysAllocateScope always_allocate;
+    SimulateFullSpace(old_pointer_space);
+    FACTORY->NewFixedArray(1, TENURED);
+  }
+  CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+
+  // Triggering one GC will cause a lot of garbage to be discovered but
+  // even spread across all allocated pages.
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered for preparation");
+  CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+
+  // Triggering subsequent GCs should cause at least half of the pages
+  // to be released to the OS after at most two cycles.
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 1");
+  CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+  HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered by test 2");
+  CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages() * 2);
+
+  // Triggering a last-resort GC should cause all pages to be released
+  // to the OS so that other processes can seize the memory.
+  HEAP->CollectAllAvailableGarbage("triggered really hard");
+  CHECK_EQ(1, old_pointer_space->CountTotalPages());
+}
diff --git a/deps/v8/tools/merge-to-branch.sh b/deps/v8/tools/merge-to-branch.sh
old mode 100755
new mode 100644