Skip to content

Commit 15f2538

Browse files
author
Xiaolong Peng
committed
8373056: Shenandoah: Remove unnecessary use of ShenandoahAllocRequest.type()
Reviewed-by: wkemper, kdnilsen
1 parent 13e32bf commit 15f2538

File tree

6 files changed

+59
-68
lines changed

6 files changed

+59
-68
lines changed

src/hotspot/share/gc/shenandoah/shenandoahAllocRequest.hpp

Lines changed: 7 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -83,16 +83,15 @@ class ShenandoahAllocRequest : StackObj {
8383
return "PLAB";
8484
default:
8585
ShouldNotReachHere();
86-
return "";
8786
}
8887
}
8988

9089
private:
9190
// When ShenandoahElasticTLAB is enabled, the request cannot be made smaller than _min_size.
92-
size_t _min_size;
91+
size_t const _min_size;
9392

9493
// The size of the request in words.
95-
size_t _requested_size;
94+
size_t const _requested_size;
9695

9796
// The allocation may be increased for padding or decreased to fit in the remaining space of a region.
9897
size_t _actual_size;
@@ -104,7 +103,7 @@ class ShenandoahAllocRequest : StackObj {
104103
size_t _waste;
105104

106105
// This is the type of the request.
107-
Type _alloc_type;
106+
Type const _alloc_type;
108107

109108
#ifdef ASSERT
110109
// Check that this is set before being read.
@@ -209,6 +208,10 @@ class ShenandoahAllocRequest : StackObj {
209208
return (_alloc_type & bit_old_alloc) == 0;
210209
}
211210

211+
inline bool is_cds() const {
212+
return _alloc_type == _alloc_cds;
213+
}
214+
212215
inline ShenandoahAffiliation affiliation() const {
213216
return (_alloc_type & bit_old_alloc) == 0 ? YOUNG_GENERATION : OLD_GENERATION ;
214217
}

src/hotspot/share/gc/shenandoah/shenandoahFreeSet.cpp

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1369,7 +1369,7 @@ template<typename Iter>
13691369
HeapWord* ShenandoahFreeSet::allocate_from_regions(Iter& iterator, ShenandoahAllocRequest &req, bool &in_new_region) {
13701370
for (idx_t idx = iterator.current(); iterator.has_next(); idx = iterator.next()) {
13711371
ShenandoahHeapRegion* r = _heap->get_region(idx);
1372-
size_t min_size = (req.type() == ShenandoahAllocRequest::_alloc_tlab) ? req.min_size() : req.size();
1372+
size_t min_size = req.is_lab_alloc() ? req.min_size() : req.size();
13731373
if (alloc_capacity(r) >= min_size * HeapWordSize) {
13741374
HeapWord* result = try_allocate_in(r, req, in_new_region);
13751375
if (result != nullptr) {
@@ -1501,7 +1501,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
15011501

15021502
if (in_new_region) {
15031503
log_debug(gc, free)("Using new region (%zu) for %s (" PTR_FORMAT ").",
1504-
r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
1504+
r->index(), req.type_string(), p2i(&req));
15051505
assert(!r->is_affiliated(), "New region %zu should be unaffiliated", r->index());
15061506
r->set_affiliation(req.affiliation());
15071507
if (r->is_old()) {
@@ -1520,7 +1520,7 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
15201520
assert(ctx->is_bitmap_range_within_region_clear(ctx->top_bitmap(r), r->end()), "Bitmap above top_bitmap() must be clear");
15211521
#endif
15221522
log_debug(gc, free)("Using new region (%zu) for %s (" PTR_FORMAT ").",
1523-
r->index(), ShenandoahAllocRequest::alloc_type_to_string(req.type()), p2i(&req));
1523+
r->index(), req.type_string(), p2i(&req));
15241524
} else {
15251525
assert(r->is_affiliated(), "Region %zu that is not new should be affiliated", r->index());
15261526
if (r->affiliation() != req.affiliation()) {
@@ -1534,8 +1534,8 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
15341534
if (req.is_lab_alloc()) {
15351535
size_t adjusted_size = req.size();
15361536
size_t free = r->free(); // free represents bytes available within region r
1537-
if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
1538-
// This is a PLAB allocation
1537+
if (req.is_old()) {
1538+
// This is a PLAB allocation (lab alloc in old gen)
15391539
assert(_heap->mode()->is_generational(), "PLABs are only for generational mode");
15401540
assert(_partitions.in_free_set(ShenandoahFreeSetPartitionId::OldCollector, r->index()),
15411541
"PLABS must be allocated in old_collector_free regions");
@@ -1596,8 +1596,6 @@ HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, Shenandoah
15961596
r->set_update_watermark(r->top());
15971597
if (r->is_old()) {
15981598
_partitions.increase_used(ShenandoahFreeSetPartitionId::OldCollector, (req.actual_size() + req.waste()) * HeapWordSize);
1599-
assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "old-gen allocations use PLAB or shared allocation");
1600-
// for plabs, we'll sort the difference between evac and promotion usage when we retire the plab
16011599
} else {
16021600
_partitions.increase_used(ShenandoahFreeSetPartitionId::Collector, (req.actual_size() + req.waste()) * HeapWordSize);
16031601
}

src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp

Lines changed: 11 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -985,7 +985,7 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
985985

986986
assert (req.is_lab_alloc() || (requested == actual),
987987
"Only LAB allocations are elastic: %s, requested = %zu, actual = %zu",
988-
ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
988+
req.type_string(), requested, actual);
989989
}
990990

991991
return result;
@@ -1014,8 +1014,9 @@ HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req
10141014

10151015
// Record the plab configuration for this result and register the object.
10161016
if (result != nullptr && req.is_old()) {
1017-
old_generation()->configure_plab_for_current_thread(req);
1018-
if (!req.is_lab_alloc()) {
1017+
if (req.is_lab_alloc()) {
1018+
old_generation()->configure_plab_for_current_thread(req);
1019+
} else {
10191020
// Register the newly allocated object while we're holding the global lock since there's no synchronization
10201021
// built in to the implementation of register_object(). There are potential races when multiple independent
10211022
// threads are allocating objects, some of which might span the same card region. For example, consider
@@ -1035,6 +1036,13 @@ HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req
10351036
// last-start representing object b while first-start represents object c. This is why we need to require all
10361037
// register_object() invocations to be "mutually exclusive" with respect to each card's memory range.
10371038
old_generation()->card_scan()->register_object(result);
1039+
1040+
if (req.is_promotion()) {
1041+
// Shared promotion.
1042+
const size_t actual_size = req.actual_size() * HeapWordSize;
1043+
log_debug(gc, plab)("Expend shared promotion of %zu bytes", actual_size);
1044+
old_generation()->expend_promoted(actual_size);
1045+
}
10381046
}
10391047
}
10401048

src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.hpp

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -447,7 +447,7 @@ class ShenandoahHeapRegion {
447447
return (bottom() <= p) && (p < top());
448448
}
449449

450-
inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t);
450+
inline void adjust_alloc_metadata(const ShenandoahAllocRequest &req, size_t);
451451
void reset_alloc_metadata();
452452
size_t get_shared_allocs() const;
453453
size_t get_tlab_allocs() const;

src/hotspot/share/gc/shenandoah/shenandoahHeapRegion.inline.hpp

Lines changed: 10 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ HeapWord* ShenandoahHeapRegion::allocate_aligned(size_t size, ShenandoahAllocReq
7171
}
7272

7373
make_regular_allocation(req.affiliation());
74-
adjust_alloc_metadata(req.type(), size);
74+
adjust_alloc_metadata(req, size);
7575

7676
HeapWord* new_top = aligned_obj + size;
7777
assert(new_top <= end(), "PLAB cannot span end of heap region");
@@ -111,7 +111,7 @@ HeapWord* ShenandoahHeapRegion::allocate(size_t size, const ShenandoahAllocReque
111111
HeapWord* obj = top();
112112
if (pointer_delta(end(), obj) >= size) {
113113
make_regular_allocation(req.affiliation());
114-
adjust_alloc_metadata(req.type(), size);
114+
adjust_alloc_metadata(req, size);
115115

116116
HeapWord* new_top = obj + size;
117117
set_top(new_top);
@@ -125,26 +125,16 @@ HeapWord* ShenandoahHeapRegion::allocate(size_t size, const ShenandoahAllocReque
125125
}
126126
}
127127

128-
inline void ShenandoahHeapRegion::adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t size) {
129-
switch (type) {
130-
case ShenandoahAllocRequest::_alloc_shared:
131-
case ShenandoahAllocRequest::_alloc_shared_gc:
132-
case ShenandoahAllocRequest::_alloc_shared_gc_old:
133-
case ShenandoahAllocRequest::_alloc_shared_gc_promotion:
134-
case ShenandoahAllocRequest::_alloc_cds:
135-
// Counted implicitly by tlab/gclab allocs
136-
break;
137-
case ShenandoahAllocRequest::_alloc_tlab:
128+
inline void ShenandoahHeapRegion::adjust_alloc_metadata(const ShenandoahAllocRequest &req, size_t size) {
129+
// Only need to update alloc metadata for lab allocs; shared allocs are counted implicitly by tlab/gclab allocs
130+
if (req.is_lab_alloc()) {
131+
if (req.is_mutator_alloc()) {
138132
_tlab_allocs += size;
139-
break;
140-
case ShenandoahAllocRequest::_alloc_gclab:
141-
_gclab_allocs += size;
142-
break;
143-
case ShenandoahAllocRequest::_alloc_plab:
133+
} else if (req.is_old()) {
144134
_plab_allocs += size;
145-
break;
146-
default:
147-
ShouldNotReachHere();
135+
} else {
136+
_gclab_allocs += size;
137+
}
148138
}
149139
}
150140

src/hotspot/share/gc/shenandoah/shenandoahOldGeneration.cpp

Lines changed: 25 additions & 33 deletions
Original file line numberDiff line numberDiff line change
@@ -168,7 +168,7 @@ size_t ShenandoahOldGeneration::get_promoted_expended() const {
168168
}
169169

170170
bool ShenandoahOldGeneration::can_allocate(const ShenandoahAllocRequest &req) const {
171-
assert(req.type() != ShenandoahAllocRequest::_alloc_gclab, "GCLAB pertains only to young-gen memory");
171+
assert(req.is_old(), "Must be old allocation request");
172172

173173
const size_t requested_bytes = req.size() * HeapWordSize;
174174
// The promotion reserve may also be used for evacuations. If we can promote this object,
@@ -180,7 +180,7 @@ bool ShenandoahOldGeneration::can_allocate(const ShenandoahAllocRequest &req) co
180180
return true;
181181
}
182182

183-
if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
183+
if (req.is_lab_alloc()) {
184184
// The promotion reserve cannot accommodate this plab request. Check if we still have room for
185185
// evacuations. Note that we cannot really know how much of the plab will be used for evacuations,
186186
// so here we only check that some evacuation reserve still exists.
@@ -195,37 +195,29 @@ bool ShenandoahOldGeneration::can_allocate(const ShenandoahAllocRequest &req) co
195195

196196
void
197197
ShenandoahOldGeneration::configure_plab_for_current_thread(const ShenandoahAllocRequest &req) {
198-
// Note: Even when a mutator is performing a promotion outside a LAB, we use a 'shared_gc' request.
199-
if (req.is_gc_alloc()) {
200-
const size_t actual_size = req.actual_size() * HeapWordSize;
201-
if (req.type() == ShenandoahAllocRequest::_alloc_plab) {
202-
// We've created a new plab. Now we configure it whether it will be used for promotions
203-
// and evacuations - or just evacuations.
204-
Thread* thread = Thread::current();
205-
ShenandoahThreadLocalData::reset_plab_promoted(thread);
206-
207-
// The actual size of the allocation may be larger than the requested bytes (due to alignment on card boundaries).
208-
// If this puts us over our promotion budget, we need to disable future PLAB promotions for this thread.
209-
if (can_promote(actual_size)) {
210-
// Assume the entirety of this PLAB will be used for promotion. This prevents promotion from overreach.
211-
// When we retire this plab, we'll unexpend what we don't really use.
212-
log_debug(gc, plab)("Thread can promote using PLAB of %zu bytes. Expended: %zu, available: %zu",
213-
actual_size, get_promoted_expended(), get_promoted_reserve());
214-
expend_promoted(actual_size);
215-
ShenandoahThreadLocalData::enable_plab_promotions(thread);
216-
ShenandoahThreadLocalData::set_plab_actual_size(thread, actual_size);
217-
} else {
218-
// Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations.
219-
ShenandoahThreadLocalData::disable_plab_promotions(thread);
220-
ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
221-
log_debug(gc, plab)("Thread cannot promote using PLAB of %zu bytes. Expended: %zu, available: %zu, mixed evacuations? %s",
222-
actual_size, get_promoted_expended(), get_promoted_reserve(), BOOL_TO_STR(ShenandoahHeap::heap()->collection_set()->has_old_regions()));
223-
}
224-
} else if (req.is_promotion()) {
225-
// Shared promotion.
226-
log_debug(gc, plab)("Expend shared promotion of %zu bytes", actual_size);
227-
expend_promoted(actual_size);
228-
}
198+
assert(req.is_gc_alloc() && req.is_old() && req.is_lab_alloc(), "Must be a plab alloc request");
199+
const size_t actual_size = req.actual_size() * HeapWordSize;
200+
// We've created a new plab. Now we configure it whether it will be used for promotions
201+
// and evacuations - or just evacuations.
202+
Thread* thread = Thread::current();
203+
ShenandoahThreadLocalData::reset_plab_promoted(thread);
204+
205+
// The actual size of the allocation may be larger than the requested bytes (due to alignment on card boundaries).
206+
// If this puts us over our promotion budget, we need to disable future PLAB promotions for this thread.
207+
if (can_promote(actual_size)) {
208+
// Assume the entirety of this PLAB will be used for promotion. This prevents promotion from overreach.
209+
// When we retire this plab, we'll unexpend what we don't really use.
210+
log_debug(gc, plab)("Thread can promote using PLAB of %zu bytes. Expended: %zu, available: %zu",
211+
actual_size, get_promoted_expended(), get_promoted_reserve());
212+
expend_promoted(actual_size);
213+
ShenandoahThreadLocalData::enable_plab_promotions(thread);
214+
ShenandoahThreadLocalData::set_plab_actual_size(thread, actual_size);
215+
} else {
216+
// Disable promotions in this thread because entirety of this PLAB must be available to hold old-gen evacuations.
217+
ShenandoahThreadLocalData::disable_plab_promotions(thread);
218+
ShenandoahThreadLocalData::set_plab_actual_size(thread, 0);
219+
log_debug(gc, plab)("Thread cannot promote using PLAB of %zu bytes. Expended: %zu, available: %zu, mixed evacuations? %s",
220+
actual_size, get_promoted_expended(), get_promoted_reserve(), BOOL_TO_STR(ShenandoahHeap::heap()->collection_set()->has_old_regions()));
229221
}
230222
}
231223

0 commit comments

Comments
 (0)