halo / art / Commits

Commit 97509954, authored 10 years ago by Mathieu Chartier
Clean up GC callbacks to be virtual methods
Change-Id: Ia08034a4e5931c4fcb329c3bd3c4b1f301135735
parent c6f3439b
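The commit message is terse, so here is a minimal sketch of the pattern this change applies throughout the GC: callbacks that used to be passed as a function pointer plus a type-erased void* context become virtual methods on the collector, via small interfaces such as IsMarkedVisitor and MarkObjectVisitor. The names below are simplified stand-ins for illustration, not the actual ART declarations.

    // Sketch only: simplified stand-ins for the ART types, to illustrate the refactor.
    #include <cstddef>

    struct Object {};

    // Before: a free-function callback plus a void* that is cast back to the collector inside it.
    typedef Object* IsMarkedCallback(Object* obj, void* arg);

    void SweepWeaksWithCallback(Object** refs, size_t count, IsMarkedCallback* is_marked, void* arg) {
      for (size_t i = 0; i < count; ++i) {
        refs[i] = is_marked(refs[i], arg);  // null means the object is dead.
      }
    }

    // After: the collector implements a small virtual interface and is passed directly.
    class IsMarkedVisitor {
     public:
      virtual ~IsMarkedVisitor() {}
      // Returns null if the object is dead, otherwise its (possibly forwarded) address.
      virtual Object* IsMarked(Object* obj) = 0;
    };

    void SweepWeaksWithVisitor(Object** refs, size_t count, IsMarkedVisitor* visitor) {
      for (size_t i = 0; i < count; ++i) {
        refs[i] = visitor->IsMarked(refs[i]);
      }
    }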
Changes: 33
Showing 20 changed files with 350 additions and 459 deletions (+350 / -459)
runtime/gc/accounting/mod_union_table.cc (+18 / -28)
runtime/gc/accounting/mod_union_table.h (+3 / -3)
runtime/gc/accounting/mod_union_table_test.cc (+22 / -8)
runtime/gc/accounting/remembered_set.cc (+18 / -25)
runtime/gc/accounting/remembered_set.h (+3 / -3)
runtime/gc/accounting/space_bitmap.h (+1 / -0)
runtime/gc/allocation_record.cc (+13 / -12)
runtime/gc/allocation_record.h (+1 / -1)
runtime/gc/collector/concurrent_copying.cc (+20 / -33)
runtime/gc/collector/concurrent_copying.h (+5 / -7)
runtime/gc/collector/garbage_collector.h (+28 / -3)
runtime/gc/collector/mark_compact.cc (+48 / -86)
runtime/gc/collector/mark_compact.h (+14 / -35)
runtime/gc/collector/mark_sweep.cc (+72 / -77)
runtime/gc/collector/mark_sweep.h (+16 / -25)
runtime/gc/collector/semi_space.cc (+24 / -44)
runtime/gc/collector/semi_space.h (+8 / -19)
runtime/gc/heap.cc (+11 / -5)
runtime/gc/heap.h (+1 / -1)
runtime/gc/reference_processor.cc (+24 / -44)
runtime/gc/accounting/mod_union_table.cc
@@ -21,16 +21,11 @@
 #include "base/stl_util.h"
 #include "bitmap-inl.h"
 #include "card_table-inl.h"
 #include "heap_bitmap.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/collector/mark_sweep.h"
 #include "gc/collector/mark_sweep-inl.h"
 #include "gc/heap.h"
 #include "gc/space/space.h"
 #include "gc/space/image_space.h"
 #include "gc/space/space.h"
 #include "mirror/object-inl.h"
 #include "mirror/class-inl.h"
 #include "mirror/object_array-inl.h"
 #include "space_bitmap-inl.h"
 #include "thread.h"
@@ -95,11 +90,11 @@ class ModUnionAddToCardVectorVisitor {
 class ModUnionUpdateObjectReferencesVisitor {
  public:
-  ModUnionUpdateObjectReferencesVisitor(MarkHeapReferenceCallback* callback, void* arg,
+  ModUnionUpdateObjectReferencesVisitor(MarkObjectVisitor* visitor,
                                         space::ContinuousSpace* from_space,
                                         space::ContinuousSpace* immune_space,
                                         bool* contains_reference_to_other_space)
-      : callback_(callback), arg_(arg), from_space_(from_space), immune_space_(immune_space),
+      : visitor_(visitor), from_space_(from_space), immune_space_(immune_space),
         contains_reference_to_other_space_(contains_reference_to_other_space) {}
@@ -111,13 +106,12 @@ class ModUnionUpdateObjectReferencesVisitor {
     mirror::Object* ref = obj_ptr->AsMirrorPtr();
     if (ref != nullptr && !from_space_->HasAddress(ref) && !immune_space_->HasAddress(ref)) {
       *contains_reference_to_other_space_ = true;
-      callback_(obj_ptr, arg_);
+      visitor_->MarkHeapReference(obj_ptr);
     }
   }

  private:
-  MarkHeapReferenceCallback* const callback_;
-  void* const arg_;
+  MarkObjectVisitor* const visitor_;
   // Space which we are scanning
   space::ContinuousSpace* const from_space_;
   space::ContinuousSpace* const immune_space_;
@@ -129,25 +123,24 @@ class ModUnionScanImageRootVisitor {
  public:
   // Immune space is any other space which we don't care about references to. Currently this is
   // the image space in the case of the zygote mod union table.
-  ModUnionScanImageRootVisitor(MarkHeapReferenceCallback* callback, void* arg,
+  ModUnionScanImageRootVisitor(MarkObjectVisitor* visitor,
                                space::ContinuousSpace* from_space,
                                space::ContinuousSpace* immune_space,
                                bool* contains_reference_to_other_space)
-      : callback_(callback), arg_(arg), from_space_(from_space), immune_space_(immune_space),
+      : visitor_(visitor), from_space_(from_space), immune_space_(immune_space),
         contains_reference_to_other_space_(contains_reference_to_other_space) {}

   void operator()(Object* root) const
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(root != nullptr);
-    ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_, from_space_, immune_space_,
+    ModUnionUpdateObjectReferencesVisitor ref_visitor(visitor_, from_space_, immune_space_,
                                                       contains_reference_to_other_space_);
     root->VisitReferences<kMovingClasses>(ref_visitor, VoidFunctor());
   }

  private:
-  MarkHeapReferenceCallback* const callback_;
-  void* const arg_;
+  MarkObjectVisitor* const visitor_;
   // Space which we are scanning
   space::ContinuousSpace* const from_space_;
   space::ContinuousSpace* const immune_space_;
@@ -305,8 +298,7 @@ void ModUnionTableReferenceCache::Dump(std::ostream& os) {
   }
 }

-void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
-                                                          void* arg) {
+void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkObjectVisitor* visitor) {
   CardTable* card_table = heap_->GetCardTable();
   std::vector<mirror::HeapReference<Object>*> cards_references;
@@ -338,7 +330,7 @@ void ModUnionTableReferenceCache::UpdateAndMarkReferences(MarkHeapReferenceCallb
   size_t count = 0;
   for (const auto& ref : references_) {
     for (mirror::HeapReference<Object>* obj_ptr : ref.second) {
-      callback(obj_ptr, arg);
+      visitor->MarkHeapReference(obj_ptr);
     }
     count += ref.second.size();
   }
@@ -362,9 +354,9 @@ ModUnionTableCardCache::ModUnionTableCardCache(const std::string& name, Heap* he
 class CardBitVisitor {
  public:
-  CardBitVisitor(MarkHeapReferenceCallback* callback, void* arg, space::ContinuousSpace* space,
+  CardBitVisitor(MarkObjectVisitor* visitor, space::ContinuousSpace* space,
                  space::ContinuousSpace* immune_space, ModUnionTable::CardBitmap* card_bitmap)
-      : callback_(callback), arg_(arg), space_(space), immune_space_(immune_space),
+      : visitor_(visitor), space_(space), immune_space_(immune_space),
         bitmap_(space->GetLiveBitmap()), card_bitmap_(card_bitmap) {
     DCHECK(immune_space_ != nullptr);
   }
@@ -374,7 +366,7 @@ class CardBitVisitor {
     DCHECK(space_->HasAddress(reinterpret_cast<mirror::Object*>(start))) << start << " " << *space_;
     bool reference_to_other_space = false;
-    ModUnionScanImageRootVisitor scan_visitor(callback_, arg_, space_, immune_space_,
+    ModUnionScanImageRootVisitor scan_visitor(visitor_, space_, immune_space_,
                                               &reference_to_other_space);
     bitmap_->VisitMarkedRange(start, start + CardTable::kCardSize, scan_visitor);
     if (!reference_to_other_space) {
@@ -384,8 +376,7 @@ class CardBitVisitor {
   }

  private:
-  MarkHeapReferenceCallback* const callback_;
-  void* const arg_;
+  MarkObjectVisitor* const visitor_;
   space::ContinuousSpace* const space_;
   space::ContinuousSpace* const immune_space_;
   ContinuousSpaceBitmap* const bitmap_;
@@ -400,15 +391,14 @@ void ModUnionTableCardCache::ClearCards() {
 }

 // Mark all references to the alloc space(s).
-void ModUnionTableCardCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
-                                                     void* arg) {
+void ModUnionTableCardCache::UpdateAndMarkReferences(MarkObjectVisitor* visitor) {
   auto* image_space = heap_->GetImageSpace();
   // If we don't have an image space, just pass in space_ as the immune space. Pass in the same
   // space_ instead of image_space to avoid a null check in ModUnionUpdateObjectReferencesVisitor.
-  CardBitVisitor visitor(callback, arg, space_, image_space != nullptr ? image_space : space_,
-                         card_bitmap_.get());
+  CardBitVisitor bit_visitor(visitor, space_, image_space != nullptr ? image_space : space_,
+                             card_bitmap_.get());
   card_bitmap_->VisitSetBits(
-      0, RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize, visitor);
+      0, RoundUp(space_->Size(), CardTable::kCardSize) / CardTable::kCardSize, bit_visitor);
 }

 void ModUnionTableCardCache::Dump(std::ostream& os) {
runtime/gc/accounting/mod_union_table.h
@@ -76,7 +76,7 @@ class ModUnionTable {
   // Update the mod-union table using data stored by ClearCards. There may be multiple ClearCards
   // before a call to update, for example, back-to-back sticky GCs. Also mark references to other
   // spaces which are stored in the mod-union table.
-  virtual void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) = 0;
+  virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) = 0;

   // Verification, sanity checks that we don't have clean cards which conflict with out cached data
   // for said cards. Exclusive lock is required since verify sometimes uses
@@ -117,7 +117,7 @@ class ModUnionTableReferenceCache : public ModUnionTable {
   void ClearCards() OVERRIDE;

   // Update table based on cleared cards and mark all references to the other spaces.
-  void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) OVERRIDE
+  void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -157,7 +157,7 @@ class ModUnionTableCardCache : public ModUnionTable {
   virtual void ClearCards() OVERRIDE;

   // Mark all references to the alloc space(s).
-  virtual void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback, void* arg) OVERRIDE
+  virtual void UpdateAndMarkReferences(MarkObjectVisitor* visitor) OVERRIDE
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
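With the virtual signature above, call sites shrink to a single argument. The snippet below is condensed from the MarkSweep hunk later in this commit; the surrounding loop over spaces is elided here, so treat it as an illustration of the call pattern rather than a complete function.

    // Condensed call-site pattern: the collector is itself a MarkObjectVisitor, so it passes "this".
    accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
    CHECK(mod_union_table != nullptr);
    mod_union_table->UpdateAndMarkReferences(this);  // was: UpdateAndMarkReferences(MarkHeapReferenceCallback, this)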
runtime/gc/accounting/mod_union_table_test.cc
@@ -93,12 +93,24 @@ class ModUnionTableTest : public CommonRuntimeTest {
 };

 // Collect visited objects into container.
-static void CollectVisitedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  DCHECK(ref != nullptr);
-  DCHECK(arg != nullptr);
-  reinterpret_cast<std::set<mirror::Object*>*>(arg)->insert(ref->AsMirrorPtr());
-}
+class CollectVisitedVisitor : public MarkObjectVisitor {
+ public:
+  explicit CollectVisitedVisitor(std::set<mirror::Object*>* out) : out_(out) {}
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(ref != nullptr);
+    MarkObject(ref->AsMirrorPtr());
+  }
+  virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(obj != nullptr);
+    out_->insert(obj);
+    return obj;
+  }
+
+ private:
+  std::set<mirror::Object*>* const out_;
+};

 // A mod union table that only holds references to a specified target space.
 class ModUnionTableRefCacheToSpace : public ModUnionTableReferenceCache {
@@ -199,7 +211,8 @@ void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) {
   obj2->Set(3, other_space_ref2);
   table->ClearCards();
   std::set<mirror::Object*> visited_before;
-  table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited_before);
+  CollectVisitedVisitor collector_before(&visited_before);
+  table->UpdateAndMarkReferences(&collector_before);
   // Check that we visited all the references in other spaces only.
   ASSERT_GE(visited_before.size(), 2u);
   ASSERT_TRUE(visited_before.find(other_space_ref1) != visited_before.end());
@@ -230,7 +243,8 @@ void ModUnionTableTest::RunTest(ModUnionTableFactory::TableType type) {
   }
   // Visit again and make sure the cards got cleared back to their sane state.
   std::set<mirror::Object*> visited_after;
-  table->UpdateAndMarkReferences(&CollectVisitedCallback, &visited_after);
+  CollectVisitedVisitor collector_after(&visited_after);
+  table->UpdateAndMarkReferences(&collector_after);
   // Check that we visited a superset after.
   for (auto* obj : visited_before) {
     ASSERT_TRUE(visited_after.find(obj) != visited_after.end()) << obj;
runtime/gc/accounting/remembered_set.cc
@@ -61,11 +61,10 @@ void RememberedSet::ClearCards() {
 class RememberedSetReferenceVisitor {
  public:
-  RememberedSetReferenceVisitor(MarkHeapReferenceCallback* callback,
-                                DelayReferenceReferentCallback* ref_callback,
-                                space::ContinuousSpace* target_space,
-                                bool* const contains_reference_to_target_space, void* arg)
-      : callback_(callback), ref_callback_(ref_callback), target_space_(target_space), arg_(arg),
+  RememberedSetReferenceVisitor(space::ContinuousSpace* target_space,
+                                bool* const contains_reference_to_target_space,
+                                collector::GarbageCollector* collector)
+      : collector_(collector), target_space_(target_space),
         contains_reference_to_target_space_(contains_reference_to_target_space) {}

   void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
@@ -74,7 +73,7 @@ class RememberedSetReferenceVisitor {
     mirror::HeapReference<mirror::Object>* ref_ptr = obj->GetFieldObjectReferenceAddr(offset);
     if (target_space_->HasAddress(ref_ptr->AsMirrorPtr())) {
       *contains_reference_to_target_space_ = true;
-      callback_(ref_ptr, arg_);
+      collector_->MarkHeapReference(ref_ptr);
       DCHECK(!target_space_->HasAddress(ref_ptr->AsMirrorPtr()));
     }
   }
@@ -84,49 +83,43 @@ class RememberedSetReferenceVisitor {
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
     if (target_space_->HasAddress(ref->GetReferent())) {
       *contains_reference_to_target_space_ = true;
-      ref_callback_(klass, ref, arg_);
+      collector_->DelayReferenceReferent(klass, ref);
     }
   }

  private:
-  MarkHeapReferenceCallback* const callback_;
-  DelayReferenceReferentCallback* const ref_callback_;
+  collector::GarbageCollector* const collector_;
   space::ContinuousSpace* const target_space_;
-  void* const arg_;
   bool* const contains_reference_to_target_space_;
 };

 class RememberedSetObjectVisitor {
  public:
-  RememberedSetObjectVisitor(MarkHeapReferenceCallback* callback,
-                             DelayReferenceReferentCallback* ref_callback,
-                             space::ContinuousSpace* target_space,
-                             bool* const contains_reference_to_target_space, void* arg)
-      : callback_(callback), ref_callback_(ref_callback), target_space_(target_space), arg_(arg),
+  RememberedSetObjectVisitor(space::ContinuousSpace* target_space,
+                             bool* const contains_reference_to_target_space,
+                             collector::GarbageCollector* collector)
+      : collector_(collector), target_space_(target_space),
         contains_reference_to_target_space_(contains_reference_to_target_space) {}

   void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    RememberedSetReferenceVisitor visitor(callback_, ref_callback_, target_space_,
-                                          contains_reference_to_target_space_, arg_);
+    RememberedSetReferenceVisitor visitor(target_space_, contains_reference_to_target_space_,
+                                          collector_);
     obj->VisitReferences<kMovingClasses>(visitor, visitor);
   }

  private:
-  MarkHeapReferenceCallback* const callback_;
-  DelayReferenceReferentCallback* const ref_callback_;
+  collector::GarbageCollector* const collector_;
   space::ContinuousSpace* const target_space_;
-  void* const arg_;
   bool* const contains_reference_to_target_space_;
 };

-void RememberedSet::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
-                                            DelayReferenceReferentCallback* ref_callback,
-                                            space::ContinuousSpace* target_space, void* arg) {
+void RememberedSet::UpdateAndMarkReferences(space::ContinuousSpace* target_space,
+                                            collector::GarbageCollector* collector) {
   CardTable* card_table = heap_->GetCardTable();
   bool contains_reference_to_target_space = false;
-  RememberedSetObjectVisitor obj_visitor(callback, ref_callback, target_space,
-                                         &contains_reference_to_target_space, arg);
+  RememberedSetObjectVisitor obj_visitor(target_space, &contains_reference_to_target_space,
+                                         collector);
   ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
   CardSet remove_card_set;
   for (uint8_t* const card_addr : dirty_cards_) {
runtime/gc/accounting/remembered_set.h
@@ -29,6 +29,7 @@ namespace art {
 namespace gc {

 namespace collector {
+class GarbageCollector;
 class MarkSweep;
 }  // namespace collector

 namespace space {
@@ -53,9 +54,8 @@ class RememberedSet {
   void ClearCards();

   // Mark through all references to the target space.
-  void UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
-                               DelayReferenceReferentCallback* ref_callback,
-                               space::ContinuousSpace* target_space, void* arg)
+  void UpdateAndMarkReferences(space::ContinuousSpace* target_space,
+                               collector::GarbageCollector* collector)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
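The remembered-set API follows the same pattern: the two callbacks and the void* argument collapse into a single collector::GarbageCollector*, which supplies both MarkHeapReference and DelayReferenceReferent. The line below shows the expected caller side written against the new signature; the actual caller (the semi-space collector, changed elsewhere in this commit) is not shown in this excerpt, so treat the snippet as a sketch.

    // Hypothetical caller: "this" is a collector::GarbageCollector, target_space is the space
    // being evacuated, and remembered_set is the heap's RememberedSet for a non-moving space.
    remembered_set->UpdateAndMarkReferences(target_space, this);
    // was: remembered_set->UpdateAndMarkReferences(&MarkHeapReferenceCallback,
    //                                              &DelayReferenceReferentCallback,
    //                                              target_space, this);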
runtime/gc/accounting/space_bitmap.h
@@ -30,6 +30,7 @@
 namespace art {

 namespace mirror {
+class Class;
 class Object;
 }  // namespace mirror
 class MemMap;
runtime/gc/allocation_record.cc
@@ -110,23 +110,24 @@ void AllocRecordObjectMap::VisitRoots(RootVisitor* visitor) {
   }
 }

-static inline void SweepClassObject(AllocRecord* record, IsMarkedCallback* callback, void* arg)
+static inline void SweepClassObject(AllocRecord* record, IsMarkedVisitor* visitor)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
     EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_) {
   GcRoot<mirror::Class>& klass = record->GetClassGcRoot();
   // This does not need a read barrier because this is called by GC.
   mirror::Object* old_object = klass.Read<kWithoutReadBarrier>();
-  // The class object can become null if we implement class unloading.
-  // In that case we might still want to keep the class name string (not implemented).
-  mirror::Object* new_object = UNLIKELY(old_object == nullptr) ?
-      nullptr : callback(old_object, arg);
-  if (UNLIKELY(old_object != new_object)) {
-    mirror::Class* new_klass = UNLIKELY(new_object == nullptr) ? nullptr : new_object->AsClass();
-    klass = GcRoot<mirror::Class>(new_klass);
+  if (old_object != nullptr) {
+    // The class object can become null if we implement class unloading.
+    // In that case we might still want to keep the class name string (not implemented).
+    mirror::Object* new_object = visitor->IsMarked(old_object);
+    DCHECK(new_object != nullptr);
+    if (UNLIKELY(old_object != new_object)) {
+      klass = GcRoot<mirror::Class>(new_object->AsClass());
+    }
   }
 }

-void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedCallback* callback, void* arg) {
+void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedVisitor* visitor) {
   VLOG(heap) << "Start SweepAllocationRecords()";
   size_t count_deleted = 0, count_moved = 0, count = 0;
   // Only the first (size - recent_record_max_) number of records can be deleted.
@@ -141,11 +142,11 @@ void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedCallback* callback, vo
     // This does not need a read barrier because this is called by GC.
     mirror::Object* old_object = it->first.Read<kWithoutReadBarrier>();
     AllocRecord* record = it->second;
-    mirror::Object* new_object = old_object == nullptr ? nullptr : callback(old_object, arg);
+    mirror::Object* new_object = old_object == nullptr ? nullptr : visitor->IsMarked(old_object);
     if (new_object == nullptr) {
       if (count > delete_bound) {
         it->first = GcRoot<mirror::Object>(nullptr);
-        SweepClassObject(record, callback, arg);
+        SweepClassObject(record, visitor);
         ++it;
       } else {
         delete record;
@@ -157,7 +158,7 @@ void AllocRecordObjectMap::SweepAllocationRecords(IsMarkedCallback* callback, vo
       it->first = GcRoot<mirror::Object>(new_object);
       ++count_moved;
     }
-    SweepClassObject(record, callback, arg);
+    SweepClassObject(record, visitor);
     ++it;
   }
 }
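SweepAllocationRecords relies on the IsMarkedVisitor contract: IsMarked returns null for an object the GC found dead, and the (possibly new) address for a live object, which is how a moving collector updates stale GcRoots. Below is a self-contained toy that illustrates that contract with made-up names; it is not ART code.

    // Toy model of the IsMarked contract used by the sweep above.
    #include <unordered_map>

    struct Obj {};

    class ToyIsMarkedVisitor {
     public:
      // Maps live objects to their current address (identity for a non-moving GC).
      std::unordered_map<Obj*, Obj*> live;

      // Null means "dead"; otherwise the returned pointer is where the object lives now.
      Obj* IsMarked(Obj* obj) {
        auto it = live.find(obj);
        return it == live.end() ? nullptr : it->second;
      }
    };

    // Mirrors the record-sweeping loop: drop dead entries, update moved ones.
    void SweepRoot(Obj*& root, ToyIsMarkedVisitor* visitor) {
      if (root == nullptr) {
        return;
      }
      root = visitor->IsMarked(root);  // nullptr if dead, new address if the object was moved.
    }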
runtime/gc/allocation_record.h
@@ -261,7 +261,7 @@ class AllocRecordObjectMap {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);

-  void SweepAllocationRecords(IsMarkedCallback* callback, void* arg)
+  void SweepAllocationRecords(IsMarkedVisitor* visitor)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::alloc_tracker_lock_);
runtime/gc/collector/concurrent_copying.cc
@@ -73,6 +73,12 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap, const std::string& name_prefix)
   }
 }

+void ConcurrentCopying::MarkHeapReference(
+    mirror::HeapReference<mirror::Object>* from_ref ATTRIBUTE_UNUSED) {
+  // Unused, usually called from mod union tables.
+  UNIMPLEMENTED(FATAL);
+}
+
 ConcurrentCopying::~ConcurrentCopying() {
   STLDeleteElements(&pooled_mark_stacks_);
 }
@@ -308,7 +314,7 @@ class ConcurrentCopyingImmuneSpaceObjVisitor {
   }

  private:
-  ConcurrentCopying* collector_;
+  ConcurrentCopying* const collector_;
 };

 class EmptyCheckpoint : public Closure {
@@ -429,7 +435,7 @@ void ConcurrentCopying::MarkingPhase() {
     LOG(INFO) << "ProcessReferences";
   }
   // Process weak references. This may produce new refs to process and have them processed via
-  // ProcessMarkStackCallback (in the GC exclusive mark stack mode).
+  // ProcessMarkStack (in the GC exclusive mark stack mode).
   ProcessReferences(self);
   CheckEmptyMarkStack();
   if (kVerboseMode) {
@@ -644,7 +650,7 @@ class ConcurrentCopyingVerifyNoFromSpaceRefsFieldVisitor {
   }

  private:
-  ConcurrentCopying* collector_;
+  ConcurrentCopying* const collector_;
 };

 class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
@@ -732,16 +738,9 @@ class ConcurrentCopyingAssertToSpaceInvariantRefsVisitor {
     }
     collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
   }

-  static void RootCallback(mirror::Object** root, void* arg, const RootInfo& /*root_info*/)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
-    ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector);
-    DCHECK(root != nullptr);
-    visitor(*root);
-  }
-
  private:
-  ConcurrentCopying* collector_;
+  ConcurrentCopying* const collector_;
 };

 class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
@@ -762,7 +761,7 @@ class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
   }

  private:
-  ConcurrentCopying* collector_;
+  ConcurrentCopying* const collector_;
 };

 class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
@@ -785,7 +784,7 @@ class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
   }

  private:
-  ConcurrentCopying* collector_;
+  ConcurrentCopying* const collector_;
 };

 class RevokeThreadLocalMarkStackCheckpoint : public Closure {
@@ -1088,7 +1087,7 @@ void ConcurrentCopying::CheckEmptyMarkStack() {
 void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
   TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
   ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
-  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
+  Runtime::Current()->SweepSystemWeaks(this);
 }

 void ConcurrentCopying::Sweep(bool swap_bitmaps) {
@@ -1293,7 +1292,7 @@ class ConcurrentCopyingComputeUnevacFromSpaceLiveRatioVisitor {
   }

  private:
-  ConcurrentCopying* collector_;
+  ConcurrentCopying* const collector_;
 };

 // Compute how much live objects are left in regions.
@@ -2029,14 +2028,9 @@ void ConcurrentCopying::FinishPhase() {
   heap_->ClearMarkedObjects();
 }

-mirror::Object* ConcurrentCopying::IsMarkedCallback(mirror::Object* from_ref, void* arg) {
-  return reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
-}
-
-bool ConcurrentCopying::IsHeapReferenceMarkedCallback(
-    mirror::HeapReference<mirror::Object>* field, void* arg) {
+bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
   mirror::Object* from_ref = field->AsMirrorPtr();
-  mirror::Object* to_ref = reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
+  mirror::Object* to_ref = IsMarked(from_ref);
   if (to_ref == nullptr) {
     return false;
   }
@@ -2048,18 +2042,12 @@ bool ConcurrentCopying::IsHeapReferenceMarkedCallback(
   return true;
 }

-mirror::Object* ConcurrentCopying::MarkCallback(mirror::Object* from_ref, void* arg) {
-  return reinterpret_cast<ConcurrentCopying*>(arg)->Mark(from_ref);
-}
-
-void ConcurrentCopying::ProcessMarkStackCallback(void* arg) {
-  ConcurrentCopying* concurrent_copying = reinterpret_cast<ConcurrentCopying*>(arg);
-  concurrent_copying->ProcessMarkStack();
+mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
+  return Mark(from_ref);
 }

 void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
-  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
-                                                         &IsHeapReferenceMarkedCallback, this);
+  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
 }

 void ConcurrentCopying::ProcessReferences(Thread* self) {
@@ -2067,8 +2055,7 @@ void ConcurrentCopying::ProcessReferences(Thread* self) {
   // We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   GetHeap()->GetReferenceProcessor()->ProcessReferences(
-      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
-      &IsHeapReferenceMarkedCallback, &MarkCallback, &ProcessMarkStackCallback, this);
+      true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
 }

 void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
runtime/gc/collector/concurrent_copying.h
@@ -130,18 +130,16 @@ class ConcurrentCopying : public GarbageCollector {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void SwitchToSharedMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void SwitchToGcExclusiveMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+  virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void ProcessReferences(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Object* IsMarked(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static mirror::Object* MarkCallback(mirror::Object* from_ref, void* arg)
+  virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static mirror::Object* IsMarkedCallback(mirror::Object* from_ref, void* arg)
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static bool IsHeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* field, void* arg)
+  virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static void ProcessMarkStackCallback(void* arg)
+  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void SweepSystemWeaks(Thread* self)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
runtime/gc/collector/garbage_collector.h
@@ -17,6 +17,9 @@
 #ifndef ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_
 #define ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_

+#include <stdint.h>
+#include <vector>
+
 #include "base/histogram.h"
 #include "base/mutex.h"
 #include "base/timing_logger.h"
@@ -24,10 +27,16 @@
 #include "gc/gc_cause.h"
 #include "gc_root.h"
 #include "gc_type.h"
-#include <stdint.h>
-#include <vector>
+#include "object_callbacks.h"

 namespace art {

+namespace mirror {
+class Class;
+class Object;
+class Reference;
+}  // namespace mirror
+
 namespace gc {

 class Heap;
@@ -113,7 +122,7 @@ class Iteration {
   DISALLOW_COPY_AND_ASSIGN(Iteration);
 };

-class GarbageCollector : public RootVisitor {
+class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public MarkObjectVisitor {
  public:
   class SCOPED_LOCKABLE ScopedPause {
    public:
@@ -172,6 +181,22 @@ class GarbageCollector : public RootVisitor {
   void RecordFreeLOS(const ObjectBytePair& freed);
   void DumpPerformanceInfo(std::ostream& os) LOCKS_EXCLUDED(pause_histogram_lock_);

+  // Helper functions for querying if objects are marked at compile time. These are used for
+  // reading system weaks, processing references.
+  virtual mirror::Object* IsMarked(mirror::Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+  // Used by reference processor.
+  virtual void ProcessMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+  // Force mark an object.
+  virtual mirror::Object* MarkObject(mirror::Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+  virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+
  protected:
   // Run all of the GC phases.
   virtual void RunPhases() = 0;
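With these pure virtuals in the base class, each concrete collector supplies its own marking and liveness queries, and clients such as mod-union tables, remembered sets, the reference processor, and system-weak sweeping just hold a GarbageCollector* or one of its visitor bases. The sketch below implements a simplified stand-in for this interface rather than the real GarbageCollector (which also has timings, phases, and root visiting), so the names are illustrative only.

    // Simplified stand-in for the interface introduced above; not the real ART class.
    #include <unordered_set>

    namespace sketch {

    struct Object {};

    class Collector {
     public:
      virtual ~Collector() {}
      virtual Object* IsMarked(Object* obj) = 0;
      virtual Object* MarkObject(Object* obj) = 0;
    };

    // A non-moving toy collector: marking just records the pointer in a set.
    class ToyMarkSweep : public Collector {
     public:
      Object* IsMarked(Object* obj) override {
        return marked_.count(obj) != 0 ? obj : nullptr;
      }
      Object* MarkObject(Object* obj) override {
        if (obj != nullptr) {
          marked_.insert(obj);
        }
        return obj;  // Non-moving, so the address never changes.
      }

     private:
      std::unordered_set<Object*> marked_;
    };

    }  // namespace sketch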
runtime/gc/collector/mark_compact.cc
@@ -21,34 +21,19 @@
 #include "base/timing_logger.h"
 #include "gc/accounting/heap_bitmap-inl.h"
 #include "gc/accounting/mod_union_table.h"
 #include "gc/accounting/remembered_set.h"
 #include "gc/accounting/space_bitmap-inl.h"
 #include "gc/heap.h"
 #include "gc/reference_processor.h"
 #include "gc/space/bump_pointer_space.h"
 #include "gc/space/bump_pointer_space-inl.h"
 #include "gc/space/image_space.h"
 #include "gc/space/large_object_space.h"
 #include "gc/space/space-inl.h"
 #include "indirect_reference_table.h"
 #include "intern_table.h"
 #include "jni_internal.h"
 #include "mark_sweep-inl.h"
 #include "monitor.h"
 #include "mirror/class-inl.h"
 #include "mirror/class_loader.h"
 #include "mirror/dex_cache.h"
 #include "mirror/reference-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/object_array.h"
 #include "mirror/object_array-inl.h"
 #include "runtime.h"
 #include "stack.h"
 #include "thread-inl.h"
 #include "thread_list.h"

-using ::art::mirror::Object;
-
 namespace art {
 namespace gc {
 namespace collector {
@@ -67,7 +52,7 @@ void MarkCompact::BindBitmaps() {
 MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
     : GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "mark compact"),
-      space_(nullptr), collector_name_(name_) {
+      space_(nullptr), collector_name_(name_), updating_references_(false) {
 }

 void MarkCompact::RunPhases() {
@@ -107,7 +92,7 @@ class CalculateObjectForwardingAddressVisitor {
   void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
                                                                       Locks::heap_bitmap_lock_) {
     DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment);
-    DCHECK(collector_->IsMarked(obj));
+    DCHECK(collector_->IsMarked(obj) != nullptr);
     collector_->ForwardObject(obj);
   }
@@ -141,8 +126,7 @@ void MarkCompact::InitializePhase() {
 void MarkCompact::ProcessReferences(Thread* self) {
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   heap_->GetReferenceProcessor()->ProcessReferences(
-      false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
-      &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
+      false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
 }

 class BitmapSetSlowPathVisitor {
@@ -156,29 +140,29 @@ class BitmapSetSlowPathVisitor {
   }
 };

-inline void MarkCompact::MarkObject(mirror::Object* obj) {
+inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
   if (obj == nullptr) {
-    return;
+    return obj;
   }
   if (kUseBakerOrBrooksReadBarrier) {
     // Verify all the objects have the correct forward pointer installed.
     obj->AssertReadBarrierPointer();
   }
-  if (immune_region_.ContainsObject(obj)) {
-    return;
-  }
-  if (objects_before_forwarding_->HasAddress(obj)) {
-    if (!objects_before_forwarding_->Set(obj)) {
-      MarkStackPush(obj);  // This object was not previously marked.
-    }
-  } else {
-    DCHECK(!space_->HasAddress(obj));
-    BitmapSetSlowPathVisitor visitor;
-    if (!mark_bitmap_->Set(obj, visitor)) {
-      // This object was not previously marked.
-      MarkStackPush(obj);
+  if (!immune_region_.ContainsObject(obj)) {
+    if (objects_before_forwarding_->HasAddress(obj)) {
+      if (!objects_before_forwarding_->Set(obj)) {
+        MarkStackPush(obj);  // This object was not previously marked.
+      }
+    } else {
+      DCHECK(!space_->HasAddress(obj));
+      BitmapSetSlowPathVisitor visitor;
+      if (!mark_bitmap_->Set(obj, visitor)) {
+        // This object was not previously marked.
+        MarkStackPush(obj);
+      }
     }
   }
+  return obj;
 }

 void MarkCompact::MarkingPhase() {
@@ -240,7 +224,7 @@ void MarkCompact::UpdateAndMarkModUnion() {
       TimingLogger::ScopedTiming t2(
           space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                    "UpdateAndMarkImageModUnionTable", GetTimings());
-      table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+      table->UpdateAndMarkReferences(this);
     }
   }
 }
@@ -272,7 +256,7 @@ void MarkCompact::ReclaimPhase() {
 }

 void MarkCompact::ResizeMarkStack(size_t new_size) {
-  std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
+  std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
   CHECK_LE(mark_stack_->Size(), new_size);
   mark_stack_->Resize(new_size);
   for (auto& obj : temp) {
@@ -280,7 +264,7 @@ void MarkCompact::ResizeMarkStack(size_t new_size) {
   }
 }

-inline void MarkCompact::MarkStackPush(Object* obj) {
+inline void MarkCompact::MarkStackPush(mirror::Object* obj) {
   if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
     ResizeMarkStack(mark_stack_->Capacity() * 2);
   }
@@ -288,23 +272,12 @@ inline void MarkCompact::MarkStackPush(Object* obj) {
   mark_stack_->PushBack(obj);
 }

-void MarkCompact::ProcessMarkStackCallback(void* arg) {
-  reinterpret_cast<MarkCompact*>(arg)->ProcessMarkStack();
-}
-
-mirror::Object* MarkCompact::MarkObjectCallback(mirror::Object* root, void* arg) {
-  reinterpret_cast<MarkCompact*>(arg)->MarkObject(root);
-  return root;
-}
-
-void MarkCompact::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
-                                            void* arg) {
-  reinterpret_cast<MarkCompact*>(arg)->MarkObject(obj_ptr->AsMirrorPtr());
-}
-
-void MarkCompact::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
-                                                 void* arg) {
-  reinterpret_cast<MarkCompact*>(arg)->DelayReferenceReferent(klass, ref);
+void MarkCompact::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) {
+  if (updating_references_) {
+    UpdateHeapReference(obj_ptr);
+  } else {
+    MarkObject(obj_ptr->AsMirrorPtr());
+  }
 }

 void MarkCompact::VisitRoots(
@@ -373,6 +346,7 @@ class UpdateObjectReferencesVisitor {
 void MarkCompact::UpdateReferences() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+  updating_references_ = true;
   Runtime* runtime = Runtime::Current();
   // Update roots.
   UpdateRootVisitor update_root_visitor(this);
@@ -387,7 +361,7 @@ void MarkCompact::UpdateReferences() {
           space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" :
                                    "UpdateImageModUnionTableReferences",
           GetTimings());
-      table->UpdateAndMarkReferences(&UpdateHeapReferenceCallback, this);
+      table->UpdateAndMarkReferences(this);
     } else {
       // No mod union table, so we need to scan the space using bitmap visit.
       // Scan the space using bitmap visit.
@@ -403,14 +377,15 @@ void MarkCompact::UpdateReferences() {
   CHECK(!kMovingClasses)
       << "Didn't update large object classes since they are assumed to not move.";
   // Update the system weaks, these should already have been swept.
-  runtime->SweepSystemWeaks(&MarkedForwardingAddressCallback, this);
+  runtime->SweepSystemWeaks(this);
   // Update the objects in the bump pointer space last, these objects don't have a bitmap.
   UpdateObjectReferencesVisitor visitor(this);
   objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
                                                reinterpret_cast<uintptr_t>(space_->End()),
                                                visitor);
   // Update the reference processor cleared list.
-  heap_->GetReferenceProcessor()->UpdateRoots(&MarkedForwardingAddressCallback, this);
+  heap_->GetReferenceProcessor()->UpdateRoots(this);
+  updating_references_ = false;
 }

 void MarkCompact::Compact() {
@@ -436,10 +411,6 @@ void MarkCompact::MarkRoots() {
   Runtime::Current()->VisitRoots(this);
 }

-mirror::Object* MarkCompact::MarkedForwardingAddressCallback(mirror::Object* obj, void* arg) {
-  return reinterpret_cast<MarkCompact*>(arg)->GetMarkedForwardAddress(obj);
-}
-
 inline void MarkCompact::UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference) {
   mirror::Object* obj = reference->AsMirrorPtr();
   if (obj != nullptr) {
@@ -451,17 +422,12 @@ inline void MarkCompact::UpdateHeapReference(mirror::HeapReference<mirror::Objec
   }
 }

-void MarkCompact::UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference,
-                                              void* arg) {
-  reinterpret_cast<MarkCompact*>(arg)->UpdateHeapReference(reference);
-}
-
 class UpdateReferenceVisitor {
  public:
   explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) {
   }

-  void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+  void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
       ALWAYS_INLINE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
   }
@@ -481,7 +447,7 @@ void MarkCompact::UpdateObjectReferences(mirror::Object* obj) {
   obj->VisitReferences<kMovingClasses>(visitor, visitor);
 }

-inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj) const {
+inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj) {
   DCHECK(obj != nullptr);
   if (objects_before_forwarding_->HasAddress(obj)) {
     DCHECK(objects_before_forwarding_->Test(obj));
@@ -491,33 +457,30 @@ inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj)
     return ret;
   }
   DCHECK(!space_->HasAddress(obj));
-  DCHECK(IsMarked(obj));
   return obj;
 }

-inline bool MarkCompact::IsMarked(const Object* object) const {
+mirror::Object* MarkCompact::IsMarked(mirror::Object* object) {
   if (immune_region_.ContainsObject(object)) {
-    return true;
+    return object;
   }
+  if (updating_references_) {
+    return GetMarkedForwardAddress(object);
+  }
   if (objects_before_forwarding_->HasAddress(object)) {
-    return objects_before_forwarding_->Test(object);
+    return objects_before_forwarding_->Test(object) ? object : nullptr;
   }
-  return mark_bitmap_->Test(object);
-}
-
-mirror::Object* MarkCompact::IsMarkedCallback(mirror::Object* object, void* arg) {
-  return reinterpret_cast<MarkCompact*>(arg)->IsMarked(object) ? object : nullptr;
+  return mark_bitmap_->Test(object) ? object : nullptr;
 }

-bool MarkCompact::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr,
-                                              void* arg) {
+bool MarkCompact::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr) {
   // Side effect free since we call this before ever moving objects.
-  return reinterpret_cast<MarkCompact*>(arg)->IsMarked(ref_ptr->AsMirrorPtr());
+  return IsMarked(ref_ptr->AsMirrorPtr()) != nullptr;
 }

 void MarkCompact::SweepSystemWeaks() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
+  Runtime::Current()->SweepSystemWeaks(this);
 }

 bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const {
@@ -592,8 +555,7 @@ void MarkCompact::SweepLargeObjects(bool swap_bitmaps) {
 // Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
 // marked, put it on the appropriate list in the heap for later processing.
 void MarkCompact::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
-  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
-                                                         &HeapReferenceMarkedCallback, this);
+  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
 }

 class MarkCompactMarkObjectVisitor {
@@ -601,7 +563,7 @@ class MarkCompactMarkObjectVisitor {
   explicit MarkCompactMarkObjectVisitor(MarkCompact* collector) : collector_(collector) {
   }

-  void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
+  void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
     // Object was already verified when we scanned it.
     collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset));
@@ -618,7 +580,7 @@ class MarkCompactMarkObjectVisitor {
 };

 // Visit all of the references of an object and update.
-void MarkCompact::ScanObject(Object* obj) {
+void MarkCompact::ScanObject(mirror::Object* obj) {
   MarkCompactMarkObjectVisitor visitor(this);
   obj->VisitReferences<kMovingClasses>(visitor, visitor);
 }
@@ -627,7 +589,7 @@ void MarkCompact::ScanObject(Object* obj) {
 void MarkCompact::ProcessMarkStack() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   while (!mark_stack_->IsEmpty()) {
-    Object* obj = mark_stack_->PopBack();
+    mirror::Object* obj = mark_stack_->PopBack();
     DCHECK(obj != nullptr);
     ScanObject(obj);
   }
runtime/gc/collector/mark_compact.h
@@ -121,23 +121,6 @@ class MarkCompact : public GarbageCollector {
                   const RootInfo& info)
       OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

-  static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
-  static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, void* arg)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
-  static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr, void* arg)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
-  static void ProcessMarkStackCallback(void* arg)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
-  static void DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
-                                             void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
   // Schedules an unmarked object for reference processing.
   void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -145,11 +128,7 @@ class MarkCompact : public GarbageCollector {
  protected:
   // Returns null if the object is not marked, otherwise returns the forwarding address (same as
   // object for non movable things).
-  mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
+  mirror::Object* GetMarkedForwardAddress(mirror::Object* object)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -184,30 +163,27 @@ class MarkCompact : public GarbageCollector {
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);

   // Update the references of objects by using the forwarding addresses.
   void UpdateReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-  static void UpdateRootCallback(mirror::Object** root, void* arg, const RootInfo& /*root_info*/)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

   // Move objects and restore lock words.
   void MoveObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Move a single object to its forward address.
   void MoveObject(mirror::Object* obj, size_t len) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Mark a single object.
-  void MarkObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
-                                                                Locks::mutator_lock_);
-  bool IsMarked(const mirror::Object* obj) const
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+  virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+  virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
   void ForwardObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
                                                                    Locks::mutator_lock_);
   // Update a single heap reference.
   void UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static void UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference,
-                                          void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

   // Update all of the references of a single object.
   void UpdateObjectReferences(mirror::Object* obj)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
@@ -242,6 +218,9 @@ class MarkCompact : public GarbageCollector {
   // Which lock words we need to restore as we are moving objects.
   std::deque<LockWord> lock_words_to_restore_;

+  // State whether or not we are updating references.
+  bool updating_references_;
+
  private:
   friend class BitmapSetSlowPathVisitor;
   friend class CalculateObjectForwardingAddressVisitor;
runtime/gc/collector/mark_sweep.cc
@@ -37,7 +37,6 @@
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
...
...
@@ -47,8 +46,6 @@
#include "thread-inl.h"
#include "thread_list.h"
using
::
art
::
mirror
::
Object
;
namespace
art
{
namespace
gc
{
namespace
collector
{
...
...
@@ -175,8 +172,7 @@ void MarkSweep::RunPhases() {
void
MarkSweep
::
ProcessReferences
(
Thread
*
self
)
{
WriterMutexLock
mu
(
self
,
*
Locks
::
heap_bitmap_lock_
);
GetHeap
()
->
GetReferenceProcessor
()
->
ProcessReferences
(
true
,
GetTimings
(),
GetCurrentIteration
()
->
GetClearSoftReferences
(),
&
HeapReferenceMarkedCallback
,
&
MarkObjectCallback
,
&
ProcessMarkStackCallback
,
this
);
true
,
GetTimings
(),
GetCurrentIteration
()
->
GetClearSoftReferences
(),
this
);
}
void
MarkSweep
::
PausePhase
()
{
...
...
@@ -273,7 +269,7 @@ void MarkSweep::UpdateAndMarkModUnion() {
TimingLogger
::
ScopedTiming
t
(
name
,
GetTimings
());
accounting
::
ModUnionTable
*
mod_union_table
=
heap_
->
FindModUnionTableFromSpace
(
space
);
CHECK
(
mod_union_table
!=
nullptr
);
mod_union_table
->
UpdateAndMarkReferences
(
MarkHeapReferenceCallback
,
this
);
mod_union_table
->
UpdateAndMarkReferences
(
this
);
}
}
}
...
...
@@ -333,7 +329,7 @@ void MarkSweep::ResizeMarkStack(size_t new_size) {
// Someone else acquired the lock and expanded the mark stack before us.
return
;
}
std
::
vector
<
StackReference
<
Object
>>
temp
(
mark_stack_
->
Begin
(),
mark_stack_
->
End
());
std
::
vector
<
StackReference
<
mirror
::
Object
>>
temp
(
mark_stack_
->
Begin
(),
mark_stack_
->
End
());
CHECK_LE
(
mark_stack_
->
Size
(),
new_size
);
mark_stack_
->
Resize
(
new_size
);
for
(
auto
&
obj
:
temp
)
{
...
...
@@ -341,7 +337,7 @@ void MarkSweep::ResizeMarkStack(size_t new_size) {
}
}
inline
void
MarkSweep
::
MarkObjectNonNullParallel
(
Object
*
obj
)
{
inline
void
MarkSweep
::
MarkObjectNonNullParallel
(
mirror
::
Object
*
obj
)
{
DCHECK
(
obj
!=
nullptr
);
if
(
MarkObjectParallel
(
obj
))
{
MutexLock
mu
(
Thread
::
Current
(),
mark_stack_lock_
);
...
...
@@ -353,28 +349,18 @@ inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
}
}
mirror
::
Object
*
MarkSweep
::
MarkObjectCallback
(
mirror
::
Object
*
obj
,
void
*
arg
)
{
MarkSweep
*
mark_sweep
=
reinterpret_cast
<
MarkSweep
*>
(
arg
);
mark_sweep
->
MarkObject
(
obj
);
return
obj
;
}
void
MarkSweep
::
MarkHeapReferenceCallback
(
mirror
::
HeapReference
<
mirror
::
Object
>*
ref
,
void
*
arg
)
{
reinterpret_cast
<
MarkSweep
*>
(
arg
)
->
MarkObject
(
ref
->
AsMirrorPtr
());
}
bool
MarkSweep
::
HeapReferenceMarkedCallback
(
mirror
::
HeapReference
<
mirror
::
Object
>*
ref
,
void
*
arg
)
{
return
reinterpret_cast
<
MarkSweep
*>
(
arg
)
->
IsMarked
(
ref
->
AsMirrorPtr
());
bool
MarkSweep
::
IsMarkedHeapReference
(
mirror
::
HeapReference
<
mirror
::
Object
>*
ref
)
{
return
IsMarked
(
ref
->
AsMirrorPtr
());
}
class
MarkSweepMarkObjectSlowPath
{
public:
explicit
MarkSweepMarkObjectSlowPath
(
MarkSweep
*
mark_sweep
,
Object
*
holder
=
nullptr
,
explicit
MarkSweepMarkObjectSlowPath
(
MarkSweep
*
mark_sweep
,
mirror
::
Object
*
holder
=
nullptr
,
MemberOffset
offset
=
MemberOffset
(
0
))
:
mark_sweep_
(
mark_sweep
),
holder_
(
holder
),
offset_
(
offset
)
{
}
void
operator
()(
const
Object
*
obj
)
const
ALWAYS_INLINE
NO_THREAD_SAFETY_ANALYSIS
{
void
operator
()(
const
mirror
::
Object
*
obj
)
const
ALWAYS_INLINE
NO_THREAD_SAFETY_ANALYSIS
{
if
(
kProfileLargeObjects
)
{
// TODO: Differentiate between marking and testing somehow.
++
mark_sweep_
->
large_object_test_
;
...
...
@@ -450,7 +436,8 @@ class MarkSweepMarkObjectSlowPath {
MemberOffset
offset_
;
};
inline
void
MarkSweep
::
MarkObjectNonNull
(
Object
*
obj
,
Object
*
holder
,
MemberOffset
offset
)
{
inline
void
MarkSweep
::
MarkObjectNonNull
(
mirror
::
Object
*
obj
,
mirror
::
Object
*
holder
,
MemberOffset
offset
)
{
DCHECK
(
obj
!=
nullptr
);
if
(
kUseBakerOrBrooksReadBarrier
)
{
// Verify all the objects have the correct pointer installed.
...
...
@@ -481,7 +468,7 @@ inline void MarkSweep::MarkObjectNonNull(Object* obj, Object* holder, MemberOffs
}
}
inline
void
MarkSweep
::
PushOnMarkStack
(
Object
*
obj
)
{
inline
void
MarkSweep
::
PushOnMarkStack
(
mirror
::
Object
*
obj
)
{
if
(
UNLIKELY
(
mark_stack_
->
Size
()
>=
mark_stack_
->
Capacity
()))
{
// Lock is not needed but is here anyways to please annotalysis.
MutexLock
mu
(
Thread
::
Current
(),
mark_stack_lock_
);
...
...
@@ -491,14 +478,14 @@ inline void MarkSweep::PushOnMarkStack(Object* obj) {
mark_stack_
->
PushBack
(
obj
);
}
inline
bool
MarkSweep
::
MarkObjectParallel
(
const
Object
*
obj
)
{
inline
bool
MarkSweep
::
MarkObjectParallel
(
mirror
::
Object
*
obj
)
{
DCHECK
(
obj
!=
nullptr
);
if
(
kUseBakerOrBrooksReadBarrier
)
{
// Verify all the objects have the correct pointer installed.
obj
->
AssertReadBarrierPointer
();
}
if
(
immune_region_
.
ContainsObject
(
obj
))
{
DCHECK
(
IsMarked
(
obj
));
DCHECK
(
IsMarked
(
obj
)
!=
nullptr
);
return
false
;
}
// Try to take advantage of locality of references within a space, failing this find the space
...
...
@@ -511,8 +498,18 @@ inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
return
!
mark_bitmap_
->
AtomicTestAndSet
(
obj
,
visitor
);
}
mirror
::
Object
*
MarkSweep
::
MarkObject
(
mirror
::
Object
*
obj
)
{
MarkObject
(
obj
,
nullptr
,
MemberOffset
(
0
));
return
obj
;
}
void
MarkSweep
::
MarkHeapReference
(
mirror
::
HeapReference
<
mirror
::
Object
>*
ref
)
{
MarkObject
(
ref
->
AsMirrorPtr
(),
nullptr
,
MemberOffset
(
0
));
}
// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
inline
void
MarkSweep
::
MarkObject
(
Object
*
obj
,
Object
*
holder
,
MemberOffset
offset
)
{
inline
void
MarkSweep
::
MarkObject
(
mirror
::
Object
*
obj
,
mirror
::
Object
*
holder
,
MemberOffset
offset
)
{
if
(
obj
!=
nullptr
)
{
MarkObjectNonNull
(
obj
,
holder
,
offset
);
}
else
if
(
kCountMarkedObjects
)
{
...
...
@@ -526,7 +523,7 @@ class VerifyRootMarkedVisitor : public SingleRootVisitor {
void
VisitRoot
(
mirror
::
Object
*
root
,
const
RootInfo
&
info
)
OVERRIDE
SHARED_LOCKS_REQUIRED
(
Locks
::
mutator_lock_
,
Locks
::
heap_bitmap_lock_
)
{
CHECK
(
collector_
->
IsMarked
(
root
))
<<
info
.
ToString
();
CHECK
(
collector_
->
IsMarked
(
root
)
!=
nullptr
)
<<
info
.
ToString
();
}
private:
...
...
@@ -599,7 +596,8 @@ class ScanObjectVisitor {
explicit
ScanObjectVisitor
(
MarkSweep
*
const
mark_sweep
)
ALWAYS_INLINE
:
mark_sweep_
(
mark_sweep
)
{}
void
operator
()(
Object
*
obj
)
const
ALWAYS_INLINE
SHARED_LOCKS_REQUIRED
(
Locks
::
mutator_lock_
)
void
operator
()(
mirror
::
Object
*
obj
)
const
ALWAYS_INLINE
SHARED_LOCKS_REQUIRED
(
Locks
::
mutator_lock_
)
EXCLUSIVE_LOCKS_REQUIRED
(
Locks
::
heap_bitmap_lock_
)
{
if
(
kCheckLocks
)
{
Locks
::
mutator_lock_
->
AssertSharedHeld
(
Thread
::
Current
());
...
...
@@ -631,7 +629,7 @@ template <bool kUseFinger = false>
 class MarkStackTask : public Task {
  public:
   MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
-                StackReference<Object>* mark_stack)
+                StackReference<mirror::Object>* mark_stack)
       : mark_sweep_(mark_sweep),
         thread_pool_(thread_pool),
         mark_stack_pos_(mark_stack_size) {
...
@@ -655,7 +653,7 @@ class MarkStackTask : public Task {
                             MarkSweep* mark_sweep) ALWAYS_INLINE
       : chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
 
-  void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
+  void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
     if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
...
...
@@ -681,7 +679,7 @@ class MarkStackTask : public Task {
       : chunk_task_(chunk_task) {}
 
   // No thread safety analysis since multiple threads will use this visitor.
-  void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+  void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
     MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
     MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
...
@@ -704,11 +702,12 @@ class MarkStackTask : public Task {
   MarkSweep* const mark_sweep_;
   ThreadPool* const thread_pool_;
   // Thread local mark stack for this task.
-  StackReference<Object> mark_stack_[kMaxSize];
+  StackReference<mirror::Object> mark_stack_[kMaxSize];
   // Mark stack position.
   size_t mark_stack_pos_;
 
-  ALWAYS_INLINE void MarkStackPush(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
       // Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
       mark_stack_pos_ /= 2;
...
...
@@ -732,12 +731,12 @@ class MarkStackTask : public Task {
     ScanObjectParallelVisitor visitor(this);
     // TODO: Tune this.
     static const size_t kFifoSize = 4;
-    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
+    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
     for (;;) {
-      Object* obj = nullptr;
+      mirror::Object* obj = nullptr;
       if (kUseMarkStackPrefetch) {
         while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
-          Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
+          mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
           DCHECK(mark_stack_obj != nullptr);
           __builtin_prefetch(mark_stack_obj);
           prefetch_fifo.push_back(mark_stack_obj);
...
...
@@ -764,7 +763,7 @@ class CardScanTask : public MarkStackTask<false> {
   CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
                accounting::ContinuousSpaceBitmap* bitmap,
                uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
-               StackReference<Object>* mark_stack_obj, bool clear_card)
+               StackReference<mirror::Object>* mark_stack_obj, bool clear_card)
       : MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
         bitmap_(bitmap),
         begin_(begin),
...
@@ -815,8 +814,8 @@ void MarkSweep::ScanGrayObjects(bool paused, uint8_t minimum_age) {
     TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__, GetTimings());
     // Try to take some of the mark stack since we can pass this off to the worker tasks.
-    StackReference<Object>* mark_stack_begin = mark_stack_->Begin();
-    StackReference<Object>* mark_stack_end = mark_stack_->End();
+    StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin();
+    StackReference<mirror::Object>* mark_stack_end = mark_stack_->End();
     const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
     // Estimated number of work tasks we will create.
     const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
...
...
@@ -988,13 +987,6 @@ void MarkSweep::RecursiveMark() {
   ProcessMarkStack(false);
 }
 
-mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
-  if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
-    return object;
-  }
-  return nullptr;
-}
-
 void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
   ScanGrayObjects(paused, minimum_age);
   ProcessMarkStack(paused);
...
...
@@ -1015,16 +1007,23 @@ void MarkSweep::ReMarkRoots() {
 void MarkSweep::SweepSystemWeaks(Thread* self) {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
-  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
+  Runtime::Current()->SweepSystemWeaks(this);
 }
 
-mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
-  reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
-  // We don't actually want to sweep the object, so lets return "marked"
-  return obj;
-}
+class VerifySystemWeakVisitor : public IsMarkedVisitor {
+ public:
+  explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
+
+  virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+    mark_sweep_->VerifyIsLive(obj);
+    return obj;
+  }
+
+  MarkSweep* const mark_sweep_;
+};
 
-void MarkSweep::VerifyIsLive(const Object* obj) {
+void MarkSweep::VerifyIsLive(const mirror::Object* obj) {
   if (!heap_->GetLiveBitmap()->Test(obj)) {
     // TODO: Consider live stack? Has this code bitrotted?
     CHECK(!heap_->allocation_stack_->Contains(obj))
...
...
@@ -1035,7 +1034,8 @@ void MarkSweep::VerifyIsLive(const Object* obj) {
 void MarkSweep::VerifySystemWeaks() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
   // Verify system weaks, uses a special object visitor which returns the input object.
-  Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
+  VerifySystemWeakVisitor visitor(this);
+  Runtime::Current()->SweepSystemWeaks(&visitor);
 }
 
 class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
...
...
@@ -1122,7 +1122,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
   ObjectBytePair freed;
   ObjectBytePair freed_los;
   // How many objects are left in the array, modified after each space is swept.
-  StackReference<Object>* objects = allocations->Begin();
+  StackReference<mirror::Object>* objects = allocations->Begin();
   size_t count = allocations->Size();
   // Change the order to ensure that the non-moving space last swept as an optimization.
   std::vector<space::ContinuousSpace*> sweep_spaces;
...
@@ -1150,9 +1150,9 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
     if (swap_bitmaps) {
       std::swap(live_bitmap, mark_bitmap);
     }
-    StackReference<Object>* out = objects;
+    StackReference<mirror::Object>* out = objects;
     for (size_t i = 0; i < count; ++i) {
-      Object* const obj = objects[i].AsMirrorPtr();
+      mirror::Object* const obj = objects[i].AsMirrorPtr();
       if (kUseThreadLocalAllocationStack && obj == nullptr) {
         continue;
       }
...
...
@@ -1191,7 +1191,7 @@ void MarkSweep::SweepArray(accounting::ObjectStack* allocations, bool swap_bitma
     std::swap(large_live_objects, large_mark_objects);
   }
   for (size_t i = 0; i < count; ++i) {
-    Object* const obj = objects[i].AsMirrorPtr();
+    mirror::Object* const obj = objects[i].AsMirrorPtr();
     // Handle large objects.
     if (kUseThreadLocalAllocationStack && obj == nullptr) {
       continue;
...
...
@@ -1250,16 +1250,15 @@ void MarkSweep::DelayReferenceReferent(mirror::Class* klass, mirror::Reference*
   if (kCountJavaLangRefs) {
     ++reference_count_;
   }
-  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback,
-                                                         this);
+  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
 }
 
-class MarkObjectVisitor {
+class MarkVisitor {
  public:
-  explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
+  explicit MarkVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
   }
 
-  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
+  void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
       ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
     if (kCheckLocks) {
...
...
@@ -1275,16 +1274,12 @@ class MarkObjectVisitor {
 // Scans an object reference. Determines the type of the reference
 // and dispatches to a specialized scanning routine.
-void MarkSweep::ScanObject(Object* obj) {
-  MarkObjectVisitor mark_visitor(this);
+void MarkSweep::ScanObject(mirror::Object* obj) {
+  MarkVisitor mark_visitor(this);
   DelayReferenceReferentVisitor ref_visitor(this);
   ScanObjectVisit(obj, mark_visitor, ref_visitor);
 }
 
-void MarkSweep::ProcessMarkStackCallback(void* arg) {
-  reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
-}
-
 void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
   Thread* self = Thread::Current();
   ThreadPool* thread_pool = GetHeap()->GetThreadPool();
...
...
@@ -1317,12 +1312,12 @@ void MarkSweep::ProcessMarkStack(bool paused) {
   } else {
     // TODO: Tune this.
     static const size_t kFifoSize = 4;
-    BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
+    BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
     for (;;) {
-      Object* obj = nullptr;
+      mirror::Object* obj = nullptr;
       if (kUseMarkStackPrefetch) {
         while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
-          Object* mark_stack_obj = mark_stack_->PopBack();
+          mirror::Object* mark_stack_obj = mark_stack_->PopBack();
           DCHECK(mark_stack_obj != nullptr);
           __builtin_prefetch(mark_stack_obj);
           prefetch_fifo.push_back(mark_stack_obj);
...
...
@@ -1344,14 +1339,14 @@ void MarkSweep::ProcessMarkStack(bool paused) {
   }
 }
 
-inline bool MarkSweep::IsMarked(const Object* object) const {
+inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) {
   if (immune_region_.ContainsObject(object)) {
-    return true;
+    return object;
   }
   if (current_space_bitmap_->HasAddress(object)) {
-    return current_space_bitmap_->Test(object);
+    return current_space_bitmap_->Test(object) ? object : nullptr;
   }
-  return mark_bitmap_->Test(object);
+  return mark_bitmap_->Test(object) ? object : nullptr;
 }
 
 void MarkSweep::FinishPhase() {
...
...
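Editorial note: the mark_sweep.cc changes above replace static callbacks (IsMarkedCallback, ProcessMarkStackCallback, VerifySystemWeakIsLiveCallback) with virtual methods, and IsMarked() now returns the object (or nullptr) rather than a bool, so one entry point serves as both a predicate and a lookup. A minimal caller-side sketch, assuming the collector exposes IsMarked() to the caller; the helper name UpdateIfMoved is hypothetical and lock annotations are omitted:

  void UpdateIfMoved(collector::GarbageCollector* collector,
                     mirror::HeapReference<mirror::Object>* field) {
    mirror::Object* const old_ref = field->AsMirrorPtr();
    // nullptr means "not marked"; a moving collector may return a forwarded address.
    mirror::Object* const new_ref = collector->IsMarked(old_ref);
    if (new_ref != nullptr && new_ref != old_ref) {
      field->Assign(new_ref);
    }
  }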
runtime/gc/collector/mark_sweep.h  View file @ 97509954
...
...
@@ -170,18 +170,9 @@ class MarkSweep : public GarbageCollector {
   // Verify that an object is live, either in a live bitmap or in the allocation stack.
   void VerifyIsLive(const mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  static mirror::Object* MarkObjectCallback(mirror::Object* obj, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
+
+  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
...
...
@@ -194,13 +185,14 @@ class MarkSweep : public GarbageCollector {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  static void ProcessMarkStackCallback(void* arg)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
   // Marks an object.
-  void MarkObject(mirror::Object* obj, mirror::Object* holder = nullptr,
-                  MemberOffset offset = MemberOffset(0))
+  virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  void MarkObject(mirror::Object* obj, mirror::Object* holder, MemberOffset offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
...
...
@@ -214,15 +206,9 @@ class MarkSweep : public GarbageCollector {
  protected:
   // Returns true if the object has its bit set in the mark bitmap.
-  bool IsMarked(const mirror::Object* object) const
+  virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
   void MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder = nullptr,
                          MemberOffset offset = MemberOffset(0))
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
...
...
@@ -233,7 +219,7 @@ class MarkSweep : public GarbageCollector {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Returns true if we need to add obj to a mark stack.
-  bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+  bool MarkObjectParallel(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
 
   // Verify the roots of the heap and print out information related to any invalid roots.
   // Called in MarkObject, so may we may not hold the mutator lock.
...
...
@@ -258,6 +244,11 @@ class MarkSweep : public GarbageCollector {
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  virtual void ProcessMarkStack() OVERRIDE
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    ProcessMarkStack(false);
+  }
+
   // Recursively blackens objects on the mark stack.
   void ProcessMarkStack(bool paused)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
...
...
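Editorial note: the OVERRIDE markers in mark_sweep.h above imply that the base GarbageCollector now declares these hooks as virtuals. A rough sketch of the implied shape, assuming only the methods visible in this diff; this is not the actual header, and visibility, lock annotations, and any additional hooks may differ:

  class GarbageCollector {
   public:
    // Marks obj and returns it (possibly a forwarded address for a moving collector).
    virtual mirror::Object* MarkObject(mirror::Object* obj) = 0;
    virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) = 0;
    // Returns the marked object, or nullptr if it is not marked.
    virtual mirror::Object* IsMarked(mirror::Object* obj) = 0;
    // Returns true if the referent is marked, updating the reference if it moved.
    virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) = 0;
    virtual void ProcessMarkStack() = 0;
  };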
runtime/gc/collector/semi_space.cc  View file @ 97509954
...
...
@@ -157,8 +157,7 @@ void SemiSpace::InitializePhase() {
 void SemiSpace::ProcessReferences(Thread* self) {
   WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
   GetHeap()->GetReferenceProcessor()->ProcessReferences(
-      false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
-      &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
+      false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
 }
 
 void SemiSpace::MarkingPhase() {
...
...
@@ -336,7 +335,7 @@ void SemiSpace::MarkReachableObjects() {
           space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                    "UpdateAndMarkImageModUnionTable",
           GetTimings());
-      table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+      table->UpdateAndMarkReferences(this);
       DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
     } else if (collect_from_space_only_ && space->GetLiveBitmap() != nullptr) {
       // If the space has no mod union table (the non-moving space and main spaces when the bump
...
...
@@ -351,8 +350,7 @@ void SemiSpace::MarkReachableObjects() {
       CHECK_EQ(rem_set != nullptr, kUseRememberedSet);
       if (rem_set != nullptr) {
         TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
-        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
-                                         from_space_, this);
+        rem_set->UpdateAndMarkReferences(from_space_, this);
         if (kIsDebugBuild) {
           // Verify that there are no from-space references that
           // remain in the space, that is, the remembered set (and the
...
...
@@ -583,24 +581,14 @@ mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
   return forward_address;
 }
 
-void SemiSpace::ProcessMarkStackCallback(void* arg) {
-  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
-}
-
-mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
+mirror::Object* SemiSpace::MarkObject(mirror::Object* root) {
   auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
-  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
+  MarkObject(&ref);
   return ref.AsMirrorPtr();
 }
 
-void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
-                                          void* arg) {
-  reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
-}
-
-void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
-                                               void* arg) {
-  reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
+void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) {
+  MarkObject(obj_ptr);
 }
 
 void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
...
...
@@ -628,29 +616,9 @@ void SemiSpace::MarkRoots() {
   Runtime::Current()->VisitRoots(this);
 }
 
-bool SemiSpace::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object,
-                                            void* arg) {
-  mirror::Object* obj = object->AsMirrorPtr();
-  mirror::Object* new_obj = reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(obj);
-  if (new_obj == nullptr) {
-    return false;
-  }
-  if (new_obj != obj) {
-    // Write barrier is not necessary since it still points to the same object, just at a different
-    // address.
-    object->Assign(new_obj);
-  }
-  return true;
-}
-
-mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
-  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
-}
-
 void SemiSpace::SweepSystemWeaks() {
   TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
-  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
+  Runtime::Current()->SweepSystemWeaks(this);
 }
 
 bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
...
...
@@ -688,8 +656,7 @@ void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
 // Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
 // marked, put it on the appropriate list in the heap for later processing.
 void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
-  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
-                                                         &HeapReferenceMarkedCallback, this);
+  heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
 }
 
 class SemiSpaceMarkObjectVisitor {
...
...
@@ -746,8 +713,7 @@ void SemiSpace::ProcessMarkStack() {
   }
 }
 
-inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
-    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
   // All immune objects are assumed marked.
   if (from_space_->HasAddress(obj)) {
     // Returns either the forwarding address or null.
...
...
@@ -759,6 +725,20 @@ inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
   return mark_bitmap_->Test(obj) ? obj : nullptr;
 }
 
+bool SemiSpace::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) {
+  mirror::Object* obj = object->AsMirrorPtr();
+  mirror::Object* new_obj = IsMarked(obj);
+  if (new_obj == nullptr) {
+    return false;
+  }
+  if (new_obj != obj) {
+    // Write barrier is not necessary since it still points to the same object, just at a different
+    // address.
+    object->Assign(new_obj);
+  }
+  return true;
+}
+
 void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
   DCHECK(to_space != nullptr);
   to_space_ = to_space;
...
...
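Editorial note: for the moving SemiSpace collector, the virtual IsMarked() above doubles as a forwarding-address lookup, which is why the separate MarkedForwardingAddressCallback and HeapReferenceMarkedCallback could be deleted. A caller-side sketch of the resulting pattern; the function SweepWeakField is hypothetical and locking is omitted:

  void SweepWeakField(collector::GarbageCollector* collector,
                      mirror::HeapReference<mirror::Object>* referent) {
    // IsMarkedHeapReference() returns false for dead referents and, for live
    // referents that moved, rewrites the HeapReference to the to-space address.
    if (!collector->IsMarkedHeapReference(referent)) {
      // Dead referent: clear it or enqueue the reference for later processing.
    }
  }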
runtime/gc/collector/semi_space.h  View file @ 97509954
...
...
@@ -103,6 +103,12 @@ class SemiSpace : public GarbageCollector {
   void MarkObject(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
 
+  virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
   void ScanObject(mirror::Object* obj)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
...
...
@@ -140,19 +146,6 @@ class SemiSpace : public GarbageCollector {
                          const RootInfo& info) OVERRIDE
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
 
-  static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
-  static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, void* arg)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
-  static void ProcessMarkStackCallback(void* arg)
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
-  static void DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
-                                             void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
   virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
...
...
@@ -163,15 +156,11 @@ class SemiSpace : public GarbageCollector {
  protected:
   // Returns null if the object is not marked, otherwise returns the forwarding address (same as
   // object for non movable things).
-  mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const
-      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
-  static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object, void* arg)
+  virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
+  virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
       SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
...
...
runtime/gc/heap.cc  View file @ 97509954
...
...
@@ -3055,8 +3055,13 @@ void Heap::ProcessCards(TimingLogger* timings, bool use_rem_sets, bool process_a
   }
 }
 
-static void IdentityMarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>*, void*) {
-}
+struct IdentityMarkHeapReferenceVisitor : public MarkObjectVisitor {
+  virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
+    return obj;
+  }
+  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*) OVERRIDE {
+  }
+};
 
 void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
   Thread* const self = Thread::Current();
...
...
@@ -3085,7 +3090,8 @@ void Heap::PreGcVerificationPaused(collector::GarbageCollector* gc) {
     ReaderMutexLock reader_lock(self, *Locks::heap_bitmap_lock_);
     for (const auto& table_pair : mod_union_tables_) {
       accounting::ModUnionTable* mod_union_table = table_pair.second;
-      mod_union_table->UpdateAndMarkReferences(IdentityMarkHeapReferenceCallback, nullptr);
+      IdentityMarkHeapReferenceVisitor visitor;
+      mod_union_table->UpdateAndMarkReferences(&visitor);
       mod_union_table->Verify();
     }
   }
...
...
@@ -3714,11 +3720,11 @@ void Heap::VisitAllocationRecords(RootVisitor* visitor) const {
   }
 }
 
-void Heap::SweepAllocationRecords(IsMarkedCallback* visitor, void* arg) const {
+void Heap::SweepAllocationRecords(IsMarkedVisitor* visitor) const {
   if (IsAllocTrackingEnabled()) {
     MutexLock mu(Thread::Current(), *Locks::alloc_tracker_lock_);
     if (IsAllocTrackingEnabled()) {
-      GetAllocationRecords()->SweepAllocationRecords(visitor, arg);
+      GetAllocationRecords()->SweepAllocationRecords(visitor);
     }
   }
 }
...
...
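Editorial note: IdentityMarkHeapReferenceVisitor above shows the intended replacement for free-function callbacks, namely a small stack-allocated visitor. One practical benefit is that ad-hoc debug visitors can carry their own state instead of threading a void* argument through. A hypothetical example, assuming MarkObjectVisitor's pure virtuals are exactly the two methods overridden above:

  struct CountingMarkVisitor : public MarkObjectVisitor {
    size_t visited = 0;
    virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE {
      ++visited;   // state lives in the visitor, no void* side channel needed
      return obj;  // identity: verification only, nothing is actually marked
    }
    virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>*) OVERRIDE {}
  };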
runtime/gc/heap.h  View file @ 97509954
...
...
@@ -705,7 +705,7 @@ class Heap {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
 
-  void SweepAllocationRecords(IsMarkedCallback* visitor, void* arg) const
+  void SweepAllocationRecords(IsMarkedVisitor* visitor) const
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
       LOCKS_EXCLUDED(Locks::alloc_tracker_lock_);
...
...
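Editorial note: SweepAllocationRecords() now takes an IsMarkedVisitor* instead of an IsMarkedCallback/void* pair. Judging from VerifySystemWeakVisitor in mark_sweep.cc above, IsMarkedVisitor is an interface with a single object-returning IsMarked() method. A hedged sketch of a trivial implementation; the class name is hypothetical:

  class KeepEverythingVisitor : public IsMarkedVisitor {
   public:
    // Returning the input object means "treat it as marked", so nothing is swept.
    virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE {
      return obj;
    }
  };

It would be passed by address, e.g. heap->SweepAllocationRecords(&visitor);.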
runtime/gc/reference_processor.cc  View file @ 97509954
...
...
@@ -17,6 +17,7 @@
 #include "reference_processor.h"
 
 #include "base/time_utils.h"
+#include "collector/garbage_collector.h"
 #include "mirror/class-inl.h"
 #include "mirror/object-inl.h"
 #include "mirror/reference-inl.h"
...
...
@@ -34,7 +35,7 @@ namespace gc {
 static constexpr bool kAsyncReferenceQueueAdd = false;
 
 ReferenceProcessor::ReferenceProcessor()
-    : process_references_args_(nullptr, nullptr, nullptr),
+    : collector_(nullptr),
       preserving_references_(false),
       condition_("reference processor condition", *Locks::reference_processor_lock_),
       soft_reference_queue_(Locks::reference_queue_soft_references_lock_),
...
...
@@ -83,16 +84,14 @@ mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference*
   }
   // Try to see if the referent is already marked by using the is_marked_callback. We can return
   // it to the mutator as long as the GC is not preserving references.
-  IsHeapReferenceMarkedCallback* const is_marked_callback =
-      process_references_args_.is_marked_callback_;
-  if (LIKELY(is_marked_callback != nullptr)) {
+  if (LIKELY(collector_ != nullptr)) {
     // If it's null it means not marked, but it could become marked if the referent is reachable
     // by finalizer referents. So we can not return in this case and must block. Otherwise, we
     // can return it to the mutator as long as the GC is not preserving references, in which
     // case only black nodes can be safely returned. If the GC is preserving references, the
     // mutator could take a white field from a grey or white node and move it somewhere else
     // in the heap causing corruption since this field would get swept.
-    if (is_marked_callback(referent_addr, process_references_args_.arg_)) {
+    if (collector_->IsMarkedHeapReference(referent_addr)) {
       if (!preserving_references_ ||
           (LIKELY(!reference->IsFinalizerReferenceInstance()) && !reference->IsEnqueued())) {
         return referent_addr->AsMirrorPtr();
...
...
@@ -104,16 +103,6 @@ mirror::Object* ReferenceProcessor::GetReferent(Thread* self, mirror::Reference*
   return reference->GetReferent();
 }
 
-bool ReferenceProcessor::PreserveSoftReferenceCallback(mirror::HeapReference<mirror::Object>* obj,
-                                                       void* arg) {
-  auto* const args = reinterpret_cast<ProcessReferencesArgs*>(arg);
-  // TODO: Add smarter logic for preserving soft references.
-  mirror::Object* new_obj = args->mark_callback_(obj->AsMirrorPtr(), args->arg_);
-  DCHECK(new_obj != nullptr);
-  obj->Assign(new_obj);
-  return true;
-}
-
 void ReferenceProcessor::StartPreservingReferences(Thread* self) {
   MutexLock mu(self, *Locks::reference_processor_lock_);
   preserving_references_ = true;
...
...
@@ -129,17 +118,12 @@ void ReferenceProcessor::StopPreservingReferences(Thread* self) {
 // Process reference class instances and schedule finalizations.
 void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timings,
                                            bool clear_soft_references,
-                                           IsHeapReferenceMarkedCallback* is_marked_callback,
-                                           MarkObjectCallback* mark_object_callback,
-                                           ProcessMarkStackCallback* process_mark_stack_callback,
-                                           void* arg) {
+                                           collector::GarbageCollector* collector) {
   TimingLogger::ScopedTiming t(concurrent ? __FUNCTION__ : "(Paused)ProcessReferences", timings);
   Thread* self = Thread::Current();
   {
     MutexLock mu(self, *Locks::reference_processor_lock_);
-    process_references_args_.is_marked_callback_ = is_marked_callback;
-    process_references_args_.mark_callback_ = mark_object_callback;
-    process_references_args_.arg_ = arg;
+    collector_ = collector;
     if (!kUseReadBarrier) {
       CHECK_EQ(SlowPathEnabled(), concurrent) << "Slow path must be enabled iff concurrent";
     } else {
...
@@ -154,16 +138,16 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing
if
(
concurrent
)
{
StartPreservingReferences
(
self
);
}
soft_reference_queue_
.
ForwardSoftReferences
(
&
P
reserv
eS
oft
R
eference
Callback
,
&
process_references_args_
);
p
rocess
_m
ark
_s
tack
_callback
(
arg
);
// TODO: Add smarter logic for p
reserv
ing s
oft
r
eference
s.
soft_reference_queue_
.
ForwardSoftReferences
(
collector
);
collector
->
P
rocess
M
ark
S
tack
(
);
if
(
concurrent
)
{
StopPreservingReferences
(
self
);
}
}
// Clear all remaining soft and weak references with white referents.
soft_reference_queue_
.
ClearWhiteReferences
(
&
cleared_references_
,
is_marked_callback
,
arg
);
weak_reference_queue_
.
ClearWhiteReferences
(
&
cleared_references_
,
is_marked_callback
,
arg
);
soft_reference_queue_
.
ClearWhiteReferences
(
&
cleared_references_
,
collector
);
weak_reference_queue_
.
ClearWhiteReferences
(
&
cleared_references_
,
collector
);
{
TimingLogger
::
ScopedTiming
t2
(
concurrent
?
"EnqueueFinalizerReferences"
:
"(Paused)EnqueueFinalizerReferences"
,
timings
);
...
...
@@ -171,18 +155,17 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing
       StartPreservingReferences(self);
     }
     // Preserve all white objects with finalize methods and schedule them for finalization.
-    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_,
-                                                          is_marked_callback,
-                                                          mark_object_callback, arg);
-    process_mark_stack_callback(arg);
+    finalizer_reference_queue_.EnqueueFinalizerReferences(&cleared_references_, collector);
+    collector->ProcessMarkStack();
     if (concurrent) {
       StopPreservingReferences(self);
     }
   }
   // Clear all finalizer referent reachable soft and weak references with white referents.
-  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
-  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
+  soft_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
+  weak_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
   // Clear all phantom references with white referents.
-  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, is_marked_callback, arg);
+  phantom_reference_queue_.ClearWhiteReferences(&cleared_references_, collector);
   // At this point all reference queues other than the cleared references should be empty.
   DCHECK(soft_reference_queue_.IsEmpty());
   DCHECK(weak_reference_queue_.IsEmpty());
...
...
@@ -194,12 +177,10 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing
     // could result in a stale is_marked_callback_ being called before the reference processing
     // starts since there is a small window of time where slow_path_enabled_ is enabled but the
     // callback isn't yet set.
-    process_references_args_.is_marked_callback_ = nullptr;
-    if (!kUseReadBarrier) {
-      if (concurrent) {
-        // Done processing, disable the slow path and broadcast to the waiters.
-        DisableSlowPath(self);
-      }
+    collector_ = nullptr;
+    if (!kUseReadBarrier && concurrent) {
+      // Done processing, disable the slow path and broadcast to the waiters.
+      DisableSlowPath(self);
     }
   }
 }
...
...
@@ -207,13 +188,12 @@ void ReferenceProcessor::ProcessReferences(bool concurrent, TimingLogger* timing
 // Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
 // marked, put it on the appropriate list in the heap for later processing.
 void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* ref,
-                                                IsHeapReferenceMarkedCallback* is_marked_callback,
-                                                void* arg) {
+                                                collector::GarbageCollector* collector) {
   // klass can be the class of the old object if the visitor already updated the class of ref.
   DCHECK(klass != nullptr);
   DCHECK(klass->IsTypeOfReferenceClass());
   mirror::HeapReference<mirror::Object>* referent = ref->GetReferentReferenceAddr();
-  if (referent->AsMirrorPtr() != nullptr && !is_marked_callback(referent, arg)) {
+  if (referent->AsMirrorPtr() != nullptr && !collector->IsMarkedHeapReference(referent)) {
     Thread* self = Thread::Current();
     // TODO: Remove these locks, and use atomic stacks for storing references?
     // We need to check that the references haven't already been enqueued since we can end up
...
...
@@ -233,8 +213,8 @@ void ReferenceProcessor::DelayReferenceReferent(mirror::Class* klass, mirror::Re
   }
 }
 
-void ReferenceProcessor::UpdateRoots(IsMarkedCallback* callback, void* arg) {
-  cleared_references_.UpdateRoots(callback, arg);
+void ReferenceProcessor::UpdateRoots(IsMarkedVisitor* visitor) {
+  cleared_references_.UpdateRoots(visitor);
 }
 
 class ClearedReferenceTask : public HeapTask {
...
...
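Editorial note: ReferenceProcessor now talks to the collector through the GarbageCollector interface instead of four separate callbacks plus a void* argument. A sketch of the call site inside a collector's reclaim phase, mirroring SemiSpace::ProcessReferences above; MyCollector and the concurrent flag are illustrative:

  void MyCollector::ReclaimPhase() {
    WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
    GetHeap()->GetReferenceProcessor()->ProcessReferences(
        /* concurrent */ false, GetTimings(),
        GetCurrentIteration()->GetClearSoftReferences(),
        this);  // the collector itself supplies IsMarkedHeapReference, MarkObject, ProcessMarkStack
  }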