Date: Sat, 30 Mar 2024 07:12:45 +0000
Subject: 8139595: MethodHandles::remove_dependent_nmethod is not MT safe
---
.../src/share/vm/classfile/javaClasses.cpp | 17 +-
.../src/share/vm/classfile/javaClasses.hpp | 5 +-
.../src/share/vm/code/dependencyContext.cpp | 347 ++++++++++++++++++
.../src/share/vm/code/dependencyContext.hpp | 152 ++++++++
.../src/share/vm/compiler/compileBroker.cpp | 2 +-
hotspot/src/share/vm/oops/instanceKlass.cpp | 207 +----------
hotspot/src/share/vm/oops/instanceKlass.hpp | 62 +---
hotspot/src/share/vm/prims/jni.cpp | 2 +
hotspot/src/share/vm/prims/methodHandles.cpp | 64 ++--
hotspot/src/share/vm/runtime/init.cpp | 2 +
hotspot/src/share/vm/runtime/perfData.hpp | 1 +
hotspot/src/share/vm/runtime/vmStructs.cpp | 5 -
12 files changed, 573 insertions(+), 293 deletions(-)
create mode 100644 hotspot/src/share/vm/code/dependencyContext.cpp
create mode 100644 hotspot/src/share/vm/code/dependencyContext.hpp
diff --git a/hotspot/src/share/vm/classfile/javaClasses.cpp b/hotspot/src/share/vm/classfile/javaClasses.cpp
index 267bbacd..ece8ef03 100644
--- a/hotspot/src/share/vm/classfile/javaClasses.cpp
+++ b/hotspot/src/share/vm/classfile/javaClasses.cpp
@@ -28,6 +28,7 @@
#include "classfile/symbolTable.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/debugInfo.hpp"
+#include "code/dependencyContext.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilerOracle.hpp"
#include "interpreter/interpreter.hpp"
@@ -3033,14 +3034,16 @@ void java_lang_invoke_MethodHandleNatives_CallSiteContext::compute_offsets() {
}
}
-nmethodBucket* java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(oop call_site) {
+DependencyContext java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(oop call_site) {
assert(java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(call_site), "");
- return (nmethodBucket*) (address) call_site->long_field(_vmdependencies_offset);
-}
-
-void java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(oop call_site, nmethodBucket* context) {
- assert(java_lang_invoke_MethodHandleNatives_CallSiteContext::is_instance(call_site), "");
- call_site->long_field_put(_vmdependencies_offset, (jlong) (address) context);
+ intptr_t* vmdeps_addr = (intptr_t*)call_site->address_field_addr(_vmdependencies_offset);
+#ifndef ASSERT
+ DependencyContext dep_ctx(vmdeps_addr);
+#else
+ // Verify that call_site isn't moved during DependencyContext lifetime.
+ DependencyContext dep_ctx(vmdeps_addr, Handle(call_site));
+#endif // ASSERT
+ return dep_ctx;
}
// Support for java_security_AccessControlContext
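Note on the ASSERT-only branch above (illustrative commentary, not part of the patch): the accessor now hands out a DependencyContext that keeps a raw pointer into the CallSite context oop, so the debug build additionally captures the oop in a Handle and lets the DependencyContext destructor verify that the object was not relocated while that raw pointer was live. A minimal, self-contained sketch of the same RAII check in plain C++ follows; the RelocationGuard name and layout are invented for illustration and are not HotSpot code.

    #include <cassert>

    // Debug-only guard: remember where an object lived when a raw interior
    // pointer was taken, and check on destruction that it has not moved.
    class RelocationGuard {
      const void* const* _slot;  // location that names the object (like a Handle)
      const void*        _seen;  // object address captured at construction
    public:
      explicit RelocationGuard(const void* const* slot) : _slot(slot), _seen(*slot) {}
      ~RelocationGuard() {
        // If the object moved while a raw interior pointer was live, fail loudly.
        assert(_seen == *_slot && "object relocated while raw pointer was in use");
      }
    };

    int main() {
      int object = 42;
      const void* handle = &object;      // stands in for a GC handle to the oop
      {
        RelocationGuard guard(&handle);  // like the ASSERT-only DependencyContext ctor
        // ... use a raw pointer derived from 'handle' here ...
      }                                  // destructor re-checks that 'handle' is unchanged
      return 0;
    }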
diff --git a/hotspot/src/share/vm/classfile/javaClasses.hpp b/hotspot/src/share/vm/classfile/javaClasses.hpp
index 1eb04b96..ccd0cf27 100644
--- a/hotspot/src/share/vm/classfile/javaClasses.hpp
+++ b/hotspot/src/share/vm/classfile/javaClasses.hpp
@@ -1243,6 +1243,8 @@ public:
#define CALLSITECONTEXT_INJECTED_FIELDS(macro) \
macro(java_lang_invoke_MethodHandleNatives_CallSiteContext, vmdependencies, intptr_signature, false)
+class DependencyContext;
+
class java_lang_invoke_MethodHandleNatives_CallSiteContext : AllStatic {
friend class JavaClasses;
@@ -1253,8 +1255,7 @@ private:
public:
// Accessors
- static nmethodBucket* vmdependencies(oop context);
- static void set_vmdependencies(oop context, nmethodBucket* bucket);
+ static DependencyContext vmdependencies(oop context);
// Testers
static bool is_subclass(Klass* klass) {
diff --git a/hotspot/src/share/vm/code/dependencyContext.cpp b/hotspot/src/share/vm/code/dependencyContext.cpp
new file mode 100644
index 00000000..5c0af1e3
--- /dev/null
+++ b/hotspot/src/share/vm/code/dependencyContext.cpp
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "code/nmethod.hpp"
+#include "code/dependencies.hpp"
+#include "code/dependencyContext.hpp"
+#include "memory/resourceArea.hpp"
+#include "runtime/atomic.hpp"
+#include "runtime/perfData.hpp"
+#include "utilities/exceptions.hpp"
+
+PerfCounter* DependencyContext::_perf_total_buckets_allocated_count = NULL;
+PerfCounter* DependencyContext::_perf_total_buckets_deallocated_count = NULL;
+PerfCounter* DependencyContext::_perf_total_buckets_stale_count = NULL;
+PerfCounter* DependencyContext::_perf_total_buckets_stale_acc_count = NULL;
+
+void dependencyContext_init() {
+ DependencyContext::init();
+}
+
+void DependencyContext::init() {
+ if (UsePerfData) {
+ EXCEPTION_MARK;
+ _perf_total_buckets_allocated_count =
+ PerfDataManager::create_counter(SUN_CI, "nmethodBucketsAllocated", PerfData::U_Events, CHECK);
+ _perf_total_buckets_deallocated_count =
+ PerfDataManager::create_counter(SUN_CI, "nmethodBucketsDeallocated", PerfData::U_Events, CHECK);
+ _perf_total_buckets_stale_count =
+ PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStale", PerfData::U_Events, CHECK);
+ _perf_total_buckets_stale_acc_count =
+ PerfDataManager::create_counter(SUN_CI, "nmethodBucketsStaleAccumulated", PerfData::U_Events, CHECK);
+ }
+}
+
+//
+// Walk the list of dependent nmethods searching for nmethods which
+// are dependent on the changes that were passed in and mark them for
+// deoptimization. Returns the number of nmethods found.
+//
+int DependencyContext::mark_dependent_nmethods(DepChange& changes) {
+ int found = 0;
+ for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
+ nmethod* nm = b->get_nmethod();
+ // since dependencies aren't removed until an nmethod becomes a zombie,
+ // the dependency list may contain nmethods which aren't alive.
+ if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
+ if (TraceDependencies) {
+ ResourceMark rm;
+ tty->print_cr("Marked for deoptimization");
+ changes.print();
+ nm->print();
+ nm->print_dependencies();
+ }
+ nm->mark_for_deoptimization();
+ found++;
+ }
+ }
+ return found;
+}
+
+//
+// Add an nmethod to the dependency context.
+// It's possible that an nmethod has multiple dependencies on a klass
+// so a count is kept for each bucket to guarantee that creation and
+// deletion of dependencies is consistent.
+//
+void DependencyContext::add_dependent_nmethod(nmethod* nm, bool expunge) {
+ assert_lock_strong(CodeCache_lock);
+ for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
+ if (nm == b->get_nmethod()) {
+ b->increment();
+ return;
+ }
+ }
+ set_dependencies(new nmethodBucket(nm, dependencies()));
+ if (UsePerfData) {
+ _perf_total_buckets_allocated_count->inc();
+ }
+ if (expunge) {
+ // Remove stale entries from the list.
+ expunge_stale_entries();
+ }
+}
+
+//
+// Remove an nmethod dependency from the context.
+// Decrement count of the nmethod in the dependency list and, optionally, remove
+// the bucket completely when the count goes to 0. This method must find
+// a corresponding bucket otherwise there's a bug in the recording of dependencies.
+// Can be called concurrently by parallel GC threads.
+//
+void DependencyContext::remove_dependent_nmethod(nmethod* nm, bool expunge) {
+ assert_locked_or_safepoint(CodeCache_lock);
+ nmethodBucket* first = dependencies();
+ nmethodBucket* last = NULL;
+ for (nmethodBucket* b = first; b != NULL; b = b->next()) {
+ if (nm == b->get_nmethod()) {
+ int val = b->decrement();
+ guarantee(val >= 0, err_msg("Underflow: %d", val));
+ if (val == 0) {
+ if (expunge) {
+ if (last == NULL) {
+ set_dependencies(b->next());
+ } else {
+ last->set_next(b->next());
+ }
+ delete b;
+ if (UsePerfData) {
+ _perf_total_buckets_deallocated_count->inc();
+ }
+ } else {
+ // Mark the context as having stale entries, since it is not safe to
+ // expunge the list right now.
+ set_has_stale_entries(true);
+ if (UsePerfData) {
+ _perf_total_buckets_stale_count->inc();
+ _perf_total_buckets_stale_acc_count->inc();
+ }
+ }
+ }
+ if (expunge) {
+ // Remove stale entries from the list.
+ expunge_stale_entries();
+ }
+ return;
+ }
+ last = b;
+ }
+#ifdef ASSERT
+ tty->print_raw_cr("### can't find dependent nmethod");
+ nm->print();
+#endif // ASSERT
+ ShouldNotReachHere();
+}
+
+//
+// Reclaim all unused buckets.
+//
+void DependencyContext::expunge_stale_entries() {
+ assert_locked_or_safepoint(CodeCache_lock);
+ if (!has_stale_entries()) {
+ assert(!find_stale_entries(), "inconsistent info");
+ return;
+ }
+ nmethodBucket* first = dependencies();
+ nmethodBucket* last = NULL;
+ int removed = 0;
+ for (nmethodBucket* b = first; b != NULL;) {
+ assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
+ nmethodBucket* next = b->next();
+ if (b->count() == 0) {
+ if (last == NULL) {
+ first = next;
+ } else {
+ last->set_next(next);
+ }
+ removed++;
+ delete b;
+ // last stays the same.
+ } else {
+ last = b;
+ }
+ b = next;
+ }
+ set_dependencies(first);
+ set_has_stale_entries(false);
+ if (UsePerfData && removed > 0) {
+ _perf_total_buckets_deallocated_count->inc(removed);
+ _perf_total_buckets_stale_count->dec(removed);
+ }
+}
+
+//
+// Invalidate all dependencies in the context
+int DependencyContext::remove_all_dependents() {
+ assert_locked_or_safepoint(CodeCache_lock);
+ nmethodBucket* b = dependencies();
+ set_dependencies(NULL);
+ int marked = 0;
+ int removed = 0;
+ while (b != NULL) {
+ nmethod* nm = b->get_nmethod();
+ if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
+ nm->mark_for_deoptimization();
+ marked++;
+ }
+ nmethodBucket* next = b->next();
+ removed++;
+ delete b;
+ b = next;
+ }
+ set_has_stale_entries(false);
+ if (UsePerfData && removed > 0) {
+ _perf_total_buckets_deallocated_count->inc(removed);
+ }
+ return marked;
+}
+
+#ifndef PRODUCT
+void DependencyContext::print_dependent_nmethods(bool verbose) {
+ int idx = 0;
+ for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
+ nmethod* nm = b->get_nmethod();
+ tty->print("[%d] count=%d { ", idx++, b->count());
+ if (!verbose) {
+ nm->print_on(tty, "nmethod");
+ tty->print_cr(" } ");
+ } else {
+ nm->print();
+ nm->print_dependencies();
+ tty->print_cr("--- } ");
+ }
+ }
+}
+
+bool DependencyContext::is_dependent_nmethod(nmethod* nm) {
+ for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
+ if (nm == b->get_nmethod()) {
+#ifdef ASSERT
+ int count = b->count();
+ assert(count >= 0, "count shouldn't be negative");
+#endif
+ return true;
+ }
+ }
+ return false;
+}
+
+bool DependencyContext::find_stale_entries() {
+ for (nmethodBucket* b = dependencies(); b != NULL; b = b->next()) {
+ if (b->count() == 0) return true;
+ }
+ return false;
+}
+
+#endif //PRODUCT
+
+int nmethodBucket::decrement() {
+ return Atomic::add(-1, (volatile int *)&_count);
+}
+
+/////////////// Unit tests ///////////////
+
+#ifndef PRODUCT
+
+class TestDependencyContext {
+ public:
+ nmethod* _nmethods[3];
+
+ intptr_t _dependency_context;
+
+ TestDependencyContext() : _dependency_context(DependencyContext::EMPTY) {
+ CodeCache_lock->lock_without_safepoint_check();
+
+ DependencyContext depContext(&_dependency_context);
+
+ _nmethods[0] = reinterpret_cast<nmethod*>(0x8 * 0);
+ _nmethods[1] = reinterpret_cast<nmethod*>(0x8 * 1);
+ _nmethods[2] = reinterpret_cast<nmethod*>(0x8 * 2);
+
+ depContext.add_dependent_nmethod(_nmethods[2]);
+ depContext.add_dependent_nmethod(_nmethods[1]);
+ depContext.add_dependent_nmethod(_nmethods[0]);
+ }
+
+ ~TestDependencyContext() {
+ wipe();
+ CodeCache_lock->unlock();
+ }
+
+ static void testRemoveDependentNmethod(int id, bool delete_immediately) {
+ TestDependencyContext c;
+ DependencyContext depContext(&c._dependency_context);
+ assert(!has_stale_entries(depContext), "check");
+
+ nmethod* nm = c._nmethods[id];
+ depContext.remove_dependent_nmethod(nm, delete_immediately);
+
+ if (!delete_immediately) {
+ assert(has_stale_entries(depContext), "check");
+ assert(depContext.is_dependent_nmethod(nm), "check");
+ depContext.expunge_stale_entries();
+ }
+
+ assert(!has_stale_entries(depContext), "check");
+ assert(!depContext.is_dependent_nmethod(nm), "check");
+ }
+
+ static void testRemoveDependentNmethod() {
+ testRemoveDependentNmethod(0, false);
+ testRemoveDependentNmethod(1, false);
+ testRemoveDependentNmethod(2, false);
+
+ testRemoveDependentNmethod(0, true);
+ testRemoveDependentNmethod(1, true);
+ testRemoveDependentNmethod(2, true);
+ }
+
+ static void test() {
+ testRemoveDependentNmethod();
+ }
+
+ static bool has_stale_entries(DependencyContext ctx) {
+ assert(ctx.has_stale_entries() == ctx.find_stale_entries(), "check");
+ return ctx.has_stale_entries();
+ }
+
+ void wipe() {
+ DependencyContext ctx(&_dependency_context);
+ nmethodBucket* b = ctx.dependencies();
+ ctx.set_dependencies(NULL);
+ ctx.set_has_stale_entries(false);
+ while (b != NULL) {
+ nmethodBucket* next = b->next();
+ delete b;
+ b = next;
+ }
+ }
+};
+
+void TestDependencyContext_test() {
+ TestDependencyContext::test();
+}
+
+#endif // PRODUCT
\ No newline at end of file
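The comments in the file above carry the core MT-safety argument of the fix: parallel GC threads may call remove_dependent_nmethod concurrently, so that path only atomically decrements the per-bucket count and records that the context has stale entries; the actual unlinking is deferred to expunge_stale_entries(), which runs while the CodeCache lock is held or at an otherwise serialized point. Below is a rough standalone illustration of that "decrement now, unlink later" split, using std::atomic in place of HotSpot's Atomic class; names and structure are illustrative only, not part of the patch.

    #include <atomic>
    #include <cassert>
    #include <cstdio>

    struct Bucket {                         // stands in for nmethodBucket
      int              payload;
      std::atomic<int> count;
      Bucket*          next;
      Bucket(int p, Bucket* n) : payload(p), count(1), next(n) {}
    };

    std::atomic<bool> has_stale(false);

    // May run on several threads at once: only an atomic decrement, no list surgery.
    void remove_ref(Bucket* b) {
      int val = b->count.fetch_sub(1) - 1;  // new value after the decrement
      assert(val >= 0 && "underflow");
      if (val == 0) has_stale.store(true);  // defer the unlink
    }

    // Runs later on one thread that holds the owning lock: unlink dead buckets.
    Bucket* expunge(Bucket* head) {
      Bucket** link = &head;
      while (Bucket* b = *link) {
        if (b->count.load() == 0) { *link = b->next; delete b; }
        else                      { link  = &b->next; }
      }
      has_stale.store(false);
      return head;
    }

    int main() {
      Bucket* head = new Bucket(2, new Bucket(1, nullptr));
      remove_ref(head);                     // the concurrency-safe step
      head = expunge(head);                 // the serialized cleanup step
      std::printf("remaining payload: %d\n", head->payload);
      while (head != nullptr) { Bucket* n = head->next; delete head; head = n; }
      return 0;
    }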
diff --git a/hotspot/src/share/vm/code/dependencyContext.hpp b/hotspot/src/share/vm/code/dependencyContext.hpp
new file mode 100644
index 00000000..533112b8
--- /dev/null
+++ b/hotspot/src/share/vm/code/dependencyContext.hpp
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_CODE_DEPENDENCYCONTEXT_HPP
+#define SHARE_VM_CODE_DEPENDENCYCONTEXT_HPP
+
+#include "memory/allocation.hpp"
+#include "oops/oop.hpp"
+#include "runtime/handles.hpp"
+#include "runtime/perfData.hpp"
+
+class nmethod;
+class DepChange;
+
+//
+// nmethodBucket is used to record dependent nmethods for
+// deoptimization. nmethod dependencies are actually <klass, method>
+// pairs but we really only care about the klass part for purposes of
+// finding nmethods which might need to be deoptimized. Instead of
+// recording the method, a count of how many times a particular nmethod
+// was recorded is kept. This ensures that any recording errors are
+// noticed since an nmethod should be removed as many times as it's
+// added.
+//
+class nmethodBucket: public CHeapObj<mtClass> {
+ friend class VMStructs;
+ private:
+ nmethod* _nmethod;
+ int _count;
+ nmethodBucket* _next;
+
+ public:
+ nmethodBucket(nmethod* nmethod, nmethodBucket* next) :
+ _nmethod(nmethod), _next(next), _count(1) {}
+
+ int count() { return _count; }
+ int increment() { _count += 1; return _count; }
+ int decrement();
+ nmethodBucket* next() { return _next; }
+ void set_next(nmethodBucket* b) { _next = b; }
+ nmethod* get_nmethod() { return _nmethod; }
+};
+
+//
+// Utility class to manipulate nmethod dependency context.
+// The context consists of nmethodBucket* (a head of a linked list)
+// and a boolean flag (does the list contain stale entries). The structure is
+// encoded as an intptr_t: lower bit is used for the flag. It is possible since
+// nmethodBucket* is aligned - the structure is malloc'ed in C heap.
+// Dependency context can be attached either to an InstanceKlass (_dep_context field)
+// or CallSiteContext oop for call_site_target dependencies (see javaClasses.hpp).
+// DependencyContext class operates on some location which holds an intptr_t value.
+//
+class DependencyContext : public StackObj {
+ friend class VMStructs;
+ friend class TestDependencyContext;
+ private:
+ enum TagBits { _has_stale_entries_bit = 1, _has_stale_entries_mask = 1 };
+
+ intptr_t* _dependency_context_addr;
+
+ void set_dependencies(nmethodBucket* b) {
+ assert((intptr_t(b) & _has_stale_entries_mask) == 0, "should be aligned");
+ if (has_stale_entries()) {
+ *_dependency_context_addr = intptr_t(b) | _has_stale_entries_mask;
+ } else {
+ *_dependency_context_addr = intptr_t(b);
+ }
+ }
+
+ void set_has_stale_entries(bool x) {
+ if (x) {
+ *_dependency_context_addr |= _has_stale_entries_mask;
+ } else {
+ *_dependency_context_addr &= ~_has_stale_entries_mask;
+ }
+ }
+
+ nmethodBucket* dependencies() {
+ intptr_t value = *_dependency_context_addr;
+ return (nmethodBucket*) (value & ~_has_stale_entries_mask);
+ }
+
+ bool has_stale_entries() const {
+ intptr_t value = *_dependency_context_addr;
+ return (value & _has_stale_entries_mask) != 0;
+ }
+
+ static PerfCounter* _perf_total_buckets_allocated_count;
+ static PerfCounter* _perf_total_buckets_deallocated_count;
+ static PerfCounter* _perf_total_buckets_stale_count;
+ static PerfCounter* _perf_total_buckets_stale_acc_count;
+
+ public:
+#ifdef ASSERT
+ // Verification for dependency contexts rooted at Java objects.
+ Handle _base; // non-NULL if dependency context resides in an oop (e.g. CallSite).
+ oop _base_oop;
+
+ DependencyContext(intptr_t* addr, Handle base = Handle())
+ : _dependency_context_addr(addr), _base(base)
+ {
+ _base_oop = _base();
+ }
+
+ ~DependencyContext() {
+ // Base oop relocation invalidates _dependency_context_addr.
+ assert(_base_oop == _base(), "base oop relocation is forbidden");
+ }
+#else
+ DependencyContext(intptr_t* addr) : _dependency_context_addr(addr) {}
+#endif // ASSERT
+
+ static const intptr_t EMPTY = 0; // dependencies = NULL, has_stale_entries = false
+
+ static void init();
+
+ int mark_dependent_nmethods(DepChange& changes);
+ void add_dependent_nmethod(nmethod* nm, bool expunge_stale_entries = false);
+ void remove_dependent_nmethod(nmethod* nm, bool expunge_stale_entries = false);
+ int remove_all_dependents();
+
+ void expunge_stale_entries();
+
+#ifndef PRODUCT
+ void print_dependent_nmethods(bool verbose);
+ bool is_dependent_nmethod(nmethod* nm);
+ bool find_stale_entries();
+#endif //PRODUCT
+};
+#endif // SHARE_VM_CODE_DEPENDENCYCONTEXT_HPP
\ No newline at end of file
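The class comment above describes the heart of the new representation: a dependency context is a single intptr_t whose low bit records "the list contains stale (zero-count) entries" while the remaining bits hold the nmethodBucket* list head, which works because the buckets are C-heap allocated and therefore aligned. A compilable toy version of that tagged-pointer encoding follows, with invented names rather than the HotSpot types; it is a sketch, not the patch's code.

    #include <cassert>
    #include <cstdint>

    struct Node { int value; Node* next; };   // stands in for nmethodBucket

    const std::intptr_t STALE_MASK = 1;       // low bit of the packed word

    // Pack the list head and the flag into one word; relies on >= 2-byte alignment.
    std::intptr_t pack(Node* head, bool stale) {
      std::intptr_t p = reinterpret_cast<std::intptr_t>(head);
      assert((p & STALE_MASK) == 0 && "pointer must have a free low bit");
      return stale ? (p | STALE_MASK) : p;
    }

    Node* head_of(std::intptr_t packed)  { return reinterpret_cast<Node*>(packed & ~STALE_MASK); }
    bool  is_stale(std::intptr_t packed) { return (packed & STALE_MASK) != 0; }

    int main() {
      Node n = {7, nullptr};
      std::intptr_t ctx = pack(&n, /*stale=*/true);
      assert(head_of(ctx) == &n);
      assert(is_stale(ctx));
      ctx = pack(head_of(ctx), /*stale=*/false);  // clear the flag, keep the list
      assert(!is_stale(ctx));
      return 0;
    }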
diff --git a/hotspot/src/share/vm/compiler/compileBroker.cpp b/hotspot/src/share/vm/compiler/compileBroker.cpp
index e8f97074..22372d07 100644
--- a/hotspot/src/share/vm/compiler/compileBroker.cpp
+++ b/hotspot/src/share/vm/compiler/compileBroker.cpp
@@ -26,6 +26,7 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
+#include "code/dependencyContext.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compileLog.hpp"
#include "compiler/compilerOracle.hpp"
@@ -919,7 +920,6 @@ void CompileBroker::compilation_init() {
PerfData::U_Ticks, CHECK);
}
-
if (UsePerfData) {
EXCEPTION_MARK;
diff --git a/hotspot/src/share/vm/oops/instanceKlass.cpp b/hotspot/src/share/vm/oops/instanceKlass.cpp
index ce297b68..1bff1309 100644
--- a/hotspot/src/share/vm/oops/instanceKlass.cpp
+++ b/hotspot/src/share/vm/oops/instanceKlass.cpp
@@ -28,6 +28,7 @@
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/verifier.hpp"
#include "classfile/vmSymbols.hpp"
+#include "code/dependencyContext.hpp"
#include "compiler/compileBroker.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
@@ -194,7 +195,6 @@ InstanceKlass* InstanceKlass::allocate_instance_klass(
int size = InstanceKlass::size(vtable_len, itable_len, nonstatic_oop_map_size,
access_flags.is_interface(), is_anonymous);
-
// Allocation
InstanceKlass* ik;
if (rt == REF_NONE) {
@@ -296,7 +296,7 @@ InstanceKlass::InstanceKlass(int vtable_len,
set_static_oop_field_count(0);
set_nonstatic_field_size(0);
set_is_marked_dependent(false);
- set_has_unloaded_dependent(false);
+ _dep_context = DependencyContext::EMPTY;
set_init_state(InstanceKlass::allocated);
set_init_thread(NULL);
set_init_state(allocated);
@@ -311,7 +311,6 @@ InstanceKlass::InstanceKlass(int vtable_len,
set_annotations(NULL);
set_jvmti_cached_class_field_map(NULL);
set_initial_method_idnum(0);
- _dependencies = NULL;
set_jvmti_cached_class_field_map(NULL);
set_cached_class_file(NULL);
set_initial_method_idnum(0);
@@ -2093,200 +2092,31 @@ jmethodID InstanceKlass::jmethod_id_or_null(Method* method) {
return id;
}
-int nmethodBucket::decrement() {
- return Atomic::add(-1, (volatile int *)&_count);
-}
-
-//
-// Walk the list of dependent nmethods searching for nmethods which
-// are dependent on the changes that were passed in and mark them for
-// deoptimization. Returns the number of nmethods found.
-//
-int nmethodBucket::mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes) {
- assert_locked_or_safepoint(CodeCache_lock);
- int found = 0;
- for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
- nmethod* nm = b->get_nmethod();
- // since dependencies aren't removed until an nmethod becomes a zombie,
- // the dependency list may contain nmethods which aren't alive.
- if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization() && nm->check_dependency_on(changes)) {
- if (TraceDependencies) {
- ResourceMark rm;
- tty->print_cr("Marked for deoptimization");
- changes.print();
- nm->print();
- nm->print_dependencies();
- }
- nm->mark_for_deoptimization();
- found++;
- }
- }
- return found;
-}
-
-//
-// Add an nmethodBucket to the list of dependencies for this nmethod.
-// It's possible that an nmethod has multiple dependencies on this klass
-// so a count is kept for each bucket to guarantee that creation and
-// deletion of dependencies is consistent. Returns new head of the list.
-//
-nmethodBucket* nmethodBucket::add_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
- assert_locked_or_safepoint(CodeCache_lock);
- for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
- if (nm == b->get_nmethod()) {
- b->increment();
- return deps;
- }
- }
- return new nmethodBucket(nm, deps);
-}
-
-//
-// Decrement count of the nmethod in the dependency list and remove
-// the bucket completely when the count goes to 0. This method must
-// find a corresponding bucket otherwise there's a bug in the
-// recording of dependencies. Returns true if the bucket was deleted,
-// or marked ready for reclaimation.
-bool nmethodBucket::remove_dependent_nmethod(nmethodBucket** deps, nmethod* nm, bool delete_immediately) {
- assert_locked_or_safepoint(CodeCache_lock);
-
- nmethodBucket* first = *deps;
- nmethodBucket* last = NULL;
- for (nmethodBucket* b = first; b != NULL; b = b->next()) {
- if (nm == b->get_nmethod()) {
- int val = b->decrement();
- guarantee(val >= 0, err_msg("Underflow: %d", val));
- if (val == 0) {
- if (delete_immediately) {
- if (last == NULL) {
- *deps = b->next();
- } else {
- last->set_next(b->next());
- }
- delete b;
- }
- }
- return true;
- }
- last = b;
- }
-
-#ifdef ASSERT
- tty->print_raw_cr("### can't find dependent nmethod");
- nm->print();
-#endif // ASSERT
- ShouldNotReachHere();
- return false;
-}
-
-// Convenience overload, for callers that don't want to delete the nmethodBucket entry.
-bool nmethodBucket::remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
- nmethodBucket** deps_addr = &deps;
- return remove_dependent_nmethod(deps_addr, nm, false /* Don't delete */);
-}
-
-//
-// Reclaim all unused buckets. Returns new head of the list.
-//
-nmethodBucket* nmethodBucket::clean_dependent_nmethods(nmethodBucket* deps) {
- nmethodBucket* first = deps;
- nmethodBucket* last = NULL;
- nmethodBucket* b = first;
-
- while (b != NULL) {
- assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
- nmethodBucket* next = b->next();
- if (b->count() == 0) {
- if (last == NULL) {
- first = next;
- } else {
- last->set_next(next);
- }
- delete b;
- // last stays the same.
- } else {
- last = b;
- }
- b = next;
- }
- return first;
-}
-
-#ifndef PRODUCT
-void nmethodBucket::print_dependent_nmethods(nmethodBucket* deps, bool verbose) {
- int idx = 0;
- for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
- nmethod* nm = b->get_nmethod();
- tty->print("[%d] count=%d { ", idx++, b->count());
- if (!verbose) {
- nm->print_on(tty, "nmethod");
- tty->print_cr(" } ");
- } else {
- nm->print();
- nm->print_dependencies();
- tty->print_cr("--- } ");
- }
- }
+inline DependencyContext InstanceKlass::dependencies() {
+ DependencyContext dep_context(&_dep_context);
+ return dep_context;
}
-bool nmethodBucket::is_dependent_nmethod(nmethodBucket* deps, nmethod* nm) {
- for (nmethodBucket* b = deps; b != NULL; b = b->next()) {
- if (nm == b->get_nmethod()) {
-#ifdef ASSERT
- int count = b->count();
- assert(count >= 0, err_msg("count shouldn't be negative: %d", count));
-#endif
- return true;
- }
- }
- return false;
-}
-#endif //PRODUCT
-
int InstanceKlass::mark_dependent_nmethods(DepChange& changes) {
- assert_locked_or_safepoint(CodeCache_lock);
- return nmethodBucket::mark_dependent_nmethods(_dependencies, changes);
-}
-
-void InstanceKlass::clean_dependent_nmethods() {
- assert_locked_or_safepoint(CodeCache_lock);
-
- if (has_unloaded_dependent()) {
- _dependencies = nmethodBucket::clean_dependent_nmethods(_dependencies);
- set_has_unloaded_dependent(false);
- }
-#ifdef ASSERT
- else {
- // Verification
- for (nmethodBucket* b = _dependencies; b != NULL; b = b->next()) {
- assert(b->count() >= 0, err_msg("bucket count: %d", b->count()));
- assert(b->count() != 0, "empty buckets need to be cleaned");
- }
- }
-#endif
+ return dependencies().mark_dependent_nmethods(changes);
}
void InstanceKlass::add_dependent_nmethod(nmethod* nm) {
- assert_locked_or_safepoint(CodeCache_lock);
- _dependencies = nmethodBucket::add_dependent_nmethod(_dependencies, nm);
+ dependencies().add_dependent_nmethod(nm);
}
void InstanceKlass::remove_dependent_nmethod(nmethod* nm, bool delete_immediately) {
- assert_locked_or_safepoint(CodeCache_lock);
-
- if (nmethodBucket::remove_dependent_nmethod(&_dependencies, nm, delete_immediately)) {
- set_has_unloaded_dependent(true);
- }
+ dependencies().remove_dependent_nmethod(nm, delete_immediately);
}
#ifndef PRODUCT
void InstanceKlass::print_dependent_nmethods(bool verbose) {
- nmethodBucket::print_dependent_nmethods(_dependencies, verbose);
+ dependencies().print_dependent_nmethods(verbose);
}
bool InstanceKlass::is_dependent_nmethod(nmethod* nm) {
- return nmethodBucket::is_dependent_nmethod(_dependencies, nm);
+ return dependencies().is_dependent_nmethod(nm);
}
#endif //PRODUCT
@@ -2583,7 +2413,9 @@ void InstanceKlass::clean_weak_instanceklass_links(BoolObjectClosure* is_alive)
clean_implementors_list(is_alive);
clean_method_data(is_alive);
- clean_dependent_nmethods();
+ // Since GC iterates InstanceKlasses sequentially, it is safe to remove stale entries here.
+ DependencyContext dep_context(&_dep_context);
+ dep_context.expunge_stale_entries();
}
void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
@@ -2630,6 +2462,8 @@ void InstanceKlass::remove_unshareable_info() {
constants()->remove_unshareable_info();
+ assert(_dep_context == DependencyContext::EMPTY, "dependency context is not shareable");
+
for (int i = 0; i < methods()->length(); i++) {
Method* m = methods()->at(i);
m->remove_unshareable_info();
@@ -2654,7 +2488,6 @@ void InstanceKlass::remove_unshareable_info() {
array_klasses_do(remove_unshareable_in_class);
// These are not allocated from metaspace. They are safe to set to NULL.
_member_names = NULL;
- _dependencies = NULL;
_osr_nmethods_head = NULL;
_init_thread = NULL;
}
@@ -2796,12 +2629,10 @@ void InstanceKlass::release_C_heap_structures() {
}
// release dependencies
- nmethodBucket* b = _dependencies;
- _dependencies = NULL;
- while (b != NULL) {
- nmethodBucket* next = b->next();
- delete b;
- b = next;
+ {
+ DependencyContext ctx(&_dep_context);
+ int marked = ctx.remove_all_dependents();
+ assert(marked == 0, "all dependencies should be already invalidated");
}
// Deallocate breakpoint records
diff --git a/hotspot/src/share/vm/oops/instanceKlass.hpp b/hotspot/src/share/vm/oops/instanceKlass.hpp
index 9750ae56..14556a38 100644
--- a/hotspot/src/share/vm/oops/instanceKlass.hpp
+++ b/hotspot/src/share/vm/oops/instanceKlass.hpp
@@ -83,15 +83,15 @@
// forward declaration for class -- see below for definition
-class SuperTypeClosure;
-class JNIid;
-class jniIdMapBase;
class BreakpointInfo;
-class fieldDescriptor;
class DepChange;
-class nmethodBucket;
+class DependencyContext;
+class fieldDescriptor;
+class jniIdMapBase;
+class JNIid;
class JvmtiCachedClassFieldMap;
class MemberNameTable;
+class SuperTypeClosure;
// This is used in iterators below.
class FieldClosure: public StackObj {
@@ -227,7 +227,6 @@ class InstanceKlass: public Klass {
// _misc_flags.
bool _is_marked_dependent; // used for marking during flushing and deoptimization
bool _is_being_redefined; // used for locking redefinition
- bool _has_unloaded_dependent;
enum {
_misc_rewritten = 1 << 0, // methods rewritten.
@@ -249,7 +248,7 @@ class InstanceKlass: public Klass {
MemberNameTable* _member_names; // Member names
JNIid* _jni_ids; // First JNI identifier for static fields in this class
jmethodID* _methods_jmethod_ids; // jmethodIDs corresponding to method_idnum, or NULL if none
- nmethodBucket* _dependencies; // list of dependent nmethods
+ intptr_t _dep_context; // packed DependencyContext structure
nmethod* _osr_nmethods_head; // Head of list of on-stack replacement nmethods for this class
BreakpointInfo* _breakpoints; // bpt lists, managed by Method*
// Linked instanceKlasses of previous versions
@@ -480,9 +479,6 @@ class InstanceKlass: public Klass {
bool is_marked_dependent() const { return _is_marked_dependent; }
void set_is_marked_dependent(bool value) { _is_marked_dependent = value; }
- bool has_unloaded_dependent() const { return _has_unloaded_dependent; }
- void set_has_unloaded_dependent(bool value) { _has_unloaded_dependent = value; }
-
// initialization (virtuals from Klass)
bool should_be_initialized() const; // means that initialize should be called
void initialize(TRAPS);
@@ -831,7 +827,8 @@ class InstanceKlass: public Klass {
JNIid* jni_id_for(int offset);
// maintenance of deoptimization dependencies
- int mark_dependent_nmethods(DepChange& changes);
+ inline DependencyContext dependencies();
+ int mark_dependent_nmethods(DepChange& changes);
void add_dependent_nmethod(nmethod* nm);
void remove_dependent_nmethod(nmethod* nm, bool delete_immediately);
@@ -1026,7 +1023,6 @@ class InstanceKlass: public Klass {
void clean_weak_instanceklass_links(BoolObjectClosure* is_alive);
void clean_implementors_list(BoolObjectClosure* is_alive);
void clean_method_data(BoolObjectClosure* is_alive);
- void clean_dependent_nmethods();
// Explicit metaspace deallocation of fields
// For RedefineClasses and class file parsing errors, we need to deallocate
@@ -1258,48 +1254,6 @@ class JNIid: public CHeapObj<mtClass> {
void verify(Klass* holder);
};
-
-//
-// nmethodBucket is used to record dependent nmethods for
-// deoptimization. nmethod dependencies are actually <klass, method>
-// pairs but we really only care about the klass part for purposes of
-// finding nmethods which might need to be deoptimized. Instead of
-// recording the method, a count of how many times a particular nmethod
-// was recorded is kept. This ensures that any recording errors are
-// noticed since an nmethod should be removed as many times are it's
-// added.
-//
-class nmethodBucket: public CHeapObj<mtClass> {
- friend class VMStructs;
- private:
- nmethod* _nmethod;
- int _count;
- nmethodBucket* _next;
-
- public:
- nmethodBucket(nmethod* nmethod, nmethodBucket* next) {
- _nmethod = nmethod;
- _next = next;
- _count = 1;
- }
- int count() { return _count; }
- int increment() { _count += 1; return _count; }
- int decrement();
- nmethodBucket* next() { return _next; }
- void set_next(nmethodBucket* b) { _next = b; }
- nmethod* get_nmethod() { return _nmethod; }
-
- static int mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes);
- static nmethodBucket* add_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
- static bool remove_dependent_nmethod(nmethodBucket** deps, nmethod* nm, bool delete_immediately);
- static bool remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
- static nmethodBucket* clean_dependent_nmethods(nmethodBucket* deps);
-#ifndef PRODUCT
- static void print_dependent_nmethods(nmethodBucket* deps, bool verbose);
- static bool is_dependent_nmethod(nmethodBucket* deps, nmethod* nm);
-#endif //PRODUCT
-};
-
// An iterator that's used to access the inner classes indices in the
// InstanceKlass::_inner_classes array.
class InnerClassesIterator : public StackObj {
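On the klass side, shown above, the nmethodBucket* field is replaced by a single packed word (_dep_context), and every caller builds a short-lived DependencyContext over the address of that word instead of touching the list head directly. A tiny sketch of that embedded-word-plus-view idiom, with illustrative class names only (not the real InstanceKlass):

    #include <cstdint>
    #include <cstdio>

    // A short-lived, stack-allocated view over a word that belongs to someone
    // else, in the spirit of DependencyContext : public StackObj.
    class ContextView {
      std::intptr_t* _slot;
    public:
      explicit ContextView(std::intptr_t* slot) : _slot(slot) {}
      bool is_empty() const       { return *_slot == 0; }
      void store(std::intptr_t v) { *_slot = v; }
    };

    // Stands in for InstanceKlass: it owns only the raw packed word.
    class Holder {
      std::intptr_t _dep_context;            // packed list head + flag bit
    public:
      Holder() : _dep_context(0) {}          // like DependencyContext::EMPTY
      ContextView dependencies() { return ContextView(&_dep_context); }
    };

    int main() {
      Holder h;
      std::printf("empty: %s\n", h.dependencies().is_empty() ? "yes" : "no");
      h.dependencies().store(0);             // every access builds a fresh view
      return 0;
    }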
diff --git a/hotspot/src/share/vm/prims/jni.cpp b/hotspot/src/share/vm/prims/jni.cpp
index c0d789b4..bfb902d5 100644
--- a/hotspot/src/share/vm/prims/jni.cpp
+++ b/hotspot/src/share/vm/prims/jni.cpp
@@ -5107,6 +5107,7 @@ _JNI_IMPORT_OR_EXPORT_ jint JNICALL JNI_GetDefaultJavaVMInitArgs(void *args_) {
unit_test_function_call
// Forward declaration
+void TestDependencyContext_test();
void TestOS_test();
void TestReservedSpace_test();
void TestReserveMemorySpecial_test();
@@ -5132,6 +5133,7 @@ void ChunkManager_test_list_index();
void execute_internal_vm_tests() {
if (ExecuteInternalVMTests) {
tty->print_cr("Running internal VM tests");
+ run_unit_test(TestDependencyContext_test());
run_unit_test(TestOS_test());
run_unit_test(TestReservedSpace_test());
run_unit_test(TestReserveMemorySpecial_test());
diff --git a/hotspot/src/share/vm/prims/methodHandles.cpp b/hotspot/src/share/vm/prims/methodHandles.cpp
index d950d4f3..231d62d2 100644
--- a/hotspot/src/share/vm/prims/methodHandles.cpp
+++ b/hotspot/src/share/vm/prims/methodHandles.cpp
@@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "classfile/symbolTable.hpp"
+#include "code/dependencyContext.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/oopMapCache.hpp"
@@ -946,30 +947,33 @@ int MethodHandles::find_MemberNames(KlassHandle k,
return rfill + overflow;
}
+// Is it safe to remove stale entries from a dependency list?
+static bool safe_to_expunge() {
+ // Since parallel GC threads can concurrently iterate over a dependency
+ // list during safepoint, it is safe to remove entries only when
+ // CodeCache lock is held.
+ return CodeCache_lock->owned_by_self();
+}
+
void MethodHandles::add_dependent_nmethod(oop call_site, nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
oop context = java_lang_invoke_CallSite::context(call_site);
- nmethodBucket* deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
-
- nmethodBucket* new_deps = nmethodBucket::add_dependent_nmethod(deps, nm);
- if (deps != new_deps) {
- java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(context, new_deps);
- }
+ DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
+ // Try to purge stale entries on updates.
+ // Since GC doesn't clean dependency contexts rooted at CallSiteContext objects,
+ // in order to avoid a memory leak, stale entries are purged whenever a dependency list
+ // is changed (both on addition and removal). Though memory reclamation is delayed,
+ // it avoids indefinite memory usage growth.
+ deps.add_dependent_nmethod(nm, /*expunge_stale_entries=*/safe_to_expunge());
}
void MethodHandles::remove_dependent_nmethod(oop call_site, nmethod* nm) {
assert_locked_or_safepoint(CodeCache_lock);
oop context = java_lang_invoke_CallSite::context(call_site);
- nmethodBucket* deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
-
- if (nmethodBucket::remove_dependent_nmethod(deps, nm)) {
- nmethodBucket* new_deps = nmethodBucket::clean_dependent_nmethods(deps);
- if (deps != new_deps) {
- java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(context, new_deps);
- }
- }
+ DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
+ deps.remove_dependent_nmethod(nm, /*expunge_stale_entries=*/safe_to_expunge());
}
void MethodHandles::flush_dependent_nmethods(Handle call_site, Handle target) {
@@ -978,21 +982,15 @@ void MethodHandles::flush_dependent_nmethods(Handle call_site, Handle target) {
int marked = 0;
CallSiteDepChange changes(call_site(), target());
{
+ No_Safepoint_Verifier nsv;
MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
oop context = java_lang_invoke_CallSite::context(call_site());
- nmethodBucket* deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
-
- marked = nmethodBucket::mark_dependent_nmethods(deps, changes);
- if (marked > 0) {
- nmethodBucket* new_deps = nmethodBucket::clean_dependent_nmethods(deps);
- if (deps != new_deps) {
- java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(context, new_deps);
- }
- }
+ DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context);
+ marked = deps.mark_dependent_nmethods(changes);
}
if (marked > 0) {
- // At least one nmethod has been marked for deoptimization
+ // At least one nmethod has been marked for deoptimization.
VM_Deoptimize op;
VMThread::execute(&op);
}
@@ -1379,6 +1377,8 @@ JVM_ENTRY(void, MHN_setCallSiteTargetVolatile(JNIEnv* env, jobject igcls, jobjec
}
JVM_END
+// It is called by a Cleaner object which ensures that dropped CallSites properly
+// deallocate their dependency information.
JVM_ENTRY(void, MHN_clearCallSiteContext(JNIEnv* env, jobject igcls, jobject context_jh)) {
Handle context(THREAD, JNIHandles::resolve_non_null(context_jh));
{
@@ -1387,19 +1387,11 @@ JVM_ENTRY(void, MHN_clearCallSiteContext(JNIEnv* env, jobject igcls, jobject con
int marked = 0;
{
+ No_Safepoint_Verifier nsv;
MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag);
- nmethodBucket* b = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context());
- while(b != NULL) {
- nmethod* nm = b->get_nmethod();
- if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) {
- nm->mark_for_deoptimization();
- marked++;
- }
- nmethodBucket* next = b->next();
- delete b;
- b = next;
- }
- java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(context(), NULL); // reset context
+ assert(safe_to_expunge(), "removal is not safe");
+ DependencyContext deps = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context());
+ marked = deps.remove_all_dependents();
}
if (marked > 0) {
// At least one nmethod has been marked for deoptimization
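safe_to_expunge() above is the policy knob of this file: dependency contexts rooted at CallSiteContext objects are never cleaned by GC, so the code purges stale entries eagerly on every addition and removal, but only when the calling thread already owns the CodeCache lock; parallel GC threads that merely drop counts leave the cleanup for later. A rough standalone sketch of that "clean only if I own the lock, otherwise just mark" pattern follows; the hand-rolled ownership flag is an assumption for illustration, since std::mutex has no owned_by_self() equivalent, and none of this is HotSpot code.

    #include <mutex>

    static std::mutex list_lock;
    thread_local bool holds_list_lock = false;  // stands in for Mutex::owned_by_self()

    struct LockScope {                          // RAII lock that also records ownership
      LockScope()  { list_lock.lock();  holds_list_lock = true;  }
      ~LockScope() { holds_list_lock = false; list_lock.unlock(); }
    };

    static bool safe_to_expunge() { return holds_list_lock; }

    struct DepList {
      bool has_stale = false;
      void expunge()      { /* unlink zero-count buckets here */ has_stale = false; }
      void remove_entry() {
        // ... atomically drop one reference (see the earlier sketch) ...
        has_stale = true;
        if (safe_to_expunge()) expunge();       // eager cleanup only when it is safe
      }
    };

    int main() {
      DepList deps;
      { LockScope guard; deps.remove_entry(); } // lock owner: cleans up immediately
      deps.remove_entry();                      // non-owner: only marks staleness
      return deps.has_stale ? 0 : 1;            // 0: the second removal was deferred
    }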
diff --git a/hotspot/src/share/vm/runtime/init.cpp b/hotspot/src/share/vm/runtime/init.cpp
index b1854092..f709db94 100644
--- a/hotspot/src/share/vm/runtime/init.cpp
+++ b/hotspot/src/share/vm/runtime/init.cpp
@@ -71,6 +71,7 @@ void InlineCacheBuffer_init();
void compilerOracle_init();
void compilationPolicy_init();
void compileBroker_init();
+void dependencyContext_init();
// Initialization after compiler initialization
bool universe_post_init(); // must happen after compiler_init
@@ -127,6 +128,7 @@ jint init_globals() {
compilerOracle_init();
compilationPolicy_init();
compileBroker_init();
+ dependencyContext_init();
VMRegImpl::set_regName();
if (!universe_post_init()) {
diff --git a/hotspot/src/share/vm/runtime/perfData.hpp b/hotspot/src/share/vm/runtime/perfData.hpp
index 4a62d2e0..b9f5c1a7 100644
--- a/hotspot/src/share/vm/runtime/perfData.hpp
+++ b/hotspot/src/share/vm/runtime/perfData.hpp
@@ -424,6 +424,7 @@ class PerfLongVariant : public PerfLong {
public:
inline void inc() { (*(jlong*)_valuep)++; }
inline void inc(jlong val) { (*(jlong*)_valuep) += val; }
+ inline void dec(jlong val) { inc(-val); }
inline void add(jlong val) { (*(jlong*)_valuep) += val; }
void clear_sample_helper() { _sample_helper = NULL; }
};
diff --git a/hotspot/src/share/vm/runtime/vmStructs.cpp b/hotspot/src/share/vm/runtime/vmStructs.cpp
index 744c43e0..5140c015 100644
--- a/hotspot/src/share/vm/runtime/vmStructs.cpp
+++ b/hotspot/src/share/vm/runtime/vmStructs.cpp
@@ -330,10 +330,6 @@ typedef OffsetCompactHashtable<const char*, Symbol*, symbol_equals_compact_hasht
nonstatic_field(InstanceKlass, _methods_jmethod_ids, jmethodID*) \
volatile_nonstatic_field(InstanceKlass, _idnum_allocated_count, u2) \
nonstatic_field(InstanceKlass, _annotations, Annotations*) \
- nonstatic_field(InstanceKlass, _dependencies, nmethodBucket*) \
- nonstatic_field(nmethodBucket, _nmethod, nmethod*) \
- nonstatic_field(nmethodBucket, _count, int) \
- nonstatic_field(nmethodBucket, _next, nmethodBucket*) \
nonstatic_field(InstanceKlass, _method_ordering, Array<int>*) \
nonstatic_field(InstanceKlass, _default_vtable_indices, Array<int>*) \
nonstatic_field(Klass, _super_check_offset, juint) \
@@ -1472,7 +1468,6 @@ typedef OffsetCompactHashtable<const char*, Symbol*, symbol_equals_compact_hasht
declare_toplevel_type(volatile Metadata*) \
\
declare_toplevel_type(DataLayout) \
- declare_toplevel_type(nmethodBucket) \
\
/********/ \
/* Oops */ \
--
2.17.1