// unpark
UNSAFE_ENTRY(void, Unsafe_Unpark(JNIEnv *env, jobject unsafe, jobject jthread))
  UnsafeWrapper("Unsafe_Unpark");
  Parker* p = NULL;
  if (jthread != NULL) {
    oop java_thread = JNIHandles::resolve_non_null(jthread);
    if (java_thread != NULL) {
      jlong lp = java_lang_Thread::park_event(java_thread);
      if (lp != 0) {
        // This cast is OK even though the jlong might have been read
        // non-atomically on 32bit systems, since there, one word will
        // always be zero anyway and the value set is always the same
        p = (Parker*)addr_from_java(lp);
      } else {
        // Grab lock if apparently null or using older version of library
        MutexLocker mu(Threads_lock);
        java_thread = JNIHandles::resolve_non_null(jthread);
        if (java_thread != NULL) {
          JavaThread* thr = java_lang_Thread::thread(java_thread);
          if (thr != NULL) {
            p = thr->parker();
            if (p != NULL) { // Bind to Java thread for next time.
              java_lang_Thread::set_park_event(java_thread, addr_to_java(p));
            }
          }
        }
      }
    }
  }
  if (p != NULL) {
    HS_DTRACE_PROBE1(hotspot, thread__unpark, p);
    p->unpark();
  }
UNSAFE_END
class Parker : public os::PlatformParker {
private:
  volatile int _counter;
  Parker*      FreeNext;
  JavaThread*  AssociatedWith;  // Current association

public:
  Parker() : PlatformParker() {
    _counter = 0;
    FreeNext = NULL;
    AssociatedWith = NULL;
  }

protected:
  ~Parker() { ShouldNotReachHere(); }

public:
  // For simplicity of interface with Java, all forms of park (indefinite,
  // relative, and absolute) are multiplexed into one call.
  void park(bool isAbsolute, jlong time);
  void unpark();

  // ...
};
void Parker::park(bool isAbsolute, jlong time) {
  guarantee(_ParkEvent != NULL, "invariant");

  // First, demultiplex/decode time arguments
  if (time < 0) { // don't wait
    return;
  } else if (time == 0 && !isAbsolute) {
    time = INFINITE;
  } else if (isAbsolute) {
    time -= os::javaTimeMillis(); // convert to relative time
    if (time <= 0) // already elapsed
      return;
  } else { // relative
    time /= 1000000; // Must coarsen from nanos to millis
    if (time == 0)   // Wait for the minimal time unit if zero
      time = 1;
  }
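To make the three Java entry points concrete: LockSupport.park() arrives here as park(false, 0), LockSupport.parkNanos(n) as park(false, n), and LockSupport.parkUntil(deadline) as park(true, deadlineMillis). Below is a minimal standalone sketch of the same decoding step; the names decode_park_time, now_millis, and kInfinite are ours, not HotSpot's, and kInfinite merely stands in for the Windows INFINITE constant.

#include <cstdint>
#include <cstdio>

static const int64_t kInfinite = -1;             // stand-in for Windows INFINITE
static int64_t now_millis() { return 1000000; }  // stand-in for os::javaTimeMillis()

// Mirrors the demultiplexing above: returns a relative wait in millis,
// kInfinite for "wait forever", or 0 for "do not wait at all".
int64_t decode_park_time(bool isAbsolute, int64_t time) {
  if (time < 0)                 return 0;          // don't wait
  if (time == 0 && !isAbsolute) return kInfinite;  // park() with no timeout
  if (isAbsolute) {                                // parkUntil(deadlineMillis)
    int64_t rel = time - now_millis();
    return rel <= 0 ? 0 : rel;                     // deadline already elapsed
  }
  int64_t millis = time / 1000000;                 // parkNanos(n): coarsen to millis
  return millis == 0 ? 1 : millis;                 // minimal wait unit
}

int main() {
  printf("%lld\n", (long long) decode_park_time(false, 0));       // -1: wait forever
  printf("%lld\n", (long long) decode_park_time(false, 500000));  //  1: sub-millisecond nanos round up
  printf("%lld\n", (long long) decode_park_time(true, 1000500));  //  500: deadline 500ms ahead
}

Note the rounding choice: a nonzero nanosecond timeout is never allowed to collapse to zero, since park(…, 0) would otherwise mean "wait forever".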
// Adding a lock prefix to an instruction on MP machine
// VC++ doesn't like the lock prefix to be on a single line
// so we can't insert a label after the lock prefix.
// By emitting a lock prefix, we can define a label after it.
#define LOCK_IF_MP(mp) __asm cmp mp, 0  \
                       __asm je L0      \
                       __asm _emit 0xF0 \
                       __asm L0:
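For context on how this macro is consumed: the Windows x86 atomic operations wrap their instructions in it, so the lock prefix (the 0xF0 byte) is only emitted when os::is_MP() reports a multiprocessor machine. The following use of the macro is reproduced from memory from HotSpot's atomic_windows_x86.inline.hpp, so treat it as a sketch rather than an exact quote; it only compiles with 32-bit MSVC inline assembly.

inline jint Atomic::cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value) {
  // alternative for InterlockedCompareExchange
  int mp = os::is_MP();
  __asm {
    mov edx, dest
    mov ecx, exchange_value
    mov eax, compare_value
    LOCK_IF_MP(mp)                  // emit 0xF0 (lock) only on MP machines
    cmpxchg dword ptr [edx], ecx    // result is left in eax, which MSVC
  }                                 // treats as the return value
}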
// Fill bytes; larger units are filled atomically if everything is aligned.
void Copy::fill_to_memory_atomic(void* to, size_t size, jubyte value) {
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) to | (uintptr_t) size;
  if (bits % sizeof(jlong) == 0) {
    jlong fill = (julong)( (jubyte)value ); // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
      fill += fill << 32;
    }
    //Copy::fill_to_jlongs_atomic((jlong*) dst, size / sizeof(jlong));
    for (uintptr_t off = 0; off < size; off += sizeof(jlong)) {
      *(jlong*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jint) == 0) {
    jint fill = (juint)( (jubyte)value ); // zero-extend
    if (fill != 0) {
      fill += fill << 8;
      fill += fill << 16;
    }
    //Copy::fill_to_jints_atomic((jint*) dst, size / sizeof(jint));
    for (uintptr_t off = 0; off < size; off += sizeof(jint)) {
      *(jint*)(dst + off) = fill;
    }
  } else if (bits % sizeof(jshort) == 0) {
    jshort fill = (jushort)( (jubyte)value ); // zero-extend
    fill += fill << 8;
    //Copy::fill_to_jshorts_atomic((jshort*) dst, size / sizeof(jshort));
    for (uintptr_t off = 0; off < size; off += sizeof(jshort)) {
      *(jshort*)(dst + off) = fill;
    }
  } else {
    // Not aligned, so no need to be atomic.
    // fill_to_bytes is implemented internally with memset.
    Copy::fill_to_bytes(dst, size, value);
  }
}
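The shift-and-add chain splats the single byte across the wider word; addition behaves exactly like bitwise OR here because the byte copies never overlap, so no carries can occur. A quick standalone check of that arithmetic:

#include <cstdint>
#include <cstdio>

int main() {
  // Replicate the widening trick above: splat one byte across a 64-bit word.
  uint64_t fill = 0xAB;  // zero-extended byte value
  fill += fill << 8;     // 0x000000000000ABAB
  fill += fill << 16;    // 0x00000000ABABABAB
  fill += fill << 32;    // 0xABABABABABABABAB
  printf("%016llx\n", (unsigned long long) fill);
}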
void* memmove(void* destination, const void* source, size_t num);

Copies the values of num bytes from the location pointed to by source to the memory block pointed to by destination. Copying takes place as if an intermediate buffer were used, allowing the destination and source to overlap.
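A minimal illustration of that overlap guarantee, using only the standard library:

#include <cstdio>
#include <cstring>

int main() {
  // Overlapping regions: memmove copies as if through a temporary buffer.
  char buf[] = "abcdef";
  memmove(buf + 2, buf, 4);  // shift "abcd" two bytes to the right
  printf("%s\n", buf);       // prints "ababcd"
}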
// Copy bytes; larger units are copied atomically if everything is aligned.
void Copy::conjoint_memory_atomic(void* from, void* to, size_t size) {
  address src = (address) from;
  address dst = (address) to;
  uintptr_t bits = (uintptr_t) src | (uintptr_t) dst | (uintptr_t) size;
  // (Note: We could improve performance by ignoring the low bits of size,
  // and putting a short cleanup loop after each bulk copy loop.
  // There are plenty of other ways to make this faster also,
  // and it's a slippery slope.  For now, let's keep this code simple
  // since the simplicity helps clarify the atomicity semantics of
  // this operation.  There are also CPU-specific assembly versions
  // which may or may not want to include such optimizations.)
  if (bits % sizeof(jlong) == 0) {
    Copy::conjoint_jlongs_atomic((jlong*) src, (jlong*) dst, size / sizeof(jlong));
  } else if (bits % sizeof(jint) == 0) {
    Copy::conjoint_jints_atomic((jint*) src, (jint*) dst, size / sizeof(jint));
  } else if (bits % sizeof(jshort) == 0) {
    Copy::conjoint_jshorts_atomic((jshort*) src, (jshort*) dst, size / sizeof(jshort));
  } else {
    // Not aligned, so no need to be atomic.
    // conjoint_jbytes is implemented internally with memmove.
    Copy::conjoint_jbytes((void*) src, (void*) dst, size);
  }
}
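The dispatch hinges on the OR trick computed above: bits has its low n bits clear only if both addresses and the size are all multiples of 2^n, so a single modulo test covers all three operands at once. A small sketch of that check; all_aligned is our name for illustration, not HotSpot's:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// The low bits of (src | dst | size) are zero only when all three
// values are aligned to the copy unit.
static bool all_aligned(const void* src, const void* dst, size_t size, size_t unit) {
  uintptr_t bits = (uintptr_t) src | (uintptr_t) dst | (uintptr_t) size;
  return bits % unit == 0;
}

int main() {
  alignas(8) char a[16], b[16];
  printf("%d\n", all_aligned(a, b, 16, sizeof(int64_t)));      // 1: jlong-sized units OK
  printf("%d\n", all_aligned(a + 1, b, 16, sizeof(int64_t)));  // 0: falls back to smaller units
}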
freeMemory
void free(void* ptr);
A block of memory previously allocated by a call to malloc, calloc or realloc is deallocated, making it available again for further allocations.
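Putting the two halves together: Unsafe.allocateMemory and Unsafe.freeMemory bottom out in the C allocator (in HotSpot, via os::malloc and os::free), so the usual pairing rules apply. A trivial native-side sketch of the contract free() describes:

#include <cstdlib>

int main() {
  // Allocate a raw 64-byte block and release it; the block then becomes
  // available again for further allocations, exactly as described above.
  void* block = malloc(64);
  if (block == NULL) return 1;
  free(block);
  block = NULL;  // don't keep a dangling pointer around
  return 0;
}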