/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_LIST_H_
#define ART_RUNTIME_THREAD_LIST_H_

#include "barrier.h"
#include "base/histogram.h"
#include "base/mutex.h"
#include "base/value_object.h"
#include "jni.h"
#include "reflective_handle_scope.h"
#include "suspend_reason.h"

#include <bitset>
#include <list>
#include <memory>
#include <vector>

namespace art {
namespace gc {
namespace collector {
class GarbageCollector;
}  // namespace collector
class GcPauseListener;
}  // namespace gc
class Closure;
class IsMarkedVisitor;
class RootVisitor;
class Thread;
class TimingLogger;
enum VisitRootFlags : uint8_t;

class ThreadList {
 public:
  static constexpr uint32_t kMaxThreadId = 0xFFFF;
  static constexpr uint32_t kInvalidThreadId = 0;
  static constexpr uint32_t kMainThreadId = 1;
  static constexpr uint64_t kDefaultThreadSuspendTimeout =
      kIsDebugBuild ? 50'000'000'000ull : 10'000'000'000ull;

  explicit ThreadList(uint64_t thread_suspend_timeout_ns);
  ~ThreadList();

  void ShutDown();

  void DumpForSigQuit(std::ostream& os)
      REQUIRES(!Locks::thread_list_lock_, !Locks::mutator_lock_);
  // For thread suspend timeout dumps.
  void Dump(std::ostream& os, bool dump_native_stack = true)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);
  pid_t GetLockOwner();  // For SignalCatcher.

  // Thread suspension support.
  void ResumeAll()
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_);
  bool Resume(Thread* thread, SuspendReason reason = SuspendReason::kInternal)
      REQUIRES(!Locks::thread_suspend_count_lock_) WARN_UNUSED;

  // Suspends all threads and gets exclusive access to the mutator lock.
  // If long_suspend is true, then other threads that try to suspend will never time out.
  // long_suspend is currently used for hprof, since dumping large heaps takes a long time.
  void SuspendAll(const char* cause, bool long_suspend = false)
      EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_,
               !Locks::mutator_lock_);
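
  // Editor's usage sketch (illustrative; not part of the original header). SuspendAll() must be
  // paired with ResumeAll() on the same thread, and most callers prefer the RAII helper
  // ScopedSuspendAll declared at the end of this file. Assuming an already started runtime, a
  // hand-rolled pairing would look roughly like:
  //
  //   ThreadList* thread_list = Runtime::Current()->GetThreadList();
  //   thread_list->SuspendAll("example cause");
  //   // All mutator threads are suspended and the mutator lock is held exclusively here.
  //   thread_list->ResumeAll();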

  // Suspend a thread using a peer, typically used by the debugger. Returns the thread on success,
  // else null. The peer is used to identify the thread to avoid races with the thread terminating.
  // If the thread should be suspended then request_suspension should be true; otherwise the
  // routine will wait for a previous suspend request. If the suspension times out then *timed_out
  // is set to true.
  Thread* SuspendThreadByPeer(jobject peer,
                              bool request_suspension,
                              SuspendReason reason,
                              bool* timed_out)
      REQUIRES(!Locks::mutator_lock_,
               !Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_);

  // Suspend a thread using its thread id, typically used by lock/monitor inflation. Returns the
  // thread on success else null. The thread id is used to identify the thread to avoid races with
  // the thread terminating. Note that as thread ids are recycled this may not suspend the expected
  // thread, which may be terminating. If the suspension times out then *timed_out is set to true.
  Thread* SuspendThreadByThreadId(uint32_t thread_id, SuspendReason reason, bool* timed_out)
      REQUIRES(!Locks::mutator_lock_,
               !Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_);

  // Find an existing thread (or self) by its thread id (not tid).
  Thread* FindThreadByThreadId(uint32_t thread_id) REQUIRES(Locks::thread_list_lock_);

  // Does the thread list still contain the given thread, or one at the same address?
  // Used by Monitor to provide (mostly accurate) debugging information.
  bool Contains(Thread* thread) REQUIRES(Locks::thread_list_lock_);

  // Run a checkpoint on all threads. Running threads are not suspended but run the checkpoint
  // inside their next suspend check. Returns how many checkpoints are expected to run, including
  // for already suspended threads (b/24191051). Runs the callback, if non-null, inside the
  // thread_list_lock critical section after determining the runnable/suspended states of the
  // threads. A usage sketch appears at the end of this public section.
  size_t RunCheckpoint(Closure* checkpoint_function, Closure* callback = nullptr)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);

  // Run an empty checkpoint on threads. Wait until threads pass the next suspend point or are
  // suspended. This is used to ensure that the threads finish or aren't in the middle of an
  // in-flight mutator heap access (e.g. a read barrier). Runnable threads will respond by
  // decrementing the empty checkpoint barrier count. This works even when weak ref access is
  // disabled. Only one concurrent use is currently supported.
  void RunEmptyCheckpoint()
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);

  // Flip thread roots from from-space refs to to-space refs. Used by
  // the concurrent copying collector.
  size_t FlipThreadRoots(Closure* thread_flip_visitor,
                         Closure* flip_callback,
                         gc::collector::GarbageCollector* collector,
                         gc::GcPauseListener* pause_listener)
      REQUIRES(!Locks::mutator_lock_,
               !Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_);

  // Iterates over all the threads.
  void ForEach(void (*callback)(Thread*, void*), void* context)
      REQUIRES(Locks::thread_list_lock_);

  template<typename CallBack>
  void ForEach(CallBack cb) REQUIRES(Locks::thread_list_lock_) {
    ForEach([](Thread* t, void* ctx) REQUIRES(Locks::thread_list_lock_) {
      (*reinterpret_cast<CallBack*>(ctx))(t);
    }, &cb);
  }
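
  // Editor's usage sketch (illustrative; not part of the original header). Both ForEach overloads
  // require the caller to already hold Locks::thread_list_lock_, e.g.:
  //
  //   Thread* self = Thread::Current();
  //   MutexLock mu(self, *Locks::thread_list_lock_);
  //   Runtime::Current()->GetThreadList()->ForEach([](Thread* thread) {
  //     LOG(INFO) << *thread;  // Assumes the operator<< overload for Thread from thread.h.
  //   });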

  // Add/remove current thread from list.
  void Register(Thread* self)
      REQUIRES(Locks::runtime_shutdown_lock_)
      REQUIRES(!Locks::mutator_lock_,
               !Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_);
  void Unregister(Thread* self)
      REQUIRES(!Locks::mutator_lock_,
               !Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_);

  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitRootsForSuspendedThreads(RootVisitor* visitor)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitReflectiveTargets(ReflectiveValueVisitor* visitor) const REQUIRES(Locks::mutator_lock_);

  // Return a copy of the thread list.
  std::list<Thread*> GetList() REQUIRES(Locks::thread_list_lock_) {
    return list_;
  }

  void DumpNativeStacks(std::ostream& os)
      REQUIRES(!Locks::thread_list_lock_);

  Barrier* EmptyCheckpointBarrier() {
    return empty_checkpoint_barrier_.get();
  }

  void SweepInterpreterCaches(IsMarkedVisitor* visitor) const
      REQUIRES(!Locks::thread_list_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void WaitForOtherNonDaemonThreadsToExit(bool check_no_birth = true)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_,
               !Locks::mutator_lock_);
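
  // Editor's usage sketch (illustrative; not part of the original header) for the RunCheckpoint()
  // declaration above, assuming Closure's single Run(Thread*) entry point. The checkpoint runs
  // either on each runnable thread at its next suspend check or on the requesting thread on
  // behalf of suspended threads, so its body must be thread-safe. A minimal counting checkpoint
  // might look like:
  //
  //   class CountThreadsClosure : public Closure {
  //    public:
  //     void Run(Thread* thread ATTRIBUTE_UNUSED) override {
  //       count_.fetch_add(1u, std::memory_order_relaxed);
  //     }
  //     std::atomic<size_t> count_{0};
  //   };
  //
  //   CountThreadsClosure closure;
  //   size_t expected = Runtime::Current()->GetThreadList()->RunCheckpoint(&closure);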

 private:
  uint32_t AllocThreadId(Thread* self);
  void ReleaseThreadId(Thread* self, uint32_t id) REQUIRES(!Locks::allocated_thread_ids_lock_);

  bool Contains(pid_t tid) REQUIRES(Locks::thread_list_lock_);
  size_t RunCheckpoint(Closure* checkpoint_function, bool includeSuspended)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);

  void DumpUnattachedThreads(std::ostream& os, bool dump_native_stack)
      REQUIRES(!Locks::thread_list_lock_);

  void SuspendAllDaemonThreadsForShutdown()
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);

  void SuspendAllInternal(Thread* self,
                          Thread* ignore1,
                          Thread* ignore2 = nullptr,
                          SuspendReason reason = SuspendReason::kInternal)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);

  void AssertThreadsAreSuspended(Thread* self, Thread* ignore1, Thread* ignore2 = nullptr)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);

  std::bitset<kMaxThreadId> allocated_ids_ GUARDED_BY(Locks::allocated_thread_ids_lock_);

  // The actual list of all threads.
  std::list<Thread*> list_ GUARDED_BY(Locks::thread_list_lock_);

  // Ongoing suspend all requests, used to ensure threads added to list_ respect SuspendAll.
  int suspend_all_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // Number of threads unregistering; ~ThreadList blocks until this hits 0.
  int unregistering_count_ GUARDED_BY(Locks::thread_list_lock_);

  // Thread suspend time histogram. Only modified when all the threads are suspended, so guarding
  // it by the mutator lock ensures no thread can read it while another thread is modifying it.
  Histogram<uint64_t> suspend_all_historam_ GUARDED_BY(Locks::mutator_lock_);

  // Whether or not the current thread suspension is long.
  bool long_suspend_;

  // Whether the shutdown function has been called. This is checked in the destructor. It is an
  // error to destroy a ThreadList instance without first calling ShutDown().
  bool shut_down_;

  // Thread suspension timeout in nanoseconds.
  const uint64_t thread_suspend_timeout_ns_;

  std::unique_ptr<Barrier> empty_checkpoint_barrier_;

  friend class Thread;

  DISALLOW_COPY_AND_ASSIGN(ThreadList);
};

// Helper for suspending all threads and getting exclusive access to the mutator lock.
class ScopedSuspendAll : public ValueObject {
 public:
  explicit ScopedSuspendAll(const char* cause, bool long_suspend = false)
      EXCLUSIVE_LOCK_FUNCTION(Locks::mutator_lock_)
      REQUIRES(!Locks::thread_list_lock_,
               !Locks::thread_suspend_count_lock_,
               !Locks::mutator_lock_);
  // No REQUIRES(mutator_lock_) since the unlock function already asserts this.
  ~ScopedSuspendAll()
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_);
};

}  // namespace art

#endif  // ART_RUNTIME_THREAD_LIST_H_