/******************************************************************************
 *
 *  Copyright (C) 2014 Google, Inc.
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at:
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 ******************************************************************************/

#define LOG_TAG "bt_osi_thread"

#include "osi/include/thread.h"

#include <atomic>

#include <base/logging.h>
#include <errno.h>
#include <malloc.h>
#include <pthread.h>
#include <string.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <unistd.h>

#include "osi/include/allocator.h"
#include "osi/include/compat.h"
#include "osi/include/fixed_queue.h"
#include "osi/include/log.h"
#include "osi/include/reactor.h"
#include "osi/include/semaphore.h"

struct thread_t {
  std::atomic_bool is_joined{false};
  pthread_t pthread;
  pid_t tid;
  char name[THREAD_NAME_MAX + 1];
  reactor_t* reactor;
  fixed_queue_t* work_queue;
};

struct start_arg {
  thread_t* thread;
  semaphore_t* start_sem;
  int error;
};

typedef struct {
  thread_fn func;
  void* context;
} work_item_t;

static void* run_thread(void* start_arg);
static void work_queue_read_cb(void* context);

static const size_t DEFAULT_WORK_QUEUE_CAPACITY = 128;

thread_t* thread_new_sized(const char* name, size_t work_queue_capacity) {
  CHECK(name != NULL);
  CHECK(work_queue_capacity != 0);

  thread_t* ret = static_cast<thread_t*>(osi_calloc(sizeof(thread_t)));

  ret->reactor = reactor_new();
  if (!ret->reactor) goto error;

  ret->work_queue = fixed_queue_new(work_queue_capacity);
  if (!ret->work_queue) goto error;

  // |start| is on the stack, but the semaphore handshake below guarantees the
  // new thread is done with it before this function returns.
  struct start_arg start;
  start.start_sem = semaphore_new(0);
  if (!start.start_sem) goto error;

  strncpy(ret->name, name, THREAD_NAME_MAX);
  start.thread = ret;
  start.error = 0;
  pthread_create(&ret->pthread, NULL, run_thread, &start);
  semaphore_wait(start.start_sem);
  semaphore_free(start.start_sem);

  if (start.error) goto error;

  return ret;

error:;
  if (ret) {
    fixed_queue_free(ret->work_queue, osi_free);
    reactor_free(ret->reactor);
  }
  osi_free(ret);
  return NULL;
}

thread_t* thread_new(const char* name) {
  return thread_new_sized(name, DEFAULT_WORK_QUEUE_CAPACITY);
}

void thread_free(thread_t* thread) {
  if (!thread) return;

  thread_stop(thread);
  thread_join(thread);

  fixed_queue_free(thread->work_queue, osi_free);
  reactor_free(thread->reactor);
  osi_free(thread);
}

void thread_join(thread_t* thread) {
  CHECK(thread != NULL);

  if (!std::atomic_exchange(&thread->is_joined, true))
    pthread_join(thread->pthread, NULL);
}

bool thread_post(thread_t* thread, thread_fn func, void* context) {
  CHECK(thread != NULL);
  CHECK(func != NULL);

  // TODO(sharvil): if the current
  // thread == |thread| and we've run out of queue space, we should abort
  // this operation, otherwise we'll deadlock.

  // Queue item is freed either when the queue itself is destroyed
  // or when the item is removed from the queue for dispatch.
  work_item_t* item = (work_item_t*)osi_malloc(sizeof(work_item_t));
  item->func = func;
  item->context = context;
  fixed_queue_enqueue(thread->work_queue, item);
  return true;
}

void thread_stop(thread_t* thread) {
  CHECK(thread != NULL);
  reactor_stop(thread->reactor);
}

bool thread_set_priority(thread_t* thread, int priority) {
  if (!thread) return false;

  const int rc = setpriority(PRIO_PROCESS, thread->tid, priority);
  if (rc < 0) {
    LOG_ERROR(LOG_TAG,
              "%s unable to set thread priority %d for tid %d, error %d",
              __func__, priority, thread->tid, rc);
    return false;
  }

  return true;
}

bool thread_set_rt_priority(thread_t* thread, int priority) {
  if (!thread) return false;

  struct sched_param rt_params;
  rt_params.sched_priority = priority;

  const int rc = sched_setscheduler(thread->tid, SCHED_FIFO, &rt_params);
  if (rc != 0) {
    LOG_ERROR(LOG_TAG,
              "%s unable to set SCHED_FIFO priority %d for tid %d, error %s",
              __func__, priority, thread->tid, strerror(errno));
    return false;
  }

  return true;
}

bool thread_is_self(const thread_t* thread) {
  CHECK(thread != NULL);
  return !!pthread_equal(pthread_self(), thread->pthread);
}

reactor_t* thread_get_reactor(const thread_t* thread) {
  CHECK(thread != NULL);
  return thread->reactor;
}

const char* thread_name(const thread_t* thread) {
  CHECK(thread != NULL);
  return thread->name;
}

static void* run_thread(void* start_arg) {
  CHECK(start_arg != NULL);

  struct start_arg* start = static_cast<struct start_arg*>(start_arg);
  thread_t* thread = start->thread;

  CHECK(thread != NULL);

  if (prctl(PR_SET_NAME, (unsigned long)thread->name) == -1) {
    LOG_ERROR(LOG_TAG, "%s unable to set thread name: %s", __func__,
              strerror(errno));
    start->error = errno;
    semaphore_post(start->start_sem);
    return NULL;
  }
  thread->tid = gettid();

  LOG_INFO(LOG_TAG, "%s: thread id %d, thread name %s started", __func__,
           thread->tid, thread->name);

  semaphore_post(start->start_sem);

  int fd = fixed_queue_get_dequeue_fd(thread->work_queue);
  void* context = thread->work_queue;

  reactor_object_t* work_queue_object =
      reactor_register(thread->reactor, fd, context, work_queue_read_cb, NULL);
  reactor_start(thread->reactor);
  reactor_unregister(work_queue_object);

  // Make sure we dispatch all queued work items before exiting the thread.
  // This allows a caller to safely tear down by enqueuing a teardown
  // work item and then joining the thread.
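  //
  // A sketch of that pattern (illustrative only; |teardown_cb| is a
  // hypothetical caller-defined thread_fn, not part of this module):
  //
  //   thread_post(thread, teardown_cb, context);
  //   thread_stop(thread);   // stops the reactor; the drain loop below runs
  //   thread_join(thread);   // returns only after |teardown_cb| has run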
  // Bound the drain by the queue's capacity so that work items which keep
  // re-posting themselves can't keep this thread alive indefinitely.
  size_t count = 0;
  work_item_t* item =
      static_cast<work_item_t*>(fixed_queue_try_dequeue(thread->work_queue));
  while (item && count <= fixed_queue_capacity(thread->work_queue)) {
    item->func(item->context);
    osi_free(item);
    item =
        static_cast<work_item_t*>(fixed_queue_try_dequeue(thread->work_queue));
    ++count;
  }

  if (count > fixed_queue_capacity(thread->work_queue))
    LOG_DEBUG(LOG_TAG, "%s growing event queue on shutdown.", __func__);

  LOG_WARN(LOG_TAG, "%s: thread id %d, thread name %s exited", __func__,
           thread->tid, thread->name);
  return NULL;
}

static void work_queue_read_cb(void* context) {
  CHECK(context != NULL);

  fixed_queue_t* queue = (fixed_queue_t*)context;
  work_item_t* item = static_cast<work_item_t*>(fixed_queue_dequeue(queue));
  item->func(item->context);
  osi_free(item);
}
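
// Example usage of this API (an illustrative sketch, not part of this module;
// |say_hello|, |example|, and the niceness value are hypothetical):
//
//   static void say_hello(void* context) {
//     LOG_INFO(LOG_TAG, "hello, %s", static_cast<const char*>(context));
//   }
//
//   void example(void) {
//     thread_t* thread = thread_new("example_thread");
//     if (!thread) return;
//
//     thread_set_priority(thread, -19);  // niceness; may require privileges
//     thread_post(thread, say_hello, (void*)"world");
//     thread_free(thread);  // stops, joins, and drains remaining work items
//   }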