/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rsCpuCore.h"
#include "rsCpuScript.h"
#include "rsCpuScriptGroup.h"

#include <malloc.h>
#include "rsContext.h"

#include <sys/types.h>
#include <sys/resource.h>
#include <sched.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

#if !defined(RS_SERVER) && !defined(RS_COMPATIBILITY_LIB)
#include <cutils/properties.h>
#include "utils/StopWatch.h"
#endif

#ifdef RS_SERVER
// Android exposes gettid(), standard Linux does not
static pid_t gettid() {
    return syscall(SYS_gettid);
}
#endif

using namespace android;
using namespace android::renderscript;

typedef void (*outer_foreach_t)(
    const android::renderscript::RsForEachStubParamStruct *,
    uint32_t x1, uint32_t x2,
    uint32_t instep, uint32_t outstep);


static pthread_key_t gThreadTLSKey = 0;
static uint32_t gThreadTLSKeyCount = 0;
static pthread_mutex_t gInitMutex = PTHREAD_MUTEX_INITIALIZER;

RsdCpuReference::~RsdCpuReference() {
}

RsdCpuReference * RsdCpuReference::create(Context *rsc, uint32_t version_major,
        uint32_t version_minor, sym_lookup_t lfn, script_lookup_t slfn
#ifndef RS_COMPATIBILITY_LIB
        , bcc::RSLinkRuntimeCallback pLinkRuntimeCallback,
        RSSelectRTCallback pSelectRTCallback
#endif
        ) {

    RsdCpuReferenceImpl *cpu = new RsdCpuReferenceImpl(rsc);
    if (!cpu) {
        return NULL;
    }
    if (!cpu->init(version_major, version_minor, lfn, slfn)) {
        delete cpu;
        return NULL;
    }

#ifndef RS_COMPATIBILITY_LIB
    cpu->setLinkRuntimeCallback(pLinkRuntimeCallback);
    cpu->setSelectRTCallback(pSelectRTCallback);
#endif

    return cpu;
}


Context * RsdCpuReference::getTlsContext() {
    ScriptTLSStruct * tls = (ScriptTLSStruct *)pthread_getspecific(gThreadTLSKey);
    return tls->mContext;
}

const Script * RsdCpuReference::getTlsScript() {
    ScriptTLSStruct * tls = (ScriptTLSStruct *)pthread_getspecific(gThreadTLSKey);
    return tls->mScript;
}

pthread_key_t RsdCpuReference::getThreadTLSKey(){ return gThreadTLSKey; }

////////////////////////////////////////////////////////////
///

RsdCpuReferenceImpl::RsdCpuReferenceImpl(Context *rsc) {
    mRSC = rsc;

    version_major = 0;
    version_minor = 0;
    mInForEach = false;
    memset(&mWorkers, 0, sizeof(mWorkers));
    memset(&mTlsStruct, 0, sizeof(mTlsStruct));
    mExit = false;
#ifndef RS_COMPATIBILITY_LIB
    mLinkRuntimeCallback = NULL;
    mSelectRTCallback = NULL;
#endif
}

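// Worker thread entry point. Each helper thread records its native thread id,
// initializes its launch signal, installs the per-thread TLS slot, then loops:
// wait on the launch signal, run the current launch callback (passing idx + 1,
// since the calling thread is always worker 0), decrement the running count,
// and set the completion signal. The loop exits once mExit is set by the
// destructor.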
void * RsdCpuReferenceImpl::helperThreadProc(void *vrsc) {
    RsdCpuReferenceImpl *dc = (RsdCpuReferenceImpl *)vrsc;

    uint32_t idx = __sync_fetch_and_add(&dc->mWorkers.mLaunchCount, 1);

    //ALOGV("RS helperThread starting %p idx=%i", dc, idx);

    dc->mWorkers.mLaunchSignals[idx].init();
    dc->mWorkers.mNativeThreadId[idx] = gettid();

    memset(&dc->mTlsStruct, 0, sizeof(dc->mTlsStruct));
    int status = pthread_setspecific(gThreadTLSKey, &dc->mTlsStruct);
    if (status) {
        ALOGE("pthread_setspecific %i", status);
    }

#if 0
    typedef struct {uint64_t bits[1024 / 64]; } cpu_set_t;
    cpu_set_t cpuset;
    memset(&cpuset, 0, sizeof(cpuset));
    cpuset.bits[idx / 64] |= 1ULL << (idx % 64);
    int ret = syscall(241, rsc->mWorkers.mNativeThreadId[idx],
                      sizeof(cpuset), &cpuset);
    ALOGE("SETAFFINITY ret = %i %s", ret, EGLUtils::strerror(ret));
#endif

    while (!dc->mExit) {
        dc->mWorkers.mLaunchSignals[idx].wait();
        if (dc->mWorkers.mLaunchCallback) {
            // idx + 1 is used because the calling thread is always worker 0.
            dc->mWorkers.mLaunchCallback(dc->mWorkers.mLaunchData, idx + 1);
        }
        __sync_fetch_and_sub(&dc->mWorkers.mRunningCount, 1);
        dc->mWorkers.mCompleteSignal.set();
    }

    //ALOGV("RS helperThread exited %p idx=%i", dc, idx);
    return NULL;
}

void RsdCpuReferenceImpl::launchThreads(WorkerCallback_t cbk, void *data) {
    mWorkers.mLaunchData = data;
    mWorkers.mLaunchCallback = cbk;

    // Fast path for very small launches.
    MTLaunchStruct *mtls = (MTLaunchStruct *)data;
    if (mtls && mtls->fep.dimY <= 1 && mtls->xEnd <= mtls->xStart + mtls->mSliceSize) {
        if (mWorkers.mLaunchCallback) {
            mWorkers.mLaunchCallback(mWorkers.mLaunchData, 0);
        }
        return;
    }

    mWorkers.mRunningCount = mWorkers.mCount;
    __sync_synchronize();

    for (uint32_t ct = 0; ct < mWorkers.mCount; ct++) {
        mWorkers.mLaunchSignals[ct].set();
    }

    // We use the calling thread as one of the workers so we can start without
    // the delay of the thread wakeup.
    if (mWorkers.mLaunchCallback) {
        mWorkers.mLaunchCallback(mWorkers.mLaunchData, 0);
    }

    while (__sync_fetch_and_or(&mWorkers.mRunningCount, 0) != 0) {
        mWorkers.mCompleteSignal.wait();
    }
}


void RsdCpuReferenceImpl::lockMutex() {
    pthread_mutex_lock(&gInitMutex);
}

void RsdCpuReferenceImpl::unlockMutex() {
    pthread_mutex_unlock(&gInitMutex);
}

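// One-time setup. Reference-counts the process-wide TLS key, binds this
// (command) thread's TLS slot, then sizes the worker pool from the number of
// online CPUs (mRSC->props.mDebugMaxThreads can override this for debugging)
// and starts cpu - 1 helper threads, since the calling thread doubles as
// worker 0.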
bool RsdCpuReferenceImpl::init(uint32_t version_major, uint32_t version_minor,
                               sym_lookup_t lfn, script_lookup_t slfn) {

    mSymLookupFn = lfn;
    mScriptLookupFn = slfn;

    lockMutex();
    if (!gThreadTLSKeyCount) {
        int status = pthread_key_create(&gThreadTLSKey, NULL);
        if (status) {
            ALOGE("Failed to init thread tls key.");
            unlockMutex();
            return false;
        }
    }
    gThreadTLSKeyCount++;
    unlockMutex();

    mTlsStruct.mContext = mRSC;
    mTlsStruct.mScript = NULL;
    int status = pthread_setspecific(gThreadTLSKey, &mTlsStruct);
    if (status) {
        ALOGE("pthread_setspecific %i", status);
    }

    int cpu = sysconf(_SC_NPROCESSORS_ONLN);
    if (mRSC->props.mDebugMaxThreads) {
        cpu = mRSC->props.mDebugMaxThreads;
    }
    if (cpu < 2) {
        mWorkers.mCount = 0;
        return true;
    }

    // Subtract one from the cpu count because we also use the command thread as a worker.
    mWorkers.mCount = (uint32_t)(cpu - 1);

    ALOGV("%p Launching thread(s), CPUs %i", mRSC, mWorkers.mCount + 1);

    mWorkers.mThreadId = (pthread_t *) calloc(mWorkers.mCount, sizeof(pthread_t));
    mWorkers.mNativeThreadId = (pid_t *) calloc(mWorkers.mCount, sizeof(pid_t));
    mWorkers.mLaunchSignals = new Signal[mWorkers.mCount];
    mWorkers.mLaunchCallback = NULL;

    mWorkers.mCompleteSignal.init();

    mWorkers.mRunningCount = mWorkers.mCount;
    mWorkers.mLaunchCount = 0;
    __sync_synchronize();

    pthread_attr_t threadAttr;
    status = pthread_attr_init(&threadAttr);
    if (status) {
        ALOGE("Failed to init thread attribute.");
        return false;
    }

    for (uint32_t ct = 0; ct < mWorkers.mCount; ct++) {
        status = pthread_create(&mWorkers.mThreadId[ct], &threadAttr, helperThreadProc, this);
        if (status) {
            mWorkers.mCount = ct;
            ALOGE("Created fewer than expected number of RS threads.");
            break;
        }
    }
    while (__sync_fetch_and_or(&mWorkers.mRunningCount, 0) != 0) {
        usleep(100);
    }

    pthread_attr_destroy(&threadAttr);
    return true;
}


void RsdCpuReferenceImpl::setPriority(int32_t priority) {
    for (uint32_t ct = 0; ct < mWorkers.mCount; ct++) {
        setpriority(PRIO_PROCESS, mWorkers.mNativeThreadId[ct], priority);
    }
}

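// Teardown mirrors init(): flag mExit, wake every worker so its loop can
// observe the flag and return, join the threads, then drop this context's
// reference on the TLS key, deleting the key once the last reference is gone.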
RsdCpuReferenceImpl::~RsdCpuReferenceImpl() {
    mExit = true;
    mWorkers.mLaunchData = NULL;
    mWorkers.mLaunchCallback = NULL;
    mWorkers.mRunningCount = mWorkers.mCount;
    __sync_synchronize();
    for (uint32_t ct = 0; ct < mWorkers.mCount; ct++) {
        mWorkers.mLaunchSignals[ct].set();
    }
    void *res;
    for (uint32_t ct = 0; ct < mWorkers.mCount; ct++) {
        pthread_join(mWorkers.mThreadId[ct], &res);
    }
    rsAssert(__sync_fetch_and_or(&mWorkers.mRunningCount, 0) == 0);

    // Global structure cleanup.
    lockMutex();
    --gThreadTLSKeyCount;
    if (!gThreadTLSKeyCount) {
        pthread_key_delete(gThreadTLSKey);
    }
    unlockMutex();
}

typedef void (*rs_t)(const void *, void *, const void *, uint32_t, uint32_t, uint32_t, uint32_t);

static void wc_xy(void *usr, uint32_t idx) {
    MTLaunchStruct *mtls = (MTLaunchStruct *)usr;
    RsForEachStubParamStruct p;
    memcpy(&p, &mtls->fep, sizeof(p));
    p.lid = idx;
    uint32_t sig = mtls->sig;

    outer_foreach_t fn = (outer_foreach_t) mtls->kernel;
    while (1) {
        uint32_t slice = (uint32_t)__sync_fetch_and_add(&mtls->mSliceNum, 1);
        uint32_t yStart = mtls->yStart + slice * mtls->mSliceSize;
        uint32_t yEnd = yStart + mtls->mSliceSize;
        yEnd = rsMin(yEnd, mtls->yEnd);
        if (yEnd <= yStart) {
            return;
        }

        //ALOGE("usr idx %i, x %i,%i y %i,%i", idx, mtls->xStart, mtls->xEnd, yStart, yEnd);
        //ALOGE("usr ptr in %p, out %p", mtls->fep.ptrIn, mtls->fep.ptrOut);

        for (p.y = yStart; p.y < yEnd; p.y++) {
            p.out = mtls->fep.ptrOut + (mtls->fep.yStrideOut * p.y) +
                    (mtls->fep.eStrideOut * mtls->xStart);
            p.in = mtls->fep.ptrIn + (mtls->fep.yStrideIn * p.y) +
                   (mtls->fep.eStrideIn * mtls->xStart);
            fn(&p, mtls->xStart, mtls->xEnd, mtls->fep.eStrideIn, mtls->fep.eStrideOut);
        }
    }
}

static void wc_x(void *usr, uint32_t idx) {
    MTLaunchStruct *mtls = (MTLaunchStruct *)usr;
    RsForEachStubParamStruct p;
    memcpy(&p, &mtls->fep, sizeof(p));
    p.lid = idx;
    uint32_t sig = mtls->sig;

    outer_foreach_t fn = (outer_foreach_t) mtls->kernel;
    while (1) {
        uint32_t slice = (uint32_t)__sync_fetch_and_add(&mtls->mSliceNum, 1);
        uint32_t xStart = mtls->xStart + slice * mtls->mSliceSize;
        uint32_t xEnd = xStart + mtls->mSliceSize;
        xEnd = rsMin(xEnd, mtls->xEnd);
        if (xEnd <= xStart) {
            return;
        }

        //ALOGE("usr slice %i idx %i, x %i,%i", slice, idx, xStart, xEnd);
        //ALOGE("usr ptr in %p, out %p", mtls->fep.ptrIn, mtls->fep.ptrOut);

        p.out = mtls->fep.ptrOut + (mtls->fep.eStrideOut * xStart);
        p.in = mtls->fep.ptrIn + (mtls->fep.eStrideIn * xStart);
        fn(&p, xStart, xEnd, mtls->fep.eStrideIn, mtls->fep.eStrideOut);
    }
}

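// Slice sizing for the multithreaded path below. wc_xy/wc_x above pull slices
// off mtls->mSliceNum with an atomic increment, so the slice size trades load
// balancing against atomic-op overhead: s1 targets roughly four slices per
// worker, while s2 caps the work covered by each atomic at ~16 KB of
// reads/writes. Illustrative numbers (not from the source): with 3 helper
// threads plus the calling thread and dimY = 1080, s1 = 1080 / (4 * 4) = 67;
// if a row is 4096 bytes, s2 = 16384 / 4096 = 4, so the slice becomes
// min(67, 4) = 4 rows.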
void RsdCpuReferenceImpl::launchThreads(const Allocation * ain, Allocation * aout,
                                        const RsScriptCall *sc, MTLaunchStruct *mtls) {

    //android::StopWatch kernel_time("kernel time");

    if ((mWorkers.mCount >= 1) && mtls->isThreadable && !mInForEach) {
        const size_t targetByteChunk = 16 * 1024;
        mInForEach = true;
        if (mtls->fep.dimY > 1) {
            uint32_t s1 = mtls->fep.dimY / ((mWorkers.mCount + 1) * 4);
            uint32_t s2 = 0;

            // This chooses our slice size to rate limit atomic ops to
            // one per 16k bytes of reads/writes.
            if (mtls->fep.yStrideOut) {
                s2 = targetByteChunk / mtls->fep.yStrideOut;
            } else {
                s2 = targetByteChunk / mtls->fep.yStrideIn;
            }
            mtls->mSliceSize = rsMin(s1, s2);

            if (mtls->mSliceSize < 1) {
                mtls->mSliceSize = 1;
            }

            // mtls->mSliceSize = 2;
            launchThreads(wc_xy, mtls);
        } else {
            uint32_t s1 = mtls->fep.dimX / ((mWorkers.mCount + 1) * 4);
            uint32_t s2 = 0;

            // This chooses our slice size to rate limit atomic ops to
            // one per 16k bytes of reads/writes.
            if (mtls->fep.eStrideOut) {
                s2 = targetByteChunk / mtls->fep.eStrideOut;
            } else {
                s2 = targetByteChunk / mtls->fep.eStrideIn;
            }
            mtls->mSliceSize = rsMin(s1, s2);

            if (mtls->mSliceSize < 1) {
                mtls->mSliceSize = 1;
            }

            launchThreads(wc_x, mtls);
        }
        mInForEach = false;

        //ALOGE("launch 1");
    } else {
        RsForEachStubParamStruct p;
        memcpy(&p, &mtls->fep, sizeof(p));
        uint32_t sig = mtls->sig;

        //ALOGE("launch 3");
        outer_foreach_t fn = (outer_foreach_t) mtls->kernel;
        for (p.ar[0] = mtls->arrayStart; p.ar[0] < mtls->arrayEnd; p.ar[0]++) {
            for (p.z = mtls->zStart; p.z < mtls->zEnd; p.z++) {
                for (p.y = mtls->yStart; p.y < mtls->yEnd; p.y++) {
                    uint32_t offset = mtls->fep.dimY * mtls->fep.dimZ * p.ar[0] +
                                      mtls->fep.dimY * p.z + p.y;
                    p.out = mtls->fep.ptrOut + (mtls->fep.yStrideOut * offset) +
                            (mtls->fep.eStrideOut * mtls->xStart);
                    p.in = mtls->fep.ptrIn + (mtls->fep.yStrideIn * offset) +
                           (mtls->fep.eStrideIn * mtls->xStart);
                    fn(&p, mtls->xStart, mtls->xEnd, mtls->fep.eStrideIn, mtls->fep.eStrideOut);
                }
            }
        }
    }
}

RsdCpuScriptImpl * RsdCpuReferenceImpl::setTLS(RsdCpuScriptImpl *sc) {
    //ALOGE("setTls %p", sc);
    ScriptTLSStruct * tls = (ScriptTLSStruct *)pthread_getspecific(gThreadTLSKey);
    rsAssert(tls);
    RsdCpuScriptImpl *old = tls->mImpl;
    tls->mImpl = sc;
    tls->mContext = mRSC;
    if (sc) {
        tls->mScript = sc->getScript();
    } else {
        tls->mScript = NULL;
    }
    return old;
}

const RsdCpuReference::CpuSymbol * RsdCpuReferenceImpl::symLookup(const char *name) {
    return mSymLookupFn(mRSC, name);
}


RsdCpuReference::CpuScript * RsdCpuReferenceImpl::createScript(const ScriptC *s,
                                    char const *resName, char const *cacheDir,
                                    uint8_t const *bitcode, size_t bitcodeSize,
                                    uint32_t flags) {

    RsdCpuScriptImpl *i = new RsdCpuScriptImpl(this, s);
    if (!i->init(resName, cacheDir, bitcode, bitcodeSize, flags)) {
        delete i;
        return NULL;
    }
    return i;
}

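// Factories for the built-in intrinsics, defined elsewhere in the CPU driver.
// createIntrinsic() below simply dispatches on the RsScriptIntrinsicID and
// asserts on ids it does not recognize.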
extern RsdCpuScriptImpl * rsdIntrinsic_3DLUT(RsdCpuReferenceImpl *ctx,
                                             const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_Convolve3x3(RsdCpuReferenceImpl *ctx,
                                                   const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_ColorMatrix(RsdCpuReferenceImpl *ctx,
                                                   const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_LUT(RsdCpuReferenceImpl *ctx,
                                           const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_Convolve5x5(RsdCpuReferenceImpl *ctx,
                                                   const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_Blur(RsdCpuReferenceImpl *ctx,
                                            const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_YuvToRGB(RsdCpuReferenceImpl *ctx,
                                                const Script *s, const Element *e);
extern RsdCpuScriptImpl * rsdIntrinsic_Blend(RsdCpuReferenceImpl *ctx,
                                             const Script *s, const Element *e);

RsdCpuReference::CpuScript * RsdCpuReferenceImpl::createIntrinsic(const Script *s,
                                    RsScriptIntrinsicID iid, Element *e) {

    RsdCpuScriptImpl *i = NULL;
    switch (iid) {
    case RS_SCRIPT_INTRINSIC_ID_3DLUT:
        i = rsdIntrinsic_3DLUT(this, s, e);
        break;
    case RS_SCRIPT_INTRINSIC_ID_CONVOLVE_3x3:
        i = rsdIntrinsic_Convolve3x3(this, s, e);
        break;
    case RS_SCRIPT_INTRINSIC_ID_COLOR_MATRIX:
        i = rsdIntrinsic_ColorMatrix(this, s, e);
        break;
    case RS_SCRIPT_INTRINSIC_ID_LUT:
        i = rsdIntrinsic_LUT(this, s, e);
        break;
    case RS_SCRIPT_INTRINSIC_ID_CONVOLVE_5x5:
        i = rsdIntrinsic_Convolve5x5(this, s, e);
        break;
    case RS_SCRIPT_INTRINSIC_ID_BLUR:
        i = rsdIntrinsic_Blur(this, s, e);
        break;
    case RS_SCRIPT_INTRINSIC_ID_YUV_TO_RGB:
        i = rsdIntrinsic_YuvToRGB(this, s, e);
        break;
    case RS_SCRIPT_INTRINSIC_ID_BLEND:
        i = rsdIntrinsic_Blend(this, s, e);
        break;

    default:
        rsAssert(0);
    }

    return i;
}

RsdCpuReference::CpuScriptGroup * RsdCpuReferenceImpl::createScriptGroup(const ScriptGroup *sg) {
    CpuScriptGroupImpl *sgi = new CpuScriptGroupImpl(this, sg);
    if (!sgi->init()) {
        delete sgi;
        return NULL;
    }
    return sgi;
}
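// A minimal usage sketch, assuming the typical driver-side calling sequence
// (not shown in this file): the HAL creates one RsdCpuReference per Context
// via RsdCpuReference::create(), obtains CpuScript objects through
// createScript()/createIntrinsic() and script groups through
// createScriptGroup(), and finally deletes the reference, which joins the
// worker pool in ~RsdCpuReferenceImpl().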