Lines Matching full:trace
103 * Store the original method here in case the trace ends with a
122 // Reset trace length
213 ALOGD("Trace length: %d State: %d", shadowSpace->traceLength,
217 /* Print decoded instructions in the current trace */
225 ALOGD("********** SHADOW TRACE DUMP **********");
227 addr = shadowSpace->trace[i].addr;
229 decInsn = &(shadowSpace->trace[i].decInsn);
254 * in a trace that was just executed. This routine is called for
255 * each instruction in the original trace, and compares state
260 * count with each trace exit, we could just single-step the right
293 * trace length when backward branches are involved.
389 * Success. If this shadowed trace included a single-stepped
401 * Switch off shadow replay mode. The next shadowed trace
421 shadowSpace->trace[shadowSpace->traceLength].addr = (int)pc;
422 shadowSpace->trace[shadowSpace->traceLength].decInsn = decInsn;
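The self-verification lines above record each interpreted instruction's Dalvik PC and decoded form into shadowSpace->trace, then dump or compare the result after the shadowed run. A rough, self-contained illustration of that bookkeeping, using simplified stand-in types rather than the real ShadowSpace/DecodedInstruction definitions:

    #include <cstdio>
    #include <cstdint>

    struct DecodedInsn { uint16_t opcode; };   // stand-in for DecodedInstruction

    struct ShadowEntry {
        const uint16_t *addr;   // Dalvik PC of the executed instruction
        DecodedInsn decInsn;    // decoded form, kept for the dump/compare pass
    };

    struct ShadowSpace {
        ShadowEntry trace[512]; // arbitrary capacity for the sketch
        int traceLength;
    };

    // Mirrors the shadowSpace->trace[traceLength] updates shown above.
    void recordInsn(ShadowSpace *ss, const uint16_t *pc, DecodedInsn insn) {
        ss->trace[ss->traceLength].addr = pc;
        ss->trace[ss->traceLength].decInsn = insn;
        ss->traceLength++;
    }

    // Rough equivalent of the "SHADOW TRACE DUMP" loop.
    void dumpShadowTrace(const ShadowSpace *ss) {
        printf("********** SHADOW TRACE DUMP **********\n");
        for (int i = 0; i < ss->traceLength; i++) {
            printf("  %p: opcode 0x%04x\n",
                   (void *) ss->trace[i].addr, ss->trace[i].decInsn.opcode);
        }
    }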
545 /* End current trace now & don't include current instruction */
568 * Walk the bucket chain to find an exact match for our PC and trace/method
660 /* Dump a trace description */
661 void dvmJitDumpTraceDesc(JitTraceDescription *trace)
669 ALOGD("Trace dump %#x, Method %s off %#x",(int)trace,
670 trace->method->name,trace->trace[curFrag].info.frag.startOffset);
671 dpcBase = trace->method->insns;
674 if (trace->trace[curFrag].isCode) {
676 curFrag, trace->trace[curFrag].info.frag.numInsts,
677 trace->trace[curFrag].info.frag.startOffset,
678 trace->trace[curFrag].info.frag.hint,
679 trace->trace[curFrag].info.frag.runEnd);
680 dpc = dpcBase + trace->trace[curFrag].info.frag.startOffset;
681 for (i=0; i<trace->trace[curFrag].info.frag.numInsts; i++) {
687 if (trace->trace[curFrag].info.frag.runEnd) {
692 (int)trace->trace[curFrag].info.meta);
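dvmJitDumpTraceDesc walks an array of trace runs where each run is either a code fragment (startOffset/numInsts/hint, with runEnd marking the last run) or an opaque metadata entry (info.meta). A minimal sketch of that shape and the walk, using simplified stand-ins rather than the real JitTraceRun/JitTraceDescription types:

    #include <cstdio>

    struct TraceFrag {
        unsigned startOffset;   // code-unit offset of the first instruction
        unsigned numInsts;      // number of instructions in this run
        bool     runEnd;        // true only on the final run of the trace
        int      hint;          // scheduling hint (kJitHintNone in most runs)
    };

    struct TraceRun {
        bool isCode;            // code fragment vs. metadata entry
        union {
            TraceFrag   frag;   // valid when isCode
            const void *meta;   // valid when !isCode (class/method pointers)
        } info;
    };

    // Walk the runs the way the dump above does, stopping at runEnd.
    void dumpRuns(const TraceRun *runs) {
        for (int i = 0; ; i++) {
            if (runs[i].isCode) {
                printf("Frag[%d]- # insts: %u, start offset: %#x, hint: %#x\n",
                       i, runs[i].info.frag.numInsts,
                       runs[i].info.frag.startOffset, runs[i].info.frag.hint);
                if (runs[i].info.frag.runEnd) break;
            } else {
                printf("Frag[%d]- META info: %p\n", i, runs[i].info.meta);
            }
        }
    }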
701 * trace. That is, the trace runs will contain the following components:
702 * + trace run that ends with an invoke (existing entry)
712 self->trace[currTraceRun].info.meta = thisClass ?
714 self->trace[currTraceRun].isCode = false;
717 self->trace[currTraceRun].info.meta = thisClass ?
719 self->trace[currTraceRun].isCode = false;
722 self->trace[currTraceRun].info.meta = (void *) calleeMethod;
723 self->trace[currTraceRun].isCode = false;
728 * so add it to the trace. That is, this will add the trace run that includes
729 * the move-result to the trace list.
731 * + trace run that ends with an invoke (existing entry)
750 /* We need to start a new trace run */
753 self->trace[currTraceRun].info.frag.startOffset = offset + len;
754 self->trace[currTraceRun].info.frag.numInsts = 1;
755 self->trace[currTraceRun].info.frag.runEnd = false;
756 self->trace[currTraceRun].info.frag.hint = kJitHintNone;
757 self->trace[currTraceRun].isCode = true;
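Around a chained invoke, the comments above describe the run list growing by three metadata entries (this-class descriptor, its class loader, the resolved callee method) followed by a fresh code run that starts just past the invoke, so the move-result lands in its own run. A hedged sketch of that sequence, with the same stand-in shapes as the previous sketch and hypothetical parameter names:

    // Same stand-in shapes as the previous sketch.
    struct TraceFrag { unsigned startOffset, numInsts; bool runEnd; int hint; };
    struct TraceRun  { bool isCode; union { TraceFrag frag; const void *meta; } info; };

    enum { kHintNone = 0 };     // stand-in for kJitHintNone

    // Hypothetical helper: append the class/method metadata runs for an invoke,
    // then open a new code run right after it for the expected move-result.
    void appendInvokeInfo(TraceRun *runs, int *curRun,
                          const void *classDescriptor, const void *classLoader,
                          const void *calleeMethod,
                          unsigned invokeOffset, unsigned invokeLen) {
        runs[++*curRun].isCode = false;          // this-class descriptor (may be NULL)
        runs[*curRun].info.meta = classDescriptor;
        runs[++*curRun].isCode = false;          // defining class loader
        runs[*curRun].info.meta = classLoader;
        runs[++*curRun].isCode = false;          // resolved callee method
        runs[*curRun].info.meta = calleeMethod;

        // New code run starting at the instruction after the invoke,
        // sized for the single move-result it is expected to hold.
        TraceRun *r = &runs[++*curRun];
        r->isCode = true;
        r->info.frag.startOffset = invokeOffset + invokeLen;
        r->info.frag.numInsts = 1;
        r->info.frag.runEnd = false;
        r->info.frag.hint = kHintNone;
    }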
764 * Adds to the current trace request one instruction at a time, just
765 * before that instruction is interpreted. This is the primary trace
768 * to the current trace prior to interpretation. If the interpreter
771 * to interpretation, and also abort the trace request if the instruction
799 /* Grow the trace around the last PC if jitState is kJitTSelect */
802 /* Only add JIT support opcode to trace. End the trace if
811 * Treat {PACKED,SPARSE}_SWITCH as trace-ending instructions due
833 /* We need to start a new trace run */
837 self->trace[currTraceRun].info.frag.startOffset = offset;
838 self->trace[currTraceRun].info.frag.numInsts = 0;
839 self->trace[currTraceRun].info.frag.runEnd = false;
840 self->trace[currTraceRun].info.frag.hint = kJitHintNone;
841 self->trace[currTraceRun].isCode = true;
843 self->trace[self->currTraceRun].info.frag.numInsts++;
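The per-instruction path above either extends the current code run (numInsts++) or, when the incoming instruction does not continue the run being built, opens a fresh run at the new offset. A small sketch of that decision, again with the stand-in types and a hypothetical contiguity flag in place of the interpreter's own check:

    // Stand-in shapes as above.
    struct TraceFrag { unsigned startOffset, numInsts; bool runEnd; int hint; };
    struct TraceRun  { bool isCode; union { TraceFrag frag; const void *meta; } info; };

    // Hypothetical per-instruction step: extend the current code run, or start
    // a new one at `offset` when the instruction does not continue it.
    void appendInsn(TraceRun *runs, int *curRun, unsigned offset, bool continuesRun) {
        if (!continuesRun) {
            // "We need to start a new trace run"
            TraceRun *r = &runs[++*curRun];
            r->isCode = true;
            r->info.frag.startOffset = offset;
            r->info.frag.numInsts = 0;
            r->info.frag.runEnd = false;
            r->info.frag.hint = 0 /* kJitHintNone */;
        }
        runs[*curRun].info.frag.numInsts++;
    }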
849 * the move-result* (if present) into a separate trace run.
854 /* Will probably never hit this with the current trace builder */
874 * current class/method pair into the trace as well.
876 * it to the trace too.
898 * we don't want to start a trace with move-result as the first
899 * instruction (which is already included in the trace
909 /* Empty trace - set to bail to interpreter */
923 if (!self->trace[lastTraceDesc].isCode) {
925 self->trace[lastTraceDesc].info.frag.startOffset = 0;
926 self->trace[lastTraceDesc].info.frag.numInsts = 0;
927 self->trace[lastTraceDesc].info.frag.hint = kJitHintNone;
928 self->trace[lastTraceDesc].isCode = true;
931 /* Mark the end of the trace runs */
932 self->trace[lastTraceDesc].info.frag.runEnd = true;
939 ALOGE("Out of memory in trace selection");
947 memcpy((char*)&(desc->trace[0]),
948 (char*)&(self->trace[0]),
951 ALOGD("TraceGen: trace done, adding to queue");
984 * If we're done with trace selection, switch off the control flags.
1024 * the specified format (i.e., trace vs. method). This routine needs to be fast.
1071 * If a translated code address, in trace format, exists for the Dalvik byte code
1128 * has been reset between the time the trace was requested and
1149 * Determine if a valid trace-building request is active. If so, set
1150 * the proper flags in interpBreak and return. Trace selection will
1157 * A note on trace "hotness" filtering:
1165 * trace head "key" (defined as filterKey below) to appear twice in
1178 * the value of the method pointer containing the trace as the filterKey.
1179 * Intuitively, this is saying that once any trace in a method appears hot,
1180 * immediately translate any other trace from that same method that
1195 * of the low-order bits of the Dalvik pc of the trace head. The
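The filtering comments above describe a small table in which a trace-head "key" must appear twice before the request is honored; the key is either the owning method pointer (once any trace in a method is hot, translate the rest of it eagerly) or the Dalvik PC with its low-order bits masked off (nearby heads count as one). A self-contained sketch of such a two-hit filter; the table size, mask, and names are illustrative, not the real tunables:

    #include <cstdint>
    #include <cstddef>

    // Illustrative tunables, not the real Dalvik constants.
    static const size_t    kFilterSize = 32;
    static const uintptr_t kPcMask     = ~(uintptr_t) 0x7F;  // drop low-order PC bits

    static uintptr_t gFilter[kFilterSize];

    // Returns true when the trace head should actually be selected, i.e. the
    // second time its filter key lands in the same slot.
    bool passesHotnessFilter(const void *dalvikPC, const void *method,
                             bool keyByMethod) {
        uintptr_t key = keyByMethod ? (uintptr_t) method
                                    : ((uintptr_t) dalvikPC & kPcMask);
        size_t slot = (key >> 2) % kFilterSize;   // cheap hash into the table
        if (gFilter[slot] == key) {
            return true;       // second sighting: treat the head as hot
        }
        gFilter[slot] = key;   // first sighting: remember it and decline
        return false;
    }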
1210 // Shouldn't be here if already building a trace.
1216 trace requests or during stress mode */
1244 * Check for additional reasons that might force the trace select
1280 self->trace[0].info.frag.startOffset =
1282 self->trace[0].info.frag.numInsts = 0;
1283 self->trace[0].info.frag.runEnd = false;
1284 self->trace[0].info.frag.hint = kJitHintNone;
1285 self->trace[0].isCode = true;
1287 /* Turn on trace selection mode */
1290 ALOGD("Starting trace for %s at %#x",
1301 /* Cannot build trace this time */
1409 * Return the address of the next trace profile counter. This address
1410 * will be embedded in the generated code for the trace, and thus cannot
1411 * change while the trace exists.
1423 ALOGE("Failed to allocate block of trace profile counters");