// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2014 Google, Inc.
 */

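/*
 * This file is the iotrace implementation itself, so define IOTRACE_IMPL
 * to use the raw readl()/writel() families below rather than the traced
 * wrappers that iotrace.h would otherwise substitute for them.
 */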
#define IOTRACE_IMPL

#include <common.h>
#include <mapmem.h>
#include <asm/io.h>

DECLARE_GLOBAL_DATA_PTR;

/**
 * struct iotrace - current trace status and checksum
 *
 * @start:	Start address of iotrace buffer
 * @size:	Actual size of iotrace buffer in bytes
 * @needed_size: Needed size of iotrace buffer in bytes
 * @offset:	Current write offset into iotrace buffer
 * @region_start: Address of I/O region to trace
 * @region_size: Size of region to trace; if 0, all address space is traced
 * @crc32:	Current value of CRC checksum of trace records
 * @enabled:	true if enabled, false if disabled
 */
static struct iotrace {
	ulong start;
	ulong size;
	ulong needed_size;
	ulong offset;
	ulong region_start;
	ulong region_size;
	u32 crc32;
	bool enabled;
} iotrace;

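/**
 * add_record() - add a new trace record for a single I/O access
 *
 * @flags: access type and width (IOT_8/IOT_16/IOT_32 | IOT_READ/IOT_WRITE)
 * @ptr: address that was accessed
 * @value: value read or written
 *
 * Does nothing before relocation, while tracing is disabled, or when @ptr
 * falls outside the selected trace region. Otherwise the needed-size
 * counter is advanced and, if the buffer has room, the record is stored
 * and the running checksum updated.
 */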
static void add_record(int flags, const void *ptr, ulong value)
{
	struct iotrace_record srec, *rec = &srec;

	/*
	 * We don't support iotrace before relocation. Since the trace buffer
	 * is set up by a command, it can't be enabled at present. To change
	 * this we would need to set the iotrace buffer at build-time. See
	 * lib/trace.c for how this might be done if you are interested.
	 */
	if (!(gd->flags & GD_FLG_RELOC) || !iotrace.enabled)
		return;

	/* If a trace region is set, ignore accesses outside it */
	if (iotrace.region_size &&
	    ((ulong)ptr < iotrace.region_start ||
	     (ulong)ptr > iotrace.region_start + iotrace.region_size))
		return;

	/* Store the record if there is room for it */
	if (iotrace.offset + sizeof(*rec) < iotrace.size) {
		rec = (struct iotrace_record *)map_sysmem(
					iotrace.start + iotrace.offset,
					sizeof(*rec));
	} else {
		WARN_ONCE(1, "WARNING: iotrace buffer exhausted, please check needed length using \"iotrace stats\"\n");
		iotrace.needed_size += sizeof(struct iotrace_record);
		return;
	}

	rec->timestamp = timer_get_us();
	rec->flags = flags;
	rec->addr = map_to_sysmem(ptr);
	rec->value = value;

	/* Update our checksum */
	iotrace.crc32 = crc32(iotrace.crc32, (unsigned char *)rec,
			      sizeof(*rec));

	iotrace.needed_size += sizeof(struct iotrace_record);
	iotrace.offset += sizeof(struct iotrace_record);
}

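/*
 * Traced MMIO accessors: each helper records the access via add_record()
 * and performs the underlying readl()/writel() etc.
 */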
u32 iotrace_readl(const void *ptr)
{
	u32 v;

	v = readl(ptr);
	add_record(IOT_32 | IOT_READ, ptr, v);

	return v;
}

void iotrace_writel(ulong value, const void *ptr)
{
	add_record(IOT_32 | IOT_WRITE, ptr, value);
	writel(value, ptr);
}

u16 iotrace_readw(const void *ptr)
{
	u32 v;

	v = readw(ptr);
	add_record(IOT_16 | IOT_READ, ptr, v);

	return v;
}

void iotrace_writew(ulong value, const void *ptr)
{
	add_record(IOT_16 | IOT_WRITE, ptr, value);
	writew(value, ptr);
}

u8 iotrace_readb(const void *ptr)
{
	u32 v;

	v = readb(ptr);
	add_record(IOT_8 | IOT_READ, ptr, v);

	return v;
}

void iotrace_writeb(ulong value, const void *ptr)
{
	add_record(IOT_8 | IOT_WRITE, ptr, value);
	writeb(value, ptr);
}

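/* The running CRC32 covers every record added since the last reset */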
void iotrace_reset_checksum(void)
{
	iotrace.crc32 = 0;
}

u32 iotrace_get_checksum(void)
{
	return iotrace.crc32;
}

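/*
 * Limit tracing to accesses within [start, start + size]; a region size of
 * zero (the default) traces the whole address space.
 */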
void iotrace_set_region(ulong start, ulong size)
{
	iotrace.region_start = start;
	iotrace.region_size = size;
}

void iotrace_reset_region(void)
{
	iotrace.region_start = 0;
	iotrace.region_size = 0;
}

void iotrace_get_region(ulong *start, ulong *size)
{
	*start = iotrace.region_start;
	*size = iotrace.region_size;
}

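/* Turn recording of I/O accesses on or off; the buffer itself is untouched */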
void iotrace_set_enabled(int enable)
{
	iotrace.enabled = enable;
}

int iotrace_get_enabled(void)
{
	return iotrace.enabled;
}

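/*
 * Set the trace buffer address and size; the write offset and checksum are
 * reset, so this effectively starts a new trace.
 */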
void iotrace_set_buffer(ulong start, ulong size)
{
	iotrace.start = start;
	iotrace.size = size;
	iotrace.offset = 0;
	iotrace.crc32 = 0;
}

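/*
 * Report the buffer address and size, the size needed to hold every record
 * seen so far, the current write offset and the number of records actually
 * stored in the buffer.
 */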
void iotrace_get_buffer(ulong *start, ulong *size, ulong *needed_size,
			ulong *offset, ulong *count)
{
	*start = iotrace.start;
	*size = iotrace.size;
	*needed_size = iotrace.needed_size;
	*offset = iotrace.offset;
	*count = iotrace.offset / sizeof(struct iotrace_record);
}
    181