// SPDX-License-Identifier: GPL-2.0+
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <common.h>
#include <lmb.h>

#define LMB_ALLOC_ANYWHERE	0

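/*
 * Print the memory and reserved region tables via debug(); this is a no-op
 * unless DEBUG is defined.
 */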
void lmb_dump_all(struct lmb *lmb)
{
#ifdef DEBUG
	unsigned long i;

	debug("lmb_dump_all:\n");
	debug("    memory.cnt		   = 0x%lx\n", lmb->memory.cnt);
	debug("    memory.size		   = 0x%llx\n",
	      (unsigned long long)lmb->memory.size);
	for (i = 0; i < lmb->memory.cnt; i++) {
		debug("    memory.reg[0x%lx].base   = 0x%llx\n", i,
		      (unsigned long long)lmb->memory.region[i].base);
		debug("		   .size   = 0x%llx\n",
		      (unsigned long long)lmb->memory.region[i].size);
	}

	debug("\n    reserved.cnt	   = 0x%lx\n",
	      lmb->reserved.cnt);
	debug("    reserved.size	   = 0x%llx\n",
	      (unsigned long long)lmb->reserved.size);
	for (i = 0; i < lmb->reserved.cnt; i++) {
		debug("    reserved.reg[0x%lx].base = 0x%llx\n", i,
		      (unsigned long long)lmb->reserved.region[i].base);
		debug("		     .size = 0x%llx\n",
		      (unsigned long long)lmb->reserved.region[i].size);
	}
#endif /* DEBUG */
}

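/*
 * Return non-zero if the ranges [base1, base1 + size1) and
 * [base2, base2 + size2) overlap.
 */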
static long lmb_addrs_overlap(phys_addr_t base1, phys_size_t size1,
			      phys_addr_t base2, phys_size_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

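/*
 * Return 1 if region 2 immediately follows region 1, -1 if region 1
 * immediately follows region 2, and 0 if the regions are not adjacent.
 */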
static long lmb_addrs_adjacent(phys_addr_t base1, phys_size_t size1,
			       phys_addr_t base2, phys_size_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

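/* Check whether two entries of a region table are adjacent to each other. */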
static long lmb_regions_adjacent(struct lmb_region *rgn,
				 unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = rgn->region[r1].base;
	phys_size_t size1 = rgn->region[r1].size;
	phys_addr_t base2 = rgn->region[r2].base;
	phys_size_t size2 = rgn->region[r2].size;

	return lmb_addrs_adjacent(base1, size1, base2, size2);
}

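/* Delete entry r from the region table by shifting the following entries down. */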
static void lmb_remove_region(struct lmb_region *rgn, unsigned long r)
{
	unsigned long i;

	for (i = r; i < rgn->cnt - 1; i++) {
		rgn->region[i].base = rgn->region[i + 1].base;
		rgn->region[i].size = rgn->region[i + 1].size;
	}
	rgn->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void lmb_coalesce_regions(struct lmb_region *rgn,
				 unsigned long r1, unsigned long r2)
{
	rgn->region[r1].size += rgn->region[r2].size;
	lmb_remove_region(rgn, r2);
}

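/* Reset the memory and reserved region tables to a single empty entry each. */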
void lmb_init(struct lmb *lmb)
{
	/*
	 * Create a dummy zero size LMB which will get coalesced away later.
	 * This simplifies the lmb_add() code below...
	 */
	lmb->memory.region[0].base = 0;
	lmb->memory.region[0].size = 0;
	lmb->memory.cnt = 1;
	lmb->memory.size = 0;

	/* Ditto. */
	lmb->reserved.region[0].base = 0;
	lmb->reserved.region[0].size = 0;
	lmb->reserved.cnt = 1;
	lmb->reserved.size = 0;
}

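/*
 * Add [base, base + size) to a region table: merge with an existing entry
 * when the new range duplicates or is adjacent to it, otherwise insert it
 * into the table sorted by base address. Returns 0 when the region is
 * inserted or already present, a positive count when it was coalesced with
 * an existing entry, and -1 when the table is full.
 */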
/* This routine is called with relocation disabled. */
static long lmb_add_region(struct lmb_region *rgn, phys_addr_t base, phys_size_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
		return 0;
	}

	/* First try and coalesce this LMB with another. */
	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = lmb_addrs_adjacent(base, size, rgnbase, rgnsize);
		if (adjacent > 0) {
			rgn->region[i].base -= size;
			rgn->region[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			rgn->region[i].size += size;
			coalesced++;
			break;
		}
	}

	if ((i < rgn->cnt - 1) && lmb_regions_adjacent(rgn, i, i + 1)) {
		lmb_coalesce_regions(rgn, i, i + 1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;
	if (rgn->cnt >= MAX_LMB_REGIONS)
		return -1;

	/* Couldn't coalesce the LMB, so add it to the sorted table. */
	for (i = rgn->cnt - 1; i >= 0; i--) {
		if (base < rgn->region[i].base) {
			rgn->region[i + 1].base = rgn->region[i].base;
			rgn->region[i + 1].size = rgn->region[i].size;
		} else {
			rgn->region[i + 1].base = base;
			rgn->region[i + 1].size = size;
			break;
		}
	}

	if (base < rgn->region[0].base) {
		rgn->region[0].base = base;
		rgn->region[0].size = size;
	}

	rgn->cnt++;

	return 0;
}

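/* Register a range of usable physical memory with the LMB memory table. */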
/* This routine may be called with relocation disabled. */
long lmb_add(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->memory);

	return lmb_add_region(_rgn, base, size);
}

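/*
 * Remove [base, base + size) from the reserved region table. The freed range
 * may cover a whole entry, trim it at either end, or split it in two.
 * Returns -1 if no reserved entry fully contains the range.
 */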
long lmb_free(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *rgn = &(lmb->reserved);
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs to */
	for (i = 0; i < rgn->cnt; i++) {
		rgnbegin = rgn->region[i].base;
		rgnend = rgnbegin + rgn->region[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == rgn->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		lmb_remove_region(rgn, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		rgn->region[i].base = end;
		rgn->region[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		rgn->region[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	rgn->region[i].size = base - rgn->region[i].base;
	return lmb_add_region(rgn, end, rgnend - end);
}

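/* Mark [base, base + size) as reserved so allocations will avoid it. */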
long lmb_reserve(struct lmb *lmb, phys_addr_t base, phys_size_t size)
{
	struct lmb_region *_rgn = &(lmb->reserved);

	return lmb_add_region(_rgn, base, size);
}

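/*
 * Return the index of the first entry in the region table that overlaps
 * [base, base + size), or -1 if there is no overlap.
 */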
static long lmb_overlaps_region(struct lmb_region *rgn, phys_addr_t base,
				phys_size_t size)
{
	unsigned long i;

	for (i = 0; i < rgn->cnt; i++) {
		phys_addr_t rgnbase = rgn->region[i].base;
		phys_size_t rgnsize = rgn->region[i].size;

		if (lmb_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < rgn->cnt) ? i : -1;
}

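/* Allocate an aligned block of memory with no upper address limit. */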
phys_addr_t lmb_alloc(struct lmb *lmb, phys_size_t size, ulong align)
{
	return lmb_alloc_base(lmb, size, align, LMB_ALLOC_ANYWHERE);
}

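/*
 * Allocate an aligned block of memory below max_addr; print an error and
 * return 0 if the allocation fails.
 */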
phys_addr_t lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __lmb_alloc_base(lmb, size, align, max_addr);

	if (alloc == 0)
		printf("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n",
		       (ulong)size, (ulong)max_addr);

	return alloc;
}

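/* Helpers to round an address down or up to a power-of-two alignment. */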
static phys_addr_t lmb_align_down(phys_addr_t addr, phys_size_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t lmb_align_up(phys_addr_t addr, ulong size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

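/*
 * Core allocator: walk the memory regions from the last table entry to the
 * first and, within each region, search downwards from the top (or from
 * max_addr) for an aligned block of the requested size that does not overlap
 * any reserved region. The chosen block is added to the reserved table;
 * returns 0 on failure.
 */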
phys_addr_t __lmb_alloc_base(struct lmb *lmb, phys_size_t size, ulong align, phys_addr_t max_addr)
{
	long i, j;
	phys_addr_t base = 0;
	phys_addr_t res_base;

	for (i = lmb->memory.cnt - 1; i >= 0; i--) {
		phys_addr_t lmbbase = lmb->memory.region[i].base;
		phys_size_t lmbsize = lmb->memory.region[i].size;

		if (lmbsize < size)
			continue;
		if (max_addr == LMB_ALLOC_ANYWHERE)
			base = lmb_align_down(lmbbase + lmbsize - size, align);
		else if (lmbbase < max_addr) {
			base = lmbbase + lmbsize;
			if (base < lmbbase)
				base = -1;
			base = min(base, max_addr);
			base = lmb_align_down(base - size, align);
		} else
			continue;

		while (base && lmbbase <= base) {
			j = lmb_overlaps_region(&lmb->reserved, base, size);
			if (j < 0) {
				/* This area isn't reserved, take it */
				if (lmb_add_region(&lmb->reserved, base,
						   lmb_align_up(size, align)) < 0)
					return 0;
				return base;
			}
			res_base = lmb->reserved.region[j].base;
			if (res_base < size)
				break;
			base = lmb_align_down(res_base - size, align);
		}
	}
	return 0;
}

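/* Return 1 if addr falls inside any reserved region, 0 otherwise. */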
int lmb_is_reserved(struct lmb *lmb, phys_addr_t addr)
{
	int i;

	for (i = 0; i < lmb->reserved.cnt; i++) {
		phys_addr_t upper = lmb->reserved.region[i].base +
			lmb->reserved.region[i].size - 1;
		if ((addr >= lmb->reserved.region[i].base) && (addr <= upper))
			return 1;
	}
	return 0;
}

__weak void board_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific board_lmb_reserve() */
}

__weak void arch_lmb_reserve(struct lmb *lmb)
{
	/* please define platform specific arch_lmb_reserve() */
}