/* -*- mode: C; c-basic-offset: 3; -*- */

#include <stdio.h>    // fprintf
#include <assert.h>   // assert
#if defined(__APPLE__)
#include <machine/endian.h>
#define __BYTE_ORDER    BYTE_ORDER
#define __LITTLE_ENDIAN LITTLE_ENDIAN
#else
#include <endian.h>
#endif
#include <inttypes.h>
#include "vbits.h"
#include "vtest.h"


/* Return the bits of V if they fit into 64 bits. If V has fewer than
   64 bits, the bit pattern is zero-extended to the left. */
static uint64_t
get_bits64(vbits_t v)
{
   switch (v.num_bits) {
   case 1:  return v.bits.u32;
   case 8:  return v.bits.u8;
   case 16: return v.bits.u16;
   case 32: return v.bits.u32;
   case 64: return v.bits.u64;
   case 128:
   case 256:
      /* fall through */
   default:
      panic(__func__);
   }
}

void
print_vbits(FILE *fp, vbits_t v)
{
   switch (v.num_bits) {
   case 1:  fprintf(fp, "%08x", v.bits.u32); break;
   case 8:  fprintf(fp, "%02x", v.bits.u8);  break;
   case 16: fprintf(fp, "%04x", v.bits.u16); break;
   case 32: fprintf(fp, "%08x", v.bits.u32); break;
   case 64: fprintf(fp, "%016"PRIx64, v.bits.u64); break;
   case 128:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         fprintf(fp, "%016"PRIx64, v.bits.u128[1]);
         fprintf(fp, "%016"PRIx64, v.bits.u128[0]);
      } else {
         fprintf(fp, "%016"PRIx64, v.bits.u128[0]);
         fprintf(fp, "%016"PRIx64, v.bits.u128[1]);
      }
      break;
   case 256:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         fprintf(fp, "%016"PRIx64, v.bits.u256[3]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[2]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[1]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[0]);
      } else {
         fprintf(fp, "%016"PRIx64, v.bits.u256[0]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[1]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[2]);
         fprintf(fp, "%016"PRIx64, v.bits.u256[3]);
      }
      break;
   default:
      panic(__func__);
   }
}


/* Return a value where all bits are set to undefined. */
vbits_t
undefined_vbits(unsigned num_bits)
{
   vbits_t new = { .num_bits = num_bits };

   switch (num_bits) {
   case   1: new.bits.u32 = 0x01;   break;
   case   8: new.bits.u8  = 0xff;   break;
   case  16: new.bits.u16 = 0xffff; break;
   case  32: new.bits.u32 = ~0;     break;
   case  64: new.bits.u64 = ~0ull;  break;
   case 128: new.bits.u128[0] = ~0ull;
             new.bits.u128[1] = ~0ull;
             break;
   case 256: new.bits.u256[0] = ~0ull;
             new.bits.u256[1] = ~0ull;
             new.bits.u256[2] = ~0ull;
             new.bits.u256[3] = ~0ull;
             break;
   default:
      panic(__func__);
   }
   return new;
}


/* Return a value where all bits are set to defined. */
vbits_t
defined_vbits(unsigned num_bits)
{
   vbits_t new = { .num_bits = num_bits };

   switch (num_bits) {
   case   1: new.bits.u32 = 0x0; break;
   case   8: new.bits.u8  = 0x0; break;
   case  16: new.bits.u16 = 0x0; break;
   case  32: new.bits.u32 = 0x0; break;
   case  64: new.bits.u64 = 0x0; break;
   case 128: new.bits.u128[0] = 0x0;
             new.bits.u128[1] = 0x0;
             break;
   case 256: new.bits.u256[0] = 0x0;
             new.bits.u256[1] = 0x0;
             new.bits.u256[2] = 0x0;
             new.bits.u256[3] = 0x0;
             break;
   default:
      panic(__func__);
   }
   return new;
}

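
/* Illustrative sketch (added note, not part of the original test code):
   following memcheck's V-bit convention, a set bit means "undefined" and a
   clear bit means "defined".  The helpers above could be exercised roughly
   as below; the function name is hypothetical and the block is guarded out. */
#if 0
static void
show_vbit_convention(void)
{
   vbits_t all_undef = undefined_vbits(8);   /* .bits.u8 == 0xff */
   vbits_t all_def   = defined_vbits(8);     /* .bits.u8 == 0x00 */

   print_vbits(stderr, all_undef);   /* prints "ff" -- every bit undefined */
   fprintf(stderr, "\n");
   print_vbits(stderr, all_def);     /* prints "00" -- every bit defined   */
   fprintf(stderr, "\n");
}
#endif
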

/* Return 1 if the two values are equal, 0 otherwise. */
int
equal_vbits(vbits_t v1, vbits_t v2)
{
   assert(v1.num_bits == v2.num_bits);

   switch (v1.num_bits) {
   case 1:  return v1.bits.u32 == v2.bits.u32;
   case 8:  return v1.bits.u8  == v2.bits.u8;
   case 16: return v1.bits.u16 == v2.bits.u16;
   case 32: return v1.bits.u32 == v2.bits.u32;
   case 64: return v1.bits.u64 == v2.bits.u64;
   case 128: return v1.bits.u128[0] == v2.bits.u128[0] &&
                    v1.bits.u128[1] == v2.bits.u128[1];
   case 256: return v1.bits.u256[0] == v2.bits.u256[0] &&
                    v1.bits.u256[1] == v2.bits.u256[1] &&
                    v1.bits.u256[2] == v2.bits.u256[2] &&
                    v1.bits.u256[3] == v2.bits.u256[3];
   default:
      panic(__func__);
   }
}


/* Truncate the bit pattern in V to NUM_BITS bits. */
vbits_t
truncate_vbits(vbits_t v, unsigned num_bits)
{
   assert(num_bits <= v.num_bits);

   if (num_bits == v.num_bits) return v;

   vbits_t new = { .num_bits = num_bits };

   if (num_bits <= 64) {
      uint64_t bits;

      if (v.num_bits <= 64) {
         bits = get_bits64(v);
      } else if (v.num_bits == 128) {
         if (__BYTE_ORDER == __LITTLE_ENDIAN)
            bits = v.bits.u128[0];
         else
            bits = v.bits.u128[1];
      } else if (v.num_bits == 256) {
         if (__BYTE_ORDER == __LITTLE_ENDIAN)
            bits = v.bits.u256[0];
         else
            bits = v.bits.u256[3];
      } else {
         panic(__func__);
      }

      switch (num_bits) {
      case 1:  new.bits.u32 = bits & 0x01;   break;
      case 8:  new.bits.u8  = bits & 0xff;   break;
      case 16: new.bits.u16 = bits & 0xffff; break;
      case 32: new.bits.u32 = bits & ~0u;    break;
      case 64: new.bits.u64 = bits & ~0ll;   break;
      default:
         panic(__func__);
      }
      return new;
   }

   if (num_bits == 128) {
      assert(v.num_bits == 256);
      /* From 256 bits to 128 */
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u128[0] = v.bits.u256[0];
         new.bits.u128[1] = v.bits.u256[1];
      } else {
         new.bits.u128[0] = v.bits.u256[2];
         new.bits.u128[1] = v.bits.u256[3];
      }
      return new;
   }

   /* Cannot truncate to 256 bits from something larger */
   panic(__func__);
}


/* Helper function to compute left_vbits */
static uint64_t
left64(uint64_t x)
{
   // left(x) = x | -x
   return x | (~x + 1);
}

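
/* Worked example (illustrative only, derived from the definition above):
   for x = 0x10, -x is 0xfffffffffffffff0, so

      left64(0x10) == 0x10 | 0xfffffffffffffff0 == 0xfffffffffffffff0

   i.e. every bit at or above the least significant set bit becomes set.
   In V-bit terms, undefinedness propagates from the lowest undefined bit
   towards the most significant bit. */
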

vbits_t
left_vbits(vbits_t v, unsigned num_bits)
{
   assert(num_bits >= v.num_bits);

   vbits_t new = { .num_bits = num_bits };

   if (v.num_bits <= 64) {
      uint64_t bits = left64(get_bits64(v));

      switch (num_bits) {
      case 8:  new.bits.u8  = bits & 0xff;   break;
      case 16: new.bits.u16 = bits & 0xffff; break;
      case 32: new.bits.u32 = bits & ~0u;    break;
      case 64: new.bits.u64 = bits & ~0ll;   break;
      case 128:
         if (__BYTE_ORDER == __LITTLE_ENDIAN) {
            new.bits.u128[0] = bits;
            if (bits & (1ull << 63)) {  // MSB is set
               new.bits.u128[1] = ~0ull;
            } else {
               new.bits.u128[1] = 0;
            }
         } else {
            new.bits.u128[1] = bits;
            if (bits & (1ull << 63)) {  // MSB is set
               new.bits.u128[0] = ~0ull;
            } else {
               new.bits.u128[0] = 0;
            }
         }
         break;
      case 256:
         if (__BYTE_ORDER == __LITTLE_ENDIAN) {
            new.bits.u256[0] = bits;
            if (bits & (1ull << 63)) {  // MSB is set
               new.bits.u256[1] = ~0ull;
               new.bits.u256[2] = ~0ull;
               new.bits.u256[3] = ~0ull;
            } else {
               new.bits.u256[1] = 0;
               new.bits.u256[2] = 0;
               new.bits.u256[3] = 0;
            }
         } else {
            new.bits.u256[3] = bits;
            if (bits & (1ull << 63)) {  // MSB is set
               new.bits.u256[0] = ~0ull;
               new.bits.u256[1] = ~0ull;
               new.bits.u256[2] = ~0ull;
            } else {
               new.bits.u256[0] = 0;
               new.bits.u256[1] = 0;
               new.bits.u256[2] = 0;
            }
         }
         break;
      default:
         panic(__func__);
      }
      return new;
   }

   if (v.num_bits == 128) {
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         if (v.bits.u128[1] != 0) {
            new.bits.u128[0] = v.bits.u128[0];
            new.bits.u128[1] = left64(v.bits.u128[1]);
         } else {
            new.bits.u128[0] = left64(v.bits.u128[0]);
            if (new.bits.u128[0] & (1ull << 63)) {  // MSB is set
               new.bits.u128[1] = ~0ull;
            } else {
               new.bits.u128[1] = 0;
            }
         }
      } else {
         if (v.bits.u128[0] != 0) {
            new.bits.u128[0] = left64(v.bits.u128[0]);
            new.bits.u128[1] = v.bits.u128[1];
         } else {
            new.bits.u128[1] = left64(v.bits.u128[1]);
            if (new.bits.u128[1] & (1ull << 63)) {  // MSB is set
               new.bits.u128[0] = ~0ull;
            } else {
               new.bits.u128[0] = 0;
            }
         }
      }
      if (num_bits == 128) return new;

      assert(num_bits == 256);

      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         uint64_t b1 = new.bits.u128[1];
         uint64_t b0 = new.bits.u128[0];

         new.bits.u256[0] = b0;
         new.bits.u256[1] = b1;

         if (new.bits.u256[1] & (1ull << 63)) {  // MSB is set
            new.bits.u256[2] = ~0ull;
            new.bits.u256[3] = ~0ull;
         } else {
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         }
      } else {
         uint64_t b1 = new.bits.u128[0];
         uint64_t b0 = new.bits.u128[1];

         new.bits.u256[2] = b0;
         new.bits.u256[3] = b1;

         if (new.bits.u256[2] & (1ull << 63)) {  // MSB is set
            new.bits.u256[0] = ~0ull;
            new.bits.u256[1] = ~0ull;
         } else {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
         }
      }
      return new;
   }

   panic(__func__);
}


vbits_t
or_vbits(vbits_t v1, vbits_t v2)
{
   assert(v1.num_bits == v2.num_bits);

   vbits_t new = { .num_bits = v1.num_bits };

   switch (v1.num_bits) {
   case 8:  new.bits.u8  = v1.bits.u8  | v2.bits.u8;  break;
   case 16: new.bits.u16 = v1.bits.u16 | v2.bits.u16; break;
   case 32: new.bits.u32 = v1.bits.u32 | v2.bits.u32; break;
   case 64: new.bits.u64 = v1.bits.u64 | v2.bits.u64; break;
   case 128: new.bits.u128[0] = v1.bits.u128[0] | v2.bits.u128[0];
             new.bits.u128[1] = v1.bits.u128[1] | v2.bits.u128[1];
             break;
   case 256: new.bits.u256[0] = v1.bits.u256[0] | v2.bits.u256[0];
             new.bits.u256[1] = v1.bits.u256[1] | v2.bits.u256[1];
             new.bits.u256[2] = v1.bits.u256[2] | v2.bits.u256[2];
             new.bits.u256[3] = v1.bits.u256[3] | v2.bits.u256[3];
             break;
   default:
      panic(__func__);
   }

   return new;
}


vbits_t
and_vbits(vbits_t v1, vbits_t v2)
{
   assert(v1.num_bits == v2.num_bits);

   vbits_t new = { .num_bits = v1.num_bits };

   switch (v1.num_bits) {
   case 8:  new.bits.u8  = v1.bits.u8  & v2.bits.u8;  break;
   case 16: new.bits.u16 = v1.bits.u16 & v2.bits.u16; break;
   case 32: new.bits.u32 = v1.bits.u32 & v2.bits.u32; break;
   case 64: new.bits.u64 = v1.bits.u64 & v2.bits.u64; break;
   case 128: new.bits.u128[0] = v1.bits.u128[0] & v2.bits.u128[0];
             new.bits.u128[1] = v1.bits.u128[1] & v2.bits.u128[1];
             break;
   case 256: new.bits.u256[0] = v1.bits.u256[0] & v2.bits.u256[0];
             new.bits.u256[1] = v1.bits.u256[1] & v2.bits.u256[1];
             new.bits.u256[2] = v1.bits.u256[2] & v2.bits.u256[2];
             new.bits.u256[3] = v1.bits.u256[3] & v2.bits.u256[3];
             break;
   default:
      panic(__func__);
   }

   return new;
}

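
/* Added note (illustrative): or_vbits and and_vbits above simply combine the
   two V-bit vectors bitwise.  For 8-bit operands 0x0f and 0x81, or_vbits
   yields 0x8f and and_vbits yields 0x01. */
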

vbits_t
concat_vbits(vbits_t v1, vbits_t v2)
{
   assert(v1.num_bits == v2.num_bits);

   vbits_t new = { .num_bits = v1.num_bits * 2 };

   switch (v1.num_bits) {
   case 8:  new.bits.u16 = v1.bits.u8;
            new.bits.u16 = (new.bits.u16 << 8)  | v2.bits.u8;  break;
   case 16: new.bits.u32 = v1.bits.u16;
            new.bits.u32 = (new.bits.u32 << 16) | v2.bits.u16; break;
   case 32: new.bits.u64 = v1.bits.u32;
            new.bits.u64 = (new.bits.u64 << 32) | v2.bits.u32; break;
   case 64:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u128[0] = v2.bits.u64;
         new.bits.u128[1] = v1.bits.u64;
      } else {
         new.bits.u128[0] = v1.bits.u64;
         new.bits.u128[1] = v2.bits.u64;
      }
      break;
   case 128:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u256[0] = v2.bits.u128[0];
         new.bits.u256[1] = v2.bits.u128[1];
         new.bits.u256[2] = v1.bits.u128[0];
         new.bits.u256[3] = v1.bits.u128[1];
      } else {
         new.bits.u256[0] = v1.bits.u128[0];
         new.bits.u256[1] = v1.bits.u128[1];
         new.bits.u256[2] = v2.bits.u128[0];
         new.bits.u256[3] = v2.bits.u128[1];
      }
      break;
   case 256: /* Fall through */
   default:
      panic(__func__);
   }

   return new;
}


vbits_t
upper_vbits(vbits_t v)
{
   vbits_t new = { .num_bits = v.num_bits / 2 };

   switch (v.num_bits) {
   case 16: new.bits.u8  = v.bits.u16 >> 8;  break;
   case 32: new.bits.u16 = v.bits.u32 >> 16; break;
   case 64: new.bits.u32 = v.bits.u64 >> 32; break;
   case 128:
      if (__BYTE_ORDER == __LITTLE_ENDIAN)
         new.bits.u64 = v.bits.u128[1];
      else
         new.bits.u64 = v.bits.u128[0];
      break;
   case 256:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u128[0] = v.bits.u256[2];
         new.bits.u128[1] = v.bits.u256[3];
      } else {
         new.bits.u128[0] = v.bits.u256[0];
         new.bits.u128[1] = v.bits.u256[1];
      }
      break;
   case 8:
   default:
      panic(__func__);
   }

   return new;
}


vbits_t
zextend_vbits(vbits_t v, unsigned num_bits)
{
   assert(num_bits >= v.num_bits);

   if (num_bits == v.num_bits) return v;

   vbits_t new = { .num_bits = num_bits };

   if (v.num_bits <= 64) {
      uint64_t bits = get_bits64(v);

      switch (num_bits) {
      case 8:  new.bits.u8  = bits; break;
      case 16: new.bits.u16 = bits; break;
      case 32: new.bits.u32 = bits; break;
      case 64: new.bits.u64 = bits; break;
      case 128:
         if (__BYTE_ORDER == __LITTLE_ENDIAN) {
            new.bits.u128[0] = bits;
            new.bits.u128[1] = 0;
         } else {
            new.bits.u128[0] = 0;
            new.bits.u128[1] = bits;
         }
         break;
      case 256:
         if (__BYTE_ORDER == __LITTLE_ENDIAN) {
            new.bits.u256[0] = bits;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         } else {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = bits;
         }
         break;
      default:
         panic(__func__);
      }
      return new;
   }

   if (v.num_bits == 128) {
      assert(num_bits == 256);

      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         new.bits.u256[0] = v.bits.u128[0];
         new.bits.u256[1] = v.bits.u128[1];
         new.bits.u256[2] = 0;
         new.bits.u256[3] = 0;
      } else {
         new.bits.u256[0] = 0;
         new.bits.u256[1] = 0;
         new.bits.u256[2] = v.bits.u128[1];
         new.bits.u256[3] = v.bits.u128[0];
      }
      return new;
   }

   /* Cannot zero-extend a 256-bit value to something larger */
   panic(__func__);
}

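
/* Added note (illustrative example, not part of the original test code):
   zextend_vbits marks all newly added upper bits as defined, whereas
   sextend_vbits below marks them undefined whenever the most significant
   V-bit of the source is undefined (by reusing left_vbits).  E.g. widening
   the 8-bit pattern 0x80 to 16 bits gives 0x0080 with zextend_vbits but
   0xff80 with sextend_vbits. */
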
vbits_t
sextend_vbits(vbits_t v, unsigned num_bits)
{
   assert(num_bits >= v.num_bits);

   int sextend = 0;

   /* Sign-extend iff the most significant (sign) V-bit is undefined. */
   switch (v.num_bits) {
   case 8:   if (v.bits.u8  & 0x80)             sextend = 1; break;
   case 16:  if (v.bits.u16 & 0x8000)           sextend = 1; break;
   case 32:  if (v.bits.u32 & 0x80000000)       sextend = 1; break;
   case 64:  if (v.bits.u64 & (1ull << 63))     sextend = 1; break;
   case 128: if (v.bits.u128[1] & (1ull << 63)) sextend = 1; break;
   case 256: if (v.bits.u256[3] & (1ull << 63)) sextend = 1; break;

   default:
      panic(__func__);
   }

   return sextend ? left_vbits(v, num_bits) : zextend_vbits(v, num_bits);
}


vbits_t
onehot_vbits(unsigned bitno, unsigned num_bits)
{
   assert(bitno < num_bits);

   vbits_t new = { .num_bits = num_bits };

   switch (num_bits) {
   case 1:  new.bits.u32 = 1    << bitno; break;
   case 8:  new.bits.u8  = 1    << bitno; break;
   case 16: new.bits.u16 = 1    << bitno; break;
   case 32: new.bits.u32 = 1u   << bitno; break;
   case 64: new.bits.u64 = 1ull << bitno; break;
   case 128:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         if (bitno < 64) {
            new.bits.u128[0] = 1ull << bitno;
            new.bits.u128[1] = 0;
         } else {
            new.bits.u128[0] = 0;
            new.bits.u128[1] = 1ull << (bitno - 64);
         }
      } else {
         if (bitno < 64) {
            new.bits.u128[0] = 0;
            new.bits.u128[1] = 1ull << bitno;
         } else {
            new.bits.u128[0] = 1ull << (bitno - 64);
            new.bits.u128[1] = 0;
         }
      }
      break;
   case 256:
      if (__BYTE_ORDER == __LITTLE_ENDIAN) {
         if (bitno < 64) {
            new.bits.u256[0] = 1ull << bitno;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         } else if (bitno < 128) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 1ull << (bitno - 64);
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         } else if (bitno < 192) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 1ull << (bitno - 128);
            new.bits.u256[3] = 0;
         } else {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 1ull << (bitno - 192);
         }
      } else {
         if (bitno < 64) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 1ull << bitno;
         } else if (bitno < 128) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 1ull << (bitno - 64);
            new.bits.u256[3] = 0;
         } else if (bitno < 192) {
            new.bits.u256[0] = 0;
            new.bits.u256[1] = 1ull << (bitno - 128);
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         } else {
            new.bits.u256[0] = 1ull << (bitno - 192);
            new.bits.u256[1] = 0;
            new.bits.u256[2] = 0;
            new.bits.u256[3] = 0;
         }
      }
      break;
   default:
      panic(__func__);
   }
   return new;
}


int
completely_defined_vbits(vbits_t v)
{
   return equal_vbits(v, defined_vbits(v.num_bits));
}


vbits_t
shl_vbits(vbits_t v, unsigned shift_amount)
{
   assert(shift_amount < v.num_bits);

   vbits_t new = v;

   switch (v.num_bits) {
   case 8:  new.bits.u8  <<= shift_amount; break;
   case 16: new.bits.u16 <<= shift_amount; break;
   case 32: new.bits.u32 <<= shift_amount; break;
   case 64: new.bits.u64 <<= shift_amount; break;
   case 128: /* fall through */
   case 256: /* fall through */
   default:
      panic(__func__);
   }

   return new;
}

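
/* Worked example (illustrative): shifting V-bits moves undefinedness along
   with the data.  shl_vbits on the 8-bit pattern 0x81 with shift amount 1
   yields 0x02: the undefined bit 0 moves to bit 1 and the undefined bit 7
   is shifted out. */
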
vbits_t
shr_vbits(vbits_t v, unsigned shift_amount)
{
   assert(shift_amount < v.num_bits);

   vbits_t new = v;

   switch (v.num_bits) {
   case 8:  new.bits.u8  >>= shift_amount; break;
   case 16: new.bits.u16 >>= shift_amount; break;
   case 32: new.bits.u32 >>= shift_amount; break;
   case 64: new.bits.u64 >>= shift_amount; break;
   case 128: /* fall through */
   case 256: /* fall through */
   default:
      panic(__func__);
   }

   return new;
}


vbits_t
sar_vbits(vbits_t v, unsigned shift_amount)
{
   assert(shift_amount < v.num_bits);

   vbits_t new = v;
   int msb;

   switch (v.num_bits) {
   case 8:
      new.bits.u8 >>= shift_amount;
      msb = (v.bits.u8 & 0x80) != 0;
      break;
   case 16:
      new.bits.u16 >>= shift_amount;
      msb = (v.bits.u16 & 0x8000) != 0;
      break;
   case 32:
      new.bits.u32 >>= shift_amount;
      msb = (v.bits.u32 & (1u << 31)) != 0;
      break;
   case 64:
      new.bits.u64 >>= shift_amount;
      msb = (v.bits.u64 & (1ull << 63)) != 0;
      break;
   case 128: /* fall through */
   case 256: /* fall through */
   default:
      panic(__func__);
   }

   if (msb)
      new = left_vbits(new, new.num_bits);
   return new;
}


/* Return a value for the POWER Iop_CmpORD class iops */
vbits_t
cmpord_vbits(unsigned v1_num_bits, unsigned v2_num_bits)
{
   vbits_t new = { .num_bits = v1_num_bits };

   /* Size of values being compared must be the same */
   assert(v1_num_bits == v2_num_bits);

   /* The comparison only produces a 32-bit or 64-bit value in which
    * bits 3:1 indicate less than, greater than, and equal, respectively.
    */
   switch (v1_num_bits) {
   case 32:
      new.bits.u32 = 0xE;
      break;

   case 64:
      new.bits.u64 = 0xE;
      break;

   default:
      panic(__func__);
   }

   return new;
}
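
/* Worked example (illustrative, not part of the original test code):
   sar_vbits above on the 8-bit pattern 0x80 with shift amount 1 first
   computes the logical shift 0x40 and then, because the original most
   significant V-bit was undefined, applies left_vbits, giving 0xc0 -- the
   undefined sign bit is replicated into the vacated position. */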