1 /* 2 * ARM Generic/Distributed Interrupt Controller 3 * 4 * Copyright (c) 2006-2007 CodeSourcery. 5 * Written by Paul Brook 6 * 7 * This code is licenced under the GPL. 8 */ 9 10 /* This file contains implementation code for the RealView EB interrupt 11 controller, MPCore distributed interrupt controller and ARMv7-M 12 Nested Vectored Interrupt Controller. */ 13 14 //#define DEBUG_GIC 15 16 #ifdef DEBUG_GIC 17 #define DPRINTF(fmt, ...) \ 18 do { printf("arm_gic: " fmt , ## __VA_ARGS__); } while (0) 19 #else 20 #define DPRINTF(fmt, ...) do {} while(0) 21 #endif 22 23 #ifdef NVIC 24 static const uint8_t gic_id[] = 25 { 0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1 }; 26 /* The NVIC has 16 internal vectors. However these are not exposed 27 through the normal GIC interface. */ 28 #define GIC_BASE_IRQ 32 29 #else 30 static const uint8_t gic_id[] = 31 { 0x90, 0x13, 0x04, 0x00, 0x0d, 0xf0, 0x05, 0xb1 }; 32 #define GIC_BASE_IRQ 0 33 #endif 34 35 #define FROM_SYSBUSGIC(type, dev) \ 36 DO_UPCAST(type, gic, FROM_SYSBUS(gic_state, dev)) 37 38 typedef struct gic_irq_state 39 { 40 /* ??? The documentation seems to imply the enable bits are global, even 41 for per-cpu interrupts. This seems strange. */ 42 unsigned enabled:1; 43 unsigned pending:NCPU; 44 unsigned active:NCPU; 45 unsigned level:NCPU; 46 unsigned model:1; /* 0 = N:N, 1 = 1:N */ 47 unsigned trigger:1; /* nonzero = edge triggered. */ 48 } gic_irq_state; 49 50 #define ALL_CPU_MASK ((1 << NCPU) - 1) 51 52 #define GIC_SET_ENABLED(irq) s->irq_state[irq].enabled = 1 53 #define GIC_CLEAR_ENABLED(irq) s->irq_state[irq].enabled = 0 54 #define GIC_TEST_ENABLED(irq) s->irq_state[irq].enabled 55 #define GIC_SET_PENDING(irq, cm) s->irq_state[irq].pending |= (cm) 56 #define GIC_CLEAR_PENDING(irq, cm) s->irq_state[irq].pending &= ~(cm) 57 #define GIC_TEST_PENDING(irq, cm) ((s->irq_state[irq].pending & (cm)) != 0) 58 #define GIC_SET_ACTIVE(irq, cm) s->irq_state[irq].active |= (cm) 59 #define GIC_CLEAR_ACTIVE(irq, cm) s->irq_state[irq].active &= ~(cm) 60 #define GIC_TEST_ACTIVE(irq, cm) ((s->irq_state[irq].active & (cm)) != 0) 61 #define GIC_SET_MODEL(irq) s->irq_state[irq].model = 1 62 #define GIC_CLEAR_MODEL(irq) s->irq_state[irq].model = 0 63 #define GIC_TEST_MODEL(irq) s->irq_state[irq].model 64 #define GIC_SET_LEVEL(irq, cm) s->irq_state[irq].level = (cm) 65 #define GIC_CLEAR_LEVEL(irq, cm) s->irq_state[irq].level &= ~(cm) 66 #define GIC_TEST_LEVEL(irq, cm) ((s->irq_state[irq].level & (cm)) != 0) 67 #define GIC_SET_TRIGGER(irq) s->irq_state[irq].trigger = 1 68 #define GIC_CLEAR_TRIGGER(irq) s->irq_state[irq].trigger = 0 69 #define GIC_TEST_TRIGGER(irq) s->irq_state[irq].trigger 70 #define GIC_GET_PRIORITY(irq, cpu) \ 71 (((irq) < 32) ? s->priority1[irq][cpu] : s->priority2[(irq) - 32]) 72 #ifdef NVIC 73 #define GIC_TARGET(irq) 1 74 #else 75 #define GIC_TARGET(irq) s->irq_target[irq] 76 #endif 77 78 typedef struct gic_state 79 { 80 SysBusDevice busdev; 81 qemu_irq parent_irq[NCPU]; 82 int enabled; 83 int cpu_enabled[NCPU]; 84 85 gic_irq_state irq_state[GIC_NIRQ]; 86 #ifndef NVIC 87 int irq_target[GIC_NIRQ]; 88 #endif 89 int priority1[32][NCPU]; 90 int priority2[GIC_NIRQ - 32]; 91 int last_active[GIC_NIRQ][NCPU]; 92 93 int priority_mask[NCPU]; 94 int running_irq[NCPU]; 95 int running_priority[NCPU]; 96 int current_pending[NCPU]; 97 98 int iomemtype; 99 } gic_state; 100 101 /* TODO: Many places that call this routine could be optimized. */ 102 /* Update interrupt status after enabled or pending bits have been changed. */ 103 static void gic_update(gic_state *s) 104 { 105 int best_irq; 106 int best_prio; 107 int irq; 108 int level; 109 int cpu; 110 int cm; 111 112 for (cpu = 0; cpu < NCPU; cpu++) { 113 cm = 1 << cpu; 114 s->current_pending[cpu] = 1023; 115 if (!s->enabled || !s->cpu_enabled[cpu]) { 116 qemu_irq_lower(s->parent_irq[cpu]); 117 return; 118 } 119 best_prio = 0x100; 120 best_irq = 1023; 121 for (irq = 0; irq < GIC_NIRQ; irq++) { 122 if (GIC_TEST_ENABLED(irq) && GIC_TEST_PENDING(irq, cm)) { 123 if (GIC_GET_PRIORITY(irq, cpu) < best_prio) { 124 best_prio = GIC_GET_PRIORITY(irq, cpu); 125 best_irq = irq; 126 } 127 } 128 } 129 level = 0; 130 if (best_prio <= s->priority_mask[cpu]) { 131 s->current_pending[cpu] = best_irq; 132 if (best_prio < s->running_priority[cpu]) { 133 DPRINTF("Raised pending IRQ %d\n", best_irq); 134 level = 1; 135 } 136 } 137 qemu_set_irq(s->parent_irq[cpu], level); 138 } 139 } 140 141 static void __attribute__((unused)) 142 gic_set_pending_private(gic_state *s, int cpu, int irq) 143 { 144 int cm = 1 << cpu; 145 146 if (GIC_TEST_PENDING(irq, cm)) 147 return; 148 149 DPRINTF("Set %d pending cpu %d\n", irq, cpu); 150 GIC_SET_PENDING(irq, cm); 151 gic_update(s); 152 } 153 154 /* Process a change in an external IRQ input. */ 155 static void gic_set_irq(void *opaque, int irq, int level) 156 { 157 gic_state *s = (gic_state *)opaque; 158 /* The first external input line is internal interrupt 32. */ 159 irq += 32; 160 if (level == GIC_TEST_LEVEL(irq, ALL_CPU_MASK)) 161 return; 162 163 if (level) { 164 GIC_SET_LEVEL(irq, ALL_CPU_MASK); 165 if (GIC_TEST_TRIGGER(irq) || GIC_TEST_ENABLED(irq)) { 166 DPRINTF("Set %d pending mask %x\n", irq, GIC_TARGET(irq)); 167 GIC_SET_PENDING(irq, GIC_TARGET(irq)); 168 } 169 } else { 170 GIC_CLEAR_LEVEL(irq, ALL_CPU_MASK); 171 } 172 gic_update(s); 173 } 174 175 static void gic_set_running_irq(gic_state *s, int cpu, int irq) 176 { 177 s->running_irq[cpu] = irq; 178 if (irq == 1023) { 179 s->running_priority[cpu] = 0x100; 180 } else { 181 s->running_priority[cpu] = GIC_GET_PRIORITY(irq, cpu); 182 } 183 gic_update(s); 184 } 185 186 static uint32_t gic_acknowledge_irq(gic_state *s, int cpu) 187 { 188 int new_irq; 189 int cm = 1 << cpu; 190 new_irq = s->current_pending[cpu]; 191 if (new_irq == 1023 192 || GIC_GET_PRIORITY(new_irq, cpu) >= s->running_priority[cpu]) { 193 DPRINTF("ACK no pending IRQ\n"); 194 return 1023; 195 } 196 s->last_active[new_irq][cpu] = s->running_irq[cpu]; 197 /* Clear pending flags for both level and edge triggered interrupts. 198 Level triggered IRQs will be reasserted once they become inactive. */ 199 GIC_CLEAR_PENDING(new_irq, GIC_TEST_MODEL(new_irq) ? ALL_CPU_MASK : cm); 200 gic_set_running_irq(s, cpu, new_irq); 201 DPRINTF("ACK %d\n", new_irq); 202 return new_irq; 203 } 204 205 static void gic_complete_irq(gic_state * s, int cpu, int irq) 206 { 207 int update = 0; 208 int cm = 1 << cpu; 209 DPRINTF("EOI %d\n", irq); 210 if (s->running_irq[cpu] == 1023) 211 return; /* No active IRQ. */ 212 if (irq != 1023) { 213 /* Mark level triggered interrupts as pending if they are still 214 raised. */ 215 if (!GIC_TEST_TRIGGER(irq) && GIC_TEST_ENABLED(irq) 216 && GIC_TEST_LEVEL(irq, cm) && (GIC_TARGET(irq) & cm) != 0) { 217 DPRINTF("Set %d pending mask %x\n", irq, cm); 218 GIC_SET_PENDING(irq, cm); 219 update = 1; 220 } 221 } 222 if (irq != s->running_irq[cpu]) { 223 /* Complete an IRQ that is not currently running. */ 224 int tmp = s->running_irq[cpu]; 225 while (s->last_active[tmp][cpu] != 1023) { 226 if (s->last_active[tmp][cpu] == irq) { 227 s->last_active[tmp][cpu] = s->last_active[irq][cpu]; 228 break; 229 } 230 tmp = s->last_active[tmp][cpu]; 231 } 232 if (update) { 233 gic_update(s); 234 } 235 } else { 236 /* Complete the current running IRQ. */ 237 gic_set_running_irq(s, cpu, s->last_active[s->running_irq[cpu]][cpu]); 238 } 239 } 240 241 static uint32_t gic_dist_readb(void *opaque, target_phys_addr_t offset) 242 { 243 gic_state *s = (gic_state *)opaque; 244 uint32_t res; 245 int irq; 246 int i; 247 int cpu; 248 int cm; 249 int mask; 250 251 cpu = gic_get_current_cpu(); 252 cm = 1 << cpu; 253 if (offset < 0x100) { 254 #ifndef NVIC 255 if (offset == 0) 256 return s->enabled; 257 if (offset == 4) 258 return ((GIC_NIRQ / 32) - 1) | ((NCPU - 1) << 5); 259 if (offset < 0x08) 260 return 0; 261 #endif 262 goto bad_reg; 263 } else if (offset < 0x200) { 264 /* Interrupt Set/Clear Enable. */ 265 if (offset < 0x180) 266 irq = (offset - 0x100) * 8; 267 else 268 irq = (offset - 0x180) * 8; 269 irq += GIC_BASE_IRQ; 270 if (irq >= GIC_NIRQ) 271 goto bad_reg; 272 res = 0; 273 for (i = 0; i < 8; i++) { 274 if (GIC_TEST_ENABLED(irq + i)) { 275 res |= (1 << i); 276 } 277 } 278 } else if (offset < 0x300) { 279 /* Interrupt Set/Clear Pending. */ 280 if (offset < 0x280) 281 irq = (offset - 0x200) * 8; 282 else 283 irq = (offset - 0x280) * 8; 284 irq += GIC_BASE_IRQ; 285 if (irq >= GIC_NIRQ) 286 goto bad_reg; 287 res = 0; 288 mask = (irq < 32) ? cm : ALL_CPU_MASK; 289 for (i = 0; i < 8; i++) { 290 if (GIC_TEST_PENDING(irq + i, mask)) { 291 res |= (1 << i); 292 } 293 } 294 } else if (offset < 0x400) { 295 /* Interrupt Active. */ 296 irq = (offset - 0x300) * 8 + GIC_BASE_IRQ; 297 if (irq >= GIC_NIRQ) 298 goto bad_reg; 299 res = 0; 300 mask = (irq < 32) ? cm : ALL_CPU_MASK; 301 for (i = 0; i < 8; i++) { 302 if (GIC_TEST_ACTIVE(irq + i, mask)) { 303 res |= (1 << i); 304 } 305 } 306 } else if (offset < 0x800) { 307 /* Interrupt Priority. */ 308 irq = (offset - 0x400) + GIC_BASE_IRQ; 309 if (irq >= GIC_NIRQ) 310 goto bad_reg; 311 res = GIC_GET_PRIORITY(irq, cpu); 312 #ifndef NVIC 313 } else if (offset < 0xc00) { 314 /* Interrupt CPU Target. */ 315 irq = (offset - 0x800) + GIC_BASE_IRQ; 316 if (irq >= GIC_NIRQ) 317 goto bad_reg; 318 if (irq >= 29 && irq <= 31) { 319 res = cm; 320 } else { 321 res = GIC_TARGET(irq); 322 } 323 } else if (offset < 0xf00) { 324 /* Interrupt Configuration. */ 325 irq = (offset - 0xc00) * 2 + GIC_BASE_IRQ; 326 if (irq >= GIC_NIRQ) 327 goto bad_reg; 328 res = 0; 329 for (i = 0; i < 4; i++) { 330 if (GIC_TEST_MODEL(irq + i)) 331 res |= (1 << (i * 2)); 332 if (GIC_TEST_TRIGGER(irq + i)) 333 res |= (2 << (i * 2)); 334 } 335 #endif 336 } else if (offset < 0xfe0) { 337 goto bad_reg; 338 } else /* offset >= 0xfe0 */ { 339 if (offset & 3) { 340 res = 0; 341 } else { 342 res = gic_id[(offset - 0xfe0) >> 2]; 343 } 344 } 345 return res; 346 bad_reg: 347 hw_error("gic_dist_readb: Bad offset %x\n", (int)offset); 348 return 0; 349 } 350 351 static uint32_t gic_dist_readw(void *opaque, target_phys_addr_t offset) 352 { 353 uint32_t val; 354 val = gic_dist_readb(opaque, offset); 355 val |= gic_dist_readb(opaque, offset + 1) << 8; 356 return val; 357 } 358 359 static uint32_t gic_dist_readl(void *opaque, target_phys_addr_t offset) 360 { 361 uint32_t val; 362 #ifdef NVIC 363 gic_state *s = (gic_state *)opaque; 364 uint32_t addr; 365 addr = offset; 366 if (addr < 0x100 || addr > 0xd00) 367 return nvic_readl(s, addr); 368 #endif 369 val = gic_dist_readw(opaque, offset); 370 val |= gic_dist_readw(opaque, offset + 2) << 16; 371 return val; 372 } 373 374 static void gic_dist_writeb(void *opaque, target_phys_addr_t offset, 375 uint32_t value) 376 { 377 gic_state *s = (gic_state *)opaque; 378 int irq; 379 int i; 380 int cpu; 381 382 cpu = gic_get_current_cpu(); 383 if (offset < 0x100) { 384 #ifdef NVIC 385 goto bad_reg; 386 #else 387 if (offset == 0) { 388 s->enabled = (value & 1); 389 DPRINTF("Distribution %sabled\n", s->enabled ? "En" : "Dis"); 390 } else if (offset < 4) { 391 /* ignored. */ 392 } else { 393 goto bad_reg; 394 } 395 #endif 396 } else if (offset < 0x180) { 397 /* Interrupt Set Enable. */ 398 irq = (offset - 0x100) * 8 + GIC_BASE_IRQ; 399 if (irq >= GIC_NIRQ) 400 goto bad_reg; 401 if (irq < 16) 402 value = 0xff; 403 for (i = 0; i < 8; i++) { 404 if (value & (1 << i)) { 405 int mask = (irq < 32) ? (1 << cpu) : GIC_TARGET(irq); 406 if (!GIC_TEST_ENABLED(irq + i)) 407 DPRINTF("Enabled IRQ %d\n", irq + i); 408 GIC_SET_ENABLED(irq + i); 409 /* If a raised level triggered IRQ enabled then mark 410 is as pending. */ 411 if (GIC_TEST_LEVEL(irq + i, mask) 412 && !GIC_TEST_TRIGGER(irq + i)) { 413 DPRINTF("Set %d pending mask %x\n", irq + i, mask); 414 GIC_SET_PENDING(irq + i, mask); 415 } 416 } 417 } 418 } else if (offset < 0x200) { 419 /* Interrupt Clear Enable. */ 420 irq = (offset - 0x180) * 8 + GIC_BASE_IRQ; 421 if (irq >= GIC_NIRQ) 422 goto bad_reg; 423 if (irq < 16) 424 value = 0; 425 for (i = 0; i < 8; i++) { 426 if (value & (1 << i)) { 427 if (GIC_TEST_ENABLED(irq + i)) 428 DPRINTF("Disabled IRQ %d\n", irq + i); 429 GIC_CLEAR_ENABLED(irq + i); 430 } 431 } 432 } else if (offset < 0x280) { 433 /* Interrupt Set Pending. */ 434 irq = (offset - 0x200) * 8 + GIC_BASE_IRQ; 435 if (irq >= GIC_NIRQ) 436 goto bad_reg; 437 if (irq < 16) 438 irq = 0; 439 440 for (i = 0; i < 8; i++) { 441 if (value & (1 << i)) { 442 GIC_SET_PENDING(irq + i, GIC_TARGET(irq)); 443 } 444 } 445 } else if (offset < 0x300) { 446 /* Interrupt Clear Pending. */ 447 irq = (offset - 0x280) * 8 + GIC_BASE_IRQ; 448 if (irq >= GIC_NIRQ) 449 goto bad_reg; 450 for (i = 0; i < 8; i++) { 451 /* ??? This currently clears the pending bit for all CPUs, even 452 for per-CPU interrupts. It's unclear whether this is the 453 corect behavior. */ 454 if (value & (1 << i)) { 455 GIC_CLEAR_PENDING(irq + i, ALL_CPU_MASK); 456 } 457 } 458 } else if (offset < 0x400) { 459 /* Interrupt Active. */ 460 goto bad_reg; 461 } else if (offset < 0x800) { 462 /* Interrupt Priority. */ 463 irq = (offset - 0x400) + GIC_BASE_IRQ; 464 if (irq >= GIC_NIRQ) 465 goto bad_reg; 466 if (irq < 32) { 467 s->priority1[irq][cpu] = value; 468 } else { 469 s->priority2[irq - 32] = value; 470 } 471 #ifndef NVIC 472 } else if (offset < 0xc00) { 473 /* Interrupt CPU Target. */ 474 irq = (offset - 0x800) + GIC_BASE_IRQ; 475 if (irq >= GIC_NIRQ) 476 goto bad_reg; 477 if (irq < 29) 478 value = 0; 479 else if (irq < 32) 480 value = ALL_CPU_MASK; 481 s->irq_target[irq] = value & ALL_CPU_MASK; 482 } else if (offset < 0xf00) { 483 /* Interrupt Configuration. */ 484 irq = (offset - 0xc00) * 4 + GIC_BASE_IRQ; 485 if (irq >= GIC_NIRQ) 486 goto bad_reg; 487 if (irq < 32) 488 value |= 0xaa; 489 for (i = 0; i < 4; i++) { 490 if (value & (1 << (i * 2))) { 491 GIC_SET_MODEL(irq + i); 492 } else { 493 GIC_CLEAR_MODEL(irq + i); 494 } 495 if (value & (2 << (i * 2))) { 496 GIC_SET_TRIGGER(irq + i); 497 } else { 498 GIC_CLEAR_TRIGGER(irq + i); 499 } 500 } 501 #endif 502 } else { 503 /* 0xf00 is only handled for 32-bit writes. */ 504 goto bad_reg; 505 } 506 gic_update(s); 507 return; 508 bad_reg: 509 hw_error("gic_dist_writeb: Bad offset %x\n", (int)offset); 510 } 511 512 static void gic_dist_writew(void *opaque, target_phys_addr_t offset, 513 uint32_t value) 514 { 515 gic_dist_writeb(opaque, offset, value & 0xff); 516 gic_dist_writeb(opaque, offset + 1, value >> 8); 517 } 518 519 static void gic_dist_writel(void *opaque, target_phys_addr_t offset, 520 uint32_t value) 521 { 522 gic_state *s = (gic_state *)opaque; 523 #ifdef NVIC 524 uint32_t addr; 525 addr = offset; 526 if (addr < 0x100 || (addr > 0xd00 && addr != 0xf00)) { 527 nvic_writel(s, addr, value); 528 return; 529 } 530 #endif 531 if (offset == 0xf00) { 532 int cpu; 533 int irq; 534 int mask; 535 536 cpu = gic_get_current_cpu(); 537 irq = value & 0x3ff; 538 switch ((value >> 24) & 3) { 539 case 0: 540 mask = (value >> 16) & ALL_CPU_MASK; 541 break; 542 case 1: 543 mask = 1 << cpu; 544 break; 545 case 2: 546 mask = ALL_CPU_MASK ^ (1 << cpu); 547 break; 548 default: 549 DPRINTF("Bad Soft Int target filter\n"); 550 mask = ALL_CPU_MASK; 551 break; 552 } 553 GIC_SET_PENDING(irq, mask); 554 gic_update(s); 555 return; 556 } 557 gic_dist_writew(opaque, offset, value & 0xffff); 558 gic_dist_writew(opaque, offset + 2, value >> 16); 559 } 560 561 static CPUReadMemoryFunc *gic_dist_readfn[] = { 562 gic_dist_readb, 563 gic_dist_readw, 564 gic_dist_readl 565 }; 566 567 static CPUWriteMemoryFunc *gic_dist_writefn[] = { 568 gic_dist_writeb, 569 gic_dist_writew, 570 gic_dist_writel 571 }; 572 573 #ifndef NVIC 574 static uint32_t gic_cpu_read(gic_state *s, int cpu, int offset) 575 { 576 switch (offset) { 577 case 0x00: /* Control */ 578 return s->cpu_enabled[cpu]; 579 case 0x04: /* Priority mask */ 580 return s->priority_mask[cpu]; 581 case 0x08: /* Binary Point */ 582 /* ??? Not implemented. */ 583 return 0; 584 case 0x0c: /* Acknowledge */ 585 return gic_acknowledge_irq(s, cpu); 586 case 0x14: /* Runing Priority */ 587 return s->running_priority[cpu]; 588 case 0x18: /* Highest Pending Interrupt */ 589 return s->current_pending[cpu]; 590 default: 591 hw_error("gic_cpu_read: Bad offset %x\n", (int)offset); 592 return 0; 593 } 594 } 595 596 static void gic_cpu_write(gic_state *s, int cpu, int offset, uint32_t value) 597 { 598 switch (offset) { 599 case 0x00: /* Control */ 600 s->cpu_enabled[cpu] = (value & 1); 601 DPRINTF("CPU %sabled\n", s->cpu_enabled ? "En" : "Dis"); 602 break; 603 case 0x04: /* Priority mask */ 604 s->priority_mask[cpu] = (value & 0xff); 605 break; 606 case 0x08: /* Binary Point */ 607 /* ??? Not implemented. */ 608 break; 609 case 0x10: /* End Of Interrupt */ 610 return gic_complete_irq(s, cpu, value & 0x3ff); 611 default: 612 hw_error("gic_cpu_write: Bad offset %x\n", (int)offset); 613 return; 614 } 615 gic_update(s); 616 } 617 #endif 618 619 static void gic_reset(gic_state *s) 620 { 621 int i; 622 memset(s->irq_state, 0, GIC_NIRQ * sizeof(gic_irq_state)); 623 for (i = 0 ; i < NCPU; i++) { 624 s->priority_mask[i] = 0xf0; 625 s->current_pending[i] = 1023; 626 s->running_irq[i] = 1023; 627 s->running_priority[i] = 0x100; 628 #ifdef NVIC 629 /* The NVIC doesn't have per-cpu interfaces, so enable by default. */ 630 s->cpu_enabled[i] = 1; 631 #else 632 s->cpu_enabled[i] = 0; 633 #endif 634 } 635 for (i = 0; i < 16; i++) { 636 GIC_SET_ENABLED(i); 637 GIC_SET_TRIGGER(i); 638 } 639 #ifdef NVIC 640 /* The NVIC is always enabled. */ 641 s->enabled = 1; 642 #else 643 s->enabled = 0; 644 #endif 645 } 646 647 static void gic_save(QEMUFile *f, void *opaque) 648 { 649 gic_state *s = (gic_state *)opaque; 650 int i; 651 int j; 652 653 qemu_put_be32(f, s->enabled); 654 for (i = 0; i < NCPU; i++) { 655 qemu_put_be32(f, s->cpu_enabled[i]); 656 #ifndef NVIC 657 qemu_put_be32(f, s->irq_target[i]); 658 #endif 659 for (j = 0; j < 32; j++) 660 qemu_put_be32(f, s->priority1[j][i]); 661 for (j = 0; j < GIC_NIRQ; j++) 662 qemu_put_be32(f, s->last_active[j][i]); 663 qemu_put_be32(f, s->priority_mask[i]); 664 qemu_put_be32(f, s->running_irq[i]); 665 qemu_put_be32(f, s->running_priority[i]); 666 qemu_put_be32(f, s->current_pending[i]); 667 } 668 for (i = 0; i < GIC_NIRQ - 32; i++) { 669 qemu_put_be32(f, s->priority2[i]); 670 } 671 for (i = 0; i < GIC_NIRQ; i++) { 672 qemu_put_byte(f, s->irq_state[i].enabled); 673 qemu_put_byte(f, s->irq_state[i].pending); 674 qemu_put_byte(f, s->irq_state[i].active); 675 qemu_put_byte(f, s->irq_state[i].level); 676 qemu_put_byte(f, s->irq_state[i].model); 677 qemu_put_byte(f, s->irq_state[i].trigger); 678 } 679 } 680 681 static int gic_load(QEMUFile *f, void *opaque, int version_id) 682 { 683 gic_state *s = (gic_state *)opaque; 684 int i; 685 int j; 686 687 if (version_id != 1) 688 return -EINVAL; 689 690 s->enabled = qemu_get_be32(f); 691 for (i = 0; i < NCPU; i++) { 692 s->cpu_enabled[i] = qemu_get_be32(f); 693 #ifndef NVIC 694 s->irq_target[i] = qemu_get_be32(f); 695 #endif 696 for (j = 0; j < 32; j++) 697 s->priority1[j][i] = qemu_get_be32(f); 698 for (j = 0; j < GIC_NIRQ; j++) 699 s->last_active[j][i] = qemu_get_be32(f); 700 s->priority_mask[i] = qemu_get_be32(f); 701 s->running_irq[i] = qemu_get_be32(f); 702 s->running_priority[i] = qemu_get_be32(f); 703 s->current_pending[i] = qemu_get_be32(f); 704 } 705 for (i = 0; i < GIC_NIRQ - 32; i++) { 706 s->priority2[i] = qemu_get_be32(f); 707 } 708 for (i = 0; i < GIC_NIRQ; i++) { 709 s->irq_state[i].enabled = qemu_get_byte(f); 710 s->irq_state[i].pending = qemu_get_byte(f); 711 s->irq_state[i].active = qemu_get_byte(f); 712 s->irq_state[i].level = qemu_get_byte(f); 713 s->irq_state[i].model = qemu_get_byte(f); 714 s->irq_state[i].trigger = qemu_get_byte(f); 715 } 716 717 return 0; 718 } 719 720 static void gic_init(gic_state *s) 721 { 722 int i; 723 724 qdev_init_gpio_in(&s->busdev.qdev, gic_set_irq, GIC_NIRQ - 32); 725 for (i = 0; i < NCPU; i++) { 726 sysbus_init_irq(&s->busdev, &s->parent_irq[i]); 727 } 728 s->iomemtype = cpu_register_io_memory(gic_dist_readfn, 729 gic_dist_writefn, s); 730 gic_reset(s); 731 register_savevm("arm_gic", -1, 1, gic_save, gic_load, s); 732 } 733