/*
 * QEMU DMA emulation
 *
 * Copyright (c) 2003-2004 Vassili Karpov (malc)
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "cpu.h"
#include "exec/cpu-common.h"
#include "hw/hw.h"
#include "hw/isa/isa.h"

/* #define DEBUG_DMA */

#define dolog(...) fprintf (stderr, "dma: " __VA_ARGS__)
#ifdef DEBUG_DMA
#define linfo(...) fprintf (stderr, "dma: " __VA_ARGS__)
#define ldebug(...) fprintf (stderr, "dma: " __VA_ARGS__)
#else
#define linfo(...)
#define ldebug(...)
#endif

struct dma_regs {
    int now[2];
    uint16_t base[2];
    uint8_t mode;
    uint8_t page;
    uint8_t pageh;
    uint8_t dack;
    uint8_t eop;
    DMA_transfer_handler transfer_handler;
    void *opaque;
};

#define ADDR 0
#define COUNT 1

static struct dma_cont {
    uint8_t status;
    uint8_t command;
    uint8_t mask;
    uint8_t flip_flop;
    int dshift;
    struct dma_regs regs[4];
} dma_controllers[2];

enum {
    CMD_MEMORY_TO_MEMORY = 0x01,
    CMD_FIXED_ADDRESS = 0x02,
    CMD_BLOCK_CONTROLLER = 0x04,
    CMD_COMPRESSED_TIME = 0x08,
    CMD_CYCLIC_PRIORITY = 0x10,
    CMD_EXTENDED_WRITE = 0x20,
    CMD_LOW_DREQ = 0x40,
    CMD_LOW_DACK = 0x80,
    CMD_NOT_SUPPORTED = CMD_MEMORY_TO_MEMORY | CMD_FIXED_ADDRESS
    | CMD_COMPRESSED_TIME | CMD_CYCLIC_PRIORITY | CMD_EXTENDED_WRITE
    | CMD_LOW_DREQ | CMD_LOW_DACK
};

static void DMA_run (void);

static int channels[8] = {-1, 2, 3, 1, -1, -1, -1, 0};

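/* The table above mirrors the PC/AT page-register layout: on the first
 * controller, I/O ports 0x81, 0x82, 0x83 and 0x87 hold the page bytes for
 * channels 2, 3, 1 and 0 respectively, while 0x80 and 0x84-0x86 belong to
 * no channel.  For example, a write to port 0x83 indexes channels[3] == 1
 * and therefore lands in dma_controllers[0].regs[1].page. */
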
static void write_page (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].page = data;
}

static void write_pageh (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel %#x %#x\n", nport, data);
        return;
    }
    d->regs[ichan].pageh = data;
}

static uint32_t read_page (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].page;
}

static uint32_t read_pageh (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan;

    ichan = channels[nport & 7];
    if (-1 == ichan) {
        dolog ("invalid channel read %#x\n", nport);
        return 0;
    }
    return d->regs[ichan].pageh;
}

static inline void init_chan (struct dma_cont *d, int ichan)
{
    struct dma_regs *r;

    r = d->regs + ichan;
    r->now[ADDR] = r->base[ADDR] << d->dshift;
    r->now[COUNT] = 0;
}

static inline int getff (struct dma_cont *d)
{
    int ff;

    ff = d->flip_flop;
    d->flip_flop = !ff;
    return ff;
}

static uint32_t read_chan (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int ichan, nreg, iport, ff, val, dir;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;

    dir = ((r->mode >> 5) & 1) ? -1 : 1;
    ff = getff (d);
    if (nreg)
        val = (r->base[COUNT] << d->dshift) - r->now[COUNT];
    else
        val = r->now[ADDR] + r->now[COUNT] * dir;

    ldebug ("read_chan %#x -> %d\n", iport, val);
    return (val >> (d->dshift + (ff << 3))) & 0xff;
}

static void write_chan (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan, nreg;
    struct dma_regs *r;

    iport = (nport >> d->dshift) & 0x0f;
    ichan = iport >> 1;
    nreg = iport & 1;
    r = d->regs + ichan;
    if (getff (d)) {
        r->base[nreg] = (r->base[nreg] & 0xff) | ((data << 8) & 0xff00);
        init_chan (d, ichan);
    } else {
        r->base[nreg] = (r->base[nreg] & 0xff00) | (data & 0xff);
    }
}

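/* The address and count registers are 16 bits wide but are accessed through
 * 8-bit ports, low byte first; the controller-wide flip-flop selects which
 * half the next access hits.  For example, programming channel 1 of the
 * 8-bit controller with base address 0x2345 takes two writes to port 0x02:
 * first 0x45 (flip-flop 0, low byte), then 0x23 (flip-flop 1, high byte,
 * which also latches the new values via init_chan).  Guests normally reset
 * the flip-flop first by writing to the clear port (0x0c below). */
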
static void write_cont (void *opaque, uint32_t nport, uint32_t data)
{
    struct dma_cont *d = opaque;
    int iport, ichan = 0;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* command */
        if ((data != 0) && (data & CMD_NOT_SUPPORTED)) {
            dolog ("command %#x not supported\n", data);
            return;
        }
        d->command = data;
        break;

    case 0x09:                  /* request */
        ichan = data & 3;
        if (data & 4) {
            d->status |= 1 << (ichan + 4);
        }
        else {
            d->status &= ~(1 << (ichan + 4));
        }
        d->status &= ~(1 << ichan);
        DMA_run();
        break;

    case 0x0a:                  /* single mask */
        if (data & 4)
            d->mask |= 1 << (data & 3);
        else
            d->mask &= ~(1 << (data & 3));
        DMA_run();
        break;

    case 0x0b:                  /* mode */
        {
            ichan = data & 3;
#ifdef DEBUG_DMA
            {
                int op, ai, dir, opmode;
                op = (data >> 2) & 3;
                ai = (data >> 4) & 1;
                dir = (data >> 5) & 1;
                opmode = (data >> 6) & 3;

                linfo ("ichan %d, op %d, ai %d, dir %d, opmode %d\n",
                       ichan, op, ai, dir, opmode);
            }
#endif
            d->regs[ichan].mode = data;
            break;
        }

    case 0x0c:                  /* clear flip flop */
        d->flip_flop = 0;
        break;

    case 0x0d:                  /* reset */
        d->flip_flop = 0;
        d->mask = ~0;
        d->status = 0;
        d->command = 0;
        break;

    case 0x0e:                  /* clear mask for all channels */
        d->mask = 0;
        DMA_run();
        break;

    case 0x0f:                  /* write mask for all channels */
        d->mask = data;
        DMA_run();
        break;

    default:
        dolog ("unknown iport %#x\n", iport);
        break;
    }

#ifdef DEBUG_DMA
    if (0xc != iport) {
        linfo ("write_cont: nport %#06x, ichan % 2d, val %#06x\n",
               nport, ichan, data);
    }
#endif
}

static uint32_t read_cont (void *opaque, uint32_t nport)
{
    struct dma_cont *d = opaque;
    int iport, val;

    iport = (nport >> d->dshift) & 0x0f;
    switch (iport) {
    case 0x08:                  /* status */
        val = d->status;
        d->status &= 0xf0;      /* reading status clears the TC bits */
        break;
    case 0x0f:                  /* mask */
        val = d->mask;
        break;
    default:
        val = 0;
        break;
    }

    ldebug ("read_cont: nport %#06x, iport %#04x val %#x\n", nport, iport, val);
    return val;
}

int DMA_get_channel_mode (int nchan)
{
    return dma_controllers[nchan > 3].regs[nchan & 3].mode;
}

void DMA_hold_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("held cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status |= 1 << (ichan + 4);
    DMA_run();
}

void DMA_release_DREQ (int nchan)
{
    int ncont, ichan;

    ncont = nchan > 3;
    ichan = nchan & 3;
    linfo ("released cont=%d chan=%d\n", ncont, ichan);
    dma_controllers[ncont].status &= ~(1 << (ichan + 4));
    DMA_run();
}

static void channel_run (int ncont, int ichan)
{
    struct dma_regs *r = &dma_controllers[ncont].regs[ichan];
#ifdef DEBUG_DMA
    int dir, opmode;

    dir = (r->mode >> 5) & 1;
    opmode = (r->mode >> 6) & 3;

    if (dir) {
        dolog ("DMA in address decrement mode\n");
    }
    if (opmode != 1) {
        dolog ("DMA not in single mode select %#x\n", opmode);
    }
#endif

    if (r->transfer_handler) {
        int n;

        n = r->transfer_handler (r->opaque, ichan + (ncont << 2),
                                 r->now[COUNT], (r->base[COUNT] + 1) << ncont);
        r->now[COUNT] = n;
        ldebug ("dma_pos %d size %d\n", n, (r->base[COUNT] + 1) << ncont);
    }
}

static QEMUBH *dma_bh;

static void DMA_run (void)
{
    struct dma_cont *d;
    int icont, ichan;
    int rearm = 0;

    d = dma_controllers;

    for (icont = 0; icont < 2; icont++, d++) {
        for (ichan = 0; ichan < 4; ichan++) {
            int mask;

            mask = 1 << ichan;

            /* run a channel only if it is unmasked and its DREQ is raised */
            if ((0 == (d->mask & mask)) && (0 != (d->status & (mask << 4)))) {
                channel_run (icont, ichan);
                rearm = 1;
            }
        }
    }

    if (rearm)
        qemu_bh_schedule_idle(dma_bh);
}

static void DMA_run_bh(void *unused)
{
    DMA_run();
}

void DMA_register_channel (int nchan,
                           DMA_transfer_handler transfer_handler,
                           void *opaque)
{
    struct dma_regs *r;
    int ichan, ncont;

    ncont = nchan > 3;
    ichan = nchan & 3;

    r = dma_controllers[ncont].regs + ichan;
    r->transfer_handler = transfer_handler;
    r->opaque = opaque;
}

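/* The physical address of a transfer is assembled from three pieces: the
 * high page byte (bits 24-30), the page byte (bits 16-23) and the channel
 * address in now[ADDR].  For example, with pageh = 0x01, page = 0x23 and
 * now[ADDR] = 0x4567, the helpers below start from
 * (0x01 << 24) | (0x23 << 16) | 0x4567 = 0x01234567. */
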
int DMA_read_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        cpu_physical_memory_read (addr - pos - len, buf, len);
        /* in decrement mode the region was filled backwards: byte-reverse
           the buffer so it appears in transfer order.
           What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
    }
    else
        cpu_physical_memory_read (addr + pos, buf, len);

    return len;
}

int DMA_write_memory (int nchan, void *buf, int pos, int len)
{
    struct dma_regs *r = &dma_controllers[nchan > 3].regs[nchan & 3];
    hwaddr addr = ((r->pageh & 0x7f) << 24) | (r->page << 16) | r->now[ADDR];

    if (r->mode & 0x20) {
        int i;
        uint8_t *p = buf;

        /* byte-reverse the buffer before writing it out so that memory
           ends up filled in descending address order, as decrement mode
           demands.  What about 16bit transfers? */
        for (i = 0; i < len >> 1; i++) {
            uint8_t b = p[len - i - 1];
            p[len - i - 1] = p[i];
            p[i] = b;
        }
        cpu_physical_memory_write (addr - pos - len, buf, len);
    }
    else
        cpu_physical_memory_write (addr + pos, buf, len);

    return len;
}

/* request the emulator to transfer a new DMA memory block ASAP */
void DMA_schedule(int nchan)
{
    CPUState *cpu = current_cpu;
    if (cpu)
        cpu_exit(cpu);
}

static void dma_reset(void *opaque)
{
    struct dma_cont *d = opaque;
    write_cont (d, (0x0d << d->dshift), 0);
}

static int dma_phony_handler (void *opaque, int nchan, int dma_pos, int dma_len)
{
    dolog ("unregistered DMA channel used nchan=%d dma_pos=%d dma_len=%d\n",
           nchan, dma_pos, dma_len);
    return dma_pos;
}

/* dshift = 0: 8 bit DMA, 1 = 16 bit DMA */
static void dma_init2(struct dma_cont *d, int base, int dshift,
                      int page_base, int pageh_base)
{
    static const int page_port_list[] = { 0x1, 0x2, 0x3, 0x7 };
    int i;

    d->dshift = dshift;
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + (i << dshift), 1, 1, write_chan, d);
        register_ioport_read (base + (i << dshift), 1, 1, read_chan, d);
    }
    for (i = 0; i < ARRAY_SIZE (page_port_list); i++) {
        register_ioport_write (page_base + page_port_list[i], 1, 1,
                               write_page, d);
        register_ioport_read (page_base + page_port_list[i], 1, 1,
                              read_page, d);
        if (pageh_base >= 0) {
            register_ioport_write (pageh_base + page_port_list[i], 1, 1,
                                   write_pageh, d);
            register_ioport_read (pageh_base + page_port_list[i], 1, 1,
                                  read_pageh, d);
        }
    }
    for (i = 0; i < 8; i++) {
        register_ioport_write (base + ((i + 8) << dshift), 1, 1,
                               write_cont, d);
        register_ioport_read (base + ((i + 8) << dshift), 1, 1,
                              read_cont, d);
    }
    qemu_register_reset(dma_reset, d);
    dma_reset(d);
    for (i = 0; i < ARRAY_SIZE (d->regs); ++i) {
        d->regs[i].transfer_handler = dma_phony_handler;
    }
}

static void dma_save (QEMUFile *f, void *opaque)
{
    struct dma_cont *d = opaque;
    int i;

    /* qemu_put_8s (f, &d->status); */
    qemu_put_8s (f, &d->command);
    qemu_put_8s (f, &d->mask);
    qemu_put_8s (f, &d->flip_flop);
    qemu_put_be32 (f, d->dshift);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        qemu_put_be32 (f, r->now[0]);
        qemu_put_be32 (f, r->now[1]);
        qemu_put_be16s (f, &r->base[0]);
        qemu_put_be16s (f, &r->base[1]);
        qemu_put_8s (f, &r->mode);
        qemu_put_8s (f, &r->page);
        qemu_put_8s (f, &r->pageh);
        qemu_put_8s (f, &r->dack);
        qemu_put_8s (f, &r->eop);
    }
}

static int dma_load (QEMUFile *f, void *opaque, int version_id)
{
    struct dma_cont *d = opaque;
    int i;

    if (version_id != 1)
        return -EINVAL;

    /* qemu_get_8s (f, &d->status); */
    qemu_get_8s (f, &d->command);
    qemu_get_8s (f, &d->mask);
    qemu_get_8s (f, &d->flip_flop);
    d->dshift = qemu_get_be32 (f);

    for (i = 0; i < 4; ++i) {
        struct dma_regs *r = &d->regs[i];
        r->now[0] = qemu_get_be32 (f);
        r->now[1] = qemu_get_be32 (f);
        qemu_get_be16s (f, &r->base[0]);
        qemu_get_be16s (f, &r->base[1]);
        qemu_get_8s (f, &r->mode);
        qemu_get_8s (f, &r->page);
        qemu_get_8s (f, &r->pageh);
        qemu_get_8s (f, &r->dack);
        qemu_get_8s (f, &r->eop);
    }

    DMA_run();

    return 0;
}

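/* Wire up the standard PC/AT layout: the 8-bit (slave) controller lives at
 * ports 0x00-0x0f with page registers at 0x80-0x87, the 16-bit (master)
 * one at 0xc0-0xdf (registers spaced two ports apart, hence dshift = 1)
 * with page registers at 0x88-0x8f.  The optional "high page" registers at
 * 0x480/0x488 supply address bits above 16MB. */
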
void DMA_init (int high_page_enable)
{
    dma_init2(&dma_controllers[0], 0x00, 0, 0x80,
              high_page_enable ? 0x480 : -1);
    dma_init2(&dma_controllers[1], 0xc0, 1, 0x88,
              high_page_enable ? 0x488 : -1);
    register_savevm(NULL, "dma", 0, 1, dma_save, dma_load,
                    &dma_controllers[0]);
    register_savevm(NULL, "dma", 1, 1, dma_save, dma_load,
                    &dma_controllers[1]);

    dma_bh = qemu_bh_new(DMA_run_bh, NULL);
}
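
/* Usage sketch (hypothetical device code; my_dma_cb, MyState and s->fifo
 * are illustrative names, not part of this API): an ISA device such as a
 * floppy or sound card drives a channel roughly like this:
 *
 *     static int my_dma_cb (void *opaque, int nchan, int pos, int len)
 *     {
 *         MyState *s = opaque;
 *         // consume up to len - pos bytes of guest memory
 *         DMA_read_memory (nchan, s->fifo, pos, len - pos);
 *         return len;   // new position; reaching len completes the block
 *     }
 *
 *     DMA_register_channel (2, my_dma_cb, s);   // claim ISA channel 2
 *     DMA_hold_DREQ (2);     // raise DREQ; DMA_run calls back when unmasked
 *     ...
 *     DMA_release_DREQ (2);  // drop DREQ when the transfer is finished
 */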