/**************************************************************************
*    via-velocity.c: Etherboot device driver for the VIA 6120 Gigabit
*    Changes for Etherboot port:
*    Copyright (c) 2006 by Timothy Legge <tlegge (at) rogers.com>
*
*    This program is free software; you can redistribute it and/or modify
*    it under the terms of the GNU General Public License as published by
*    the Free Software Foundation; either version 2 of the License, or
*    (at your option) any later version.
*
*    This program is distributed in the hope that it will be useful,
*    but WITHOUT ANY WARRANTY; without even the implied warranty of
*    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
*    GNU General Public License for more details.
*
*    You should have received a copy of the GNU General Public License
*    along with this program; if not, write to the Free Software
*    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*    This driver is based on:
*        via-velocity.c: VIA Velocity VT6120, VT6122 Ethernet driver
*            The changes are (c) Copyright 2004, Red Hat Inc.
*                <alan (at) redhat.com>
*            Additional fixes and clean up: Francois Romieu
*
*        Original code:
*            Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
*            All rights reserved.
*            Author: Chuang Liang-Shing, AJ Jiang
*
*        Linux Driver Version 2.6.15.4
*
*    REVISION HISTORY:
*    ================
*
*    v1.0    03-06-2006    timlegge    Initial port of Linux driver
*
*    Indent Options: indent -kr -i8
***************************************************************************/

#include "etherboot.h"
#include "nic.h"
#include <gpxe/pci.h>
#include <gpxe/ethernet.h>

#include "via-velocity.h"

typedef int pci_power_t;

#define PCI_D0		((int) 0)
#define PCI_D1		((int) 1)
#define PCI_D2		((int) 2)
#define PCI_D3hot	((int) 3)
#define PCI_D3cold	((int) 4)
#define PCI_POWER_ERROR	((int) -1)

/* Condensed operations for readability. */
#define virt_to_le32desc(addr)  cpu_to_le32(virt_to_bus(addr))
#define le32desc_to_virt(addr)  bus_to_virt(le32_to_cpu(addr))

//FIXME: Move to pci.c
int pci_set_power_state(struct pci_device *dev, int state);

/* FIXME: Move BASE to the private structure */
static u32 BASE;

/* NIC specific static variables go here */
#define VELOCITY_PARAM(N,D) \
	static const int N[MAX_UNITS] = OPTION_DEFAULT;
/*	MODULE_PARM(N, "1-" __MODULE_STRING(MAX_UNITS) "i");\
	MODULE_PARM_DESC(N, D); */

VELOCITY_PARAM(RxDescriptors, "Number of receive descriptors");
VELOCITY_PARAM(TxDescriptors, "Number of transmit descriptors");

#define VLAN_ID_MIN	0
#define VLAN_ID_MAX	4095
#define VLAN_ID_DEF	0
/* VID_setting[] is used for setting the VID of NIC.
   0: default VID.
   1-4094: other VIDs.
*/
VELOCITY_PARAM(VID_setting, "802.1Q VLAN ID");

#define RX_THRESH_MIN	0
#define RX_THRESH_MAX	3
#define RX_THRESH_DEF	0
/* rx_thresh[] is used for controlling the receive fifo threshold.
   0: indicate the rxfifo threshold is 128 bytes.
   1: indicate the rxfifo threshold is 512 bytes.
   2: indicate the rxfifo threshold is 1024 bytes.
   3: indicate the rxfifo threshold is store & forward.
*/
VELOCITY_PARAM(rx_thresh, "Receive fifo threshold");

#define DMA_LENGTH_MIN	0
#define DMA_LENGTH_MAX	7
#define DMA_LENGTH_DEF	0

/* DMA_length[] is used for controlling the DMA length
   0: 8 DWORDs
   1: 16 DWORDs
   2: 32 DWORDs
   3: 64 DWORDs
   4: 128 DWORDs
   5: 256 DWORDs
   6: SF (flush till empty)
   7: SF (flush till empty)
*/
VELOCITY_PARAM(DMA_length, "DMA length");

#define TAGGING_DEF	0
/* enable_tagging[] is used for enabling 802.1Q VID tagging.
   0: disable VID setting (default).
   1: enable VID setting.
*/
VELOCITY_PARAM(enable_tagging, "Enable 802.1Q tagging");

#define IP_ALIG_DEF	0
/* IP_byte_align[] is used for IP header DWORD byte alignment
   0: indicate the IP header won't be DWORD byte aligned. (Default)
   1: indicate the IP header will be DWORD byte aligned.
   In some environments the IP header must be DWORD byte aligned,
   or the packet will be dropped when we receive it. (eg: IPVS)
*/
VELOCITY_PARAM(IP_byte_align, "Enable IP header dword aligned");

#define TX_CSUM_DEF	1
/* txcsum_offload[] is used for setting the checksum offload ability of NIC.
   (We only support RX checksum offload now)
   0: disable checksum offload.
   1: enable checksum offload. (Default)
*/
VELOCITY_PARAM(txcsum_offload, "Enable transmit packet checksum offload");

#define FLOW_CNTL_DEF	1
#define FLOW_CNTL_MIN	1
#define FLOW_CNTL_MAX	5

/* flow_control[] is used for setting the flow control ability of NIC.
   1: hardware default - AUTO (default). Use hardware default value in ANAR.
   2: enable TX flow control.
   3: enable RX flow control.
   4: enable RX/TX flow control.
   5: disable
*/
VELOCITY_PARAM(flow_control, "Enable flow control ability");

#define MED_LNK_DEF	0
#define MED_LNK_MIN	0
#define MED_LNK_MAX	4
/* speed_duplex[] is used for setting the speed and duplex mode of NIC.
   0: indicate autonegotiation for both speed and duplex mode
   1: indicate 100Mbps half duplex mode
   2: indicate 100Mbps full duplex mode
   3: indicate 10Mbps half duplex mode
   4: indicate 10Mbps full duplex mode

   Note:
   if the EEPROM has been set to force mode, this option is ignored
   by the driver.
*/
VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");

#define VAL_PKT_LEN_DEF	0
/* ValPktLen[] is used for controlling how frames with an invalid
   layer 2 length are handled.
   0: Receive frame with invalid layer 2 length (Default)
   1: Drop frame with invalid layer 2 length
*/
VELOCITY_PARAM(ValPktLen, "Receive or drop invalid 802.3 frames");

#define WOL_OPT_DEF	0
#define WOL_OPT_MIN	0
#define WOL_OPT_MAX	7
/* wol_opts[] is used for controlling wake on lan behavior.
   0: Wake up if received a magic packet. (Default)
   1: Wake up if link status is on/off.
   2: Wake up if received an arp packet.
   4: Wake up if received any unicast packet.
   These values can be summed up to support more than one option.
*/
VELOCITY_PARAM(wol_opts, "Wake On Lan options");

#define INT_WORKS_DEF	20
#define INT_WORKS_MIN	10
#define INT_WORKS_MAX	64

VELOCITY_PARAM(int_works, "Number of packets per interrupt service");
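
/* For reference, each VELOCITY_PARAM(N, D) above expands (per the macro at
 * the top of this file) to a per-unit array of option defaults, e.g.
 *
 *	static const int rx_thresh[MAX_UNITS] = OPTION_DEFAULT;
 *
 * MAX_UNITS and OPTION_DEFAULT are assumed to come from via-velocity.h; the
 * MODULE_PARM lines of the original Linux driver are left commented out.
 */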

/* The descriptors for this card are required to be aligned on
   64 byte boundaries.  As the align attribute does not guarantee alignment
   greater than the alignment of the start address (which for Etherboot
   is 16 bytes of alignment) it requires some extra steps.  Add 64 to the
   size of the array and let init_ring adjust the alignment. */

/* Define the TX Descriptor */
static u8 tx_ring[TX_DESC_DEF * sizeof(struct tx_desc) + 64];

/* Create a static buffer of size PKT_BUF_SZ for each TX Descriptor.
   All descriptors point to a part of this buffer */
static u8 txb[(TX_DESC_DEF * PKT_BUF_SZ) + 64];

/* Define the RX Descriptor */
static u8 rx_ring[RX_DESC_DEF * sizeof(struct rx_desc) + 64];

/* Create a static buffer of size PKT_BUF_SZ for each RX Descriptor.
   All descriptors point to a part of this buffer */
static u8 rxb[(RX_DESC_DEF * PKT_BUF_SZ) + 64];

static void velocity_init_info(struct pci_device *pdev,
			       struct velocity_info *vptr,
			       struct velocity_info_tbl *info);
static int velocity_get_pci_info(struct velocity_info *,
				 struct pci_device *pdev);
static int velocity_open(struct nic *nic, struct pci_device *pci);

static int velocity_soft_reset(struct velocity_info *vptr);
static void velocity_init_cam_filter(struct velocity_info *vptr);
static void mii_init(struct velocity_info *vptr, u32 mii_status);
static u32 velocity_get_opt_media_mode(struct velocity_info *vptr);
static void velocity_print_link_status(struct velocity_info *vptr);
static void safe_disable_mii_autopoll(struct mac_regs *regs);
static void enable_flow_control_ability(struct velocity_info *vptr);
static void enable_mii_autopoll(struct mac_regs *regs);
static int velocity_mii_read(struct mac_regs *, u8 byIdx, u16 * pdata);
static int velocity_mii_write(struct mac_regs *, u8 byMiiAddr, u16 data);
static u32 mii_check_media_mode(struct mac_regs *regs);
static u32 check_connection_type(struct mac_regs *regs);
static int velocity_set_media_mode(struct velocity_info *vptr,
				   u32 mii_status);

/*
 *	Internal board variants. At the moment we have only one.
 */

static struct velocity_info_tbl chip_info_table[] = {
	{CHIP_TYPE_VT6110,
	 "VIA Networking Velocity Family Gigabit Ethernet Adapter", 256, 1,
	 0x00FFFFFFUL},
	{0, NULL, 0, 0, 0}
};
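
/* Alignment note: the +64 slack on the static tx_ring/txb/rx_ring/rxb
 * arrays above is consumed in velocity_open() below, which rounds each
 * start address up to the next 64 byte boundary before handing it to the
 * hardware, roughly:
 *
 *	diff = 64 - (virt_to_bus(tx_ring) & 63);	// 1..64 bytes of padding
 *	vptr->td_rings = (struct tx_desc *) (tx_ring + diff);
 *
 * (sketch only; the actual code in velocity_open() computes the same value
 * with shifts).
 */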

/**
 *	velocity_set_int_opt	-	parser for integer options
 *	@opt: pointer to option value
 *	@val: value the user requested (or -1 for default)
 *	@min: lowest value allowed
 *	@max: highest value allowed
 *	@def: default value
 *	@name: property name
 *	@dev: device name
 *
 *	Set an integer property in the module options. This function does
 *	all the verification and checking as well as reporting so that
 *	we don't duplicate code for each option.
 */

static void velocity_set_int_opt(int *opt, int val, int min, int max,
				 int def, char *name, const char *devname)
{
	if (val == -1) {
		printf("%s: set value of parameter %s to %d\n",
		       devname, name, def);
		*opt = def;
	} else if (val < min || val > max) {
		printf
		    ("%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n",
		     devname, name, min, max);
		*opt = def;
	} else {
		printf("%s: set value of parameter %s to %d\n",
		       devname, name, val);
		*opt = val;
	}
}

/**
 *	velocity_set_bool_opt	-	parser for boolean options
 *	@opt: pointer to option value
 *	@val: value the user requested (or -1 for default)
 *	@def: default value (yes/no)
 *	@flag: numeric value to set for true.
 *	@name: property name
 *	@dev: device name
 *
 *	Set a boolean property in the module options. This function does
 *	all the verification and checking as well as reporting so that
 *	we don't duplicate code for each option.
 */

static void velocity_set_bool_opt(u32 * opt, int val, int def, u32 flag,
				  char *name, const char *devname)
{
	(*opt) &= (~flag);
	if (val == -1) {
		printf("%s: set parameter %s to %s\n",
		       devname, name, def ? "TRUE" : "FALSE");
		*opt |= (def ? flag : 0);
	} else if (val < 0 || val > 1) {
		printf
		    ("%s: the value of parameter %s is invalid, the valid range is (0-1)\n",
		     devname, name);
		*opt |= (def ? flag : 0);
	} else {
		printf("%s: set parameter %s to %s\n",
		       devname, name, val ? "TRUE" : "FALSE");
		*opt |= (val ? flag : 0);
	}
}

/**
 *	velocity_get_options	-	set options on device
 *	@opts: option structure for the device
 *	@index: index of option to use in module options array
 *	@devname: device name
 *
 *	Turn the module and command options into a single structure
 *	for the current device
 */

static void velocity_get_options(struct velocity_opt *opts, int index,
				 const char *devname)
{

	/* FIXME: Do the options need to be configurable? */
	velocity_set_int_opt(&opts->rx_thresh, -1, RX_THRESH_MIN,
			     RX_THRESH_MAX, RX_THRESH_DEF, "rx_thresh",
			     devname);
	velocity_set_int_opt(&opts->DMA_length, DMA_length[index],
			     DMA_LENGTH_MIN, DMA_LENGTH_MAX,
			     DMA_LENGTH_DEF, "DMA_length", devname);
	velocity_set_int_opt(&opts->numrx, RxDescriptors[index],
			     RX_DESC_MIN, RX_DESC_MAX, RX_DESC_DEF,
			     "RxDescriptors", devname);
	velocity_set_int_opt(&opts->numtx, TxDescriptors[index],
			     TX_DESC_MIN, TX_DESC_MAX, TX_DESC_DEF,
			     "TxDescriptors", devname);
	velocity_set_int_opt(&opts->vid, VID_setting[index], VLAN_ID_MIN,
			     VLAN_ID_MAX, VLAN_ID_DEF, "VID_setting",
			     devname);
	velocity_set_bool_opt(&opts->flags, enable_tagging[index],
			      TAGGING_DEF, VELOCITY_FLAGS_TAGGING,
			      "enable_tagging", devname);
	velocity_set_bool_opt(&opts->flags, txcsum_offload[index],
			      TX_CSUM_DEF, VELOCITY_FLAGS_TX_CSUM,
			      "txcsum_offload", devname);
	velocity_set_int_opt(&opts->flow_cntl, flow_control[index],
			     FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF,
			     "flow_control", devname);
	velocity_set_bool_opt(&opts->flags, IP_byte_align[index],
			      IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN,
			      "IP_byte_align", devname);
	velocity_set_bool_opt(&opts->flags, ValPktLen[index],
			      VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN,
			      "ValPktLen", devname);
	velocity_set_int_opt((void *) &opts->spd_dpx, speed_duplex[index],
			     MED_LNK_MIN,
			     MED_LNK_MAX, MED_LNK_DEF,
			     "Media link mode", devname);
	velocity_set_int_opt((int *) &opts->wol_opts, wol_opts[index],
			     WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF,
			     "Wake On Lan options", devname);
	velocity_set_int_opt((int *) &opts->int_works, int_works[index],
			     INT_WORKS_MIN, INT_WORKS_MAX, INT_WORKS_DEF,
			     "Interrupt service works", devname);
	opts->numrx = (opts->numrx & ~3);
}

/**
 *	velocity_init_cam_filter	-	initialise CAM
 *	@vptr: velocity to program
 *
 *	Initialize the content addressable memory used for filters. Load
 *	appropriately according to the presence of VLAN
 */

static void velocity_init_cam_filter(struct velocity_info *vptr)
{
	struct mac_regs *regs = vptr->mac_regs;

	/* Turn on MCFG_PQEN, turn off MCFG_RTGOPT */
	WORD_REG_BITS_SET(MCFG_PQEN, MCFG_RTGOPT, &regs->MCFG);
	WORD_REG_BITS_ON(MCFG_VIDFR, &regs->MCFG);

	/* Disable all CAMs */
	memset(vptr->vCAMmask, 0, sizeof(u8) * 8);
	memset(vptr->mCAMmask, 0, sizeof(u8) * 8);
	mac_set_cam_mask(regs, vptr->vCAMmask, VELOCITY_VLAN_ID_CAM);
	mac_set_cam_mask(regs, vptr->mCAMmask, VELOCITY_MULTICAST_CAM);

	/* Enable first VCAM */
	if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
		/* If the tagging option is enabled and the VLAN ID is not
		   zero, then turn on MCFG_RTGOPT as well */
		if (vptr->options.vid != 0)
			WORD_REG_BITS_ON(MCFG_RTGOPT, &regs->MCFG);

		mac_set_cam(regs, 0, (u8 *) & (vptr->options.vid),
			    VELOCITY_VLAN_ID_CAM);
		vptr->vCAMmask[0] |= 1;
		mac_set_cam_mask(regs, vptr->vCAMmask,
				 VELOCITY_VLAN_ID_CAM);
	} else {
		u16 temp = 0;
		mac_set_cam(regs, 0, (u8 *) & temp, VELOCITY_VLAN_ID_CAM);
		temp = 1;
		mac_set_cam_mask(regs, (u8 *) & temp,
				 VELOCITY_VLAN_ID_CAM);
	}
}

static inline void velocity_give_many_rx_descs(struct velocity_info *vptr)
{
	struct mac_regs *regs = vptr->mac_regs;
	int avail, dirty, unusable;

	/*
	 * RD number must be equal to 4X per hardware spec
	 * (programming guide rev 1.20, p.13)
	 */
	if (vptr->rd_filled < 4)
		return;

	wmb();

	unusable = vptr->rd_filled & 0x0003;
	dirty = vptr->rd_dirty - unusable;
	for (avail = vptr->rd_filled & 0xfffc; avail; avail--) {
		dirty = (dirty > 0) ? dirty - 1 : vptr->options.numrx - 1;
		//	printf("return dirty: %d\n", dirty);
		vptr->rd_ring[dirty].rdesc0.owner = OWNED_BY_NIC;
	}

	writew(vptr->rd_filled & 0xfffc, &regs->RBRDU);
	vptr->rd_filled = unusable;
}

static int velocity_rx_refill(struct velocity_info *vptr)
{
	int dirty = vptr->rd_dirty, done = 0, ret = 0;

	//	printf("rx_refill - rd_curr = %d, dirty = %d\n", vptr->rd_curr, dirty);
	do {
		struct rx_desc *rd = vptr->rd_ring + dirty;

		/* Fine for an all zero Rx desc at init time as well */
		if (rd->rdesc0.owner == OWNED_BY_NIC)
			break;
		//	printf("rx_refill - after owner %d\n", dirty);

		rd->inten = 1;
		rd->pa_high = 0;
		rd->rdesc0.len = cpu_to_le32(vptr->rx_buf_sz);

		done++;
		dirty = (dirty < vptr->options.numrx - 1) ?
		    dirty + 1 : 0;
	} while (dirty != vptr->rd_curr);

	if (done) {
		//	printf("\nGive Back Desc\n");
		vptr->rd_dirty = dirty;
		vptr->rd_filled += done;
		velocity_give_many_rx_descs(vptr);
	}

	return ret;
}

extern void hex_dump(const char *data, const unsigned int len);
/**************************************************************************
POLL - Wait for a frame
***************************************************************************/
static int velocity_poll(struct nic *nic, int retrieve)
{
	/* Work out whether or not there's an ethernet packet ready to
	 * read.  Return 0 if not.
	 */

	int rd_curr = vptr->rd_curr % RX_DESC_DEF;
	struct rx_desc *rd = &(vptr->rd_ring[rd_curr]);

	if (rd->rdesc0.owner == OWNED_BY_NIC)
		return 0;
	rmb();

	if ( ! retrieve ) return 1;

	/*
	 * Don't drop CE or RL error frames even though RXOK is off
	 */
	if ((rd->rdesc0.RSR & RSR_RXOK)
	    || (!(rd->rdesc0.RSR & RSR_RXOK)
		&& (rd->rdesc0.RSR & (RSR_CE | RSR_RL)))) {

		nic->packetlen = rd->rdesc0.len;
		//	vptr->rxb + (rd_curr * PKT_BUF_SZ)
		memcpy(nic->packet, bus_to_virt(rd->pa_low),
		       nic->packetlen - 4);

		vptr->rd_curr++;
		vptr->rd_curr = vptr->rd_curr % RX_DESC_DEF;
		velocity_rx_refill(vptr);
		return 1;
	}
	return 0;
}

#define TX_TIMEOUT  (1000)
/**************************************************************************
TRANSMIT - Transmit a frame
***************************************************************************/
static void velocity_transmit(struct nic *nic, const char *dest,	/* Destination */
			      unsigned int type,	/* Type */
			      unsigned int size,	/* size */
			      const char *packet)
{				/* Packet */
	u16 nstype;
	u32 to;
	u8 *ptxb;
	unsigned int pktlen;
	struct tx_desc *td_ptr;

	int entry = vptr->td_curr % TX_DESC_DEF;
	td_ptr = &(vptr->td_rings[entry]);

	/* point to the current txb in case multiple tx_rings are used */
	ptxb = vptr->txb + (entry * PKT_BUF_SZ);
	memcpy(ptxb, dest, ETH_ALEN);	/* Destination */
	memcpy(ptxb + ETH_ALEN, nic->node_addr, ETH_ALEN);	/* Source */
	nstype = htons((u16) type);	/* Type */
	memcpy(ptxb + 2 * ETH_ALEN, (u8 *) & nstype, 2);	/* Type */
	memcpy(ptxb + ETH_HLEN, packet, size);

	td_ptr->tdesc1.TCPLS = TCPLS_NORMAL;
	td_ptr->tdesc1.TCR = TCR0_TIC;
	td_ptr->td_buf[0].queue = 0;

	size += ETH_HLEN;
	while (size < ETH_ZLEN)	/* pad to min length */
		ptxb[size++] = '\0';

	if (size < ETH_ZLEN) {
		//	printf("Pad that packet\n");
		pktlen = ETH_ZLEN;
		//	memcpy(ptxb, skb->data, skb->len);
		memset(ptxb + size, 0, ETH_ZLEN - size);

		vptr->td_rings[entry].tdesc0.pktsize = pktlen;
		vptr->td_rings[entry].td_buf[0].pa_low = virt_to_bus(ptxb);
		vptr->td_rings[entry].td_buf[0].pa_high &=
		    cpu_to_le32(0xffff0000UL);
		vptr->td_rings[entry].td_buf[0].bufsize =
		    vptr->td_rings[entry].tdesc0.pktsize;
		vptr->td_rings[entry].tdesc1.CMDZ = 2;
	} else {
		//	printf("Correct size packet\n");
		td_ptr->tdesc0.pktsize = size;
		td_ptr->td_buf[0].pa_low = virt_to_bus(ptxb);
		td_ptr->td_buf[0].pa_high = 0;
		td_ptr->td_buf[0].bufsize = td_ptr->tdesc0.pktsize;
		//	tdinfo->nskb_dma = 1;
		td_ptr->tdesc1.CMDZ = 2;
	}

	if (vptr->flags & VELOCITY_FLAGS_TAGGING) {
		td_ptr->tdesc1.pqinf.VID = (vptr->options.vid & 0xfff);
		td_ptr->tdesc1.pqinf.priority = 0;
		td_ptr->tdesc1.pqinf.CFI = 0;
		td_ptr->tdesc1.TCR |= TCR0_VETAG;
	}

	vptr->td_curr = (entry + 1);

	{

		int prev = entry - 1;

		if (prev < 0)
			prev = TX_DESC_DEF - 1;
		td_ptr->tdesc0.owner |= OWNED_BY_NIC;
		td_ptr = &(vptr->td_rings[prev]);
		td_ptr->td_buf[0].queue = 1;
		mac_tx_queue_wake(vptr->mac_regs, 0);

	}

	to = currticks() + TX_TIMEOUT;
	while ((td_ptr->tdesc0.owner & OWNED_BY_NIC) && (currticks() < to));	/* wait */

	if (currticks() >= to) {
		printf("TX Time Out");
	}

}

/**************************************************************************
DISABLE - Turn off ethernet interface
***************************************************************************/
static void velocity_disable(struct nic *nic __unused)
{
	/* put the card in its initial state */
	/* This function serves 3 purposes.
	 * It disables DMA and interrupts so we don't receive
	 * unexpected packets or interrupts from the card after
	 * etherboot has finished.
	 * It frees resources so etherboot may use
	 * this driver on another interface.
	 * It allows etherboot to reinitialize the interface
	 * if something goes wrong.
	 */
	struct mac_regs *regs = vptr->mac_regs;
	mac_disable_int(regs);
	writel(CR0_STOP, &regs->CR0Set);
	writew(0xFFFF, &regs->TDCSRClr);
	writeb(0xFF, &regs->RDCSRClr);
	safe_disable_mii_autopoll(regs);
	mac_clear_isr(regs);

	/* Power down the chip */
	//	pci_set_power_state(vptr->pdev, PCI_D3hot);

	vptr->flags &= (~VELOCITY_FLAGS_OPENED);
}

/**************************************************************************
IRQ - handle interrupts
***************************************************************************/
static void velocity_irq(struct nic *nic __unused, irq_action_t action)
{
	/* This routine is somewhat optional.  Etherboot itself
	 * doesn't use interrupts, but they are required under some
	 * circumstances when we're acting as a PXE stack.
	 *
	 * If you don't implement this routine, the only effect will
	 * be that your driver cannot be used via Etherboot's UNDI
	 * API.  This won't affect programs that use only the UDP
	 * portion of the PXE API, such as pxelinux.
	 */

	switch (action) {
	case DISABLE:
	case ENABLE:
		/* Set receive interrupt enabled/disabled state */
		/*
		   outb ( action == ENABLE ?
		   IntrMaskEnabled : IntrMaskDisabled,
		   nic->ioaddr + IntrMaskRegister );
		 */
		break;
	case FORCE:
		/* Force NIC to generate a receive interrupt */
		/*
		   outb ( ForceInterrupt, nic->ioaddr + IntrForceRegister );
		 */
		break;
	}
}

static struct nic_operations velocity_operations = {
	.connect = dummy_connect,
	.poll = velocity_poll,
	.transmit = velocity_transmit,
	.irq = velocity_irq,
};

/**************************************************************************
PROBE - Look for an adapter, this routine's visible to the outside
***************************************************************************/
static int velocity_probe( struct nic *nic, struct pci_device *pci)
{
	int ret, i;
	struct mac_regs *regs;

	printf("via-velocity.c: Found %s Vendor=0x%hX Device=0x%hX\n",
	       pci->driver_name, pci->vendor, pci->device);

	/* point to private storage */
	vptr = &vptx;
	info = chip_info_table;

	velocity_init_info(pci, vptr, info);

	//FIXME: pci_enable_device(pci);
	//FIXME: pci_set_power_state(pci, PCI_D0);

	ret = velocity_get_pci_info(vptr, pci);
	if (ret < 0) {
		printf("Failed to find PCI device.\n");
		return 0;
	}

	regs = ioremap(vptr->memaddr, vptr->io_size);
	if (regs == NULL) {
		printf("Unable to remap io\n");
		return 0;
	}

	vptr->mac_regs = regs;

	BASE = vptr->ioaddr;

	printf("Chip ID: %hX\n", vptr->chip_id);

	for (i = 0; i < 6; i++)
		nic->node_addr[i] = readb(&regs->PAR[i]);

	DBG ( "%s: %s at ioaddr %#hX\n", pci->driver_name,
	      eth_ntoa ( nic->node_addr ), (unsigned int) BASE );

	velocity_get_options(&vptr->options, 0, pci->driver_name);

	/*
	 *	Mask out the options that cannot be set on the chip
	 */
	vptr->options.flags &= 0x00FFFFFFUL;	//info->flags = 0x00FFFFFFUL;

	/*
	 *	Enable the chip specified capabilities
	 */

	vptr->flags =
	    vptr->options.
	    flags | (0x00FFFFFFUL /*info->flags */  & 0xFF000000UL);

	vptr->wol_opts = vptr->options.wol_opts;
	vptr->flags |= VELOCITY_FLAGS_WOL_ENABLED;

	vptr->phy_id = MII_GET_PHY_ID(vptr->mac_regs);

	if (vptr->flags & VELOCITY_FLAGS_TX_CSUM) {
		printf("features missing\n");
	}

	/* and leave the chip powered down */
	// FIXME: pci_set_power_state(pci, PCI_D3hot);

	check_connection_type(vptr->mac_regs);
	velocity_open(nic, pci);

	/* store NIC parameters */
	nic->nic_op = &velocity_operations;
	return 1;
}
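
/* Note: as an Etherboot/gPXE probe routine, velocity_probe() returns 1 on
 * success and 0 on failure, unlike the Linux driver it was ported from,
 * which reports failures as negative errno values.
 */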

//#define IORESOURCE_IO	0x00000100	/* Resource type */

/**
 *	velocity_init_info	-	init private data
 *	@pdev: PCI device
 *	@vptr: Velocity info
 *	@info: Board type
 *
 *	Set up the initial velocity_info struct for the device that has been
 *	discovered.
 */

static void velocity_init_info(struct pci_device *pdev,
			       struct velocity_info *vptr,
			       struct velocity_info_tbl *info)
{
	memset(vptr, 0, sizeof(struct velocity_info));

	vptr->pdev = pdev;
	vptr->chip_id = info->chip_id;
	vptr->io_size = info->io_size;
	vptr->num_txq = info->txqueue;
	vptr->multicast_limit = MCAM_SIZE;

	printf
	    ("chip_id: 0x%hX, io_size: %d, num_txq %d, multicast_limit: %d\n",
	     vptr->chip_id, (unsigned int) vptr->io_size, vptr->num_txq,
	     vptr->multicast_limit);
	printf("Name: %s\n", info->name);

	//	spin_lock_init(&vptr->lock);
	//	INIT_LIST_HEAD(&vptr->list);
}

/**
 *	velocity_get_pci_info	-	retrieve PCI info for device
 *	@vptr: velocity device
 *	@pdev: PCI device it matches
 *
 *	Retrieve the PCI configuration space data that interests us from
 *	the kernel PCI layer
 */

#define IORESOURCE_IO		0x00000100	/* Resource type */
#define IORESOURCE_PREFETCH	0x00001000	/* No side effects */

#define IORESOURCE_MEM		0x00000200
#define BAR_0	0
#define BAR_1	1
#define BAR_5	5
#define PCI_BASE_ADDRESS_SPACE		0x01	/* 0 = memory, 1 = I/O */
#define PCI_BASE_ADDRESS_SPACE_IO	0x01
#define PCI_BASE_ADDRESS_SPACE_MEMORY	0x00
#define PCI_BASE_ADDRESS_MEM_TYPE_MASK	0x06
#define PCI_BASE_ADDRESS_MEM_TYPE_32	0x00	/* 32 bit address */
#define PCI_BASE_ADDRESS_MEM_TYPE_1M	0x02	/* Below 1M [obsolete] */
#define PCI_BASE_ADDRESS_MEM_TYPE_64	0x04	/* 64 bit address */
#define PCI_BASE_ADDRESS_MEM_PREFETCH	0x08	/* prefetchable? */
//#define PCI_BASE_ADDRESS_MEM_MASK	(~0x0fUL)
//#define PCI_BASE_ADDRESS_IO_MASK	(~0x03UL)

unsigned long pci_resource_flags(struct pci_device *pdev, unsigned int bar)
{
	uint32_t l, sz;
	unsigned long flags = 0;

	pci_read_config_dword(pdev, bar, &l);
	pci_write_config_dword(pdev, bar, ~0);
	pci_read_config_dword(pdev, bar, &sz);
	pci_write_config_dword(pdev, bar, l);

	if (!sz || sz == 0xffffffff)
		printf("Weird size\n");
	if (l == 0xffffffff)
		l = 0;
	if ((l & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_MEMORY) {
		/* sz = pci_size(l, sz, PCI_BASE_ADDRESS_MEM_MASK);
		   if (!sz)
		   continue;
		   res->start = l & PCI_BASE_ADDRESS_MEM_MASK;
		 */
		flags |= l & ~PCI_BASE_ADDRESS_MEM_MASK;
		printf("Memory Resource\n");
	} else {
		//	sz = pci_size(l, sz, PCI_BASE_ADDRESS_IO_MASK & 0xffff);
		///	if (!sz)
		///	continue;
		//	res->start = l & PCI_BASE_ADDRESS_IO_MASK;
		flags |= l & ~PCI_BASE_ADDRESS_IO_MASK;
		printf("I/O Resource\n");
	}
	if (flags & PCI_BASE_ADDRESS_SPACE_IO) {
		printf("Why is it here\n");
		flags |= IORESOURCE_IO;
	} else {
		printf("here\n");
		//flags &= ~IORESOURCE_IO;
	}

	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;

	return flags;
}

static int velocity_get_pci_info(struct velocity_info *vptr,
				 struct pci_device *pdev)
{
	if (pci_read_config_byte(pdev, PCI_REVISION_ID, &vptr->rev_id) < 0) {
		printf("DEBUG: pci_read_config_byte failed\n");
		return -1;
	}

	adjust_pci_device(pdev);

	vptr->ioaddr = pci_bar_start(pdev, PCI_BASE_ADDRESS_0);
	vptr->memaddr = pci_bar_start(pdev, PCI_BASE_ADDRESS_1);

	printf("Looking for I/O Resource - Found:");
	if (!
	    (pci_resource_flags(pdev, PCI_BASE_ADDRESS_0) & IORESOURCE_IO))
	{
		printf
		    ("DEBUG: region #0 is not an I/O resource, aborting.\n");
		return -1;
	}

	printf("Looking for Memory Resource - Found:");
	if ((pci_resource_flags(pdev, PCI_BASE_ADDRESS_1) & IORESOURCE_IO)) {
		printf("DEBUG: region #1 is an I/O resource, aborting.\n");
		return -1;
	}

	if (pci_bar_size(pdev, PCI_BASE_ADDRESS_1) < 256) {
		printf("DEBUG: region #1 is too small.\n");
		return -1;
	}
	vptr->pdev = pdev;

	return 0;
}

/**
 *	velocity_print_link_status	-	link status reporting
 *	@vptr: velocity to report on
 *
 *	Turn the link status of the velocity card into a kernel log
 *	description of the new link state, detailing speed and duplex
 *	status
 */

static void velocity_print_link_status(struct velocity_info *vptr)
{

	if (vptr->mii_status & VELOCITY_LINK_FAIL) {
		printf("failed to detect cable link\n");
	} else if (vptr->options.spd_dpx == SPD_DPX_AUTO) {
		printf("Link autonegotiation");

		if (vptr->mii_status & VELOCITY_SPEED_1000)
			printf(" speed 1000M bps");
		else if (vptr->mii_status & VELOCITY_SPEED_100)
			printf(" speed 100M bps");
		else
			printf(" speed 10M bps");

		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			printf(" full duplex\n");
		else
			printf(" half duplex\n");
	} else {
		printf("Link forced");
		switch (vptr->options.spd_dpx) {
		case SPD_DPX_100_HALF:
			printf(" speed 100M bps half duplex\n");
			break;
		case SPD_DPX_100_FULL:
			printf(" speed 100M bps full duplex\n");
			break;
		case SPD_DPX_10_HALF:
			printf(" speed 10M bps half duplex\n");
			break;
		case SPD_DPX_10_FULL:
			printf(" speed 10M bps full duplex\n");
			break;
		default:
			break;
		}
	}
}

/**
 *	velocity_rx_reset	-	handle a receive reset
 *	@vptr: velocity we are resetting
 *
 *	Reset the ownership and status for the receive ring side.
 *	Hand all the receive queue to the NIC.
 */

static void velocity_rx_reset(struct velocity_info *vptr)
{

	struct mac_regs *regs = vptr->mac_regs;
	int i;

	//vptr->rd_dirty = vptr->rd_filled = vptr->rd_curr = 0;

	/*
	 *	Init state, all RD entries belong to the NIC
	 */
	for (i = 0; i < vptr->options.numrx; ++i)
		vptr->rd_ring[i].rdesc0.owner = OWNED_BY_NIC;

	writew(RX_DESC_DEF, &regs->RBRDU);
	writel(virt_to_le32desc(vptr->rd_ring), &regs->RDBaseLo);
	writew(0, &regs->RDIdx);
	writew(RX_DESC_DEF - 1, &regs->RDCSize);
}
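
/* RX ownership handoff: velocity_rx_reset() above and the refill path
 * (velocity_rx_refill()/velocity_give_many_rx_descs()) both return
 * descriptors to the hardware by setting rdesc0.owner to OWNED_BY_NIC and
 * then advertising the count via the RBRDU register.  Per the hardware
 * programming guide note in velocity_give_many_rx_descs(), that count must
 * be a multiple of four, hence the rd_filled & 0xfffc masking there.
 */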

/**
 *	velocity_init_registers	-	initialise MAC registers
 *	@vptr: velocity to init
 *	@type: type of initialisation (hot or cold)
 *
 *	Initialise the MAC on a reset or on first set up on the
 *	hardware.
 */

static void velocity_init_registers(struct nic *nic,
				    struct velocity_info *vptr,
				    enum velocity_init_type type)
{
	struct mac_regs *regs = vptr->mac_regs;
	int i, mii_status;

	mac_wol_reset(regs);

	switch (type) {
	case VELOCITY_INIT_RESET:
	case VELOCITY_INIT_WOL:

		//netif_stop_queue(vptr->dev);

		/*
		 *	Reset RX to prevent RX pointer not on the 4X location
		 */
		velocity_rx_reset(vptr);
		mac_rx_queue_run(regs);
		mac_rx_queue_wake(regs);

		mii_status = velocity_get_opt_media_mode(vptr);

		if (velocity_set_media_mode(vptr, mii_status) !=
		    VELOCITY_LINK_CHANGE) {
			velocity_print_link_status(vptr);
			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
				printf("Link Failed\n");
			//	netif_wake_queue(vptr->dev);
		}

		enable_flow_control_ability(vptr);

		mac_clear_isr(regs);
		writel(CR0_STOP, &regs->CR0Clr);
		//writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
		       &regs->CR0Set);
		break;

	case VELOCITY_INIT_COLD:
	default:
		/*
		 *	Do reset
		 */
		velocity_soft_reset(vptr);
		mdelay(5);

		mac_eeprom_reload(regs);
		for (i = 0; i < 6; i++) {
			writeb(nic->node_addr[i], &(regs->PAR[i]));
		}
		/*
		 *	clear Pre_ACPI bit.
		 */
		BYTE_REG_BITS_OFF(CFGA_PACPI, &(regs->CFGA));
		mac_set_rx_thresh(regs, vptr->options.rx_thresh);
		mac_set_dma_length(regs, vptr->options.DMA_length);

		writeb(WOLCFG_SAM | WOLCFG_SAB, &regs->WOLCFGSet);
		/*
		 *	Back off algorithm uses original IEEE standard
		 */
		BYTE_REG_BITS_SET(CFGB_OFSET,
				  (CFGB_CRANDOM | CFGB_CAP | CFGB_MBA |
				   CFGB_BAKOPT), &regs->CFGB);

		/*
		 *	Init CAM filter
		 */
		velocity_init_cam_filter(vptr);

		/*
		 *	Set packet filter: Receive directed and broadcast address
		 */
		//FIXME Multicast	velocity_set_multi(nic);

		/*
		 *	Enable MII auto-polling
		 */
		enable_mii_autopoll(regs);

		vptr->int_mask = INT_MASK_DEF;

		writel(virt_to_le32desc(vptr->rd_ring), &regs->RDBaseLo);
		writew(vptr->options.numrx - 1, &regs->RDCSize);
		mac_rx_queue_run(regs);
		mac_rx_queue_wake(regs);

		writew(vptr->options.numtx - 1, &regs->TDCSize);

		//	for (i = 0; i < vptr->num_txq; i++) {
		writel(virt_to_le32desc(vptr->td_rings),
		       &(regs->TDBaseLo[0]));
		mac_tx_queue_run(regs, 0);
		//	}

		init_flow_control_register(vptr);

		writel(CR0_STOP, &regs->CR0Clr);
		writel((CR0_DPOLL | CR0_TXON | CR0_RXON | CR0_STRT),
		       &regs->CR0Set);

		mii_status = velocity_get_opt_media_mode(vptr);
		//	netif_stop_queue(vptr->dev);

		mii_init(vptr, mii_status);

		if (velocity_set_media_mode(vptr, mii_status) !=
		    VELOCITY_LINK_CHANGE) {
			velocity_print_link_status(vptr);
			if (!(vptr->mii_status & VELOCITY_LINK_FAIL))
				printf("Link Failed\n");
			//	netif_wake_queue(vptr->dev);
		}

		enable_flow_control_ability(vptr);
		mac_hw_mibs_init(regs);
		mac_write_int_mask(vptr->int_mask, regs);
		mac_clear_isr(regs);

	}
	velocity_print_link_status(vptr);
}

/**
 *	velocity_soft_reset	-	soft reset
 *	@vptr: velocity to reset
 *
 *	Kick off a soft reset of the velocity adapter and then poll
 *	until the reset sequence has completed before returning.
 */

static int velocity_soft_reset(struct velocity_info *vptr)
{
	struct mac_regs *regs = vptr->mac_regs;
	unsigned int i = 0;

	writel(CR0_SFRST, &regs->CR0Set);

	for (i = 0; i < W_MAX_TIMEOUT; i++) {
		udelay(5);
		if (!DWORD_REG_BITS_IS_ON(CR0_SFRST, &regs->CR0Set))
			break;
	}

	if (i == W_MAX_TIMEOUT) {
		writel(CR0_FORSRST, &regs->CR0Set);
		/* FIXME: PCI POSTING */
		/* delay 2ms */
		mdelay(2);
	}
	return 0;
}

/**
 *	velocity_init_rings	-	set up DMA rings
 *	@vptr: Velocity to set up
 *
 *	Allocate PCI mapped DMA rings for the receive and transmit layer
 *	to use.
 */

static int velocity_init_rings(struct velocity_info *vptr)
{

	int idx;

	vptr->rd_curr = 0;
	vptr->td_curr = 0;
	memset(vptr->td_rings, 0, TX_DESC_DEF * sizeof(struct tx_desc));
	memset(vptr->rd_ring, 0, RX_DESC_DEF * sizeof(struct rx_desc));
	//	memset(vptr->tx_buffs, 0, TX_DESC_DEF * PKT_BUF_SZ);

	for (idx = 0; idx < RX_DESC_DEF; idx++) {
		vptr->rd_ring[idx].rdesc0.RSR = 0;
		vptr->rd_ring[idx].rdesc0.len = 0;
		vptr->rd_ring[idx].rdesc0.reserved = 0;
		vptr->rd_ring[idx].rdesc0.owner = 0;
		vptr->rd_ring[idx].len = cpu_to_le32(vptr->rx_buf_sz);
		vptr->rd_ring[idx].inten = 1;
		/* Each descriptor gets its own PKT_BUF_SZ chunk of rxb */
		vptr->rd_ring[idx].pa_low =
		    virt_to_bus(vptr->rxb + (idx * PKT_BUF_SZ));
		vptr->rd_ring[idx].pa_high = 0;
		vptr->rd_ring[idx].rdesc0.owner = OWNED_BY_NIC;
	}

	/*	for (i = 0; idx < TX_DESC_DEF; idx++ ) {
		vptr->td_rings[idx].tdesc1.TCPLS = TCPLS_NORMAL;
		vptr->td_rings[idx].tdesc1.TCR = TCR0_TIC;
		vptr->td_rings[idx].td_buf[0].queue = 0;
		vptr->td_rings[idx].tdesc0.owner = ~OWNED_BY_NIC;
		vptr->td_rings[idx].tdesc0.pktsize = 0;
		vptr->td_rings[idx].td_buf[0].pa_low = cpu_to_le32(virt_to_bus(vptr->txb + (idx * PKT_BUF_SZ)));
		vptr->td_rings[idx].td_buf[0].pa_high = 0;
		vptr->td_rings[idx].td_buf[0].bufsize = 0;
		vptr->td_rings[idx].tdesc1.CMDZ = 2;
	}
	*/
	return 0;
}

/**
 *	velocity_open	-	interface activation callback
 *	@dev: network layer device to open
 *
 *	Called when the network layer brings the interface up. Returns
 *	a negative posix error code on failure, or zero on success.
 *
 *	All the ring allocation and set up is done on open for this
 *	adapter to minimise memory usage when inactive
 */

#define PCI_BYTE_REG_BITS_ON(x,i,p) do{\
	u8 byReg;\
	pci_read_config_byte((p), (i), &(byReg));\
	(byReg) |= (x);\
	pci_write_config_byte((p), (i), (byReg));\
} while (0)
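
/* Usage sketch for PCI_BYTE_REG_BITS_ON(): it performs a read-modify-write
 * of a single byte of PCI configuration space, e.g.
 *
 *	PCI_BYTE_REG_BITS_ON(COMMAND_BUSM, PCI_REG_COMMAND, pci);
 *
 * would set the bus-master bit; the real calls are currently commented out
 * in velocity_open() below.
 */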

//
// Registers in the PCI configuration space
//
#define PCI_REG_COMMAND		0x04	//
#define PCI_REG_MODE0		0x60	//
#define PCI_REG_MODE1		0x61	//
#define PCI_REG_MODE2		0x62	//
#define PCI_REG_MODE3		0x63	//
#define PCI_REG_DELAY_TIMER	0x64	//

// Bits in the (MODE2, 0x62) register
//
#define MODE2_PCEROPT	0x80	// take PCI bus ERror as a fatal error and shut down from software control
#define MODE2_TXQ16	0x40	// TX write-back Queue control. 0->32 entries available in Tx write-back queue, 1->16 entries
#define MODE2_TXPOST	0x08	// (Not supported in VT3119)
#define MODE2_AUTOOPT	0x04	// (VT3119 GHCI without such behavior)
#define MODE2_MODE10T	0x02	// used to control tx Threshold for 10M case
#define MODE2_TCPLSOPT	0x01	// TCP large send field update disable, hardware will not update related fields, leave it to software.

//
// Bits in the MODE3 register
//
#define MODE3_MIION	0x04	// MII symbol coding error detect enable ??

// Bits in the (COMMAND, 0x04) register
#define COMMAND_BUSM	0x04
#define COMMAND_WAIT	0x80

static int velocity_open(struct nic *nic, struct pci_device *pci __unused)
{
	int ret;

	u8 diff;
	u32 TxPhyAddr, RxPhyAddr;
	u32 TxBufPhyAddr, RxBufPhyAddr;
	vptr->TxDescArrays = tx_ring;
	if (vptr->TxDescArrays == 0)
		printf("Allocation error");

	/* Tx Descriptor needs 64 bytes alignment */
	TxPhyAddr = virt_to_bus(vptr->TxDescArrays);
	printf("Unaligned Address : %X\n", TxPhyAddr);
	diff = 64 - (TxPhyAddr - ((TxPhyAddr >> 6) << 6));
	TxPhyAddr += diff;
	vptr->td_rings = (struct tx_desc *) (vptr->TxDescArrays + diff);

	printf("Aligned Address: %lX\n", virt_to_bus(vptr->td_rings));
	vptr->tx_buffs = txb;
	/* Tx Buffer needs 64 bytes alignment */
	TxBufPhyAddr = virt_to_bus(vptr->tx_buffs);
	diff = 64 - (TxBufPhyAddr - ((TxBufPhyAddr >> 6) << 6));
	TxBufPhyAddr += diff;
	vptr->txb = (unsigned char *) (vptr->tx_buffs + diff);

	vptr->RxDescArrays = rx_ring;
	/* Rx Descriptor needs 64 bytes alignment */
	RxPhyAddr = virt_to_bus(vptr->RxDescArrays);
	diff = 64 - (RxPhyAddr - ((RxPhyAddr >> 6) << 6));
	RxPhyAddr += diff;
	vptr->rd_ring = (struct rx_desc *) (vptr->RxDescArrays + diff);

	vptr->rx_buffs = rxb;
	/* Rx Buffer needs 64 bytes alignment */
	RxBufPhyAddr = virt_to_bus(vptr->rx_buffs);
	diff = 64 - (RxBufPhyAddr - ((RxBufPhyAddr >> 6) << 6));
	RxBufPhyAddr += diff;
	vptr->rxb = (unsigned char *) (vptr->rx_buffs + diff);

	if (vptr->TxDescArrays == NULL || vptr->RxDescArrays == NULL) {
		printf("Allocate tx_ring or rd_ring failed\n");
		return 0;
	}

	vptr->rx_buf_sz = PKT_BUF_SZ;
	/*
	   // turn this on to avoid retry forever
	   PCI_BYTE_REG_BITS_ON(MODE2_PCEROPT, PCI_REG_MODE2, pci);
	   // some legacy BIOSes and OSes don't open the BusM
	   // bit in PCI configuration space. So, turn it on.
	   PCI_BYTE_REG_BITS_ON(COMMAND_BUSM, PCI_REG_COMMAND, pci);
	   // turn this on to detect MII coding error
	   PCI_BYTE_REG_BITS_ON(MODE3_MIION, PCI_REG_MODE3, pci);
	 */
	ret = velocity_init_rings(vptr);

	/* Ensure chip is running */
	//FIXME: pci_set_power_state(vptr->pdev, PCI_D0);

	velocity_init_registers(nic, vptr, VELOCITY_INIT_COLD);
	mac_write_int_mask(0, vptr->mac_regs);
	//	_int(vptr->mac_regs);
	//mac_enable_int(vptr->mac_regs);

	vptr->flags |= VELOCITY_FLAGS_OPENED;
	return 1;

}

/*
 *	MII access, media link mode setting functions
 */

/**
 *	mii_init	-	set up MII
 *	@vptr: velocity adapter
 *	@mii_status: link status
 *
 *	Set up the PHY for the current link state.
 */

static void mii_init(struct velocity_info *vptr, u32 mii_status __unused)
{
	u16 BMCR;

	switch (PHYID_GET_PHY_ID(vptr->phy_id)) {
	case PHYID_CICADA_CS8201:
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR,
				 vptr->mac_regs);
		/*
		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
		 *	off in NWay-forced half mode for NWay-forced v.s.
		 *	legacy-forced issue.
		 */
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR,
					vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR,
					 vptr->mac_regs);
		/*
		 *	Turn on Link/Activity LED enable bit for CIS8201
		 */
		MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
		break;
	case PHYID_VT3216_32BIT:
	case PHYID_VT3216_64BIT:
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR,
				vptr->mac_regs);
		/*
		 *	Turn on ECHODIS bit in NWay-forced full mode and turn it
		 *	off in NWay-forced half mode for NWay-forced v.s.
		 *	legacy-forced issue
		 */
		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
			MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR,
					vptr->mac_regs);
		else
			MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR,
					 vptr->mac_regs);
		break;

	case PHYID_MARVELL_1000:
	case PHYID_MARVELL_1000S:
		/*
		 *	Assert CRS on Transmit
		 */
		MII_REG_BITS_ON(PSCR_ACRSTX, MII_REG_PSCR, vptr->mac_regs);
		/*
		 *	Reset to hardware default
		 */
		MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR,
				vptr->mac_regs);
		break;
	default:
		;
	}
	velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
	if (BMCR & BMCR_ISO) {
		BMCR &= ~BMCR_ISO;
		velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
	}
}

/**
 *	safe_disable_mii_autopoll	-	autopoll off
 *	@regs: velocity registers
 *
 *	Turn off the autopoll and wait for it to disable on the chip
 */

static void safe_disable_mii_autopoll(struct mac_regs *regs)
{
	u16 ww;

	/* turn off MAUTO */
	writeb(0, &regs->MIICR);
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}
}

/**
 *	enable_mii_autopoll	-	turn on autopolling
 *	@regs: velocity registers
 *
 *	Enable the MII link status autopoll feature on the Velocity
 *	hardware. Wait for it to enable.
 */

static void enable_mii_autopoll(struct mac_regs *regs)
{
	unsigned int ii;

	writeb(0, &(regs->MIICR));
	writeb(MIIADR_SWMPL, &regs->MIIADR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}

	writeb(MIICR_MAUTO, &regs->MIICR);

	for (ii = 0; ii < W_MAX_TIMEOUT; ii++) {
		udelay(1);
		if (!BYTE_REG_BITS_IS_ON(MIISR_MIDLE, &regs->MIISR))
			break;
	}

}
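
/* MII access pattern used by velocity_mii_read()/velocity_mii_write()
 * below: autopolling is paused first so that MIIADR can be written, the
 * RCMD/WCMD bit is set and polled until the hardware clears it, and
 * autopolling is then re-enabled.  Note that on timeout these helpers
 * return -1, not the -ETIMEDOUT mentioned in their kernel-doc comments.
 */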

/**
 *	velocity_mii_read	-	read MII data
 *	@regs: velocity registers
 *	@index: MII register index
 *	@data: buffer for received data
 *
 *	Perform a single read of an MII 16bit register. Returns zero
 *	on success or -ETIMEDOUT if the PHY did not respond.
 */

static int velocity_mii_read(struct mac_regs *regs, u8 index, u16 * data)
{
	u16 ww;

	/*
	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	writeb(index, &regs->MIIADR);

	BYTE_REG_BITS_ON(MIICR_RCMD, &regs->MIICR);

	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		if (!(readb(&regs->MIICR) & MIICR_RCMD))
			break;
	}

	*data = readw(&regs->MIIDATA);

	enable_mii_autopoll(regs);
	if (ww == W_MAX_TIMEOUT)
		return -1;
	return 0;
}

/**
 *	velocity_mii_write	-	write MII data
 *	@regs: velocity registers
 *	@mii_addr: MII register index
 *	@data: 16bit data for the MII register
 *
 *	Perform a single write to an MII 16bit register. Returns zero
 *	on success or -ETIMEDOUT if the PHY did not respond.
 */

static int velocity_mii_write(struct mac_regs *regs, u8 mii_addr, u16 data)
{
	u16 ww;

	/*
	 *	Disable MIICR_MAUTO, so that mii addr can be set normally
	 */
	safe_disable_mii_autopoll(regs);

	/* MII reg offset */
	writeb(mii_addr, &regs->MIIADR);
	/* set MII data */
	writew(data, &regs->MIIDATA);

	/* turn on MIICR_WCMD */
	BYTE_REG_BITS_ON(MIICR_WCMD, &regs->MIICR);

	/* W_MAX_TIMEOUT is the timeout period */
	for (ww = 0; ww < W_MAX_TIMEOUT; ww++) {
		udelay(5);
		if (!(readb(&regs->MIICR) & MIICR_WCMD))
			break;
	}
	enable_mii_autopoll(regs);

	if (ww == W_MAX_TIMEOUT)
		return -1;
	return 0;
}

/**
 *	velocity_get_opt_media_mode	-	get media selection
 *	@vptr: velocity adapter
 *
 *	Get the media mode stored in EEPROM or module options and load
 *	mii_status accordingly. The requested link state information
 *	is also returned.
 */

static u32 velocity_get_opt_media_mode(struct velocity_info *vptr)
{
	u32 status = 0;

	switch (vptr->options.spd_dpx) {
	case SPD_DPX_AUTO:
		status = VELOCITY_AUTONEG_ENABLE;
		break;
	case SPD_DPX_100_FULL:
		status = VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_10_FULL:
		status = VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL;
		break;
	case SPD_DPX_100_HALF:
		status = VELOCITY_SPEED_100;
		break;
	case SPD_DPX_10_HALF:
		status = VELOCITY_SPEED_10;
		break;
	}
	vptr->mii_status = status;
	return status;
}

/**
 *	mii_set_auto_on	-	autonegotiate on
 *	@vptr: velocity
 *
 *	Enable autonegotiation on this interface
 */

static void mii_set_auto_on(struct velocity_info *vptr)
{
	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
		MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
	else
		MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
}

/*
static void mii_set_auto_off(struct velocity_info * vptr)
{
	MII_REG_BITS_OFF(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
}
*/

/**
 *	set_mii_flow_control	-	flow control setup
 *	@vptr: velocity interface
 *
 *	Set up the flow control on this interface according to
 *	the supplied user/eeprom options.
 */

static void set_mii_flow_control(struct velocity_info *vptr)
{
	/* Enable or Disable PAUSE in ANAR */
	switch (vptr->options.flow_cntl) {
	case FLOW_CNTL_TX:
		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;

	case FLOW_CNTL_RX:
		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;

	case FLOW_CNTL_TX_RX:
		MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
		break;

	case FLOW_CNTL_DISABLE:
		MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR,
				 vptr->mac_regs);
		break;
	default:
		break;
	}
}

/**
 *	velocity_set_media_mode	-	set media mode
 *	@mii_status: old MII link state
 *
 *	Check the media link state and configure the flow control
 *	PHY and also velocity hardware setup accordingly. In particular
 *	we need to set up CD polling and frame bursting.
 */

static int velocity_set_media_mode(struct velocity_info *vptr,
				   u32 mii_status)
{
	u32 curr_status;
	struct mac_regs *regs = vptr->mac_regs;

	vptr->mii_status = mii_check_media_mode(vptr->mac_regs);
	curr_status = vptr->mii_status & (~VELOCITY_LINK_FAIL);

	/* Set mii link status */
	set_mii_flow_control(vptr);

	/*
	   Check if new status is consistent with current status
	   if (((mii_status & curr_status) & VELOCITY_AUTONEG_ENABLE)
	   || (mii_status==curr_status)) {
	   vptr->mii_status=mii_check_media_mode(vptr->mac_regs);
	   vptr->mii_status=check_connection_type(vptr->mac_regs);
	   printf(MSG_LEVEL_INFO, "Velocity link no change\n");
	   return 0;
	   }
	 */

	if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201) {
		MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR,
				vptr->mac_regs);
	}

	/*
	 *	If connection type is AUTO
	 */
	if (mii_status & VELOCITY_AUTONEG_ENABLE) {
		printf("Velocity is in AUTO mode\n");
		/* clear force MAC mode bit */
		BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
		/* set duplex mode of MAC according to duplex mode of MII */
		MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10,
				MII_REG_ANAR, vptr->mac_regs);
		MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000,
				MII_REG_G1000CR, vptr->mac_regs);
		MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR,
				vptr->mac_regs);

		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
	} else {
		u16 ANAR;
		u8 CHIPGCR;

		/*
		 * 1. if it's 3119, disable frame bursting in halfduplex mode
		 *    and enable it in fullduplex mode
		 * 2. set correct MII/GMII and half/full duplex mode in CHIPGCR
		 * 3. only enable CD heart beat counter in 10HD mode
		 */

		/* set force MAC mode bit */
		BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);

		CHIPGCR = readb(&regs->CHIPGCR);
		CHIPGCR &= ~CHIPGCR_FCGMII;

		if (mii_status & VELOCITY_DUPLEX_FULL) {
			CHIPGCR |= CHIPGCR_FCFDX;
			writeb(CHIPGCR, &regs->CHIPGCR);
			printf
			    ("DEBUG: set Velocity to forced full mode\n");
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
		} else {
			CHIPGCR &= ~CHIPGCR_FCFDX;
			printf
			    ("DEBUG: set Velocity to forced half mode\n");
			writeb(CHIPGCR, &regs->CHIPGCR);
			if (vptr->rev_id < REV_ID_VT3216_A0)
				BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
		}

		MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000,
				 MII_REG_G1000CR, vptr->mac_regs);

		if (!(mii_status & VELOCITY_DUPLEX_FULL)
		    && (mii_status & VELOCITY_SPEED_10)) {
			BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
		} else {
			BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
		}
		/* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
		velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
		ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
		if (mii_status & VELOCITY_SPEED_100) {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ANAR_TXFD;
			else
				ANAR |= ANAR_TX;
		} else {
			if (mii_status & VELOCITY_DUPLEX_FULL)
				ANAR |= ANAR_10FD;
			else
				ANAR |= ANAR_10;
		}
		velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
		/* enable AUTO-NEGO mode */
		mii_set_auto_on(vptr);
		/* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
	}
	/* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
	/* vptr->mii_status=check_connection_type(vptr->mac_regs); */
	return VELOCITY_LINK_CHANGE;
}

/**
 *	mii_check_media_mode	-	check media state
 *	@regs: velocity registers
 *
 *	Check the current MII status and determine the link status
 *	accordingly
 */

static u32 mii_check_media_mode(struct mac_regs *regs)
{
	u32 status = 0;
	u16 ANAR;

	if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
		status |= VELOCITY_LINK_FAIL;

	if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
		status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
	else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
		status |= (VELOCITY_SPEED_1000);
	else {
		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
		if (ANAR & ANAR_TXFD)
			status |=
			    (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
		else if (ANAR & ANAR_TX)
			status |= VELOCITY_SPEED_100;
		else if (ANAR & ANAR_10FD)
			status |=
			    (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
		else
			status |= (VELOCITY_SPEED_10);
	}

	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
			if (MII_REG_BITS_IS_ON
			    (G1000CR_1000 | G1000CR_1000FD,
			     MII_REG_G1000CR, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}

static u32 check_connection_type(struct mac_regs *regs)
{
	u32 status = 0;
	u8 PHYSR0;
	u16 ANAR;
	PHYSR0 = readb(&regs->PHYSR0);

	/*
	   if (!(PHYSR0 & PHYSR0_LINKGD))
	   status |= VELOCITY_LINK_FAIL;
	 */

	if (PHYSR0 & PHYSR0_FDPX)
		status |= VELOCITY_DUPLEX_FULL;

	if (PHYSR0 & PHYSR0_SPDG)
		status |= VELOCITY_SPEED_1000;
	if (PHYSR0 & PHYSR0_SPD10)
		status |= VELOCITY_SPEED_10;
	else
		status |= VELOCITY_SPEED_100;

	if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
		velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
		if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
		    == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
			if (MII_REG_BITS_IS_ON
			    (G1000CR_1000 | G1000CR_1000FD,
			     MII_REG_G1000CR, regs))
				status |= VELOCITY_AUTONEG_ENABLE;
		}
	}

	return status;
}

/**
 *	enable_flow_control_ability	-	flow control
 *	@vptr: velocity to configure
 *
 *	Set up flow control according to the flow control options
 *	determined by the eeprom/configuration.
 */

static void enable_flow_control_ability(struct velocity_info *vptr)
{

	struct mac_regs *regs = vptr->mac_regs;

	switch (vptr->options.flow_cntl) {

	case FLOW_CNTL_DEFAULT:
		if (BYTE_REG_BITS_IS_ON(PHYSR0_RXFLC, &regs->PHYSR0))
			writel(CR0_FDXRFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXRFCEN, &regs->CR0Clr);

		if (BYTE_REG_BITS_IS_ON(PHYSR0_TXFLC, &regs->PHYSR0))
			writel(CR0_FDXTFCEN, &regs->CR0Set);
		else
			writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_RX:
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	case FLOW_CNTL_TX_RX:
		writel(CR0_FDXTFCEN, &regs->CR0Set);
		writel(CR0_FDXRFCEN, &regs->CR0Set);
		break;

	case FLOW_CNTL_DISABLE:
		writel(CR0_FDXRFCEN, &regs->CR0Clr);
		writel(CR0_FDXTFCEN, &regs->CR0Clr);
		break;

	default:
		break;
	}

}
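
/* The PCI_D0..PCI_D3hot constants defined at the top of this file map
 * directly onto the raw state numbers taken by pci_set_power_state() below,
 * so the intended call is e.g.
 *
 *	pci_set_power_state(vptr->pdev, PCI_D3hot);
 *
 * as in the (currently commented out) power-down in velocity_disable().
 */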

/* FIXME: Move to pci.c */
/**
 *	pci_set_power_state	-	Set the power state of a PCI device
 *	@dev: PCI device to be suspended
 *	@state: Power state we're entering
 *
 *	Transition a device to a new power state, using the Power Management
 *	Capabilities in the device's config space.
 *
 *	RETURN VALUE:
 *	-EINVAL if trying to enter a lower state than we're already in.
 *	0 if we're already in the requested state.
 *	-EIO if device does not support PCI PM.
 *	0 if we can successfully change the power state.
 */

int pci_set_power_state(struct pci_device *dev, int state)
{
	int pm;
	u16 pmcsr;
	int current_state = 0;

	/* bound the state we're entering */
	if (state > 3)
		state = 3;

	/* Validate current state:
	 * Can enter D0 from any state, but we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state > 0 && current_state > state)
		return -1;
	else if (current_state == state)
		return 0;	/* we're already there */

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);

	/* abort if the device doesn't support PM capabilities */
	if (!pm)
		return -2;

	/* check if this device supports the desired state */
	if (state == 1 || state == 2) {
		u16 pmc;
		pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
		if (state == 1 && !(pmc & PCI_PM_CAP_D1))
			return -2;
		else if (state == 2 && !(pmc & PCI_PM_CAP_D2))
			return -2;
	}

	/* If we're in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	if (current_state >= 3)
		pmcsr = 0;
	else {
		pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
	}

	/* enter specified state */
	pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == 3 || current_state == 3)
		mdelay(10);
	else if (state == 2 || current_state == 2)
		udelay(200);
	current_state = state;

	return 0;
}

static struct pci_device_id velocity_nics[] = {
	PCI_ROM(0x1106, 0x3119, "via-velocity", "VIA Networking Velocity Family Gigabit Ethernet Adapter", 0),
};

PCI_DRIVER ( velocity_driver, velocity_nics, PCI_NO_CLASS );

DRIVER ( "VIA-VELOCITY/PCI", nic_driver, pci_driver, velocity_driver,
	 velocity_probe, velocity_disable );