1 /******************************************************************************** 2 Copyright (C) 2016 Marvell International Ltd. 3 4 Marvell BSD License Option 5 6 If you received this File from Marvell, you may opt to use, redistribute and/or 7 modify this File under the following licensing terms. 8 Redistribution and use in source and binary forms, with or without modification, 9 are permitted provided that the following conditions are met: 10 11 * Redistributions of source code must retain the above copyright notice, 12 this list of conditions and the following disclaimer. 13 14 * Redistributions in binary form must reproduce the above copyright 15 notice, this list of conditions and the following disclaimer in the 16 documentation and/or other materials provided with the distribution. 17 18 * Neither the name of Marvell nor the names of its contributors may be 19 used to endorse or promote products derived from this software without 20 specific prior written permission. 21 22 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND 23 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 24 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 25 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR 26 ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 27 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 28 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON 29 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 31 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 33 *******************************************************************************/ 34 35 #include "Mvpp2Lib.h" 36 #include "Mvpp2LibHw.h" 37 #include "Pp2Dxe.h" 38 39 /* Parser configuration routines */ 40 41 /* Update parser Tcam and Sram hw entries */ 42 STATIC 43 INT32 44 Mvpp2PrsHwWrite ( 45 IN MVPP2_SHARED *Priv, 46 IN OUT MVPP2_PRS_ENTRY *Pe 47 ) 48 { 49 INT32 i; 50 51 if (Pe->Index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) { 52 return MVPP2_EINVAL; 53 } 54 55 /* Clear entry invalidation bit */ 56 Pe->Tcam.Word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; 57 58 /* Write Tcam Index - indirect access */ 59 Mvpp2Write (Priv, MVPP2_PRS_TCAM_IDX_REG, Pe->Index); 60 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) { 61 Mvpp2Write (Priv, MVPP2_PRS_TCAM_DATA_REG(i), Pe->Tcam.Word[i]); 62 } 63 64 /* Write Sram Index - indirect access */ 65 Mvpp2Write (Priv, MVPP2_PRS_SRAM_IDX_REG, Pe->Index); 66 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) { 67 Mvpp2Write (Priv, MVPP2_PRS_SRAM_DATA_REG(i), Pe->Sram.Word[i]); 68 } 69 70 return 0; 71 } 72 73 /* Read Tcam entry from hw */ 74 STATIC 75 INT32 76 Mvpp2PrsHwRead ( 77 IN MVPP2_SHARED *Priv, 78 IN OUT MVPP2_PRS_ENTRY *Pe 79 ) 80 { 81 INT32 i; 82 83 if (Pe->Index > MVPP2_PRS_TCAM_SRAM_SIZE - 1) { 84 return MVPP2_EINVAL; 85 } 86 87 /* Write Tcam Index - indirect access */ 88 Mvpp2Write (Priv, MVPP2_PRS_TCAM_IDX_REG, Pe->Index); 89 90 Pe->Tcam.Word[MVPP2_PRS_TCAM_INV_WORD] = 91 Mvpp2Read (Priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD)); 92 if (Pe->Tcam.Word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK) { 93 return MVPP2_PRS_TCAM_ENTRY_INVALID; 94 } 95 96 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) { 97 Pe->Tcam.Word[i] = Mvpp2Read (Priv, MVPP2_PRS_TCAM_DATA_REG(i)); 98 } 99 100 /* Write Sram Index - indirect access */ 101 Mvpp2Write (Priv, MVPP2_PRS_SRAM_IDX_REG, Pe->Index); 102 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) { 103 Pe->Sram.Word[i] = Mvpp2Read (Priv, MVPP2_PRS_SRAM_DATA_REG(i)); 104 } 105 106 return 0; 107 } 108 109 /* Invalidate Tcam hw entry */ 110 STATIC 111 VOID 112 Mvpp2PrsHwInv ( 113 IN MVPP2_SHARED *Priv, 114 IN INT32 Index 115 ) 116 { 117 /* Write Index - indirect access */ 118 Mvpp2Write (Priv, MVPP2_PRS_TCAM_IDX_REG, Index); 119 Mvpp2Write (Priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD), 120 MVPP2_PRS_TCAM_INV_MASK); 121 } 122 123 /* Enable shadow table entry and set its lookup ID */ 124 STATIC 125 VOID 126 Mvpp2PrsShadowSet ( 127 IN MVPP2_SHARED *Priv, 128 IN INT32 Index, 129 IN INT32 Lu 130 ) 131 { 132 Priv->PrsShadow[Index].Valid = TRUE; 133 Priv->PrsShadow[Index].Lu = Lu; 134 } 135 136 /* Update Ri fields in shadow table entry */ 137 STATIC 138 VOID 139 Mvpp2PrsShadowRiSet ( 140 IN MVPP2_SHARED *Priv, 141 IN INT32 Index, 142 IN UINT32 Ri, 143 IN UINT32 RiMask 144 ) 145 { 146 Priv->PrsShadow[Index].RiMask = RiMask; 147 Priv->PrsShadow[Index].Ri = Ri; 148 } 149 150 /* Update lookup field in Tcam sw entry */ 151 STATIC 152 VOID 153 Mvpp2PrsTcamLuSet ( 154 IN OUT MVPP2_PRS_ENTRY *Pe, 155 IN UINT32 Lu 156 ) 157 { 158 INT32 EnableOff = MVPP2_PRS_TCAM_EN_OFFS (MVPP2_PRS_TCAM_LU_BYTE); 159 160 Pe->Tcam.Byte[MVPP2_PRS_TCAM_LU_BYTE] = Lu; 161 Pe->Tcam.Byte[EnableOff] = MVPP2_PRS_LU_MASK; 162 } 163 164 /* Update Mask for single Port in Tcam sw entry */ 165 STATIC 166 VOID 167 Mvpp2PrsTcamPortSet ( 168 IN OUT MVPP2_PRS_ENTRY *Pe, 169 IN UINT32 PortId, 170 IN BOOLEAN Add 171 ) 172 { 173 INT32 EnableOff = MVPP2_PRS_TCAM_EN_OFFS (MVPP2_PRS_TCAM_PORT_BYTE); 174 175 if (Add) { 176 Pe->Tcam.Byte[EnableOff] &= ~(1 << PortId); 177 } else { 178 Pe->Tcam.Byte[EnableOff] |= 1 << PortId; 179 } 180 } 181 182 /* Update Port map in Tcam sw entry */ 183 STATIC 184 VOID 185 Mvpp2PrsTcamPortMapSet ( 186 IN OUT MVPP2_PRS_ENTRY *Pe, 187 IN UINT32 PortMask 188 ) 189 { 190 INT32 EnableOff = MVPP2_PRS_TCAM_EN_OFFS (MVPP2_PRS_TCAM_PORT_BYTE); 191 UINT8 Mask = MVPP2_PRS_PORT_MASK; 192 193 Pe->Tcam.Byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0; 194 Pe->Tcam.Byte[EnableOff] &= ~Mask; 195 Pe->Tcam.Byte[EnableOff] |= ~PortMask & MVPP2_PRS_PORT_MASK; 196 } 197 198 /* Obtain Port map from Tcam sw entry */ 199 STATIC 200 UINT32 201 Mvpp2PrsTcamPortMapGet ( 202 IN MVPP2_PRS_ENTRY *Pe 203 ) 204 { 205 INT32 EnableOff = MVPP2_PRS_TCAM_EN_OFFS (MVPP2_PRS_TCAM_PORT_BYTE); 206 207 return ~(Pe->Tcam.Byte[EnableOff]) & MVPP2_PRS_PORT_MASK; 208 } 209 210 /* Set Byte of data and its enable bits in Tcam sw entry */ 211 STATIC 212 VOID 213 Mvpp2PrsTcamDataByteSet ( 214 IN OUT MVPP2_PRS_ENTRY *Pe, 215 IN UINT32 Offs, 216 IN UINT8 Byte, 217 IN UINT8 Enable 218 ) 219 { 220 Pe->Tcam.Byte[MVPP2_PRS_TCAM_DATA_BYTE(Offs)] = Byte; 221 Pe->Tcam.Byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(Offs)] = Enable; 222 } 223 224 /* Get Byte of data and its enable bits from Tcam sw entry */ 225 STATIC 226 VOID 227 Mvpp2PrsTcamDataByteGet ( 228 IN MVPP2_PRS_ENTRY *Pe, 229 IN UINT32 Offs, 230 OUT UINT8 *Byte, 231 OUT UINT8 *Enable 232 ) 233 { 234 *Byte = Pe->Tcam.Byte[MVPP2_PRS_TCAM_DATA_BYTE(Offs)]; 235 *Enable = Pe->Tcam.Byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(Offs)]; 236 } 237 238 /* Compare Tcam data bytes with a pattern */ 239 STATIC 240 BOOLEAN 241 Mvpp2PrsTcamDataCmp ( 242 IN MVPP2_PRS_ENTRY *Pe, 243 IN INT32 Offset, 244 IN UINT16 Data 245 ) 246 { 247 INT32 ByteOffset = MVPP2_PRS_TCAM_DATA_BYTE(Offset); 248 UINT16 TcamData; 249 250 TcamData = (Pe->Tcam.Byte[ByteOffset + 1] << 8) | Pe->Tcam.Byte[ByteOffset]; 251 if (TcamData != Data) { 252 return FALSE; 253 } 254 255 return TRUE; 256 } 257 258 /* Update ai bits in Tcam sw entry */ 259 STATIC 260 VOID 261 Mvpp2PrsTcamAiUpdate ( 262 IN OUT MVPP2_PRS_ENTRY *Pe, 263 IN UINT32 Bits, 264 IN UINT32 Enable 265 ) 266 { 267 INT32 i, AiIdx = MVPP2_PRS_TCAM_AI_BYTE; 268 269 for (i = 0; i < MVPP2_PRS_AI_BITS; i++) { 270 271 if (!(Enable & BIT (i))) { 272 continue; 273 } 274 275 if (Bits & BIT (i)) { 276 Pe->Tcam.Byte[AiIdx] |= 1 << i; 277 } else { 278 Pe->Tcam.Byte[AiIdx] &= ~(1 << i); 279 } 280 } 281 282 Pe->Tcam.Byte[MVPP2_PRS_TCAM_EN_OFFS (AiIdx)] |= Enable; 283 } 284 285 /* Get ai bits from Tcam sw entry */ 286 STATIC 287 INT32 288 Mvpp2PrsTcamAiGet ( 289 IN MVPP2_PRS_ENTRY *Pe 290 ) 291 { 292 return Pe->Tcam.Byte[MVPP2_PRS_TCAM_AI_BYTE]; 293 } 294 295 /* Get word of data and its enable bits from Tcam sw entry */ 296 STATIC 297 VOID 298 Mvpp2PrsTcamDataWordGet ( 299 IN MVPP2_PRS_ENTRY *Pe, 300 IN UINT32 DataOffset, 301 OUT UINT32 *Word, 302 OUT UINT32 *Enable 303 ) 304 { 305 INT32 Index, Position; 306 UINT8 Byte, Mask; 307 308 for (Index = 0; Index < 4; Index++) { 309 Position = (DataOffset * sizeof (INT32)) + Index; 310 Mvpp2PrsTcamDataByteGet (Pe, Position, &Byte, &Mask); 311 ((UINT8 *)Word)[Index] = Byte; 312 ((UINT8 *)Enable)[Index] = Mask; 313 } 314 } 315 316 /* Set ethertype in Tcam sw entry */ 317 STATIC 318 VOID 319 Mvpp2PrsMatchEtype ( 320 IN OUT MVPP2_PRS_ENTRY *Pe, 321 IN INT32 Offset, 322 IN UINT16 EtherType 323 ) 324 { 325 Mvpp2PrsTcamDataByteSet (Pe, Offset + 0, EtherType >> 8, 0xff); 326 Mvpp2PrsTcamDataByteSet (Pe, Offset + 1, EtherType & 0xff, 0xff); 327 } 328 329 /* Set bits in Sram sw entry */ 330 STATIC 331 VOID 332 Mvpp2PrsSramBitsSet ( 333 IN OUT MVPP2_PRS_ENTRY *Pe, 334 IN INT32 BitNum, 335 IN INT32 Val 336 ) 337 { 338 Pe->Sram.Byte[MVPP2_BIT_TO_BYTE(BitNum)] |= (Val << (BitNum % 8)); 339 } 340 341 /* Clear bits in Sram sw entry */ 342 STATIC 343 VOID 344 Mvpp2PrsSramBitsClear ( 345 IN OUT MVPP2_PRS_ENTRY *Pe, 346 IN INT32 BitNum, 347 IN INT32 Val 348 ) 349 { 350 Pe->Sram.Byte[MVPP2_BIT_TO_BYTE(BitNum)] &= ~(Val << (BitNum % 8)); 351 } 352 353 /* Update Ri bits in Sram sw entry */ 354 STATIC 355 VOID 356 Mvpp2PrsSramRiUpdate ( 357 IN OUT MVPP2_PRS_ENTRY *Pe, 358 IN UINT32 bits, 359 IN UINT32 Mask 360 ) 361 { 362 UINT32 i; 363 364 for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) { 365 INT32 RiOff = MVPP2_PRS_SRAM_RI_OFFS; 366 367 if (!(Mask & BIT (i))) { 368 continue; 369 } 370 371 if (bits & BIT (i)) { 372 Mvpp2PrsSramBitsSet (Pe, RiOff + i, 1); 373 } else { 374 Mvpp2PrsSramBitsClear (Pe, RiOff + i, 1); 375 } 376 377 Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1); 378 } 379 } 380 381 /* Obtain Ri bits from Sram sw entry */ 382 STATIC 383 INT32 384 Mvpp2PrsSramRiGet ( 385 IN MVPP2_PRS_ENTRY *Pe 386 ) 387 { 388 return Pe->Sram.Word[MVPP2_PRS_SRAM_RI_WORD]; 389 } 390 391 /* Update ai bits in Sram sw entry */ 392 STATIC 393 VOID 394 Mvpp2PrsSramAiUpdate ( 395 IN OUT MVPP2_PRS_ENTRY *Pe, 396 IN UINT32 Bits, 397 UINT32 Mask 398 ) 399 { 400 UINT32 i; 401 INT32 AiOff = MVPP2_PRS_SRAM_AI_OFFS; 402 403 for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) { 404 405 if (!(Mask & BIT (i))) { 406 continue; 407 } 408 409 if (Bits & BIT (i)) { 410 Mvpp2PrsSramBitsSet (Pe, AiOff + i, 1); 411 } else { 412 Mvpp2PrsSramBitsClear (Pe, AiOff + i, 1); 413 } 414 415 Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1); 416 } 417 } 418 419 /* Read ai bits from Sram sw entry */ 420 STATIC 421 INT32 422 Mvpp2PrsSramAiGet ( 423 IN MVPP2_PRS_ENTRY *Pe 424 ) 425 { 426 UINT8 bits; 427 INT32 AiOff = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS); 428 INT32 AiEnOff = AiOff + 1; 429 INT32 AiShift = MVPP2_PRS_SRAM_AI_OFFS % 8; 430 431 bits = (Pe->Sram.Byte[AiOff] >> AiShift) | 432 (Pe->Sram.Byte[AiEnOff] << (8 - AiShift)); 433 434 return bits; 435 } 436 437 /* 438 * In Sram sw entry set lookup ID field of the 439 * Tcam key to be used in the next lookup iteration 440 */ 441 STATIC 442 VOID 443 Mvpp2PrsSramNextLuSet ( 444 IN OUT MVPP2_PRS_ENTRY *Pe, 445 IN UINT32 Lu 446 ) 447 { 448 INT32 SramNextOff = MVPP2_PRS_SRAM_NEXT_LU_OFFS; 449 450 Mvpp2PrsSramBitsClear (Pe, SramNextOff, MVPP2_PRS_SRAM_NEXT_LU_MASK); 451 Mvpp2PrsSramBitsSet (Pe, SramNextOff, Lu); 452 } 453 454 /* 455 * In the Sram sw entry set sign and value of the next lookup Offset 456 * and the Offset value generated to the classifier 457 */ 458 STATIC 459 VOID 460 Mvpp2PrsSramShiftSet ( 461 IN OUT MVPP2_PRS_ENTRY *Pe, 462 IN INT32 Shift, 463 IN UINT32 Op 464 ) 465 { 466 /* Set sign */ 467 if (Shift < 0) { 468 Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); 469 Shift = -Shift; 470 } else { 471 Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1); 472 } 473 474 /* Set value */ 475 Pe->Sram.Byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] = (UINT8)Shift; 476 477 /* Reset and set operation */ 478 Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK); 479 Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, Op); 480 481 /* Set base Offset as current */ 482 Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); 483 } 484 485 /* 486 * In the Sram sw entry set sign and value of the user defined offset 487 * generated for the classifier 488 */ 489 STATIC 490 VOID 491 Mvpp2PrsSramOffsetSet ( 492 IN OUT MVPP2_PRS_ENTRY *Pe, 493 IN UINT32 Type, 494 IN INT32 Offset, 495 IN UINT32 Op 496 ) 497 { 498 UINT8 UdfByte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS + MVPP2_PRS_SRAM_UDF_BITS); 499 UINT8 UdfByteOffset = (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)); 500 UINT8 OpSelUdfByte = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS + MVPP2_PRS_SRAM_OP_SEL_UDF_BITS); 501 UINT8 OpSelUdfByteOffset = (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)); 502 503 /* Set sign */ 504 if (Offset < 0) { 505 Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); 506 Offset = -Offset; 507 } else { 508 Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1); 509 } 510 511 /* Set value */ 512 Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_UDF_OFFS, MVPP2_PRS_SRAM_UDF_MASK); 513 Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_UDF_OFFS, Offset); 514 515 Pe->Sram.Byte[UdfByte] &= ~(MVPP2_PRS_SRAM_UDF_MASK >> UdfByteOffset); 516 Pe->Sram.Byte[UdfByte] |= (Offset >> UdfByteOffset); 517 518 /* Set Offset Type */ 519 Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, MVPP2_PRS_SRAM_UDF_TYPE_MASK); 520 Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, Type); 521 522 /* Set Offset operation */ 523 Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, MVPP2_PRS_SRAM_OP_SEL_UDF_MASK); 524 Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, Op); 525 526 Pe->Sram.Byte[OpSelUdfByte] &= ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >> OpSelUdfByteOffset); 527 Pe->Sram.Byte[OpSelUdfByte] |= (Op >> OpSelUdfByteOffset); 528 529 /* Set base Offset as current */ 530 Mvpp2PrsSramBitsClear (Pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1); 531 } 532 533 /* Find parser Flow entry */ 534 STATIC 535 MVPP2_PRS_ENTRY * 536 Mvpp2PrsFlowFind ( 537 IN MVPP2_SHARED *Priv, 538 IN INT32 Flow 539 ) 540 { 541 MVPP2_PRS_ENTRY *Pe; 542 INT32 Tid; 543 UINT32 Word, Enable; 544 545 Pe = Mvpp2Alloc (sizeof (*Pe)); 546 if (Pe == NULL) { 547 return NULL; 548 } 549 550 Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_FLOWS); 551 552 /* Go through the all entires with MVPP2_PRS_LU_FLOWS */ 553 for (Tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; Tid >= 0; Tid--) { 554 UINT8 Bits; 555 556 if (!Priv->PrsShadow[Tid].Valid || Priv->PrsShadow[Tid].Lu != MVPP2_PRS_LU_FLOWS) { 557 continue; 558 } 559 560 Pe->Index = Tid; 561 Mvpp2PrsHwRead (Priv, Pe); 562 563 /* 564 * Check result info, because there maybe 565 * several TCAM lines to generate the same Flow 566 */ 567 Mvpp2PrsTcamDataWordGet (Pe, 0, &Word, &Enable); 568 if ((Word != 0) || (Enable != 0)) { 569 continue; 570 } 571 572 Bits = Mvpp2PrsSramAiGet (Pe); 573 574 /* Sram store classification lookup ID in AI Bits [5:0] */ 575 if ((Bits & MVPP2_PRS_FLOW_ID_MASK) == Flow) { 576 return Pe; 577 } 578 } 579 580 Mvpp2Free (Pe); 581 582 return NULL; 583 } 584 585 /* Return first free Tcam Index, seeking from start to end */ 586 STATIC 587 INT32 588 Mvpp2PrsTcamFirstFree ( 589 IN MVPP2_SHARED *Priv, 590 IN UINT8 Start, 591 IN UINT8 End 592 ) 593 { 594 INT32 Tid; 595 596 if (Start > End) { 597 Mvpp2SwapVariables (Start, End); 598 } 599 600 if (End >= MVPP2_PRS_TCAM_SRAM_SIZE) { 601 End = MVPP2_PRS_TCAM_SRAM_SIZE - 1; 602 } 603 604 for (Tid = Start; Tid <= End; Tid++) { 605 if (!Priv->PrsShadow[Tid].Valid) { 606 return Tid; 607 } 608 } 609 610 return MVPP2_EINVAL; 611 } 612 613 /* Enable/disable dropping all mac Da's */ 614 STATIC 615 VOID 616 Mvpp2PrsMacDropAllSet ( 617 IN MVPP2_SHARED *Priv, 618 IN INT32 PortId, 619 IN BOOLEAN Add 620 ) 621 { 622 MVPP2_PRS_ENTRY Pe; 623 624 if (Priv->PrsShadow[MVPP2_PE_DROP_ALL].Valid) { 625 /* Entry exist - update PortId only */ 626 Pe.Index = MVPP2_PE_DROP_ALL; 627 Mvpp2PrsHwRead (Priv, &Pe); 628 } else { 629 /* Entry doesn't exist - create new */ 630 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 631 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_MAC); 632 Pe.Index = MVPP2_PE_DROP_ALL; 633 634 /* Non-promiscuous mode for all Ports - DROP unknown packets */ 635 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_DROP_MASK, MVPP2_PRS_RI_DROP_MASK); 636 637 Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 638 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS); 639 640 /* Update shadow table */ 641 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_MAC); 642 643 /* Mask all Ports */ 644 Mvpp2PrsTcamPortMapSet (&Pe, 0); 645 } 646 647 /* Update PortId Mask */ 648 Mvpp2PrsTcamPortSet (&Pe, PortId, Add); 649 650 Mvpp2PrsHwWrite (Priv, &Pe); 651 } 652 653 /* Set port to promiscuous mode */ 654 VOID 655 Mvpp2PrsMacPromiscSet ( 656 IN MVPP2_SHARED *Priv, 657 IN INT32 PortId, 658 IN BOOLEAN Add 659 ) 660 { 661 MVPP2_PRS_ENTRY Pe; 662 663 /* Promiscuous mode - Accept unknown packets */ 664 665 if (Priv->PrsShadow[MVPP2_PE_MAC_PROMISCUOUS].Valid) { 666 /* Entry exist - update port only */ 667 Pe.Index = MVPP2_PE_MAC_PROMISCUOUS; 668 Mvpp2PrsHwRead (Priv, &Pe); 669 } else { 670 /* Entry doesn't exist - create new */ 671 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 672 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_MAC); 673 Pe.Index = MVPP2_PE_MAC_PROMISCUOUS; 674 675 /* Continue - set next lookup */ 676 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_DSA); 677 678 /* Set result info bits */ 679 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L2_UCAST, MVPP2_PRS_RI_L2_CAST_MASK); 680 681 /* Shift to ethertype with 2 of MAC Address length */ 682 Mvpp2PrsSramShiftSet (&Pe, 2 * MV_ETH_ALEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 683 684 /* Mask all Ports */ 685 Mvpp2PrsTcamPortMapSet (&Pe, 0); 686 687 /* Update shadow table */ 688 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_MAC); 689 } 690 691 /* Update port Mask */ 692 Mvpp2PrsTcamPortSet (&Pe, PortId, Add); 693 694 Mvpp2PrsHwWrite (Priv, &Pe); 695 } 696 697 /* Accept multicast */ 698 VOID 699 Mvpp2PrsMacMultiSet ( 700 IN MVPP2_SHARED *Priv, 701 IN INT32 PortId, 702 IN INT32 Index, 703 IN BOOLEAN Add 704 ) 705 { 706 MVPP2_PRS_ENTRY Pe; 707 UINT8 DaMc; 708 709 /* 710 * Ethernet multicast Address first Byte is 711 * 0x01 for IPv4 and 0x33 for IPv6 712 */ 713 DaMc = (Index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33; 714 715 if (Priv->PrsShadow[Index].Valid) { 716 /* Entry exist - update port only */ 717 Pe.Index = Index; 718 Mvpp2PrsHwRead (Priv, &Pe); 719 } else { 720 /* Entry doesn't exist - create new */ 721 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 722 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_MAC); 723 Pe.Index = Index; 724 725 /* Continue - set next lookup */ 726 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_DSA); 727 728 /* Set result info bits */ 729 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L2_MCAST, MVPP2_PRS_RI_L2_CAST_MASK); 730 731 /* Update Tcam entry data first Byte */ 732 Mvpp2PrsTcamDataByteSet (&Pe, 0, DaMc, 0xff); 733 734 /* Shift to ethertype */ 735 Mvpp2PrsSramShiftSet (&Pe, 2 * MV_ETH_ALEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 736 737 /* Mask all ports */ 738 Mvpp2PrsTcamPortMapSet (&Pe, 0); 739 740 /* Update shadow table */ 741 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_MAC); 742 } 743 744 /* Update port Mask */ 745 Mvpp2PrsTcamPortSet (&Pe, PortId, Add); 746 747 Mvpp2PrsHwWrite (Priv, &Pe); 748 } 749 750 /* Set entry for dsa packets */ 751 STATIC 752 VOID 753 Mvpp2PrsDsaTagSet ( 754 IN MVPP2_SHARED *Priv, 755 IN INT32 PortId, 756 IN BOOLEAN Add, 757 IN BOOLEAN Tagged, 758 IN BOOLEAN Extend 759 ) 760 { 761 MVPP2_PRS_ENTRY Pe; 762 INT32 Tid, Shift; 763 764 if (Extend) { 765 Tid = Tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED; 766 Shift = 8; 767 } else { 768 Tid = Tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED; 769 Shift = 4; 770 } 771 772 if (Priv->PrsShadow[Tid].Valid) { 773 /* Entry exist - update port only */ 774 Pe.Index = Tid; 775 Mvpp2PrsHwRead (Priv, &Pe); 776 } else { 777 /* Entry doesn't exist - create new */ 778 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 779 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_DSA); 780 Pe.Index = Tid; 781 782 /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/ 783 Mvpp2PrsSramShiftSet (&Pe, Shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 784 785 /* Update shadow table */ 786 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_DSA); 787 788 if (Tagged) { 789 /* Set Tagged bit in DSA tag */ 790 Mvpp2PrsTcamDataByteSet (&Pe, 0, MVPP2_PRS_TCAM_DSA_TAGGED_BIT, MVPP2_PRS_TCAM_DSA_TAGGED_BIT); 791 792 /* Clear all ai bits for next iteration */ 793 Mvpp2PrsSramAiUpdate (&Pe, 0, MVPP2_PRS_SRAM_AI_MASK); 794 795 /* If packet is Tagged continue check vlans */ 796 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_VLAN); 797 } else { 798 /* Set result info bits to 'no vlans' */ 799 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK); 800 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_L2); 801 } 802 803 /* Mask all Ports */ 804 Mvpp2PrsTcamPortMapSet (&Pe, 0); 805 } 806 807 /* Update port Mask */ 808 Mvpp2PrsTcamPortSet (&Pe, PortId, Add); 809 810 Mvpp2PrsHwWrite (Priv, &Pe); 811 } 812 813 /* Set entry for dsa ethertype */ 814 STATIC 815 VOID 816 Mvpp2PrsDsaTagEthertypeSet ( 817 IN MVPP2_SHARED *Priv, 818 IN INT32 PortId, 819 IN BOOLEAN Add, 820 IN BOOLEAN Tagged, 821 IN BOOLEAN Extend 822 ) 823 { 824 MVPP2_PRS_ENTRY Pe; 825 INT32 Tid, Shift, PortMask; 826 827 if (Extend) { 828 Tid = Tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED : MVPP2_PE_ETYPE_EDSA_UNTAGGED; 829 PortMask = 0; 830 Shift = 8; 831 } else { 832 Tid = Tagged ? MVPP2_PE_ETYPE_DSA_TAGGED : MVPP2_PE_ETYPE_DSA_UNTAGGED; 833 PortMask = MVPP2_PRS_PORT_MASK; 834 Shift = 4; 835 } 836 837 if (Priv->PrsShadow[Tid].Valid) { 838 /* Entry exist - update PortId only */ 839 Pe.Index = Tid; 840 Mvpp2PrsHwRead (Priv, &Pe); 841 } else { 842 /* Entry doesn't exist - create new */ 843 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 844 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_DSA); 845 Pe.Index = Tid; 846 847 /* 848 * Set ethertype at offset 0 for DSA and 849 * clear it at offset 2 - obtained from Marvell. 850 */ 851 Mvpp2PrsMatchEtype (&Pe, 0, MV_ETH_P_EDSA); 852 Mvpp2PrsMatchEtype (&Pe, 2, 0); 853 854 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_DSA_MASK, 855 MVPP2_PRS_RI_DSA_MASK); 856 857 /* Shift ethertype + 2 Byte reserved + tag */ 858 Mvpp2PrsSramShiftSet (&Pe, 2 + MVPP2_ETH_TYPE_LEN + Shift, 859 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 860 861 /* Update shadow table */ 862 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_DSA); 863 864 if (Tagged) { 865 /* Set Tagged bit in DSA tag */ 866 Mvpp2PrsTcamDataByteSet ( 867 &Pe, 868 MVPP2_ETH_TYPE_LEN + 2 + 3, 869 MVPP2_PRS_TCAM_DSA_TAGGED_BIT, 870 MVPP2_PRS_TCAM_DSA_TAGGED_BIT 871 ); 872 873 /* Clear all ai bits for next iteration */ 874 Mvpp2PrsSramAiUpdate (&Pe, 0, MVPP2_PRS_SRAM_AI_MASK); 875 876 /* If packet is Tagged continue check vlans */ 877 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_VLAN); 878 } else { 879 /* Set result info bits to 'no vlans' */ 880 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK); 881 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_L2); 882 } 883 884 /* Mask/unmask all ports, depending on dsa type */ 885 Mvpp2PrsTcamPortMapSet (&Pe, PortMask); 886 } 887 888 /* Update port Mask */ 889 Mvpp2PrsTcamPortSet (&Pe, PortId, Add); 890 891 Mvpp2PrsHwWrite (Priv, &Pe); 892 } 893 894 /* Search for existing single/triple vlan entry */ 895 STATIC 896 MVPP2_PRS_ENTRY * 897 Mvpp2PrsVlanFind ( 898 IN MVPP2_SHARED *Priv, 899 IN UINT16 Tpid, 900 IN INT32 Ai 901 ) 902 { 903 MVPP2_PRS_ENTRY *Pe; 904 INT32 Tid; 905 906 Pe = Mvpp2Alloc (sizeof (*Pe)); 907 if (Pe == NULL) { 908 return NULL; 909 } 910 911 Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_VLAN); 912 913 /* Go through the all entries with MVPP2_PRS_LU_VLAN */ 914 for (Tid = MVPP2_PE_FIRST_FREE_TID; Tid <= MVPP2_PE_LAST_FREE_TID; Tid++) { 915 UINT32 RiBits, AiBits; 916 BOOLEAN match; 917 918 if (!Priv->PrsShadow[Tid].Valid || Priv->PrsShadow[Tid].Lu != MVPP2_PRS_LU_VLAN) { 919 continue; 920 } 921 922 Pe->Index = Tid; 923 924 Mvpp2PrsHwRead (Priv, Pe); 925 match = Mvpp2PrsTcamDataCmp (Pe, 0, Mvpp2SwapBytes16 (Tpid)); 926 if (!match) { 927 continue; 928 } 929 930 /* Get vlan type */ 931 RiBits = Mvpp2PrsSramRiGet (Pe); 932 RiBits &= MVPP2_PRS_RI_VLAN_MASK; 933 934 /* Get current Ai value from Tcam */ 935 AiBits = Mvpp2PrsTcamAiGet (Pe); 936 937 /* Clear double vlan bit */ 938 AiBits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT; 939 940 if (Ai != AiBits) { 941 continue; 942 } 943 944 if (RiBits == MVPP2_PRS_RI_VLAN_SINGLE || RiBits == MVPP2_PRS_RI_VLAN_TRIPLE) { 945 return Pe; 946 } 947 } 948 949 Mvpp2Free (Pe); 950 951 return NULL; 952 } 953 954 /* Add/update single/triple vlan entry */ 955 INT32 956 Mvpp2PrsVlanAdd ( 957 IN MVPP2_SHARED *Priv, 958 IN UINT16 Tpid, 959 IN INT32 Ai, 960 IN UINT32 PortMap 961 ) 962 { 963 MVPP2_PRS_ENTRY *Pe; 964 INT32 TidAux, Tid; 965 INT32 Ret = 0; 966 967 Pe = Mvpp2PrsVlanFind (Priv, Tpid, Ai); 968 969 if (!Pe) { 970 /* Create new Tcam entry */ 971 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); 972 if (Tid < 0) { 973 return Tid; 974 } 975 976 Pe = Mvpp2Alloc (sizeof (*Pe)); 977 if (Pe == NULL) { 978 return MVPP2_ENOMEM; 979 } 980 981 /* Get last double vlan Tid */ 982 for (TidAux = MVPP2_PE_LAST_FREE_TID; TidAux >= MVPP2_PE_FIRST_FREE_TID; TidAux--) { 983 UINT32 RiBits; 984 985 if (!Priv->PrsShadow[TidAux].Valid || Priv->PrsShadow[TidAux].Lu != MVPP2_PRS_LU_VLAN) { 986 continue; 987 } 988 989 Pe->Index = TidAux; 990 Mvpp2PrsHwRead (Priv, Pe); 991 RiBits = Mvpp2PrsSramRiGet (Pe); 992 if ((RiBits & MVPP2_PRS_RI_VLAN_MASK) == MVPP2_PRS_RI_VLAN_DOUBLE) { 993 break; 994 } 995 } 996 997 if (Tid <= TidAux) { 998 Ret = MVPP2_EINVAL; 999 goto error; 1000 } 1001 1002 Mvpp2Memset (Pe, 0 , sizeof (MVPP2_PRS_ENTRY)); 1003 Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_VLAN); 1004 Pe->Index = Tid; 1005 1006 /* Set VLAN type's offset to 0 bytes - obtained from Marvell */ 1007 Mvpp2PrsMatchEtype (Pe, 0, Tpid); 1008 1009 Mvpp2PrsSramNextLuSet (Pe, MVPP2_PRS_LU_L2); 1010 1011 /* Shift 4 bytes - skip 1 vlan tag */ 1012 Mvpp2PrsSramShiftSet (Pe, MVPP2_VLAN_TAG_LEN, 1013 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1014 1015 /* Clear all Ai bits for next iteration */ 1016 Mvpp2PrsSramAiUpdate (Pe, 0, MVPP2_PRS_SRAM_AI_MASK); 1017 1018 if (Ai == MVPP2_PRS_SINGLE_VLAN_AI) { 1019 Mvpp2PrsSramRiUpdate (Pe, MVPP2_PRS_RI_VLAN_SINGLE, MVPP2_PRS_RI_VLAN_MASK); 1020 } else { 1021 Ai |= MVPP2_PRS_DBL_VLAN_AI_BIT; 1022 Mvpp2PrsSramRiUpdate (Pe, MVPP2_PRS_RI_VLAN_TRIPLE, MVPP2_PRS_RI_VLAN_MASK); 1023 } 1024 1025 Mvpp2PrsTcamAiUpdate (Pe, Ai, MVPP2_PRS_SRAM_AI_MASK); 1026 1027 Mvpp2PrsShadowSet (Priv, Pe->Index, MVPP2_PRS_LU_VLAN); 1028 } 1029 1030 /* Update Ports' Mask */ 1031 Mvpp2PrsTcamPortMapSet (Pe, PortMap); 1032 Mvpp2PrsHwWrite (Priv, Pe); 1033 1034 error: 1035 Mvpp2Free (Pe); 1036 1037 return Ret; 1038 } 1039 1040 /* Get first free double vlan ai number */ 1041 INT32 1042 Mvpp2PrsDoubleVlanAiFreeGet ( 1043 IN MVPP2_SHARED *Priv 1044 ) 1045 { 1046 INT32 i; 1047 1048 for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) { 1049 if (!Priv->PrsDoubleVlans[i]) { 1050 return i; 1051 } 1052 } 1053 1054 return MVPP2_EINVAL; 1055 } 1056 1057 /* Search for existing double vlan entry */ 1058 MVPP2_PRS_ENTRY *Mvpp2PrsDoubleVlanFind ( 1059 IN MVPP2_SHARED *Priv, 1060 IN UINT16 Tpid1, 1061 IN UINT16 Tpid2 1062 ) 1063 { 1064 MVPP2_PRS_ENTRY *Pe; 1065 INT32 Tid; 1066 1067 Pe = Mvpp2Alloc (sizeof (*Pe)); 1068 if (Pe == NULL) { 1069 return NULL; 1070 } 1071 1072 Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_VLAN); 1073 1074 /* Go through the all entries with MVPP2_PRS_LU_VLAN */ 1075 for (Tid = MVPP2_PE_FIRST_FREE_TID; Tid <= MVPP2_PE_LAST_FREE_TID; Tid++) { 1076 UINT32 RiMask; 1077 BOOLEAN match; 1078 1079 if (!Priv->PrsShadow[Tid].Valid || Priv->PrsShadow[Tid].Lu != MVPP2_PRS_LU_VLAN) { 1080 continue; 1081 } 1082 1083 Pe->Index = Tid; 1084 Mvpp2PrsHwRead (Priv, Pe); 1085 1086 match = Mvpp2PrsTcamDataCmp (Pe, 0, Mvpp2SwapBytes16 (Tpid1)) && 1087 Mvpp2PrsTcamDataCmp (Pe, 4, Mvpp2SwapBytes16 (Tpid2)); 1088 1089 if (!match) { 1090 continue; 1091 } 1092 1093 RiMask = Mvpp2PrsSramRiGet (Pe) & MVPP2_PRS_RI_VLAN_MASK; 1094 if (RiMask == MVPP2_PRS_RI_VLAN_DOUBLE) { 1095 return Pe; 1096 } 1097 } 1098 1099 Mvpp2Free (Pe); 1100 1101 return NULL; 1102 } 1103 1104 /* Add or update double vlan entry */ 1105 INT32 1106 Mvpp2PrsDoubleVlanAdd ( 1107 IN MVPP2_SHARED *Priv, 1108 IN UINT16 Tpid1, 1109 IN UINT16 Tpid2, 1110 IN UINT32 PortMap 1111 ) 1112 { 1113 MVPP2_PRS_ENTRY *Pe; 1114 INT32 TidAux, Tid, Ai, Ret = 0; 1115 1116 Pe = Mvpp2PrsDoubleVlanFind (Priv, Tpid1, Tpid2); 1117 1118 if (!Pe) { 1119 /* Create new Tcam entry */ 1120 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1121 if (Tid < 0) { 1122 return Tid; 1123 } 1124 1125 Pe = Mvpp2Alloc (sizeof (*Pe)); 1126 if (Pe == NULL) { 1127 return MVPP2_ENOMEM; 1128 } 1129 1130 /* Set Ai value for new double vlan entry */ 1131 Ai = Mvpp2PrsDoubleVlanAiFreeGet (Priv); 1132 if (Ai < 0) { 1133 Ret = Ai; 1134 goto error; 1135 } 1136 1137 /* Get first single/triple vlan Tid */ 1138 for (TidAux = MVPP2_PE_FIRST_FREE_TID; TidAux <= MVPP2_PE_LAST_FREE_TID; TidAux++) { 1139 UINT32 RiBits; 1140 1141 if (!Priv->PrsShadow[TidAux].Valid || Priv->PrsShadow[TidAux].Lu != MVPP2_PRS_LU_VLAN) { 1142 continue; 1143 } 1144 1145 Pe->Index = TidAux; 1146 Mvpp2PrsHwRead (Priv, Pe); 1147 RiBits = Mvpp2PrsSramRiGet (Pe); 1148 RiBits &= MVPP2_PRS_RI_VLAN_MASK; 1149 1150 if (RiBits == MVPP2_PRS_RI_VLAN_SINGLE || RiBits == MVPP2_PRS_RI_VLAN_TRIPLE) { 1151 break; 1152 } 1153 } 1154 1155 if (Tid >= TidAux) { 1156 Ret = MVPP2_ERANGE; 1157 goto error; 1158 } 1159 1160 Mvpp2Memset (Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1161 Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_VLAN); 1162 Pe->Index = Tid; 1163 1164 Priv->PrsDoubleVlans[Ai] = TRUE; 1165 1166 /* Set both VLAN types' offsets to 0 and 4 bytes - obtained from Marvell */ 1167 Mvpp2PrsMatchEtype (Pe, 0, Tpid1); 1168 Mvpp2PrsMatchEtype (Pe, 4, Tpid2); 1169 1170 Mvpp2PrsSramNextLuSet (Pe, MVPP2_PRS_LU_VLAN); 1171 1172 /* Shift 8 bytes - skip 2 vlan tags */ 1173 Mvpp2PrsSramShiftSet (Pe, 2 * MVPP2_VLAN_TAG_LEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1174 Mvpp2PrsSramRiUpdate (Pe, MVPP2_PRS_RI_VLAN_DOUBLE, MVPP2_PRS_RI_VLAN_MASK); 1175 Mvpp2PrsSramAiUpdate (Pe, Ai | MVPP2_PRS_DBL_VLAN_AI_BIT, MVPP2_PRS_SRAM_AI_MASK); 1176 1177 Mvpp2PrsShadowSet (Priv, Pe->Index, MVPP2_PRS_LU_VLAN); 1178 } 1179 1180 /* Update Ports' Mask */ 1181 Mvpp2PrsTcamPortMapSet (Pe, PortMap); 1182 Mvpp2PrsHwWrite (Priv, Pe); 1183 1184 error: 1185 Mvpp2Free (Pe); 1186 return Ret; 1187 } 1188 1189 /* IPv4 header parsing for fragmentation and L4 Offset */ 1190 STATIC 1191 INT32 1192 Mvpp2PrsIp4Proto ( 1193 IN MVPP2_SHARED *Priv, 1194 IN UINT16 Proto, 1195 IN UINT32 Ri, 1196 IN UINT32 RiMask 1197 ) 1198 { 1199 MVPP2_PRS_ENTRY Pe; 1200 INT32 Tid; 1201 1202 if ((Proto != MV_IPPR_TCP) && (Proto != MV_IPPR_UDP) && (Proto != MV_IPPR_IGMP)) { 1203 return MVPP2_EINVAL; 1204 } 1205 1206 /* Fragmented packet */ 1207 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1208 if (Tid < 0) { 1209 return Tid; 1210 } 1211 1212 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1213 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP4); 1214 Pe.Index = Tid; 1215 1216 /* Set next Lu to IPv4 - 12 bytes shift */ 1217 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP4); 1218 Mvpp2PrsSramShiftSet (&Pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1219 1220 /* Set L4 offset 4 bytes relative IPv4 header size (current position) */ 1221 Mvpp2PrsSramOffsetSet ( 1222 &Pe, 1223 MVPP2_PRS_SRAM_UDF_TYPE_L4, 1224 sizeof (Mvpp2Iphdr) - 4, 1225 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 1226 ); 1227 1228 Mvpp2PrsSramAiUpdate (&Pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT); 1229 Mvpp2PrsSramRiUpdate (&Pe, Ri | MVPP2_PRS_RI_IP_FRAG_MASK, RiMask | MVPP2_PRS_RI_IP_FRAG_MASK); 1230 1231 Mvpp2PrsTcamDataByteSet (&Pe, 5, Proto, MVPP2_PRS_TCAM_PROTO_MASK); 1232 Mvpp2PrsTcamAiUpdate (&Pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); 1233 1234 /* Unmask all Ports */ 1235 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 1236 1237 /* Update shadow table and hw entry */ 1238 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4); 1239 Mvpp2PrsHwWrite (Priv, &Pe); 1240 1241 /* Not fragmented packet */ 1242 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1243 if (Tid < 0) { 1244 return Tid; 1245 } 1246 1247 Pe.Index = Tid; 1248 1249 /* Clear Ri before updating */ 1250 Pe.Sram.Word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; 1251 Pe.Sram.Word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; 1252 Mvpp2PrsSramRiUpdate (&Pe, Ri, RiMask); 1253 1254 Mvpp2PrsTcamDataByteSet (&Pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L); 1255 Mvpp2PrsTcamDataByteSet (&Pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK); 1256 1257 /* Update shadow table and hw entry */ 1258 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4); 1259 Mvpp2PrsHwWrite (Priv, &Pe); 1260 1261 return 0; 1262 } 1263 1264 /* IPv4 L3 multicast or broadcast */ 1265 STATIC 1266 INT32 1267 Mvpp2PrsIp4Cast ( 1268 IN MVPP2_SHARED *Priv, 1269 IN UINT16 L3Cast 1270 ) 1271 { 1272 MVPP2_PRS_ENTRY Pe; 1273 INT32 Mask, Tid; 1274 1275 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1276 if (Tid < 0) { 1277 return Tid; 1278 } 1279 1280 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1281 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP4); 1282 Pe.Index = Tid; 1283 1284 switch (L3Cast) { 1285 case MVPP2_PRS_L3_MULTI_CAST: 1286 Mvpp2PrsTcamDataByteSet (&Pe, 0, MVPP2_PRS_IPV4_MC, MVPP2_PRS_IPV4_MC_MASK); 1287 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_MCAST, MVPP2_PRS_RI_L3_ADDR_MASK); 1288 break; 1289 case MVPP2_PRS_L3_BROAD_CAST: 1290 Mask = MVPP2_PRS_IPV4_BC_MASK; 1291 Mvpp2PrsTcamDataByteSet (&Pe, 0, Mask, Mask); 1292 Mvpp2PrsTcamDataByteSet (&Pe, 1, Mask, Mask); 1293 Mvpp2PrsTcamDataByteSet (&Pe, 2, Mask, Mask); 1294 Mvpp2PrsTcamDataByteSet (&Pe, 3, Mask, Mask); 1295 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_BCAST, MVPP2_PRS_RI_L3_ADDR_MASK); 1296 break; 1297 default: 1298 return MVPP2_EINVAL; 1299 } 1300 1301 /* Finished: go to Flowid generation */ 1302 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS); 1303 Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1304 1305 Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT); 1306 1307 /* Unmask all Ports */ 1308 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 1309 1310 /* Update shadow table and hw entry */ 1311 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4); 1312 Mvpp2PrsHwWrite (Priv, &Pe); 1313 1314 return 0; 1315 } 1316 1317 /* Set entries for protocols over IPv6 */ 1318 STATIC 1319 INT32 1320 Mvpp2PrsIp6Proto ( 1321 IN MVPP2_SHARED *Priv, 1322 IN UINT16 Proto, 1323 IN UINT32 Ri, 1324 IN UINT32 RiMask 1325 ) 1326 { 1327 MVPP2_PRS_ENTRY Pe; 1328 INT32 Tid; 1329 1330 if ((Proto != MV_IPPR_TCP) && (Proto != MV_IPPR_UDP) && 1331 (Proto != MV_IPPR_ICMPV6) && (Proto != MV_IPPR_IPIP)) 1332 { 1333 return MVPP2_EINVAL; 1334 } 1335 1336 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1337 if (Tid < 0) { 1338 return Tid; 1339 } 1340 1341 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1342 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP6); 1343 Pe.Index = Tid; 1344 1345 /* Finished: go to Flowid generation */ 1346 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS); 1347 Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1348 Mvpp2PrsSramRiUpdate (&Pe, Ri, RiMask); 1349 1350 /* Set offset for protocol 6 bytes relative to IPv6 header size */ 1351 Mvpp2PrsSramOffsetSet ( 1352 &Pe, 1353 MVPP2_PRS_SRAM_UDF_TYPE_L4, 1354 sizeof (Mvpp2Ipv6hdr) - 6, 1355 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 1356 ); 1357 1358 Mvpp2PrsTcamDataByteSet (&Pe, 0, Proto, MVPP2_PRS_TCAM_PROTO_MASK); 1359 Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 1360 1361 /* Unmask all Ports */ 1362 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 1363 1364 /* Write HW */ 1365 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP6); 1366 Mvpp2PrsHwWrite (Priv, &Pe); 1367 1368 return 0; 1369 } 1370 1371 /* IPv6 L3 multicast entry */ 1372 STATIC 1373 INT32 1374 Mvpp2PrsIp6Cast ( 1375 IN MVPP2_SHARED *Priv, 1376 IN UINT16 L3Cast 1377 ) 1378 { 1379 MVPP2_PRS_ENTRY Pe; 1380 INT32 Tid; 1381 1382 if (L3Cast != MVPP2_PRS_L3_MULTI_CAST) { 1383 return MVPP2_EINVAL; 1384 } 1385 1386 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1387 if (Tid < 0) { 1388 return Tid; 1389 } 1390 1391 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1392 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP6); 1393 Pe.Index = Tid; 1394 1395 /* Finished: go to Flowid generation */ 1396 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP6); 1397 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_MCAST, MVPP2_PRS_RI_L3_ADDR_MASK); 1398 Mvpp2PrsSramAiUpdate (&Pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 1399 1400 /* Shift back to IPv6 by 18 bytes - byte count provided by Marvell */ 1401 Mvpp2PrsSramShiftSet (&Pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1402 1403 Mvpp2PrsTcamDataByteSet (&Pe, 0, MVPP2_PRS_IPV6_MC, MVPP2_PRS_IPV6_MC_MASK); 1404 Mvpp2PrsTcamAiUpdate (&Pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 1405 1406 /* Unmask all Ports */ 1407 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 1408 1409 /* Update shadow table and hw entry */ 1410 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP6); 1411 Mvpp2PrsHwWrite (Priv, &Pe); 1412 1413 return 0; 1414 } 1415 1416 /* Parser per-Port initialization */ 1417 STATIC 1418 VOID 1419 Mvpp2PrsHwPortInit ( 1420 IN MVPP2_SHARED *Priv, 1421 IN INT32 PortId, 1422 IN INT32 LuFirst, 1423 IN INT32 LuMax, 1424 IN INT32 Offset 1425 ) 1426 { 1427 UINT32 Val; 1428 1429 /* Set lookup ID */ 1430 Val = Mvpp2Read (Priv, MVPP2_PRS_INIT_LOOKUP_REG); 1431 Val &= ~MVPP2_PRS_PORT_LU_MASK (PortId); 1432 Val |= MVPP2_PRS_PORT_LU_VAL (PortId, LuFirst); 1433 Mvpp2Write (Priv, MVPP2_PRS_INIT_LOOKUP_REG, Val); 1434 1435 /* Set maximum number of loops for packet received from PortId */ 1436 Val = Mvpp2Read (Priv, MVPP2_PRS_MAX_LOOP_REG(PortId)); 1437 Val &= ~MVPP2_PRS_MAX_LOOP_MASK (PortId); 1438 Val |= MVPP2_PRS_MAX_LOOP_VAL (PortId, LuMax); 1439 Mvpp2Write (Priv, MVPP2_PRS_MAX_LOOP_REG(PortId), Val); 1440 1441 /* 1442 * Set initial Offset for packet header extraction for the first 1443 * searching loop 1444 */ 1445 Val = Mvpp2Read (Priv, MVPP2_PRS_INIT_OFFS_REG(PortId)); 1446 Val &= ~MVPP2_PRS_INIT_OFF_MASK (PortId); 1447 Val |= MVPP2_PRS_INIT_OFF_VAL (PortId, Offset); 1448 Mvpp2Write (Priv, MVPP2_PRS_INIT_OFFS_REG(PortId), Val); 1449 } 1450 1451 /* Default Flow entries initialization for all Ports */ 1452 STATIC 1453 VOID 1454 Mvpp2PrsDefFlowInit ( 1455 IN MVPP2_SHARED *Priv 1456 ) 1457 { 1458 MVPP2_PRS_ENTRY Pe; 1459 INT32 PortId; 1460 1461 for (PortId = 0; PortId < MVPP2_MAX_PORTS; PortId++) { 1462 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1463 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_FLOWS); 1464 Pe.Index = MVPP2_PE_FIRST_DEFAULT_FLOW - PortId; 1465 1466 /* Mask all Ports */ 1467 Mvpp2PrsTcamPortMapSet (&Pe, 0); 1468 1469 /* Set Flow ID*/ 1470 Mvpp2PrsSramAiUpdate (&Pe, PortId, MVPP2_PRS_FLOW_ID_MASK); 1471 Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); 1472 1473 /* Update shadow table and hw entry */ 1474 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_FLOWS); 1475 Mvpp2PrsHwWrite (Priv, &Pe); 1476 } 1477 } 1478 1479 /* Set default entry for Marvell Header field */ 1480 STATIC 1481 VOID 1482 Mvpp2PrsMhInit ( 1483 IN MVPP2_SHARED *Priv 1484 ) 1485 { 1486 MVPP2_PRS_ENTRY Pe; 1487 1488 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1489 1490 Pe.Index = MVPP2_PE_MH_DEFAULT; 1491 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_MH); 1492 Mvpp2PrsSramShiftSet (&Pe, MVPP2_MH_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1493 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_MAC); 1494 1495 /* Unmask all Ports */ 1496 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 1497 1498 /* Update shadow table and hw entry */ 1499 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_MH); 1500 Mvpp2PrsHwWrite (Priv, &Pe); 1501 } 1502 1503 /* 1504 * Set default entires (place holder) for promiscuous, non-promiscuous and 1505 * multicast MAC Addresses 1506 */ 1507 STATIC 1508 VOID 1509 Mvpp2PrsMacInit ( 1510 IN MVPP2_SHARED *Priv 1511 ) 1512 { 1513 MVPP2_PRS_ENTRY Pe; 1514 1515 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1516 1517 /* Non-promiscuous mode for all Ports - DROP unknown packets */ 1518 Pe.Index = MVPP2_PE_MAC_NON_PROMISCUOUS; 1519 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_MAC); 1520 1521 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_DROP_MASK, MVPP2_PRS_RI_DROP_MASK); 1522 Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1523 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS); 1524 1525 /* Unmask all Ports */ 1526 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 1527 1528 /* Update shadow table and hw entry */ 1529 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_MAC); 1530 Mvpp2PrsHwWrite (Priv, &Pe); 1531 1532 /* Place holders only - no Ports */ 1533 Mvpp2PrsMacDropAllSet (Priv, 0, FALSE); 1534 Mvpp2PrsMacPromiscSet (Priv, 0, FALSE); 1535 Mvpp2PrsMacMultiSet (Priv, MVPP2_PE_MAC_MC_ALL, 0, FALSE); 1536 Mvpp2PrsMacMultiSet (Priv, MVPP2_PE_MAC_MC_IP6, 0, FALSE); 1537 } 1538 1539 /* Set default entries for various types of dsa packets */ 1540 STATIC 1541 VOID 1542 Mvpp2PrsDsaInit ( 1543 IN MVPP2_SHARED *Priv 1544 ) 1545 { 1546 MVPP2_PRS_ENTRY Pe; 1547 1548 /* None tagged EDSA entry - place holder */ 1549 Mvpp2PrsDsaTagSet (Priv, 0, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); 1550 1551 /* Tagged EDSA entry - place holder */ 1552 Mvpp2PrsDsaTagSet (Priv, 0, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); 1553 1554 /* None tagged DSA entry - place holder */ 1555 Mvpp2PrsDsaTagSet (Priv, 0, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); 1556 1557 /* Tagged DSA entry - place holder */ 1558 Mvpp2PrsDsaTagSet (Priv, 0, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); 1559 1560 /* None tagged EDSA ethertype entry - place holder*/ 1561 Mvpp2PrsDsaTagEthertypeSet (Priv, 0, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); 1562 1563 /* Tagged EDSA ethertype entry - place holder*/ 1564 Mvpp2PrsDsaTagEthertypeSet (Priv, 0, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); 1565 1566 /* None tagged DSA ethertype entry */ 1567 Mvpp2PrsDsaTagEthertypeSet (Priv, 0, TRUE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); 1568 1569 /* Tagged DSA ethertype entry */ 1570 Mvpp2PrsDsaTagEthertypeSet (Priv, 0, TRUE, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); 1571 1572 /* Set default entry, in case DSA or EDSA tag not found */ 1573 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1574 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_DSA); 1575 Pe.Index = MVPP2_PE_DSA_DEFAULT; 1576 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_VLAN); 1577 1578 /* Shift 0 bytes */ 1579 Mvpp2PrsSramShiftSet (&Pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1580 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_MAC); 1581 1582 /* Clear all Sram ai bits for next iteration */ 1583 Mvpp2PrsSramAiUpdate (&Pe, 0, MVPP2_PRS_SRAM_AI_MASK); 1584 1585 /* Unmask all Ports */ 1586 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 1587 1588 Mvpp2PrsHwWrite (Priv, &Pe); 1589 } 1590 1591 /* Match basic ethertypes */ 1592 STATIC 1593 INT32 1594 Mvpp2PrsEtypeInit ( 1595 IN MVPP2_SHARED *Priv 1596 ) 1597 { 1598 MVPP2_PRS_ENTRY Pe; 1599 INT32 Tid; 1600 1601 /* Ethertype: PPPoE */ 1602 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1603 if (Tid < 0) { 1604 return Tid; 1605 } 1606 1607 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1608 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_L2); 1609 Pe.Index = Tid; 1610 1611 /* Set PPPoE type offset to 0 - obtained from Marvell */ 1612 Mvpp2PrsMatchEtype (&Pe, 0, MV_ETH_P_PPP_SES); 1613 1614 Mvpp2PrsSramShiftSet (&Pe, MVPP2_PPPOE_HDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1615 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_PPPOE); 1616 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_PPPOE_MASK, MVPP2_PRS_RI_PPPOE_MASK); 1617 1618 /* Update shadow table and hw entry */ 1619 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2); 1620 Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF; 1621 Priv->PrsShadow[Pe.Index].Finish = FALSE; 1622 Mvpp2PrsShadowRiSet (Priv, Pe.Index, MVPP2_PRS_RI_PPPOE_MASK, MVPP2_PRS_RI_PPPOE_MASK); 1623 Mvpp2PrsHwWrite (Priv, &Pe); 1624 1625 /* Ethertype: ARP */ 1626 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1627 if (Tid < 0) { 1628 return Tid; 1629 } 1630 1631 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1632 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_L2); 1633 Pe.Index = Tid; 1634 1635 /* Set ARP type offset to 0 - obtained from Marvell */ 1636 Mvpp2PrsMatchEtype (&Pe, 0, MV_ETH_P_ARP); 1637 1638 /* Generate Flow in the next iteration*/ 1639 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS); 1640 Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1641 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_ARP, MVPP2_PRS_RI_L3_PROTO_MASK); 1642 1643 /* Set L3 Offset */ 1644 Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 1645 1646 /* Update shadow table and hw entry */ 1647 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2); 1648 Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF; 1649 Priv->PrsShadow[Pe.Index].Finish = TRUE; 1650 Mvpp2PrsShadowRiSet (Priv, Pe.Index, MVPP2_PRS_RI_L3_ARP, MVPP2_PRS_RI_L3_PROTO_MASK); 1651 Mvpp2PrsHwWrite (Priv, &Pe); 1652 1653 /* Ethertype: LBTD */ 1654 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1655 if (Tid < 0) { 1656 return Tid; 1657 } 1658 1659 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1660 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_L2); 1661 Pe.Index = Tid; 1662 1663 /* Set LBTD type offset to 0 - obtained from Marvell */ 1664 Mvpp2PrsMatchEtype (&Pe, 0, MVPP2_IP_LBDT_TYPE); 1665 1666 /* Generate Flow in the next iteration*/ 1667 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS); 1668 Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1669 Mvpp2PrsSramRiUpdate ( 1670 &Pe, 1671 MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL, 1672 MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK 1673 ); 1674 1675 /* Set L3 Offset */ 1676 Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 1677 1678 /* Update shadow table and hw entry */ 1679 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2); 1680 Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF; 1681 Priv->PrsShadow[Pe.Index].Finish = TRUE; 1682 Mvpp2PrsShadowRiSet ( 1683 Priv, 1684 Pe.Index, 1685 MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL, 1686 MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK 1687 ); 1688 1689 Mvpp2PrsHwWrite (Priv, &Pe); 1690 1691 /* Ethertype: IPv4 without options */ 1692 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1693 if (Tid < 0) { 1694 return Tid; 1695 } 1696 1697 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1698 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_L2); 1699 Pe.Index = Tid; 1700 1701 /* Set IPv4 type offset to 0 - obtained from Marvell */ 1702 Mvpp2PrsMatchEtype (&Pe, 0, MV_ETH_P_IP); 1703 Mvpp2PrsTcamDataByteSet ( 1704 &Pe, 1705 MVPP2_ETH_TYPE_LEN, 1706 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, 1707 MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK 1708 ); 1709 1710 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP4); 1711 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK); 1712 1713 /* Skip EthType + 4 bytes of IP header */ 1714 Mvpp2PrsSramShiftSet (&Pe, MVPP2_ETH_TYPE_LEN + 4, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1715 1716 /* Set L3 Offset */ 1717 Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 1718 1719 /* Update shadow table and hw entry */ 1720 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2); 1721 Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF; 1722 Priv->PrsShadow[Pe.Index].Finish = FALSE; 1723 Mvpp2PrsShadowRiSet (Priv, Pe.Index, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK); 1724 Mvpp2PrsHwWrite (Priv, &Pe); 1725 1726 /* Ethertype: IPv4 with options */ 1727 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1728 if (Tid < 0) { 1729 return Tid; 1730 } 1731 1732 Pe.Index = Tid; 1733 1734 /* Clear Tcam data before updating */ 1735 Pe.Tcam.Byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0; 1736 Pe.Tcam.Byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0; 1737 1738 Mvpp2PrsTcamDataByteSet (&Pe, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_IPV4_HEAD, MVPP2_PRS_IPV4_HEAD_MASK); 1739 1740 /* Clear Ri before updating */ 1741 Pe.Sram.Word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; 1742 Pe.Sram.Word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; 1743 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_IP4_OPT, MVPP2_PRS_RI_L3_PROTO_MASK); 1744 1745 /* Update shadow table and hw entry */ 1746 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2); 1747 Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF; 1748 Priv->PrsShadow[Pe.Index].Finish = FALSE; 1749 Mvpp2PrsShadowRiSet (Priv, Pe.Index, MVPP2_PRS_RI_L3_IP4_OPT, MVPP2_PRS_RI_L3_PROTO_MASK); 1750 Mvpp2PrsHwWrite (Priv, &Pe); 1751 1752 /* Ethertype: IPv6 without options */ 1753 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1754 if (Tid < 0) { 1755 return Tid; 1756 } 1757 1758 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1759 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_L2); 1760 Pe.Index = Tid; 1761 1762 /* Set IPv6 type offset to 0 - obtained from Marvell */ 1763 Mvpp2PrsMatchEtype (&Pe, 0, MV_ETH_P_IPV6); 1764 1765 /* Skip DIP of IPV6 header - value provided by Marvell */ 1766 Mvpp2PrsSramShiftSet (&Pe, MVPP2_ETH_TYPE_LEN + 8 + MVPP2_MAX_L3_ADDR_SIZE, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1767 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP6); 1768 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK); 1769 1770 /* Set L3 Offset */ 1771 Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 1772 1773 /* Update shadow table and hw entry */ 1774 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2); 1775 Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF; 1776 Priv->PrsShadow[Pe.Index].Finish = FALSE; 1777 Mvpp2PrsShadowRiSet (Priv, Pe.Index, MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK); 1778 Mvpp2PrsHwWrite (Priv, &Pe); 1779 1780 /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */ 1781 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1782 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_L2); 1783 Pe.Index = MVPP2_PE_ETH_TYPE_UN; 1784 1785 /* Unmask all Ports */ 1786 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 1787 1788 /* Generate Flow in the next iteration*/ 1789 Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1790 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS); 1791 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_UN, MVPP2_PRS_RI_L3_PROTO_MASK); 1792 1793 /* Set L3 Offset even it's unknown L3 */ 1794 Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 1795 1796 /* Update shadow table and hw entry */ 1797 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_L2); 1798 Priv->PrsShadow[Pe.Index].Udf = MVPP2_PRS_UDF_L2_DEF; 1799 Priv->PrsShadow[Pe.Index].Finish = TRUE; 1800 Mvpp2PrsShadowRiSet (Priv, Pe.Index, MVPP2_PRS_RI_L3_UN, MVPP2_PRS_RI_L3_PROTO_MASK); 1801 Mvpp2PrsHwWrite (Priv, &Pe); 1802 1803 return 0; 1804 } 1805 1806 /* 1807 * Configure vlan entries and detect up to 2 successive VLAN tags. 1808 * Possible options: 1809 * 0x8100, 0x88A8 1810 * 0x8100, 0x8100 1811 * 0x8100, 0x88A8 1812 */ 1813 STATIC 1814 INT32 1815 Mvpp2PrsVlanInit ( 1816 IN MVPP2_SHARED *Priv 1817 ) 1818 { 1819 MVPP2_PRS_ENTRY Pe; 1820 INT32 Err; 1821 1822 /* Double VLAN: 0x8100, 0x88A8 */ 1823 Err = Mvpp2PrsDoubleVlanAdd (Priv, MV_ETH_P_8021Q, MV_ETH_P_8021AD, MVPP2_PRS_PORT_MASK); 1824 if (Err != 0) { 1825 return Err; 1826 } 1827 1828 /* Double VLAN: 0x8100, 0x8100 */ 1829 Err = Mvpp2PrsDoubleVlanAdd (Priv, MV_ETH_P_8021Q, MV_ETH_P_8021Q, MVPP2_PRS_PORT_MASK); 1830 if (Err != 0) { 1831 return Err; 1832 } 1833 1834 /* Single VLAN: 0x88a8 */ 1835 Err = Mvpp2PrsVlanAdd (Priv, MV_ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI, MVPP2_PRS_PORT_MASK); 1836 if (Err != 0) { 1837 return Err; 1838 } 1839 1840 /* Single VLAN: 0x8100 */ 1841 Err = Mvpp2PrsVlanAdd (Priv, MV_ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI, MVPP2_PRS_PORT_MASK); 1842 if (Err != 0) { 1843 return Err; 1844 } 1845 1846 /* Set default double vlan entry */ 1847 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1848 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_VLAN); 1849 Pe.Index = MVPP2_PE_VLAN_DBL; 1850 1851 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_L2); 1852 1853 /* Clear ai for next iterations */ 1854 Mvpp2PrsSramAiUpdate (&Pe, 0, MVPP2_PRS_SRAM_AI_MASK); 1855 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_VLAN_DOUBLE, MVPP2_PRS_RI_VLAN_MASK); 1856 1857 Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_DBL_VLAN_AI_BIT, MVPP2_PRS_DBL_VLAN_AI_BIT); 1858 1859 /* Unmask all Ports */ 1860 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 1861 1862 /* Update shadow table and hw entry */ 1863 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_VLAN); 1864 Mvpp2PrsHwWrite (Priv, &Pe); 1865 1866 /* Set default vlan none entry */ 1867 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1868 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_VLAN); 1869 Pe.Index = MVPP2_PE_VLAN_NONE; 1870 1871 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_L2); 1872 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_VLAN_NONE, MVPP2_PRS_RI_VLAN_MASK); 1873 1874 /* Unmask all Ports */ 1875 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 1876 1877 /* Update shadow table and hw entry */ 1878 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_VLAN); 1879 Mvpp2PrsHwWrite (Priv, &Pe); 1880 1881 return 0; 1882 } 1883 1884 /* Set entries for PPPoE ethertype */ 1885 STATIC 1886 INT32 1887 Mvpp2PrsPppoeInit ( 1888 IN MVPP2_SHARED *Priv 1889 ) 1890 { 1891 MVPP2_PRS_ENTRY Pe; 1892 INT32 Tid; 1893 1894 /* IPv4 over PPPoE with options */ 1895 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1896 if (Tid < 0) { 1897 return Tid; 1898 } 1899 1900 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1901 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_PPPOE); 1902 Pe.Index = Tid; 1903 1904 /* Set IPv4 over PPPoE type offset to 0 - obtained from Marvell */ 1905 Mvpp2PrsMatchEtype (&Pe, 0, MV_PPP_IP); 1906 1907 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP4); 1908 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_IP4_OPT, MVPP2_PRS_RI_L3_PROTO_MASK); 1909 1910 /* Skip EthType + 4 bytes of IP header */ 1911 Mvpp2PrsSramShiftSet (&Pe, MVPP2_ETH_TYPE_LEN + 4, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1912 1913 /* Set L3 Offset */ 1914 Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 1915 1916 /* Update shadow table and hw entry */ 1917 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_PPPOE); 1918 Mvpp2PrsHwWrite (Priv, &Pe); 1919 1920 /* IPv4 over PPPoE without options */ 1921 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1922 if (Tid < 0) { 1923 return Tid; 1924 } 1925 1926 Pe.Index = Tid; 1927 1928 Mvpp2PrsTcamDataByteSet ( 1929 &Pe, 1930 MVPP2_ETH_TYPE_LEN, 1931 MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, 1932 MVPP2_PRS_IPV4_HEAD_MASK | MVPP2_PRS_IPV4_IHL_MASK 1933 ); 1934 1935 /* Clear Ri before updating */ 1936 Pe.Sram.Word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; 1937 Pe.Sram.Word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; 1938 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_IP4, MVPP2_PRS_RI_L3_PROTO_MASK); 1939 1940 /* Update shadow table and hw entry */ 1941 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_PPPOE); 1942 Mvpp2PrsHwWrite (Priv, &Pe); 1943 1944 /* IPv6 over PPPoE */ 1945 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1946 if (Tid < 0) { 1947 return Tid; 1948 } 1949 1950 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1951 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_PPPOE); 1952 Pe.Index = Tid; 1953 1954 /* Set IPv6 over PPPoE type offset to 0 - obtained from Marvell */ 1955 Mvpp2PrsMatchEtype (&Pe, 0, MV_PPP_IPV6); 1956 1957 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP6); 1958 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_IP6, MVPP2_PRS_RI_L3_PROTO_MASK); 1959 1960 /* Skip EthType + 4 bytes of IPv6 header */ 1961 Mvpp2PrsSramShiftSet (&Pe, MVPP2_ETH_TYPE_LEN + 4, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 1962 1963 /* Set L3 Offset */ 1964 Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 1965 1966 /* Update shadow table and hw entry */ 1967 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_PPPOE); 1968 Mvpp2PrsHwWrite (Priv, &Pe); 1969 1970 /* Non-IP over PPPoE */ 1971 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 1972 if (Tid < 0) { 1973 return Tid; 1974 } 1975 1976 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 1977 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_PPPOE); 1978 Pe.Index = Tid; 1979 1980 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_UN, MVPP2_PRS_RI_L3_PROTO_MASK); 1981 1982 /* Finished: go to Flowid generation */ 1983 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS); 1984 Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 1985 1986 /* Set L3 Offset even if it's unknown L3 */ 1987 Mvpp2PrsSramOffsetSet (&Pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, MVPP2_ETH_TYPE_LEN, MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); 1988 1989 /* Update shadow table and hw entry */ 1990 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_PPPOE); 1991 Mvpp2PrsHwWrite (Priv, &Pe); 1992 1993 return 0; 1994 } 1995 1996 /* Initialize entries for IPv4 */ 1997 STATIC 1998 INT32 1999 Mvpp2PrsIp4Init ( 2000 IN MVPP2_SHARED *Priv 2001 ) 2002 { 2003 MVPP2_PRS_ENTRY Pe; 2004 INT32 Err; 2005 2006 /* Set entries for TCP, UDP and IGMP over IPv4 */ 2007 Err = Mvpp2PrsIp4Proto (Priv, MV_IPPR_TCP, MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_RI_L4_PROTO_MASK); 2008 if (Err != 0) { 2009 return Err; 2010 } 2011 2012 Err = Mvpp2PrsIp4Proto (Priv, MV_IPPR_UDP, MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_RI_L4_PROTO_MASK); 2013 if (Err != 0) { 2014 return Err; 2015 } 2016 2017 Err = Mvpp2PrsIp4Proto ( 2018 Priv, 2019 MV_IPPR_IGMP, 2020 MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL, 2021 MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK 2022 ); 2023 2024 if (Err != 0) { 2025 return Err; 2026 } 2027 2028 /* IPv4 Broadcast */ 2029 Err = Mvpp2PrsIp4Cast (Priv, MVPP2_PRS_L3_BROAD_CAST); 2030 if (Err != 0) { 2031 return Err; 2032 } 2033 2034 /* IPv4 Multicast */ 2035 Err = Mvpp2PrsIp4Cast (Priv, MVPP2_PRS_L3_MULTI_CAST); 2036 if (Err != 0) { 2037 return Err; 2038 } 2039 2040 /* Default IPv4 entry for unknown protocols */ 2041 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 2042 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP4); 2043 Pe.Index = MVPP2_PE_IP4_PROTO_UN; 2044 2045 /* Set next Lu to IPv4 and shift by 12 bytes - obtained from Marvell*/ 2046 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP4); 2047 Mvpp2PrsSramShiftSet (&Pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2048 2049 /* Set L4 offset 4 bytes relative IPv4 header size (current position) */ 2050 Mvpp2PrsSramOffsetSet ( 2051 &Pe, 2052 MVPP2_PRS_SRAM_UDF_TYPE_L4, 2053 sizeof (Mvpp2Iphdr) - 4, 2054 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 2055 ); 2056 2057 Mvpp2PrsSramAiUpdate (&Pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT); 2058 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L4_OTHER, MVPP2_PRS_RI_L4_PROTO_MASK); 2059 2060 Mvpp2PrsTcamAiUpdate (&Pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); 2061 2062 /* Unmask all Ports */ 2063 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 2064 2065 /* Update shadow table and hw entry */ 2066 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4); 2067 Mvpp2PrsHwWrite (Priv, &Pe); 2068 2069 /* Default IPv4 entry for unicast Address */ 2070 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 2071 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP4); 2072 Pe.Index = MVPP2_PE_IP4_ADDR_UN; 2073 2074 /* Finished: go to Flowid generation */ 2075 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS); 2076 Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2077 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_UCAST, MVPP2_PRS_RI_L3_ADDR_MASK); 2078 2079 Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_IPV4_DIP_AI_BIT, MVPP2_PRS_IPV4_DIP_AI_BIT); 2080 2081 /* Unmask all Ports */ 2082 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 2083 2084 /* Update shadow table and hw entry */ 2085 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4); 2086 Mvpp2PrsHwWrite (Priv, &Pe); 2087 2088 return 0; 2089 } 2090 2091 /* Initialize entries for IPv6 */ 2092 STATIC 2093 INT32 2094 Mvpp2PrsIp6Init ( 2095 IN MVPP2_SHARED *Priv 2096 ) 2097 { 2098 MVPP2_PRS_ENTRY Pe; 2099 INT32 Tid, Err; 2100 2101 /* Set entries for TCP, UDP and ICMP over IPv6 */ 2102 Err = Mvpp2PrsIp6Proto (Priv, MV_IPPR_TCP, MVPP2_PRS_RI_L4_TCP, MVPP2_PRS_RI_L4_PROTO_MASK); 2103 if (Err != 0) { 2104 return Err; 2105 } 2106 2107 Err = Mvpp2PrsIp6Proto (Priv, MV_IPPR_UDP, MVPP2_PRS_RI_L4_UDP, MVPP2_PRS_RI_L4_PROTO_MASK); 2108 if (Err != 0) { 2109 return Err; 2110 } 2111 2112 Err = Mvpp2PrsIp6Proto ( 2113 Priv, 2114 MV_IPPR_ICMPV6, 2115 MVPP2_PRS_RI_CPU_CODE_RX_SPEC | MVPP2_PRS_RI_UDF3_RX_SPECIAL, 2116 MVPP2_PRS_RI_CPU_CODE_MASK | MVPP2_PRS_RI_UDF3_MASK 2117 ); 2118 2119 if (Err != 0) { 2120 return Err; 2121 } 2122 2123 /* 2124 * IPv4 is the last header. This is similar case as 6-TCP or 17-UDP 2125 * Result Info: UDF7=1, DS lite 2126 */ 2127 Err = Mvpp2PrsIp6Proto (Priv, MV_IPPR_IPIP, MVPP2_PRS_RI_UDF7_IP6_LITE, MVPP2_PRS_RI_UDF7_MASK); 2128 if (Err != 0) { 2129 return Err; 2130 } 2131 2132 /* IPv6 multicast */ 2133 Err = Mvpp2PrsIp6Cast (Priv, MVPP2_PRS_L3_MULTI_CAST); 2134 if (Err != 0) { 2135 return Err; 2136 } 2137 2138 /* Entry for checking hop limit */ 2139 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, MVPP2_PE_LAST_FREE_TID); 2140 if (Tid < 0) { 2141 return Tid; 2142 } 2143 2144 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 2145 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP6); 2146 Pe.Index = Tid; 2147 2148 /* Finished: go to Flowid generation */ 2149 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS); 2150 Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2151 Mvpp2PrsSramRiUpdate ( 2152 &Pe, 2153 MVPP2_PRS_RI_L3_UN | MVPP2_PRS_RI_DROP_MASK, 2154 MVPP2_PRS_RI_L3_PROTO_MASK | MVPP2_PRS_RI_DROP_MASK 2155 ); 2156 2157 Mvpp2PrsTcamDataByteSet (&Pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK); 2158 Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 2159 2160 /* Update shadow table and hw entry */ 2161 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4); 2162 Mvpp2PrsHwWrite (Priv, &Pe); 2163 2164 /* Default IPv6 entry for unknown protocols */ 2165 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 2166 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP6); 2167 Pe.Index = MVPP2_PE_IP6_PROTO_UN; 2168 2169 /* Finished: go to Flowid generation */ 2170 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS); 2171 Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2172 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L4_OTHER, MVPP2_PRS_RI_L4_PROTO_MASK); 2173 2174 /* Set L4 offset 6 bytes relative IPv6 header size (current position) */ 2175 Mvpp2PrsSramOffsetSet ( 2176 &Pe, 2177 MVPP2_PRS_SRAM_UDF_TYPE_L4, 2178 sizeof (Mvpp2Ipv6hdr) - 6, 2179 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 2180 ); 2181 2182 Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 2183 2184 /* Unmask all Ports */ 2185 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 2186 2187 /* Update shadow table and hw entry */ 2188 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4); 2189 Mvpp2PrsHwWrite (Priv, &Pe); 2190 2191 /* Default IPv6 entry for unknown ext protocols */ 2192 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 2193 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP6); 2194 Pe.Index = MVPP2_PE_IP6_EXT_PROTO_UN; 2195 2196 /* Finished: go to Flowid generation */ 2197 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_FLOWS); 2198 Mvpp2PrsSramBitsSet (&Pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); 2199 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L4_OTHER, MVPP2_PRS_RI_L4_PROTO_MASK); 2200 2201 Mvpp2PrsTcamAiUpdate (&Pe, MVPP2_PRS_IPV6_EXT_AI_BIT, MVPP2_PRS_IPV6_EXT_AI_BIT); 2202 2203 /* Unmask all Ports */ 2204 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 2205 2206 /* Update shadow table and hw entry */ 2207 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP4); 2208 Mvpp2PrsHwWrite (Priv, &Pe); 2209 2210 /* Default IPv6 entry for unicast Address */ 2211 Mvpp2Memset (&Pe, 0, sizeof (MVPP2_PRS_ENTRY)); 2212 Mvpp2PrsTcamLuSet (&Pe, MVPP2_PRS_LU_IP6); 2213 Pe.Index = MVPP2_PE_IP6_ADDR_UN; 2214 2215 /* Finished: go to IPv6 again */ 2216 Mvpp2PrsSramNextLuSet (&Pe, MVPP2_PRS_LU_IP6); 2217 Mvpp2PrsSramRiUpdate (&Pe, MVPP2_PRS_RI_L3_UCAST, MVPP2_PRS_RI_L3_ADDR_MASK); 2218 Mvpp2PrsSramAiUpdate (&Pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 2219 2220 /* Shift back to IPv6 by 18 bytes - byte count provided by Marvell */ 2221 Mvpp2PrsSramShiftSet (&Pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2222 Mvpp2PrsTcamAiUpdate (&Pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); 2223 2224 /* Unmask all Ports */ 2225 Mvpp2PrsTcamPortMapSet (&Pe, MVPP2_PRS_PORT_MASK); 2226 2227 /* Update shadow table and hw entry */ 2228 Mvpp2PrsShadowSet (Priv, Pe.Index, MVPP2_PRS_LU_IP6); 2229 Mvpp2PrsHwWrite (Priv, &Pe); 2230 2231 return 0; 2232 } 2233 2234 /* Parser default initialization */ 2235 INT32 2236 Mvpp2PrsDefaultInit ( 2237 IN MVPP2_SHARED *Priv 2238 ) 2239 { 2240 INT32 Err, Index, i; 2241 2242 /* Enable Tcam table */ 2243 Mvpp2Write (Priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); 2244 2245 /* Clear all Tcam and Sram entries */ 2246 for (Index = 0; Index < MVPP2_PRS_TCAM_SRAM_SIZE; Index++) { 2247 Mvpp2Write (Priv, MVPP2_PRS_TCAM_IDX_REG, Index); 2248 for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) { 2249 Mvpp2Write (Priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); 2250 } 2251 2252 Mvpp2Write (Priv, MVPP2_PRS_SRAM_IDX_REG, Index); 2253 for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) { 2254 Mvpp2Write (Priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); 2255 } 2256 } 2257 2258 /* Invalidate all Tcam entries */ 2259 for (Index = 0; Index < MVPP2_PRS_TCAM_SRAM_SIZE; Index++) { 2260 Mvpp2PrsHwInv (Priv, Index); 2261 } 2262 2263 /* Always start from lookup = 0 */ 2264 for (Index = 0; Index < MVPP2_MAX_PORTS; Index++) { 2265 Mvpp2PrsHwPortInit (Priv, Index, MVPP2_PRS_LU_MH, MVPP2_PRS_PORT_LU_MAX, 0); 2266 } 2267 2268 Mvpp2PrsDefFlowInit (Priv); 2269 2270 Mvpp2PrsMhInit (Priv); 2271 2272 Mvpp2PrsMacInit (Priv); 2273 2274 Mvpp2PrsDsaInit (Priv); 2275 2276 Err = Mvpp2PrsEtypeInit (Priv); 2277 if (Err != 0) { 2278 return Err; 2279 } 2280 2281 Err = Mvpp2PrsVlanInit (Priv); 2282 if (Err != 0) { 2283 return Err; 2284 } 2285 2286 Err = Mvpp2PrsPppoeInit (Priv); 2287 if (Err != 0) { 2288 return Err; 2289 } 2290 2291 Err = Mvpp2PrsIp6Init (Priv); 2292 if (Err != 0) { 2293 return Err; 2294 } 2295 2296 Err = Mvpp2PrsIp4Init (Priv); 2297 if (Err != 0) { 2298 return Err; 2299 } 2300 2301 return 0; 2302 } 2303 2304 /* Compare MAC DA with Tcam entry data */ 2305 STATIC 2306 BOOLEAN 2307 Mvpp2PrsMacRangeEquals ( 2308 IN MVPP2_PRS_ENTRY *Pe, 2309 IN const UINT8 *Da, 2310 IN UINT8 *Mask 2311 ) 2312 { 2313 UINT8 TcamByte, TcamMask; 2314 INT32 Index; 2315 2316 for (Index = 0; Index < MV_ETH_ALEN; Index++) { 2317 Mvpp2PrsTcamDataByteGet (Pe, Index, &TcamByte, &TcamMask); 2318 if (TcamMask != Mask[Index]) { 2319 return FALSE; 2320 } 2321 2322 if ((TcamMask & TcamByte) != (Da[Index] & Mask[Index])) { 2323 return FALSE; 2324 } 2325 } 2326 2327 return TRUE; 2328 } 2329 2330 /* Find Tcam entry with matched pair <MAC DA, Port> */ 2331 STATIC 2332 MVPP2_PRS_ENTRY * 2333 Mvpp2PrsMacDaRangeFind ( 2334 IN MVPP2_SHARED *Priv, 2335 IN INT32 Pmap, 2336 IN const UINT8 *Da, 2337 IN UINT8 *Mask, 2338 IN INT32 UdfType 2339 ) 2340 { 2341 MVPP2_PRS_ENTRY *Pe; 2342 INT32 Tid; 2343 2344 Pe = Mvpp2Alloc (sizeof (*Pe)); 2345 if (Pe == NULL) { 2346 return NULL; 2347 } 2348 Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_MAC); 2349 2350 /* Go through the all entires with MVPP2_PRS_LU_MAC */ 2351 for (Tid = MVPP2_PE_FIRST_FREE_TID; Tid <= MVPP2_PE_LAST_FREE_TID; Tid++) { 2352 UINT32 EntryPmap; 2353 2354 if (!Priv->PrsShadow[Tid].Valid || 2355 (Priv->PrsShadow[Tid].Lu != MVPP2_PRS_LU_MAC) || 2356 (Priv->PrsShadow[Tid].Udf != UdfType)) 2357 { 2358 continue; 2359 } 2360 2361 Pe->Index = Tid; 2362 Mvpp2PrsHwRead (Priv, Pe); 2363 EntryPmap = Mvpp2PrsTcamPortMapGet (Pe); 2364 2365 if (Mvpp2PrsMacRangeEquals (Pe, Da, Mask) && EntryPmap == Pmap) { 2366 return Pe; 2367 } 2368 } 2369 2370 Mvpp2Free (Pe); 2371 2372 return NULL; 2373 } 2374 2375 /* Update parser's mac Da entry */ 2376 INT32 2377 Mvpp2PrsMacDaAccept ( 2378 IN MVPP2_SHARED *Priv, 2379 IN INT32 PortId, 2380 IN const UINT8 *Da, 2381 IN BOOLEAN Add 2382 ) 2383 { 2384 MVPP2_PRS_ENTRY *Pe; 2385 UINT32 Pmap, Len, Ri; 2386 UINT8 Mask[MV_ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 2387 INT32 Tid; 2388 2389 /* Scan TCAM and see if entry with this <MAC DA, PortId> already exist */ 2390 Pe = Mvpp2PrsMacDaRangeFind (Priv, (1 << PortId), Da, Mask, MVPP2_PRS_UDF_MAC_DEF); 2391 2392 /* No such entry */ 2393 if (Pe == NULL) { 2394 if (!Add) { 2395 return 0; 2396 } 2397 2398 /* Create new TCAM entry */ 2399 /* Find first range mac entry*/ 2400 for (Tid = MVPP2_PE_FIRST_FREE_TID; Tid <= MVPP2_PE_LAST_FREE_TID; Tid++) { 2401 if (Priv->PrsShadow[Tid].Valid && 2402 (Priv->PrsShadow[Tid].Lu == MVPP2_PRS_LU_MAC) && 2403 (Priv->PrsShadow[Tid].Udf == MVPP2_PRS_UDF_MAC_RANGE)) 2404 { 2405 break; 2406 } 2407 } 2408 2409 2410 /* Go through the all entries from first to last */ 2411 Tid = Mvpp2PrsTcamFirstFree (Priv, MVPP2_PE_FIRST_FREE_TID, Tid - 1); 2412 if (Tid < 0) { 2413 return Tid; 2414 } 2415 2416 Pe = Mvpp2Alloc (sizeof (*Pe)); 2417 if (Pe == NULL) { 2418 return -1; 2419 } 2420 2421 Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_MAC); 2422 Pe->Index = Tid; 2423 2424 /* Mask all Ports */ 2425 Mvpp2PrsTcamPortMapSet (Pe, 0); 2426 } 2427 2428 /* Update PortId Mask */ 2429 Mvpp2PrsTcamPortSet (Pe, PortId, Add); 2430 2431 /* Invalidate the entry if no Ports are left enabled */ 2432 Pmap = Mvpp2PrsTcamPortMapGet (Pe); 2433 if (Pmap == 0) { 2434 if (Add) { 2435 Mvpp2Free (Pe); 2436 return -1; 2437 } 2438 2439 Mvpp2PrsHwInv (Priv, Pe->Index); 2440 Priv->PrsShadow[Pe->Index].Valid = FALSE; 2441 2442 Mvpp2Free (Pe); 2443 2444 return 0; 2445 } 2446 2447 /* Continue - set next lookup */ 2448 Mvpp2PrsSramNextLuSet (Pe, MVPP2_PRS_LU_DSA); 2449 2450 /* Set match on DA */ 2451 Len = MV_ETH_ALEN; 2452 while (Len--) { 2453 Mvpp2PrsTcamDataByteSet (Pe, Len, Da[Len], 0xff); 2454 } 2455 2456 /* Set result info bits */ 2457 if (Mvpp2IsBroadcastEtherAddr (Da)) { 2458 Ri = MVPP2_PRS_RI_L2_BCAST; 2459 } else if (Mvpp2IsMulticastEtherAddr (Da)) { 2460 Ri = MVPP2_PRS_RI_L2_MCAST; 2461 } else { 2462 Ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK; 2463 } 2464 2465 Mvpp2PrsSramRiUpdate (Pe, Ri, MVPP2_PRS_RI_L2_CAST_MASK | MVPP2_PRS_RI_MAC_ME_MASK); 2466 Mvpp2PrsShadowRiSet (Priv, Pe->Index, Ri, MVPP2_PRS_RI_L2_CAST_MASK | MVPP2_PRS_RI_MAC_ME_MASK); 2467 2468 /* Shift to ethertype */ 2469 Mvpp2PrsSramShiftSet (Pe, 2 * MV_ETH_ALEN, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); 2470 2471 /* Update shadow table and hw entry */ 2472 Priv->PrsShadow[Pe->Index].Udf = MVPP2_PRS_UDF_MAC_DEF; 2473 Mvpp2PrsShadowSet (Priv, Pe->Index, MVPP2_PRS_LU_MAC); 2474 Mvpp2PrsHwWrite (Priv, Pe); 2475 2476 Mvpp2Free (Pe); 2477 2478 return 0; 2479 } 2480 2481 /* Delete all Port's multicast simple (not range) entries */ 2482 VOID 2483 Mvpp2PrsMcastDelAll ( 2484 IN MVPP2_SHARED *Priv, 2485 IN INT32 PortId 2486 ) 2487 { 2488 MVPP2_PRS_ENTRY Pe; 2489 INT32 Index, Tid; 2490 2491 for (Tid = MVPP2_PE_FIRST_FREE_TID; Tid <= MVPP2_PE_LAST_FREE_TID; Tid++) { 2492 UINT8 Da[MV_ETH_ALEN], DaMask[MV_ETH_ALEN]; 2493 2494 if (!Priv->PrsShadow[Tid].Valid || 2495 (Priv->PrsShadow[Tid].Lu != MVPP2_PRS_LU_MAC) || 2496 (Priv->PrsShadow[Tid].Udf != MVPP2_PRS_UDF_MAC_DEF)) 2497 { 2498 continue; 2499 } 2500 2501 /* Only simple mac entries */ 2502 Pe.Index = Tid; 2503 Mvpp2PrsHwRead (Priv, &Pe); 2504 2505 /* Read mac Addr from entry */ 2506 for (Index = 0; Index < MV_ETH_ALEN; Index++) { 2507 Mvpp2PrsTcamDataByteGet (&Pe, Index, &Da[Index], &DaMask[Index]); 2508 } 2509 2510 if (Mvpp2IsMulticastEtherAddr (Da) && !Mvpp2IsBroadcastEtherAddr (Da)) { 2511 /* Delete this entry */ 2512 Mvpp2PrsMacDaAccept (Priv, PortId, Da, FALSE); 2513 } 2514 } 2515 } 2516 2517 INT32 2518 Mvpp2PrsTagModeSet ( 2519 IN MVPP2_SHARED *Priv, 2520 IN INT32 PortId, 2521 IN INT32 Type 2522 ) 2523 { 2524 switch (Type) { 2525 case MVPP2_TAG_TYPE_EDSA: 2526 /* Add PortId to EDSA entries */ 2527 Mvpp2PrsDsaTagSet (Priv, PortId, TRUE, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); 2528 Mvpp2PrsDsaTagSet (Priv, PortId, TRUE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); 2529 /* Remove PortId from DSA entries */ 2530 Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); 2531 Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); 2532 break; 2533 case MVPP2_TAG_TYPE_DSA: 2534 /* Add PortId to DSA entries */ 2535 Mvpp2PrsDsaTagSet (Priv, PortId, TRUE, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); 2536 Mvpp2PrsDsaTagSet (Priv, PortId, TRUE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); 2537 2538 /* Remove PortId from EDSA entries */ 2539 Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); 2540 Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); 2541 break; 2542 case MVPP2_TAG_TYPE_MH: 2543 case MVPP2_TAG_TYPE_NONE: 2544 /* Remove PortId form EDSA and DSA entries */ 2545 Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); 2546 Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); 2547 Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); 2548 Mvpp2PrsDsaTagSet (Priv, PortId, FALSE, MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); 2549 break; 2550 default: 2551 if ((Type < 0) || (Type > MVPP2_TAG_TYPE_EDSA)) { 2552 return MVPP2_EINVAL; 2553 } 2554 } 2555 2556 return 0; 2557 } 2558 2559 /* Set prs Flow for the Port */ 2560 INT32 2561 Mvpp2PrsDefFlow ( 2562 IN PP2DXE_PORT *Port 2563 ) 2564 { 2565 MVPP2_PRS_ENTRY *Pe; 2566 INT32 Tid; 2567 2568 Pe = Mvpp2PrsFlowFind (Port->Priv, Port->Id); 2569 2570 /* Such entry not exist */ 2571 if (Pe == NULL) { 2572 /* Go through the all entires from last to first */ 2573 Tid = Mvpp2PrsTcamFirstFree (Port->Priv, MVPP2_PE_LAST_FREE_TID, MVPP2_PE_FIRST_FREE_TID); 2574 if (Tid < 0) { 2575 return Tid; 2576 } 2577 2578 Pe = Mvpp2Alloc (sizeof (*Pe)); 2579 if (Pe == NULL) { 2580 return MVPP2_ENOMEM; 2581 } 2582 2583 Mvpp2PrsTcamLuSet (Pe, MVPP2_PRS_LU_FLOWS); 2584 Pe->Index = Tid; 2585 2586 /* Set Flow ID*/ 2587 Mvpp2PrsSramAiUpdate (Pe, Port->Id, MVPP2_PRS_FLOW_ID_MASK); 2588 Mvpp2PrsSramBitsSet (Pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); 2589 2590 /* Update shadow table */ 2591 Mvpp2PrsShadowSet (Port->Priv, Pe->Index, MVPP2_PRS_LU_FLOWS); 2592 } 2593 2594 Mvpp2PrsTcamPortMapSet (Pe, (1 << Port->Id)); 2595 Mvpp2PrsHwWrite (Port->Priv, Pe); 2596 Mvpp2Free (Pe); 2597 2598 return 0; 2599 } 2600 2601 /* Classifier configuration routines */ 2602 2603 /* Update classification Flow table RegValisters */ 2604 STATIC 2605 VOID 2606 Mvpp2ClsFlowWrite ( 2607 IN MVPP2_SHARED *Priv, 2608 IN MVPP2_CLS_FLOW_ENTRY *Fe 2609 ) 2610 { 2611 Mvpp2Write (Priv, MVPP2_CLS_FLOW_INDEX_REG, Fe->Index); 2612 Mvpp2Write (Priv, MVPP2_CLS_FLOW_TBL0_REG, Fe->Data[0]); 2613 Mvpp2Write (Priv, MVPP2_CLS_FLOW_TBL1_REG, Fe->Data[1]); 2614 Mvpp2Write (Priv, MVPP2_CLS_FLOW_TBL2_REG, Fe->Data[2]); 2615 } 2616 2617 /* Update classification lookup table RegValister */ 2618 VOID 2619 Mvpp2ClsLookupWrite ( 2620 IN MVPP2_SHARED *Priv, 2621 IN OUT MVPP2_CLS_LOOKUP_ENTRY *Le 2622 ) 2623 { 2624 UINT32 Val; 2625 2626 Val = (Le->Way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | Le->Lkpid; 2627 Mvpp2Write (Priv, MVPP2_CLS_LKP_INDEX_REG, Val); 2628 Mvpp2Write (Priv, MVPP2_CLS_LKP_TBL_REG, Le->Data); 2629 } 2630 2631 /* Classifier default initialization */ 2632 VOID 2633 Mvpp2ClsInit ( 2634 IN MVPP2_SHARED *Priv 2635 ) 2636 { 2637 MVPP2_CLS_LOOKUP_ENTRY Le; 2638 MVPP2_CLS_FLOW_ENTRY Fe; 2639 INT32 Index; 2640 2641 /* Enable classifier */ 2642 Mvpp2Write (Priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); 2643 2644 /* Clear classifier Flow table */ 2645 Mvpp2Memset (&Fe.Data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); 2646 for (Index = 0; Index < MVPP2_CLS_FLOWS_TBL_SIZE; Index++) { 2647 Fe.Index = Index; 2648 Mvpp2ClsFlowWrite (Priv, &Fe); 2649 } 2650 2651 /* Clear classifier lookup table */ 2652 Le.Data = 0; 2653 for (Index = 0; Index < MVPP2_CLS_LKP_TBL_SIZE; Index++) { 2654 Le.Lkpid = Index; 2655 Le.Way = 0; 2656 Mvpp2ClsLookupWrite (Priv, &Le); 2657 2658 Le.Way = 1; 2659 Mvpp2ClsLookupWrite (Priv, &Le); 2660 } 2661 } 2662 2663 VOID 2664 Mvpp2ClsPortConfig ( 2665 IN PP2DXE_PORT *Port 2666 ) 2667 { 2668 MVPP2_CLS_LOOKUP_ENTRY Le; 2669 UINT32 Val; 2670 2671 /* Set way for the Port */ 2672 Val = Mvpp2Read (Port->Priv, MVPP2_CLS_PORT_WAY_REG); 2673 Val &= ~MVPP2_CLS_PORT_WAY_MASK (Port->Id); 2674 Mvpp2Write (Port->Priv, MVPP2_CLS_PORT_WAY_REG, Val); 2675 2676 /* 2677 * Pick the entry to be accessed in lookup ID decoding table 2678 * according to the way and lkpid. 2679 */ 2680 Le.Lkpid = Port->Id; 2681 Le.Way = 0; 2682 Le.Data = 0; 2683 2684 /* Set initial CPU Queue for receiving packets */ 2685 Le.Data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; 2686 Le.Data |= Port->FirstRxq; 2687 2688 /* Disable classification engines */ 2689 Le.Data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; 2690 2691 /* Update lookup ID table entry */ 2692 Mvpp2ClsLookupWrite (Port->Priv, &Le); 2693 } 2694 2695 /* Set CPU Queue number for oversize packets */ 2696 VOID 2697 Mvpp2ClsOversizeRxqSet ( 2698 IN PP2DXE_PORT *Port 2699 ) 2700 { 2701 2702 Mvpp2Write ( 2703 Port->Priv, 2704 MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(Port->Id), 2705 Port->FirstRxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 2706 ); 2707 } 2708 2709 /* BM helper routines */ 2710 2711 VOID 2712 Mvpp2BmPoolHwCreate ( 2713 IN MVPP2_SHARED *Priv, 2714 IN MVPP2_BMS_POOL *BmPool, 2715 IN INT32 Size 2716 ) 2717 { 2718 BmPool->Size = Size; 2719 2720 Mvpp2Write (Priv, MVPP2_BM_POOL_BASE_REG(BmPool->Id), Lower32Bits (BmPool->PhysAddr)); 2721 Mvpp2Write (Priv, MVPP22_BM_POOL_BASE_HIGH_REG, (Upper32Bits (BmPool->PhysAddr) & MVPP22_BM_POOL_BASE_HIGH_REG)); 2722 Mvpp2Write (Priv, MVPP2_BM_POOL_SIZE_REG(BmPool->Id), BmPool->Size); 2723 } 2724 2725 /* Set Pool buffer Size */ 2726 VOID 2727 Mvpp2BmPoolBufsizeSet ( 2728 IN MVPP2_SHARED *Priv, 2729 IN MVPP2_BMS_POOL *BmPool, 2730 IN INT32 BufSize 2731 ) 2732 { 2733 UINT32 Val; 2734 2735 BmPool->BufSize = BufSize; 2736 2737 Val = MVPP2_ALIGN (BufSize, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); 2738 Mvpp2Write (Priv, MVPP2_POOL_BUF_SIZE_REG(BmPool->Id), Val); 2739 } 2740 2741 VOID 2742 Mvpp2BmStop ( 2743 IN MVPP2_SHARED *Priv, 2744 IN INT32 Pool 2745 ) 2746 { 2747 UINT32 Val, i; 2748 2749 for (i = 0; i < MVPP2_BM_SIZE; i++) { 2750 Mvpp2Read (Priv, MVPP2_BM_PHY_ALLOC_REG(0)); 2751 } 2752 2753 Val = Mvpp2Read (Priv, MVPP2_BM_POOL_CTRL_REG(Pool)); 2754 Val |= MVPP2_BM_STOP_MASK; 2755 Mvpp2Write (Priv, MVPP2_BM_POOL_CTRL_REG(Pool), Val); 2756 } 2757 2758 VOID 2759 Mvpp2BmIrqClear ( 2760 IN MVPP2_SHARED *Priv, 2761 IN INT32 Pool 2762 ) 2763 { 2764 /* Mask BM all interrupts */ 2765 Mvpp2Write (Priv, MVPP2_BM_INTR_MASK_REG(Pool), 0); 2766 2767 /* Clear BM cause RegValister */ 2768 Mvpp2Write (Priv, MVPP2_BM_INTR_CAUSE_REG(Pool), 0); 2769 } 2770 2771 /* Attach long Pool to Rxq */ 2772 VOID 2773 Mvpp2RxqLongPoolSet ( 2774 IN PP2DXE_PORT *Port, 2775 IN INT32 Lrxq, 2776 IN INT32 LongPool 2777 ) 2778 { 2779 UINT32 Val; 2780 INT32 Prxq; 2781 2782 /* Get Queue physical ID */ 2783 Prxq = Port->Rxqs[Lrxq].Id; 2784 2785 Val = Mvpp2Read (Port->Priv, MVPP2_RXQ_CONFIG_REG(Prxq)); 2786 Val &= ~MVPP2_RXQ_POOL_LONG_MASK; 2787 Val |= ((LongPool << MVPP2_RXQ_POOL_LONG_OFFS) & MVPP2_RXQ_POOL_LONG_MASK); 2788 2789 Mvpp2Write (Port->Priv, MVPP2_RXQ_CONFIG_REG(Prxq), Val); 2790 } 2791 2792 /* Attach short Pool to Rxq */ 2793 VOID 2794 Mvpp2RxqShortPoolSet ( 2795 IN PP2DXE_PORT *Port, 2796 IN INT32 Lrxq, 2797 IN INT32 ShortPool 2798 ) 2799 { 2800 UINT32 Val; 2801 INT32 Prxq; 2802 2803 /* Get Queue physical ID */ 2804 Prxq = Port->Rxqs[Lrxq].Id; 2805 2806 Val = Mvpp2Read (Port->Priv, MVPP2_RXQ_CONFIG_REG(Prxq)); 2807 Val &= ~MVPP2_RXQ_POOL_SHORT_MASK; 2808 Val |= ((ShortPool << MVPP2_RXQ_POOL_SHORT_OFFS) & MVPP2_RXQ_POOL_SHORT_MASK); 2809 2810 Mvpp2Write (Port->Priv, MVPP2_RXQ_CONFIG_REG(Prxq), Val); 2811 } 2812 2813 /* Release multicast buffer */ 2814 VOID 2815 Mvpp2BmPoolMcPut ( 2816 IN PP2DXE_PORT *Port, 2817 IN INT32 Pool, 2818 IN UINT32 BufPhysAddr, 2819 IN UINT32 BufVirtAddr, 2820 IN INT32 McId 2821 ) 2822 { 2823 UINT32 Val = 0; 2824 2825 Val |= (McId & MVPP2_BM_MC_ID_MASK); 2826 Mvpp2Write (Port->Priv, MVPP2_BM_MC_RLS_REG, Val); 2827 2828 Mvpp2BmPoolPut (Port->Priv, Pool, BufPhysAddr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK, BufVirtAddr); 2829 } 2830 2831 /* Refill BM Pool */ 2832 VOID 2833 Mvpp2PoolRefill ( 2834 IN PP2DXE_PORT *Port, 2835 IN UINT32 Bm, 2836 IN UINT32 PhysAddr, 2837 IN UINT32 cookie 2838 ) 2839 { 2840 INT32 Pool = Mvpp2BmCookiePoolGet (Bm); 2841 2842 Mvpp2BmPoolPut (Port->Priv, Pool, PhysAddr, cookie); 2843 } 2844 2845 INTN 2846 Mvpp2BmPoolCtrl ( 2847 IN MVPP2_SHARED *Priv, 2848 IN INTN Pool, 2849 IN enum Mvpp2Command Cmd 2850 ) 2851 { 2852 UINT32 RegVal = 0; 2853 RegVal = Mvpp2Read (Priv, MVPP2_BM_POOL_CTRL_REG(Pool)); 2854 2855 switch (Cmd) { 2856 case MVPP2_START: 2857 RegVal |= MVPP2_BM_START_MASK; 2858 break; 2859 2860 case MVPP2_STOP: 2861 RegVal |= MVPP2_BM_STOP_MASK; 2862 break; 2863 2864 default: 2865 return -1; 2866 } 2867 Mvpp2Write (Priv, MVPP2_BM_POOL_CTRL_REG(Pool), RegVal); 2868 2869 return 0; 2870 } 2871 2872 /* Mask the current CPU's Rx/Tx interrupts */ 2873 VOID 2874 Mvpp2InterruptsMask ( 2875 IN VOID *arg 2876 ) 2877 { 2878 PP2DXE_PORT *Port = arg; 2879 2880 Mvpp2Write (Port->Priv, MVPP2_ISR_RX_TX_MASK_REG(Port->Id), 0); 2881 } 2882 2883 /* Unmask the current CPU's Rx/Tx interrupts */ 2884 VOID 2885 Mvpp2InterruptsUnmask ( 2886 IN VOID *arg 2887 ) 2888 { 2889 PP2DXE_PORT *Port = arg; 2890 2891 Mvpp2Write ( 2892 Port->Priv, 2893 MVPP2_ISR_RX_TX_MASK_REG(Port->Id), 2894 (MVPP2_CAUSE_MISC_SUM_MASK | MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK) 2895 ); 2896 } 2897 2898 /* MAC configuration routines */ 2899 2900 STATIC 2901 VOID 2902 Mvpp2PortMiiSet ( 2903 IN PP2DXE_PORT *Port 2904 ) 2905 { 2906 UINT32 Val; 2907 2908 Val = Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_2_REG); 2909 2910 switch (Port->PhyInterface) { 2911 case MV_MODE_SGMII: 2912 Val |= MVPP2_GMAC_INBAND_AN_MASK; 2913 break; 2914 case MV_MODE_RGMII: 2915 Val |= MVPP2_GMAC_PORT_RGMII_MASK; 2916 default: 2917 Val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; 2918 } 2919 2920 Mvpp2GmacWrite (Port, MVPP2_GMAC_CTRL_2_REG, Val); 2921 } 2922 2923 STATIC 2924 VOID Mvpp2PortFcAdvEnable ( 2925 IN PP2DXE_PORT *Port 2926 ) 2927 { 2928 UINT32 Val; 2929 2930 Val = Mvpp2GmacRead (Port, MVPP2_GMAC_AUTONEG_CONFIG); 2931 Val |= MVPP2_GMAC_FC_ADV_EN; 2932 Mvpp2GmacWrite (Port, MVPP2_GMAC_AUTONEG_CONFIG, Val); 2933 } 2934 2935 VOID 2936 Mvpp2PortEnable ( 2937 IN PP2DXE_PORT *Port 2938 ) 2939 { 2940 UINT32 Val; 2941 2942 Val = Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_0_REG); 2943 Val |= MVPP2_GMAC_PORT_EN_MASK; 2944 Val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; 2945 Mvpp2GmacWrite (Port, MVPP2_GMAC_CTRL_0_REG, Val); 2946 } 2947 2948 VOID 2949 Mvpp2PortDisable ( 2950 IN PP2DXE_PORT *Port 2951 ) 2952 { 2953 UINT32 Val; 2954 2955 Val = Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_0_REG); 2956 Val &= ~(MVPP2_GMAC_PORT_EN_MASK); 2957 Mvpp2GmacWrite (Port, MVPP2_GMAC_CTRL_0_REG, Val); 2958 } 2959 2960 /* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ 2961 STATIC 2962 VOID 2963 Mvpp2PortPeriodicXonDisable ( 2964 IN PP2DXE_PORT *Port 2965 ) 2966 { 2967 UINT32 Val; 2968 2969 Val = Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_1_REG) & ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; 2970 Mvpp2GmacWrite (Port, MVPP2_GMAC_CTRL_1_REG, Val); 2971 } 2972 2973 /* Configure loopback Port */ 2974 STATIC 2975 VOID 2976 Mvpp2PortReset ( 2977 IN PP2DXE_PORT *Port 2978 ) 2979 { 2980 UINT32 Val; 2981 2982 Val = Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_2_REG) & ~MVPP2_GMAC_PORT_RESET_MASK; 2983 Mvpp2GmacWrite (Port, MVPP2_GMAC_CTRL_2_REG, Val); 2984 2985 while (Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_2_REG) & MVPP2_GMAC_PORT_RESET_MASK) { 2986 continue; 2987 } 2988 } 2989 2990 /* Set defaults to the MVPP2 Port */ 2991 VOID 2992 Mvpp2DefaultsSet ( 2993 IN PP2DXE_PORT *Port 2994 ) 2995 { 2996 INT32 TxPortNum, Val, Queue, pTxq; 2997 2998 /* Disable Legacy WRR, Disable EJP, Release from Reset */ 2999 TxPortNum = Mvpp2EgressPort (Port); 3000 Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, TxPortNum); 3001 Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); 3002 3003 /* Close bandwidth for all Queues */ 3004 for (Queue = 0; Queue < MVPP2_MAX_TXQ; Queue++) { 3005 pTxq = Mvpp2TxqPhys (Port->Id, Queue); 3006 Mvpp2Write (Port->Priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(pTxq), 0); 3007 } 3008 3009 3010 /* Set refill period to 1 Usec, refill tokens and bucket Size to maximum */ 3011 Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_PERIOD_REG, Port->Priv->Tclk / MVPP2_USEC_PER_SEC); 3012 Val = Mvpp2Read (Port->Priv, MVPP2_TXP_SCHED_REFILL_REG); 3013 Val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; 3014 Val |= MVPP2_TXP_REFILL_PERIOD_MASK (1); 3015 Val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; 3016 Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_REFILL_REG, Val); 3017 Val = MVPP2_TXP_TOKEN_SIZE_MAX; 3018 Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, Val); 3019 3020 /* Set MaximumLowLatencyPacketSize value to 256 */ 3021 Mvpp2Write ( 3022 Port->Priv, 3023 MVPP2_RX_CTRL_REG(Port->Id), 3024 MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | MVPP2_RX_LOW_LATENCY_PKT_SIZE (256) 3025 ); 3026 3027 /* Mask all interrupts to all present cpus */ 3028 Mvpp2InterruptsDisable (Port, 0x1); 3029 } 3030 3031 /* Enable/disable receiving packets */ 3032 VOID 3033 Mvpp2IngressEnable ( 3034 IN PP2DXE_PORT *Port 3035 ) 3036 { 3037 UINT32 Val; 3038 INT32 Lrxq, Queue; 3039 3040 for (Lrxq = 0; Lrxq < RxqNumber; Lrxq++) { 3041 Queue = Port->Rxqs[Lrxq].Id; 3042 Val = Mvpp2Read (Port->Priv, MVPP2_RXQ_CONFIG_REG(Queue)); 3043 Val &= ~MVPP2_RXQ_DISABLE_MASK; 3044 Mvpp2Write (Port->Priv, MVPP2_RXQ_CONFIG_REG(Queue), Val); 3045 } 3046 } 3047 3048 VOID 3049 Mvpp2IngressDisable ( 3050 IN PP2DXE_PORT *Port 3051 ) 3052 { 3053 UINT32 Val; 3054 INT32 Lrxq, Queue; 3055 3056 for (Lrxq = 0; Lrxq < RxqNumber; Lrxq++) { 3057 Queue = Port->Rxqs[Lrxq].Id; 3058 Val = Mvpp2Read (Port->Priv, MVPP2_RXQ_CONFIG_REG(Queue)); 3059 Val |= MVPP2_RXQ_DISABLE_MASK; 3060 Mvpp2Write (Port->Priv, MVPP2_RXQ_CONFIG_REG(Queue), Val); 3061 } 3062 } 3063 3064 /* Enable transmit via physical egress Queue - HW starts take descriptors from DRAM */ 3065 VOID 3066 Mvpp2EgressEnable ( 3067 IN PP2DXE_PORT *Port 3068 ) 3069 { 3070 UINT32 qmap; 3071 INT32 Queue; 3072 INT32 TxPortNum = Mvpp2EgressPort (Port); 3073 3074 /* Enable all initialized TXs. */ 3075 qmap = 0; 3076 for (Queue = 0; Queue < TxqNumber; Queue++) { 3077 MVPP2_TX_QUEUE *Txq = &Port->Txqs[Queue]; 3078 3079 if (Txq->Descs != NULL) { 3080 qmap |= (1 << Queue); 3081 } 3082 } 3083 3084 Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, TxPortNum); 3085 Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); 3086 } 3087 3088 /* Disable transmit via physical egress Queue - HW doesn't take descriptors from DRAM */ 3089 VOID 3090 Mvpp2EgressDisable ( 3091 IN PP2DXE_PORT *Port 3092 ) 3093 { 3094 UINT32 RegData; 3095 INT32 Delay; 3096 INT32 TxPortNum = Mvpp2EgressPort (Port); 3097 3098 /* Issue stop command for active channels only */ 3099 Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, TxPortNum); 3100 RegData = (Mvpp2Read (Port->Priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & MVPP2_TXP_SCHED_ENQ_MASK; 3101 if (RegData != 0) { 3102 Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_Q_CMD_REG, (RegData << MVPP2_TXP_SCHED_DISQ_OFFSET)); 3103 } 3104 3105 /* Wait for all Tx activity to terminate. */ 3106 Delay = 0; 3107 do { 3108 if (Delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { 3109 Mvpp2Printf ("Tx stop timed out, status=0x%08x\n", RegData); 3110 break; 3111 } 3112 Mvpp2Mdelay (1); 3113 Delay++; 3114 3115 /* Check Port TX Command RegValister that all Tx Queues are stopped */ 3116 RegData = Mvpp2Read (Port->Priv, MVPP2_TXP_SCHED_Q_CMD_REG); 3117 } while (RegData & MVPP2_TXP_SCHED_ENQ_MASK); 3118 } 3119 3120 /* Rx descriptors helper methods */ 3121 3122 /* Set rx Queue Offset */ 3123 STATIC 3124 VOID 3125 Mvpp2RxqOffsetSet ( 3126 IN PP2DXE_PORT *Port, 3127 IN INT32 Prxq, 3128 IN INT32 Offset 3129 ) 3130 { 3131 UINT32 Val; 3132 3133 /* Convert Offset from bytes to units of 32 bytes */ 3134 Offset = Offset >> 5; 3135 3136 /* Clear previous value */ 3137 Val = Mvpp2Read (Port->Priv, MVPP2_RXQ_CONFIG_REG(Prxq)); 3138 Val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; 3139 3140 /* Update packet Offset in received buffer */ 3141 Val |= ((Offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & MVPP2_RXQ_PACKET_OFFSET_MASK); 3142 Mvpp2Write (Port->Priv, MVPP2_RXQ_CONFIG_REG(Prxq), Val); 3143 } 3144 3145 /* Obtain BM cookie information from descriptor */ 3146 UINT32 3147 Mvpp2BmCookieBuild ( 3148 IN MVPP2_RX_DESC *RxDesc, 3149 IN INT32 Cpu 3150 ) 3151 { 3152 INT32 Pool; 3153 UINT32 ret; 3154 3155 Pool = (RxDesc->status & MVPP2_RXD_BM_POOL_ID_MASK) >> MVPP2_RXD_BM_POOL_ID_OFFS; 3156 3157 ret = ((Pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | ((Cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); 3158 3159 return ret; 3160 } 3161 3162 /* Tx descriptors helper methods */ 3163 3164 INT32 3165 Mvpp2TxqDrainSet ( 3166 IN PP2DXE_PORT *Port, 3167 IN INT32 Txq, 3168 IN BOOLEAN En 3169 ) 3170 { 3171 UINT32 RegVal; 3172 INT32 pTxq = Mvpp2TxqPhys (Port->Id, Txq); 3173 3174 Mvpp2Write (Port->Priv, MVPP2_TXQ_NUM_REG, pTxq); 3175 RegVal = Mvpp2Read (Port->Priv, MVPP2_TXQ_PREF_BUF_REG); 3176 3177 if (En) { 3178 RegVal |= MVPP2_TXQ_DRAIN_EN_MASK; 3179 } else { 3180 RegVal &= ~MVPP2_TXQ_DRAIN_EN_MASK; 3181 } 3182 3183 Mvpp2Write (Port->Priv, MVPP2_TXQ_PREF_BUF_REG, RegVal); 3184 3185 return 0; 3186 } 3187 3188 /* Get number of Tx descriptors waiting to be transmitted by HW */ 3189 INT32 3190 Mvpp2TxqPendDescNumGet ( 3191 IN PP2DXE_PORT *Port, 3192 IN MVPP2_TX_QUEUE *Txq 3193 ) 3194 { 3195 UINT32 Val; 3196 3197 Mvpp2Write (Port->Priv, MVPP2_TXQ_NUM_REG, Txq->Id); 3198 Val = Mvpp2Read (Port->Priv, MVPP2_TXQ_PENDING_REG); 3199 3200 return Val & MVPP2_TXQ_PENDING_MASK; 3201 } 3202 3203 /* Get number of occupied aggRegValated Tx descriptors */ 3204 UINT32 3205 Mvpp2AggrTxqPendDescNumGet ( 3206 IN MVPP2_SHARED *Priv, 3207 IN INT32 Cpu 3208 ) 3209 { 3210 UINT32 RegVal; 3211 3212 RegVal = Mvpp2Read (Priv, MVPP2_AGGR_TXQ_STATUS_REG(Cpu)); 3213 3214 return RegVal & MVPP2_AGGR_TXQ_PENDING_MASK; 3215 } 3216 3217 /* Get pointer to next Tx descriptor to be processed (send) by HW */ 3218 MVPP2_TX_DESC * 3219 Mvpp2TxqNextDescGet ( 3220 MVPP2_TX_QUEUE *Txq 3221 ) 3222 { 3223 INT32 TxDesc = Txq->NextDescToProc; 3224 3225 Txq->NextDescToProc = MVPP2_QUEUE_NEXT_DESC (Txq, TxDesc); 3226 3227 return Txq->Descs + TxDesc; 3228 } 3229 3230 /* Update HW with number of aggRegValated Tx descriptors to be sent */ 3231 VOID 3232 Mvpp2AggrTxqPendDescAdd ( 3233 IN PP2DXE_PORT *Port, 3234 IN INT32 Pending 3235 ) 3236 { 3237 /* AggRegValated access - relevant TXQ number is written in TX desc */ 3238 Mvpp2Write (Port->Priv, MVPP2_AGGR_TXQ_UPDATE_REG, Pending); 3239 } 3240 3241 /* 3242 * Check if there are enough free descriptors in aggRegValated Txq. 3243 * If not, update the number of occupied descriptors and repeat the check. 3244 */ 3245 INT32 3246 Mvpp2AggrDescNumCheck ( 3247 IN MVPP2_SHARED *Priv, 3248 IN MVPP2_TX_QUEUE *AggrTxq, 3249 IN INT32 Num, 3250 IN INT32 Cpu 3251 ) 3252 { 3253 UINT32 Val; 3254 3255 if ((AggrTxq->count + Num) > AggrTxq->Size) { 3256 /* Update number of occupied aggRegValated Tx descriptors */ 3257 Val = Mvpp2Read (Priv, MVPP2_AGGR_TXQ_STATUS_REG(Cpu)); 3258 AggrTxq->count = Val & MVPP2_AGGR_TXQ_PENDING_MASK; 3259 } 3260 3261 if ((AggrTxq->count + Num) > AggrTxq->Size) { 3262 return MVPP2_ENOMEM; 3263 } 3264 3265 return 0; 3266 } 3267 3268 /* Reserved Tx descriptors allocation request */ 3269 INT32 3270 Mvpp2TxqAllocReservedDesc ( 3271 IN MVPP2_SHARED *Priv, 3272 IN MVPP2_TX_QUEUE *Txq, 3273 IN INT32 Num 3274 ) 3275 { 3276 UINT32 Val; 3277 3278 Val = (Txq->Id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | Num; 3279 Mvpp2Write (Priv, MVPP2_TXQ_RSVD_REQ_REG, Val); 3280 3281 Val = Mvpp2Read (Priv, MVPP2_TXQ_RSVD_RSLT_REG); 3282 3283 return Val & MVPP2_TXQ_RSVD_RSLT_MASK; 3284 } 3285 3286 /* 3287 * Release the last allocated Tx descriptor. Useful to handle DMA 3288 * mapping failures in the Tx path. 3289 */ 3290 VOID 3291 Mvpp2TxqDescPut ( 3292 IN MVPP2_TX_QUEUE *Txq 3293 ) 3294 { 3295 if (Txq->NextDescToProc == 0) { 3296 Txq->NextDescToProc = Txq->LastDesc - 1; 3297 } else { 3298 Txq->NextDescToProc--; 3299 } 3300 } 3301 3302 /* Set Tx descriptors fields relevant for CSUM calculation */ 3303 UINT32 3304 Mvpp2TxqDescCsum ( 3305 IN INT32 L3Offs, 3306 IN INT32 L3Proto, 3307 IN INT32 IpHdrLen, 3308 IN INT32 L4Proto 3309 ) 3310 { 3311 UINT32 command; 3312 3313 /* 3314 * Fields: L3_Offset, IP_hdrlen, L3_type, G_IPV4Chk, 3315 * G_L4_chk, L4_type required only for checksum calculation 3316 */ 3317 command = (L3Offs << MVPP2_TXD_L3_OFF_SHIFT); 3318 command |= (IpHdrLen << MVPP2_TXD_IP_HLEN_SHIFT); 3319 command |= MVPP2_TXD_IP_CSUM_DISABLE; 3320 3321 if (L3Proto == Mvpp2SwapBytes16 (MV_ETH_P_IP)) { 3322 command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ 3323 command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ 3324 } else { 3325 command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ 3326 } 3327 3328 if (L4Proto == MV_IPPR_TCP) { 3329 command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ 3330 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ 3331 } else if (L4Proto == MV_IPPR_UDP) { 3332 command |= MVPP2_TXD_L4_UDP; /* enable UDP */ 3333 command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ 3334 } else { 3335 command |= MVPP2_TXD_L4_CSUM_NOT; 3336 } 3337 3338 return command; 3339 } 3340 3341 /* Clear counter of sent packets */ 3342 VOID 3343 Mvpp2TxqSentCounterClear ( 3344 IN OUT VOID *arg 3345 ) 3346 { 3347 PP2DXE_PORT *Port = arg; 3348 INT32 Queue; 3349 3350 for (Queue = 0; Queue < TxqNumber; Queue++) { 3351 INT32 Id = Port->Txqs[Queue].Id; 3352 3353 Mvpp2Read (Port->Priv, MVPP2_TXQ_SENT_REG(Id)); 3354 } 3355 } 3356 3357 /* Change maximum receive Size of the Port */ 3358 VOID 3359 Mvpp2GmacMaxRxSizeSet ( 3360 IN PP2DXE_PORT *Port 3361 ) 3362 { 3363 UINT32 Val; 3364 3365 Val = Mvpp2GmacRead (Port, MVPP2_GMAC_CTRL_0_REG); 3366 Val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; 3367 Val |= (((Port->PktSize - MVPP2_MH_SIZE) / 2) << MVPP2_GMAC_MAX_RX_SIZE_OFFS); 3368 Mvpp2GmacWrite (Port, MVPP2_GMAC_CTRL_0_REG, Val); 3369 } 3370 3371 /* Set max sizes for Tx Queues */ 3372 VOID 3373 Mvpp2TxpMaxTxSizeSet ( 3374 IN PP2DXE_PORT *Port 3375 ) 3376 { 3377 UINT32 Val, Size, mtu; 3378 INT32 Txq, TxPortNum; 3379 3380 mtu = Port->PktSize * 8; 3381 if (mtu > MVPP2_TXP_MTU_MAX) { 3382 mtu = MVPP2_TXP_MTU_MAX; 3383 } 3384 3385 /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ 3386 mtu = 3 * mtu; 3387 3388 /* Indirect access to RegValisters */ 3389 TxPortNum = Mvpp2EgressPort (Port); 3390 Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, TxPortNum); 3391 3392 /* Set MTU */ 3393 Val = Mvpp2Read (Port->Priv, MVPP2_TXP_SCHED_MTU_REG); 3394 Val &= ~MVPP2_TXP_MTU_MAX; 3395 Val |= mtu; 3396 Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_MTU_REG, Val); 3397 3398 /* TXP token Size and all TXQs token Size must be larger that MTU */ 3399 Val = Mvpp2Read (Port->Priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); 3400 Size = Val & MVPP2_TXP_TOKEN_SIZE_MAX; 3401 if (Size < mtu) { 3402 Size = mtu; 3403 Val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; 3404 Val |= Size; 3405 Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, Val); 3406 } 3407 3408 for (Txq = 0; Txq < TxqNumber; Txq++) { 3409 Val = Mvpp2Read (Port->Priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(Txq)); 3410 Size = Val & MVPP2_TXQ_TOKEN_SIZE_MAX; 3411 3412 if (Size < mtu) { 3413 Size = mtu; 3414 Val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; 3415 Val |= Size; 3416 Mvpp2Write (Port->Priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(Txq), Val); 3417 } 3418 } 3419 } 3420 3421 /* 3422 * Set the number of packets that will be received before Rx interrupt 3423 * will be generated by HW. 3424 */ 3425 VOID 3426 Mvpp2RxPktsCoalSet ( 3427 IN PP2DXE_PORT *Port, 3428 IN OUT MVPP2_RX_QUEUE *Rxq, 3429 IN UINT32 Pkts 3430 ) 3431 { 3432 UINT32 Val; 3433 3434 Val = (Pkts & MVPP2_OCCUPIED_THRESH_MASK); 3435 Mvpp2Write (Port->Priv, MVPP2_RXQ_NUM_REG, Rxq->Id); 3436 Mvpp2Write (Port->Priv, MVPP2_RXQ_THRESH_REG, Val); 3437 3438 Rxq->PktsCoal = Pkts; 3439 } 3440 3441 /* Set the time Delay in Usec before Rx INT32errupt */ 3442 VOID 3443 Mvpp2RxTimeCoalSet ( 3444 IN PP2DXE_PORT *Port, 3445 IN OUT MVPP2_RX_QUEUE *Rxq, 3446 IN UINT32 Usec 3447 ) 3448 { 3449 UINT32 Val; 3450 3451 Val = (Port->Priv->Tclk / MVPP2_USEC_PER_SEC) * Usec; 3452 Mvpp2Write (Port->Priv, MVPP2_ISR_RX_THRESHOLD_REG(Rxq->Id), Val); 3453 3454 Rxq->TimeCoal = Usec; 3455 } 3456 3457 /* Rx/Tx Queue initialization/cleanup methods */ 3458 3459 /* Configure RXQ's */ 3460 VOID 3461 Mvpp2RxqHwInit ( 3462 IN PP2DXE_PORT *Port, 3463 IN OUT MVPP2_RX_QUEUE *Rxq 3464 ) 3465 { 3466 Rxq->LastDesc = Rxq->Size - 1; 3467 3468 /* Zero occupied and non-occupied counters - direct access */ 3469 Mvpp2Write (Port->Priv, MVPP2_RXQ_STATUS_REG(Rxq->Id), 0); 3470 3471 /* Set Rx descriptors Queue starting Address - indirect access */ 3472 Mvpp2Write (Port->Priv, MVPP2_RXQ_NUM_REG, Rxq->Id); 3473 Mvpp2Write (Port->Priv, MVPP2_RXQ_DESC_ADDR_REG, Rxq->DescsPhys >> MVPP22_DESC_ADDR_SHIFT); 3474 Mvpp2Write (Port->Priv, MVPP2_RXQ_DESC_SIZE_REG, Rxq->Size); 3475 Mvpp2Write (Port->Priv, MVPP2_RXQ_INDEX_REG, 0); 3476 3477 /* Set Offset */ 3478 Mvpp2RxqOffsetSet (Port, Rxq->Id, MVPP2_RXQ_OFFSET); 3479 3480 /* Set coalescing pkts and time */ 3481 Mvpp2RxPktsCoalSet (Port, Rxq, MVPP2_RX_COAL_PKTS); 3482 Mvpp2RxTimeCoalSet (Port, Rxq, Rxq->TimeCoal); 3483 3484 /* Add number of descriptors ready for receiving packets */ 3485 Mvpp2RxqStatusUpdate (Port, Rxq->Id, 0, Rxq->Size); 3486 } 3487 3488 /* Push packets received by the RXQ to BM Pool */ 3489 VOID 3490 Mvpp2RxqDropPkts ( 3491 IN PP2DXE_PORT *Port, 3492 IN OUT MVPP2_RX_QUEUE *Rxq, 3493 IN INT32 Cpu 3494 ) 3495 { 3496 INT32 RxReceived; 3497 3498 RxReceived = Mvpp2RxqReceived (Port, Rxq->Id); 3499 if (!RxReceived) { 3500 return; 3501 } 3502 3503 Mvpp2RxqStatusUpdate (Port, Rxq->Id, RxReceived, RxReceived); 3504 } 3505 3506 VOID 3507 Mvpp2RxqHwDeinit ( 3508 IN PP2DXE_PORT *Port, 3509 IN OUT MVPP2_RX_QUEUE *Rxq 3510 ) 3511 { 3512 Rxq->Descs = NULL; 3513 Rxq->LastDesc = 0; 3514 Rxq->NextDescToProc = 0; 3515 Rxq->DescsPhys = 0; 3516 3517 /* 3518 * Clear Rx descriptors Queue starting Address and Size; 3519 * free descriptor number 3520 */ 3521 Mvpp2Write (Port->Priv, MVPP2_RXQ_STATUS_REG(Rxq->Id), 0); 3522 Mvpp2Write (Port->Priv, MVPP2_RXQ_NUM_REG, Rxq->Id); 3523 Mvpp2Write (Port->Priv, MVPP2_RXQ_DESC_ADDR_REG, 0); 3524 Mvpp2Write (Port->Priv, MVPP2_RXQ_DESC_SIZE_REG, 0); 3525 } 3526 3527 /* Configure TXQ's */ 3528 VOID 3529 Mvpp2TxqHwInit ( 3530 IN PP2DXE_PORT *Port, 3531 IN OUT MVPP2_TX_QUEUE *Txq 3532 ) 3533 { 3534 INT32 Desc, DescPerTxq, TxPortNum; 3535 UINT32 Val; 3536 3537 Txq->LastDesc = Txq->Size - 1; 3538 3539 /* Set Tx descriptors Queue starting Address - indirect access */ 3540 Mvpp2Write (Port->Priv, MVPP2_TXQ_NUM_REG, Txq->Id); 3541 Mvpp2Write (Port->Priv, MVPP2_TXQ_DESC_ADDR_REG, Txq->DescsPhys); 3542 Mvpp2Write (Port->Priv, MVPP2_TXQ_DESC_SIZE_REG, Txq->Size & MVPP2_TXQ_DESC_SIZE_MASK); 3543 Mvpp2Write (Port->Priv, MVPP2_TXQ_INDEX_REG, 0); 3544 Mvpp2Write (Port->Priv, MVPP2_TXQ_RSVD_CLR_REG, Txq->Id << MVPP2_TXQ_RSVD_CLR_OFFSET); 3545 Val = Mvpp2Read (Port->Priv, MVPP2_TXQ_PENDING_REG); 3546 Val &= ~MVPP2_TXQ_PENDING_MASK; 3547 Mvpp2Write (Port->Priv, MVPP2_TXQ_PENDING_REG, Val); 3548 3549 /* 3550 * Calculate base Address in prefetch buffer. We reserve 16 descriptors 3551 * for each existing TXQ. 3552 * TCONTS for PON Port must be continuous from 0 to MVPP2_MAX_TCONT 3553 * GBE Ports assumed to be continious from 0 to MVPP2_MAX_PORTS 3554 */ 3555 DescPerTxq = 16; 3556 Desc = (Port->Id * MVPP2_MAX_TXQ * DescPerTxq) + (Txq->LogId * DescPerTxq); 3557 3558 Mvpp2Write ( 3559 Port->Priv, 3560 MVPP2_TXQ_PREF_BUF_REG, 3561 MVPP2_PREF_BUF_PTR (Desc) | MVPP2_PREF_BUF_SIZE_16 | MVPP2_PREF_BUF_THRESH (DescPerTxq/2) 3562 ); 3563 3564 /* WRR / EJP configuration - indirect access */ 3565 TxPortNum = Mvpp2EgressPort (Port); 3566 Mvpp2Write (Port->Priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, TxPortNum); 3567 3568 Val = Mvpp2Read (Port->Priv, MVPP2_TXQ_SCHED_REFILL_REG(Txq->LogId)); 3569 Val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; 3570 Val |= MVPP2_TXQ_REFILL_PERIOD_MASK (1); 3571 Val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; 3572 Mvpp2Write (Port->Priv, MVPP2_TXQ_SCHED_REFILL_REG(Txq->LogId), Val); 3573 3574 Val = MVPP2_TXQ_TOKEN_SIZE_MAX; 3575 Mvpp2Write (Port->Priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(Txq->LogId), Val); 3576 } 3577 3578 VOID 3579 Mvpp2TxqHwDeinit ( 3580 IN PP2DXE_PORT *Port, 3581 IN OUT MVPP2_TX_QUEUE *Txq 3582 ) 3583 { 3584 Txq->Descs = NULL; 3585 Txq->LastDesc = 0; 3586 Txq->NextDescToProc = 0; 3587 Txq->DescsPhys = 0; 3588 3589 /* Set minimum bandwidth for disabled TXQs */ 3590 Mvpp2Write (Port->Priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(Txq->Id), 0); 3591 3592 /* Set Tx descriptors Queue starting Address and Size */ 3593 Mvpp2Write (Port->Priv, MVPP2_TXQ_NUM_REG, Txq->Id); 3594 Mvpp2Write (Port->Priv, MVPP2_TXQ_DESC_ADDR_REG, 0); 3595 Mvpp2Write (Port->Priv, MVPP2_TXQ_DESC_SIZE_REG, 0); 3596 } 3597 3598 /* Allocate and initialize descriptors for aggr TXQ */ 3599 VOID 3600 Mvpp2AggrTxqHwInit ( 3601 IN OUT MVPP2_TX_QUEUE *AggrTxq, 3602 IN INT32 DescNum, 3603 IN INT32 Cpu, 3604 IN MVPP2_SHARED *Priv 3605 ) 3606 { 3607 AggrTxq->LastDesc = AggrTxq->Size - 1; 3608 3609 /* Aggr TXQ no Reset WA */ 3610 AggrTxq->NextDescToProc = Mvpp2Read (Priv, MVPP2_AGGR_TXQ_INDEX_REG(Cpu)); 3611 3612 /* Set Tx descriptors Queue starting Address (indirect access) */ 3613 Mvpp2Write (Priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(Cpu), AggrTxq->DescsPhys >> MVPP22_DESC_ADDR_SHIFT); 3614 Mvpp2Write (Priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(Cpu), DescNum & MVPP2_AGGR_TXQ_DESC_SIZE_MASK); 3615 } 3616 3617 /* Enable gmac */ 3618 VOID 3619 Mvpp2PortPowerUp ( 3620 IN PP2DXE_PORT *Port 3621 ) 3622 { 3623 Mvpp2PortMiiSet (Port); 3624 Mvpp2PortPeriodicXonDisable (Port); 3625 Mvpp2PortFcAdvEnable (Port); 3626 Mvpp2PortReset (Port); 3627 } 3628 3629 /* Initialize Rx FIFO's */ 3630 VOID 3631 Mvpp2RxFifoInit ( 3632 IN MVPP2_SHARED *Priv 3633 ) 3634 { 3635 INT32 PortId; 3636 3637 for (PortId = 0; PortId < MVPP2_MAX_PORTS; PortId++) { 3638 Mvpp2Write (Priv, MVPP2_RX_DATA_FIFO_SIZE_REG(PortId), MVPP2_RX_FIFO_PORT_DATA_SIZE); 3639 Mvpp2Write (Priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(PortId), MVPP2_RX_FIFO_PORT_ATTR_SIZE); 3640 } 3641 3642 Mvpp2Write (Priv, MVPP2_RX_MIN_PKT_SIZE_REG, MVPP2_RX_FIFO_PORT_MIN_PKT); 3643 Mvpp2Write (Priv, MVPP2_RX_FIFO_INIT_REG, 0x1); 3644 } 3645 3646 VOID 3647 MvGop110NetcActivePort ( 3648 IN PP2DXE_PORT *Port, 3649 IN UINT32 PortId, 3650 IN UINT32 Val 3651 ) 3652 { 3653 UINT32 RegVal; 3654 3655 RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_PORTS_CONTROL_1); 3656 RegVal &= ~(NETC_PORTS_ACTIVE_MASK (PortId)); 3657 3658 Val <<= NETC_PORTS_ACTIVE_OFFSET (PortId); 3659 Val &= NETC_PORTS_ACTIVE_MASK (PortId); 3660 3661 RegVal |= Val; 3662 3663 Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_PORTS_CONTROL_1, RegVal); 3664 } 3665 3666 STATIC 3667 VOID 3668 MvGop110NetcXauiEnable ( 3669 IN PP2DXE_PORT *Port, 3670 IN UINT32 PortId, 3671 IN UINT32 Val 3672 ) 3673 { 3674 UINT32 RegVal; 3675 3676 RegVal = Mvpp2Rfu1Read (Port->Priv, SD1_CONTROL_1_REG); 3677 RegVal &= ~SD1_CONTROL_XAUI_EN_MASK; 3678 3679 Val <<= SD1_CONTROL_XAUI_EN_OFFSET; 3680 Val &= SD1_CONTROL_XAUI_EN_MASK; 3681 3682 RegVal |= Val; 3683 3684 Mvpp2Rfu1Write (Port->Priv, SD1_CONTROL_1_REG, RegVal); 3685 } 3686 3687 STATIC 3688 VOID 3689 MvGop110NetcRxaui0Enable ( 3690 IN PP2DXE_PORT *Port, 3691 IN UINT32 PortId, 3692 IN UINT32 Val 3693 ) 3694 { 3695 UINT32 RegVal; 3696 3697 RegVal = Mvpp2Rfu1Read (Port->Priv, SD1_CONTROL_1_REG); 3698 RegVal &= ~SD1_CONTROL_RXAUI0_L23_EN_MASK; 3699 3700 Val <<= SD1_CONTROL_RXAUI0_L23_EN_OFFSET; 3701 Val &= SD1_CONTROL_RXAUI0_L23_EN_MASK; 3702 3703 RegVal |= Val; 3704 3705 Mvpp2Rfu1Write (Port->Priv, SD1_CONTROL_1_REG, RegVal); 3706 } 3707 3708 STATIC 3709 VOID 3710 MvGop110NetcRxaui1Enable ( 3711 IN PP2DXE_PORT *Port, 3712 IN UINT32 PortId, 3713 IN UINT32 Val 3714 ) 3715 { 3716 UINT32 RegVal; 3717 3718 RegVal = Mvpp2Rfu1Read (Port->Priv, SD1_CONTROL_1_REG); 3719 RegVal &= ~SD1_CONTROL_RXAUI1_L45_EN_MASK; 3720 3721 Val <<= SD1_CONTROL_RXAUI1_L45_EN_OFFSET; 3722 Val &= SD1_CONTROL_RXAUI1_L45_EN_MASK; 3723 3724 RegVal |= Val; 3725 3726 Mvpp2Rfu1Write (Port->Priv, SD1_CONTROL_1_REG, RegVal); 3727 } 3728 3729 STATIC 3730 VOID 3731 MvGop110NetcMiiMode ( 3732 IN PP2DXE_PORT *Port, 3733 IN UINT32 PortId, 3734 IN UINT32 Val 3735 ) 3736 { 3737 UINT32 RegVal; 3738 3739 RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_CONTROL_0); 3740 RegVal &= ~NETC_GBE_PORT1_MII_MODE_MASK; 3741 3742 Val <<= NETC_GBE_PORT1_MII_MODE_OFFSET; 3743 Val &= NETC_GBE_PORT1_MII_MODE_MASK; 3744 3745 RegVal |= Val; 3746 3747 Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_CONTROL_0, RegVal); 3748 } 3749 3750 STATIC 3751 VOID 3752 MvGop110NetcGopReset ( 3753 IN PP2DXE_PORT *Port, 3754 IN UINT32 Val 3755 ) 3756 { 3757 UINT32 RegVal; 3758 3759 RegVal = Mvpp2Rfu1Read (Port->Priv, MV_GOP_SOFT_RESET_1_REG); 3760 RegVal &= ~NETC_GOP_SOFT_RESET_MASK; 3761 3762 Val <<= NETC_GOP_SOFT_RESET_OFFSET; 3763 Val &= NETC_GOP_SOFT_RESET_MASK; 3764 3765 RegVal |= Val; 3766 3767 Mvpp2Rfu1Write (Port->Priv, MV_GOP_SOFT_RESET_1_REG, RegVal); 3768 } 3769 3770 STATIC 3771 VOID 3772 MvGop110NetcGopClockLogicSet ( 3773 IN PP2DXE_PORT *Port, 3774 IN UINT32 Val 3775 ) 3776 { 3777 UINT32 RegVal; 3778 3779 RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_PORTS_CONTROL_0); 3780 RegVal &= ~NETC_CLK_DIV_PHASE_MASK; 3781 3782 Val <<= NETC_CLK_DIV_PHASE_OFFSET; 3783 Val &= NETC_CLK_DIV_PHASE_MASK; 3784 3785 RegVal |= Val; 3786 3787 Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_PORTS_CONTROL_0, RegVal); 3788 } 3789 3790 STATIC 3791 VOID 3792 MvGop110NetcPortRfReset ( 3793 IN PP2DXE_PORT *Port, 3794 IN UINT32 PortId, 3795 IN UINT32 Val 3796 ) 3797 { 3798 UINT32 RegVal; 3799 3800 RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_PORTS_CONTROL_1); 3801 RegVal &= ~(NETC_PORT_GIG_RF_RESET_MASK (PortId)); 3802 3803 Val <<= NETC_PORT_GIG_RF_RESET_OFFSET (PortId); 3804 Val &= NETC_PORT_GIG_RF_RESET_MASK (PortId); 3805 3806 RegVal |= Val; 3807 3808 Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_PORTS_CONTROL_1, RegVal); 3809 } 3810 3811 STATIC 3812 VOID 3813 MvGop110NetcGbeSgmiiModeSelect ( 3814 IN PP2DXE_PORT *Port, 3815 IN UINT32 PortId, 3816 IN UINT32 Val 3817 ) 3818 { 3819 UINT32 RegVal, Mask, Offset; 3820 3821 if (PortId == 2) { 3822 Mask = NETC_GBE_PORT0_SGMII_MODE_MASK; 3823 Offset = NETC_GBE_PORT0_SGMII_MODE_OFFSET; 3824 } else { 3825 Mask = NETC_GBE_PORT1_SGMII_MODE_MASK; 3826 Offset = NETC_GBE_PORT1_SGMII_MODE_OFFSET; 3827 } 3828 RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_CONTROL_0); 3829 RegVal &= ~Mask; 3830 3831 Val <<= Offset; 3832 Val &= Mask; 3833 3834 RegVal |= Val; 3835 3836 Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_CONTROL_0, RegVal); 3837 } 3838 3839 STATIC 3840 VOID 3841 MvGop110NetcBusWidthSelect ( 3842 IN PP2DXE_PORT *Port, 3843 IN UINT32 Val 3844 ) 3845 { 3846 UINT32 RegVal; 3847 3848 RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_PORTS_CONTROL_0); 3849 RegVal &= ~NETC_BUS_WIDTH_SELECT_MASK; 3850 3851 Val <<= NETC_BUS_WIDTH_SELECT_OFFSET; 3852 Val &= NETC_BUS_WIDTH_SELECT_MASK; 3853 3854 RegVal |= Val; 3855 3856 Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_PORTS_CONTROL_0, RegVal); 3857 } 3858 3859 STATIC 3860 VOID 3861 MvGop110NetcSampleStagesTiming ( 3862 IN PP2DXE_PORT *Port, 3863 IN UINT32 Val 3864 ) 3865 { 3866 UINT32 RegVal; 3867 3868 RegVal = Mvpp2Rfu1Read (Port->Priv, MV_NETCOMP_PORTS_CONTROL_0); 3869 RegVal &= ~NETC_GIG_RX_DATA_SAMPLE_MASK; 3870 3871 Val <<= NETC_GIG_RX_DATA_SAMPLE_OFFSET; 3872 Val &= NETC_GIG_RX_DATA_SAMPLE_MASK; 3873 3874 RegVal |= Val; 3875 3876 Mvpp2Rfu1Write (Port->Priv, MV_NETCOMP_PORTS_CONTROL_0, RegVal); 3877 } 3878 3879 STATIC 3880 VOID 3881 MvGop110NetcMacToXgmii ( 3882 IN PP2DXE_PORT *Port, 3883 IN UINT32 PortId, 3884 IN enum MvNetcPhase Phase 3885 ) 3886 { 3887 switch (Phase) { 3888 case MV_NETC_FIRST_PHASE: 3889 3890 /* Set Bus Width to HB mode = 1 */ 3891 MvGop110NetcBusWidthSelect (Port, 0x1); 3892 3893 /* Select RGMII mode */ 3894 MvGop110NetcGbeSgmiiModeSelect (Port, PortId, MV_NETC_GBE_XMII); 3895 break; 3896 case MV_NETC_SECOND_PHASE: 3897 3898 /* De-assert the relevant PortId HB Reset */ 3899 MvGop110NetcPortRfReset (Port, PortId, 0x1); 3900 break; 3901 } 3902 } 3903 3904 STATIC 3905 VOID 3906 MvGop110NetcMacToSgmii ( 3907 IN PP2DXE_PORT *Port, 3908 IN UINT32 PortId, 3909 IN enum MvNetcPhase Phase 3910 ) 3911 { 3912 switch (Phase) { 3913 case MV_NETC_FIRST_PHASE: 3914 3915 /* Set Bus Width to HB mode = 1 */ 3916 MvGop110NetcBusWidthSelect (Port, 1); 3917 3918 /* Select SGMII mode */ 3919 if (PortId >= 1) { 3920 MvGop110NetcGbeSgmiiModeSelect (Port, PortId, MV_NETC_GBE_SGMII); 3921 } 3922 3923 /* Configure the sample stages */ 3924 MvGop110NetcSampleStagesTiming (Port, 0); 3925 break; 3926 case MV_NETC_SECOND_PHASE: 3927 3928 /* De-assert the relevant PortId HB Reset */ 3929 MvGop110NetcPortRfReset (Port, PortId, 1); 3930 break; 3931 } 3932 } 3933 3934 STATIC 3935 VOID 3936 MvGop110NetcMacToRxaui ( 3937 IN PP2DXE_PORT *Port, 3938 IN UINT32 PortId, 3939 IN enum MvNetcPhase Phase, 3940 IN enum MvNetcLanes Lanes 3941 ) 3942 { 3943 /* Currently only RXAUI0 supPorted */ 3944 if (PortId != 0) 3945 return; 3946 3947 switch (Phase) { 3948 case MV_NETC_FIRST_PHASE: 3949 3950 /* RXAUI Serdes/s Clock alignment */ 3951 if (Lanes == MV_NETC_LANE_23) { 3952 MvGop110NetcRxaui0Enable (Port, PortId, 1); 3953 } else { 3954 MvGop110NetcRxaui1Enable (Port, PortId, 1); 3955 } 3956 break; 3957 case MV_NETC_SECOND_PHASE: 3958 3959 /* De-assert the relevant PortId HB Reset */ 3960 MvGop110NetcPortRfReset (Port, PortId, 1); 3961 break; 3962 } 3963 } 3964 3965 STATIC 3966 VOID 3967 MvGop110NetcMacToXaui ( 3968 IN PP2DXE_PORT *Port, 3969 IN UINT32 PortId, 3970 IN enum MvNetcPhase Phase 3971 ) 3972 { 3973 switch (Phase) { 3974 case MV_NETC_FIRST_PHASE: 3975 3976 /* RXAUI Serdes/s Clock alignment */ 3977 MvGop110NetcXauiEnable (Port, PortId, 1); 3978 break; 3979 case MV_NETC_SECOND_PHASE: 3980 3981 /* De-assert the relevant PortId HB Reset */ 3982 MvGop110NetcPortRfReset (Port, PortId, 1); 3983 break; 3984 } 3985 } 3986 3987 INT32 3988 MvGop110NetcInit ( 3989 IN PP2DXE_PORT *Port, 3990 IN UINT32 NetCompConfig, 3991 IN enum MvNetcPhase Phase 3992 ) 3993 { 3994 UINT32 c = NetCompConfig; 3995 3996 if (c & MV_NETC_GE_MAC0_RXAUI_L23) { 3997 MvGop110NetcMacToRxaui (Port, 0, Phase, MV_NETC_LANE_23); 3998 } 3999 4000 if (c & MV_NETC_GE_MAC0_RXAUI_L45) { 4001 MvGop110NetcMacToRxaui (Port, 0, Phase, MV_NETC_LANE_45); 4002 } 4003 4004 if (c & MV_NETC_GE_MAC0_XAUI) { 4005 MvGop110NetcMacToXaui (Port, 0, Phase); 4006 } 4007 4008 if (c & MV_NETC_GE_MAC2_SGMII) { 4009 MvGop110NetcMacToSgmii (Port, 2, Phase); 4010 } else { 4011 MvGop110NetcMacToXgmii (Port, 2, Phase); 4012 } 4013 4014 if (c & MV_NETC_GE_MAC3_SGMII) { 4015 MvGop110NetcMacToSgmii (Port, 3, Phase); 4016 } else { 4017 MvGop110NetcMacToXgmii (Port, 3, Phase); 4018 if (c & MV_NETC_GE_MAC3_RGMII) { 4019 MvGop110NetcMiiMode (Port, 3, MV_NETC_GBE_RGMII); 4020 } else { 4021 MvGop110NetcMiiMode (Port, 3, MV_NETC_GBE_MII); 4022 } 4023 } 4024 4025 /* Activate gop Ports 0, 2, 3 */ 4026 MvGop110NetcActivePort (Port, 0, 1); 4027 MvGop110NetcActivePort (Port, 2, 1); 4028 MvGop110NetcActivePort (Port, 3, 1); 4029 4030 if (Phase == MV_NETC_SECOND_PHASE) { 4031 4032 /* Enable the GOP internal clock logic */ 4033 MvGop110NetcGopClockLogicSet (Port, 1); 4034 4035 /* De-assert GOP unit Reset */ 4036 MvGop110NetcGopReset (Port, 1); 4037 } 4038 4039 return 0; 4040 } 4041 4042 UINT32 4043 MvpPp2xGop110NetcCfgCreate ( 4044 IN PP2DXE_PORT *Port 4045 ) 4046 { 4047 UINT32 Val = 0; 4048 4049 if (Port->GopIndex == 0) { 4050 if (Port->PhyInterface == MV_MODE_XAUI) { 4051 Val |= MV_NETC_GE_MAC0_XAUI; 4052 } else if (Port->PhyInterface == MV_MODE_RXAUI) { 4053 Val |= MV_NETC_GE_MAC0_RXAUI_L23; 4054 } 4055 } 4056 4057 if (Port->GopIndex == 2) { 4058 if (Port->PhyInterface == MV_MODE_SGMII) { 4059 Val |= MV_NETC_GE_MAC2_SGMII; 4060 } 4061 } 4062 4063 if (Port->GopIndex == 3) { 4064 if (Port->PhyInterface == MV_MODE_SGMII) { 4065 Val |= MV_NETC_GE_MAC3_SGMII; 4066 } else if (Port->PhyInterface == MV_MODE_RGMII) { 4067 Val |= MV_NETC_GE_MAC3_RGMII; 4068 } 4069 } 4070 4071 return Val; 4072 } 4073 4074 /* 4075 * Initialize physical Port. Configure the Port mode and 4076 * all its elements accordingly. 4077 */ 4078 INT32 4079 MvGop110PortInit ( 4080 IN PP2DXE_PORT *Port 4081 ) 4082 { 4083 4084 switch (Port->PhyInterface) { 4085 case MV_MODE_RGMII: 4086 MvGop110GmacReset (Port, RESET); 4087 4088 /* Configure PCS */ 4089 MvGop110GpcsModeCfg (Port, FALSE); 4090 MvGop110BypassClkCfg (Port, TRUE); 4091 4092 /* Configure MAC */ 4093 MvGop110GmacModeCfg (Port); 4094 4095 /* PCS unreset */ 4096 MvGop110GpcsReset (Port, UNRESET); 4097 4098 /* MAC unreset */ 4099 MvGop110GmacReset (Port, UNRESET); 4100 break; 4101 case MV_MODE_SGMII: 4102 case MV_MODE_QSGMII: 4103 4104 /* Configure PCS */ 4105 MvGop110GpcsModeCfg (Port, TRUE); 4106 4107 /* Configure MAC */ 4108 MvGop110GmacModeCfg (Port); 4109 4110 /* Select proper MAC mode */ 4111 MvGop110Xlg2GigMacCfg (Port); 4112 4113 /* PCS unreset */ 4114 MvGop110GpcsReset (Port, UNRESET); 4115 4116 /* MAC unreset */ 4117 MvGop110GmacReset (Port, UNRESET); 4118 break; 4119 default: 4120 return -1; 4121 } 4122 4123 return 0; 4124 } 4125 4126 /* Set the MAC to Reset or exit from Reset */ 4127 INT32 4128 MvGop110GmacReset ( 4129 IN PP2DXE_PORT *Port, 4130 IN enum MvReset ResetCmd 4131 ) 4132 { 4133 UINT32 RegAddr; 4134 UINT32 Val; 4135 4136 RegAddr = MVPP2_PORT_CTRL2_REG; 4137 4138 Val = MvGop110GmacRead (Port, RegAddr); 4139 4140 if (ResetCmd == RESET) { 4141 Val |= MVPP2_PORT_CTRL2_PORTMACRESET_MASK; 4142 } else { 4143 Val &= ~MVPP2_PORT_CTRL2_PORTMACRESET_MASK; 4144 } 4145 4146 MvGop110GmacWrite (Port, RegAddr, Val); 4147 4148 return 0; 4149 } 4150 4151 /* Enable/Disable Port to work with Gig PCS */ 4152 INT32 4153 MvGop110GpcsModeCfg ( 4154 IN PP2DXE_PORT *Port, 4155 BOOLEAN En 4156 ) 4157 { 4158 UINT32 Val; 4159 4160 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG); 4161 4162 if (En) { 4163 Val |= MVPP2_PORT_CTRL2_PCS_EN_MASK; 4164 } else { 4165 Val &= ~MVPP2_PORT_CTRL2_PCS_EN_MASK; 4166 } 4167 4168 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, Val); 4169 4170 return 0; 4171 } 4172 4173 INT32 4174 MvGop110BypassClkCfg ( 4175 IN PP2DXE_PORT *Port, 4176 IN BOOLEAN En 4177 ) 4178 { 4179 UINT32 Val; 4180 4181 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG); 4182 4183 if (En) { 4184 Val |= MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_MASK; 4185 } else { 4186 Val &= ~MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_MASK; 4187 } 4188 4189 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, Val); 4190 4191 return 0; 4192 } 4193 4194 INT32 4195 MvGop110GpcsReset ( 4196 IN PP2DXE_PORT *Port, 4197 IN enum MvReset ResetCmd 4198 ) 4199 { 4200 UINT32 RegData; 4201 4202 RegData = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG); 4203 4204 if (ResetCmd == RESET) { 4205 U32_SET_FIELD ( 4206 RegData, 4207 MVPP2_PORT_CTRL2_SGMII_MODE_MASK, 4208 0 4209 ); 4210 4211 } else { 4212 U32_SET_FIELD ( 4213 RegData, 4214 MVPP2_PORT_CTRL2_SGMII_MODE_MASK, 4215 1 << MVPP2_PORT_CTRL2_SGMII_MODE_OFFS 4216 ); 4217 4218 } 4219 4220 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, RegData); 4221 4222 return 0; 4223 } 4224 4225 VOID 4226 MvGop110Xlg2GigMacCfg ( 4227 IN PP2DXE_PORT *Port 4228 ) 4229 { 4230 UINT32 RegVal; 4231 4232 /* Relevant only for MAC0 (XLG0 and GMAC0) */ 4233 if (Port->GopIndex > 0) { 4234 return; 4235 } 4236 4237 /* Configure 1Gig MAC mode */ 4238 RegVal = Mvpp2XlgRead (Port, MV_XLG_PORT_MAC_CTRL3_REG); 4239 U32_SET_FIELD ( 4240 RegVal, 4241 MV_XLG_MAC_CTRL3_MACMODESELECT_MASK, 4242 (0 << MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS) 4243 ); 4244 4245 Mvpp2XlgWrite (Port, MV_XLG_PORT_MAC_CTRL3_REG, RegVal); 4246 } 4247 4248 /* Set the internal mux's to the required MAC in the GOP */ 4249 INT32 4250 MvGop110GmacModeCfg ( 4251 IN PP2DXE_PORT *Port 4252 ) 4253 { 4254 UINT32 RegAddr; 4255 UINT32 Val; 4256 4257 /* Set TX FIFO thresholds */ 4258 switch (Port->PhyInterface) { 4259 case MV_MODE_SGMII: 4260 if (Port->Speed == MV_PORT_SPEED_2500) { 4261 MvGop110GmacSgmii25Cfg (Port); 4262 } else { 4263 MvGop110GmacSgmiiCfg (Port); 4264 } 4265 break; 4266 case MV_MODE_RGMII: 4267 MvGop110GmacRgmiiCfg (Port); 4268 break; 4269 case MV_MODE_QSGMII: 4270 MvGop110GmacQsgmiiCfg (Port); 4271 break; 4272 default: 4273 return -1; 4274 } 4275 4276 /* Jumbo frame supPort - 0x1400*2= 0x2800 bytes */ 4277 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG); 4278 U32_SET_FIELD ( 4279 Val, 4280 MVPP2_PORT_CTRL0_FRAMESIZELIMIT_MASK, 4281 (0x1400 << MVPP2_PORT_CTRL0_FRAMESIZELIMIT_OFFS) 4282 ); 4283 4284 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, Val); 4285 4286 /* PeriodicXonEn disable */ 4287 RegAddr = MVPP2_PORT_CTRL1_REG; 4288 Val = MvGop110GmacRead (Port, RegAddr); 4289 Val &= ~MVPP2_PORT_CTRL1_EN_PERIODIC_FC_XON_MASK; 4290 MvGop110GmacWrite (Port, RegAddr, Val); 4291 4292 /* Mask all Ports interrupts */ 4293 MvGop110GmacPortLinkEventMask (Port); 4294 4295 #if MV_PP2x_INTERRUPT 4296 /* Unmask link change interrupt */ 4297 Val = MvGop110GmacRead (Port, MVPP2_INTERRUPT_MASK_REG); 4298 Val |= MVPP2_INTERRUPT_CAUSE_LINK_CHANGE_MASK; 4299 Val |= 1; /* Unmask summary bit */ 4300 MvGop110GmacWrite (Port, MVPP2_INTERRUPT_MASK_REG, Val); 4301 #endif 4302 4303 return 0; 4304 } 4305 4306 VOID 4307 MvGop110GmacRgmiiCfg ( 4308 IN PP2DXE_PORT *Port 4309 ) 4310 { 4311 UINT32 Val, thresh, an; 4312 4313 /* Configure minimal level of the Tx FIFO before the lower part starts to read a packet*/ 4314 thresh = MV_RGMII_TX_FIFO_MIN_TH; 4315 Val = MvGop110GmacRead (Port, MVPP2_PORT_FIFO_CFG_1_REG); 4316 U32_SET_FIELD ( 4317 Val, 4318 MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK, 4319 (thresh << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS) 4320 ); 4321 4322 MvGop110GmacWrite (Port, MVPP2_PORT_FIFO_CFG_1_REG, Val); 4323 4324 /* Disable bypass of sync module */ 4325 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL4_REG); 4326 Val |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK; 4327 4328 /* Configure DP clock select according to mode */ 4329 Val &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK; 4330 Val |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK; 4331 Val |= MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK; 4332 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL4_REG, Val); 4333 4334 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG); 4335 Val &= ~MVPP2_PORT_CTRL2_DIS_PADING_OFFS; 4336 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, Val); 4337 4338 /* Configure GIG MAC to SGMII mode */ 4339 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG); 4340 Val &= ~MVPP2_PORT_CTRL0_PORTTYPE_MASK; 4341 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, Val); 4342 4343 /* configure AN 0xb8e8 */ 4344 an = MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK | 4345 MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK | 4346 MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK | 4347 MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK | 4348 MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK; 4349 MvGop110GmacWrite (Port, MVPP2_PORT_AUTO_NEG_CFG_REG, an); 4350 } 4351 4352 VOID 4353 MvGop110GmacSgmii25Cfg ( 4354 IN PP2DXE_PORT *Port 4355 ) 4356 { 4357 UINT32 Val, thresh, an; 4358 4359 /* 4360 * Configure minimal level of the Tx FIFO before 4361 * the lower part starts to read a packet. 4362 */ 4363 thresh = MV_SGMII2_5_TX_FIFO_MIN_TH; 4364 Val = MvGop110GmacRead (Port, MVPP2_PORT_FIFO_CFG_1_REG); 4365 U32_SET_FIELD ( 4366 Val, 4367 MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK, 4368 (thresh << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS) 4369 ); 4370 4371 MvGop110GmacWrite (Port, MVPP2_PORT_FIFO_CFG_1_REG, Val); 4372 4373 /* Disable bypass of sync module */ 4374 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL4_REG); 4375 Val |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK; 4376 4377 /* Configure DP clock select according to mode */ 4378 Val |= MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK; 4379 4380 /* Configure QSGMII bypass according to mode */ 4381 Val |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK; 4382 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL4_REG, Val); 4383 4384 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG); 4385 Val |= MVPP2_PORT_CTRL2_DIS_PADING_OFFS; 4386 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, Val); 4387 4388 /* Configure GIG MAC to 1000Base-X mode connected to a fiber transceiver */ 4389 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG); 4390 Val |= MVPP2_PORT_CTRL0_PORTTYPE_MASK; 4391 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, Val); 4392 4393 /* configure AN 0x9268 */ 4394 an = MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK | 4395 MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK | 4396 MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK | 4397 MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK | 4398 MVPP2_PORT_AUTO_NEG_CFG_ADV_PAUSE_MASK | 4399 MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK | 4400 MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK; 4401 MvGop110GmacWrite (Port, MVPP2_PORT_AUTO_NEG_CFG_REG, an); 4402 } 4403 4404 VOID 4405 MvGop110GmacSgmiiCfg ( 4406 IN PP2DXE_PORT *Port 4407 ) 4408 { 4409 UINT32 Val, thresh, an; 4410 4411 /* 4412 * Configure minimal level of the Tx FIFO before 4413 * the lower part starts to read a packet. 4414 */ 4415 thresh = MV_SGMII_TX_FIFO_MIN_TH; 4416 Val = MvGop110GmacRead (Port, MVPP2_PORT_FIFO_CFG_1_REG); 4417 U32_SET_FIELD (Val, MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK, 4418 (thresh << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS)); 4419 MvGop110GmacWrite (Port, MVPP2_PORT_FIFO_CFG_1_REG, Val); 4420 4421 /* Disable bypass of sync module */ 4422 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL4_REG); 4423 Val |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK; 4424 4425 /* Configure DP clock select according to mode */ 4426 Val &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK; 4427 4428 /* Configure QSGMII bypass according to mode */ 4429 Val |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK; 4430 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL4_REG, Val); 4431 4432 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG); 4433 Val |= MVPP2_PORT_CTRL2_DIS_PADING_OFFS; 4434 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, Val); 4435 4436 /* Configure GIG MAC to SGMII mode */ 4437 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG); 4438 Val &= ~MVPP2_PORT_CTRL0_PORTTYPE_MASK; 4439 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, Val); 4440 4441 /* Configure AN */ 4442 an = MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK | 4443 MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK | 4444 MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK | 4445 MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK | 4446 MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK | 4447 MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK; 4448 MvGop110GmacWrite (Port, MVPP2_PORT_AUTO_NEG_CFG_REG, an); 4449 } 4450 4451 VOID 4452 MvGop110GmacQsgmiiCfg ( 4453 IN PP2DXE_PORT *Port 4454 ) 4455 { 4456 UINT32 Val, thresh, an; 4457 4458 /* 4459 * Configure minimal level of the Tx FIFO before 4460 * the lower part starts to read a packet. 4461 */ 4462 thresh = MV_SGMII_TX_FIFO_MIN_TH; 4463 Val = MvGop110GmacRead (Port, MVPP2_PORT_FIFO_CFG_1_REG); 4464 U32_SET_FIELD ( 4465 Val, 4466 MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK, 4467 (thresh << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS) 4468 ); 4469 4470 MvGop110GmacWrite (Port, MVPP2_PORT_FIFO_CFG_1_REG, Val); 4471 4472 /* Disable bypass of sync module */ 4473 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL4_REG); 4474 Val |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK; 4475 4476 /* Configure DP clock select according to mode */ 4477 Val &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK; 4478 Val &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK; 4479 4480 /* Configure QSGMII bypass according to mode */ 4481 Val &= ~MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK; 4482 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL4_REG, Val); 4483 4484 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL2_REG); 4485 Val &= ~MVPP2_PORT_CTRL2_DIS_PADING_OFFS; 4486 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL2_REG, Val); 4487 4488 /* Configure GIG MAC to SGMII mode */ 4489 Val = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG); 4490 Val &= ~MVPP2_PORT_CTRL0_PORTTYPE_MASK; 4491 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, Val); 4492 4493 /* Configure AN 0xB8EC */ 4494 an = MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK | 4495 MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK | 4496 MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK | 4497 MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK | 4498 MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK | 4499 MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK; 4500 MvGop110GmacWrite (Port, MVPP2_PORT_AUTO_NEG_CFG_REG, an); 4501 } 4502 4503 INT32 4504 Mvpp2SmiPhyAddrCfg ( 4505 IN PP2DXE_PORT *Port, 4506 IN INT32 PortId, 4507 IN INT32 Addr 4508 ) 4509 { 4510 Mvpp2SmiWrite (Port->Priv, MV_SMI_PHY_ADDRESS_REG(PortId), Addr); 4511 4512 return 0; 4513 } 4514 4515 BOOLEAN 4516 MvGop110PortIsLinkUp ( 4517 IN PP2DXE_PORT *Port 4518 ) 4519 { 4520 switch (Port->PhyInterface) { 4521 case MV_MODE_RGMII: 4522 case MV_MODE_SGMII: 4523 case MV_MODE_QSGMII: 4524 return MvGop110GmacLinkStatusGet (Port); 4525 case MV_MODE_XAUI: 4526 case MV_MODE_RXAUI: 4527 return FALSE; 4528 default: 4529 return FALSE; 4530 } 4531 } 4532 4533 /* Get MAC link status */ 4534 BOOLEAN 4535 MvGop110GmacLinkStatusGet ( 4536 IN PP2DXE_PORT *Port 4537 ) 4538 { 4539 UINT32 RegAddr; 4540 UINT32 Val; 4541 4542 RegAddr = MVPP2_PORT_STATUS0_REG; 4543 4544 Val = MvGop110GmacRead (Port, RegAddr); 4545 4546 return (Val & 1) ? TRUE : FALSE; 4547 } 4548 4549 VOID 4550 MvGop110PortDisable ( 4551 IN PP2DXE_PORT *Port 4552 ) 4553 { 4554 switch (Port->PhyInterface) { 4555 case MV_MODE_RGMII: 4556 case MV_MODE_SGMII: 4557 case MV_MODE_QSGMII: 4558 MvGop110GmacPortDisable (Port); 4559 break; 4560 default: 4561 return; 4562 } 4563 } 4564 4565 VOID 4566 MvGop110PortEnable ( 4567 IN PP2DXE_PORT *Port 4568 ) 4569 { 4570 switch (Port->PhyInterface) { 4571 case MV_MODE_RGMII: 4572 case MV_MODE_SGMII: 4573 case MV_MODE_QSGMII: 4574 MvGop110GmacPortEnable (Port); 4575 break; 4576 default: 4577 return; 4578 } 4579 } 4580 4581 /* Enable Port and MIB counters */ 4582 VOID 4583 MvGop110GmacPortEnable ( 4584 IN PP2DXE_PORT *Port 4585 ) 4586 { 4587 UINT32 RegVal; 4588 4589 RegVal = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG); 4590 RegVal |= MVPP2_PORT_CTRL0_PORTEN_MASK; 4591 RegVal |= MVPP2_PORT_CTRL0_COUNT_EN_MASK; 4592 4593 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, RegVal); 4594 } 4595 4596 /* Disable Port */ 4597 VOID 4598 MvGop110GmacPortDisable ( 4599 IN PP2DXE_PORT *Port 4600 ) 4601 { 4602 UINT32 RegVal; 4603 4604 /* Mask all Ports interrupts */ 4605 MvGop110GmacPortLinkEventMask (Port); 4606 4607 RegVal = MvGop110GmacRead (Port, MVPP2_PORT_CTRL0_REG); 4608 RegVal &= ~MVPP2_PORT_CTRL0_PORTEN_MASK; 4609 4610 MvGop110GmacWrite (Port, MVPP2_PORT_CTRL0_REG, RegVal); 4611 } 4612 4613 VOID 4614 MvGop110GmacPortLinkEventMask ( 4615 IN PP2DXE_PORT *Port 4616 ) 4617 { 4618 UINT32 RegVal; 4619 4620 RegVal = MvGop110GmacRead (Port, MV_GMAC_INTERRUPT_SUM_MASK_REG); 4621 RegVal &= ~MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_MASK; 4622 MvGop110GmacWrite (Port, MV_GMAC_INTERRUPT_SUM_MASK_REG, RegVal); 4623 } 4624 4625 INT32 4626 MvGop110PortEventsMask ( 4627 IN PP2DXE_PORT *Port 4628 ) 4629 { 4630 4631 switch (Port->PhyInterface) { 4632 case MV_MODE_RGMII: 4633 case MV_MODE_SGMII: 4634 case MV_MODE_QSGMII: 4635 MvGop110GmacPortLinkEventMask (Port); 4636 break; 4637 default: 4638 return -1; 4639 } 4640 4641 return 0; 4642 } 4643 4644 INT32 4645 MvGop110FlCfg ( 4646 IN PP2DXE_PORT *Port 4647 ) 4648 { 4649 switch (Port->PhyInterface) { 4650 case MV_MODE_RGMII: 4651 case MV_MODE_SGMII: 4652 case MV_MODE_QSGMII: 4653 /* Disable AN */ 4654 MvGop110SpeedDuplexSet (Port, Port->Speed, MV_PORT_DUPLEX_FULL); 4655 break; 4656 case MV_MODE_XAUI: 4657 case MV_MODE_RXAUI: 4658 return 0; 4659 default: 4660 return -1; 4661 } 4662 4663 return 0; 4664 } 4665 4666 /* Set Port Speed and Duplex */ 4667 INT32 4668 MvGop110SpeedDuplexSet ( 4669 IN PP2DXE_PORT *Port, 4670 IN INT32 Speed, 4671 IN enum MvPortDuplex Duplex 4672 ) 4673 { 4674 switch (Port->PhyInterface) { 4675 case MV_MODE_RGMII: 4676 case MV_MODE_SGMII: 4677 case MV_MODE_QSGMII: 4678 MvGop110GmacSpeedDuplexSet (Port, Speed, Duplex); 4679 break; 4680 case MV_MODE_XAUI: 4681 case MV_MODE_RXAUI: 4682 break; 4683 default: 4684 return -1; 4685 } 4686 4687 return 0; 4688 } 4689 4690 /* 4691 * Sets Port Speed to Auto Negotiation / 1000 / 100 / 10 Mbps. 4692 * Sets Port Duplex to Auto Negotiation / Full / Half Duplex. 4693 */ 4694 INT32 4695 MvGop110GmacSpeedDuplexSet ( 4696 IN PP2DXE_PORT *Port, 4697 IN INT32 Speed, 4698 IN enum MvPortDuplex Duplex 4699 ) 4700 { 4701 UINT32 RegVal; 4702 4703 RegVal = Mvpp2GmacRead (Port, MVPP2_PORT_AUTO_NEG_CFG_REG); 4704 4705 switch (Speed) { 4706 case MV_PORT_SPEED_2500: 4707 case MV_PORT_SPEED_1000: 4708 RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK; 4709 RegVal |= MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK; 4710 /* The 100/10 bit doesn't matter in this case */ 4711 break; 4712 case MV_PORT_SPEED_100: 4713 RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK; 4714 RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK; 4715 RegVal |= MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK; 4716 break; 4717 case MV_PORT_SPEED_10: 4718 RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK; 4719 RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK; 4720 RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK; 4721 break; 4722 default: 4723 return MVPP2_EINVAL; 4724 } 4725 4726 switch (Duplex) { 4727 case MV_PORT_DUPLEX_AN: 4728 RegVal |= MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK; 4729 /* The other bits don't matter in this case */ 4730 break; 4731 case MV_PORT_DUPLEX_HALF: 4732 RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK; 4733 RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK; 4734 break; 4735 case MV_PORT_DUPLEX_FULL: 4736 RegVal &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK; 4737 RegVal |= MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK; 4738 break; 4739 default: 4740 return MVPP2_EINVAL; 4741 } 4742 4743 Mvpp2GmacWrite (Port, MVPP2_PORT_AUTO_NEG_CFG_REG, RegVal); 4744 4745 return 0; 4746 } 4747 4748 VOID 4749 Mvpp2AxiConfig ( 4750 IN MVPP2_SHARED *Priv 4751 ) 4752 { 4753 /* Config AXI Read&Write Normal and Soop mode */ 4754 Mvpp2Write (Priv, MVPP22_AXI_BM_WR_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT); 4755 Mvpp2Write (Priv, MVPP22_AXI_BM_RD_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT); 4756 Mvpp2Write (Priv, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT); 4757 Mvpp2Write (Priv, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT); 4758 Mvpp2Write (Priv, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT); 4759 Mvpp2Write (Priv, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT); 4760 Mvpp2Write (Priv, MVPP22_AXI_RX_DATA_WR_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT); 4761 Mvpp2Write (Priv, MVPP22_AXI_TX_DATA_RD_ATTR_REG, MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT); 4762 } 4763 4764 /* Cleanup Tx Ports */ 4765 VOID 4766 Mvpp2TxpClean ( 4767 IN PP2DXE_PORT *Port, 4768 IN INT32 Txp, 4769 IN MVPP2_TX_QUEUE *Txq 4770 ) 4771 { 4772 INT32 Delay, Pending; 4773 UINT32 RegVal; 4774 4775 Mvpp2Write (Port->Priv, MVPP2_TXQ_NUM_REG, Txq->Id); 4776 RegVal = Mvpp2Read (Port->Priv, MVPP2_TXQ_PREF_BUF_REG); 4777 RegVal |= MVPP2_TXQ_DRAIN_EN_MASK; 4778 Mvpp2Write (Port->Priv, MVPP2_TXQ_PREF_BUF_REG, RegVal); 4779 4780 /* 4781 * The Queue has been stopped so wait for all packets 4782 * to be transmitted. 4783 */ 4784 Delay = 0; 4785 do { 4786 if (Delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { 4787 Mvpp2Printf ("Port %d: cleaning Queue %d timed out\n", Port->Id, Txq->LogId); 4788 break; 4789 } 4790 Mvpp2Mdelay (1); 4791 Delay++; 4792 4793 Pending = Mvpp2TxqPendDescNumGet (Port, Txq); 4794 } while (Pending); 4795 4796 RegVal &= ~MVPP2_TXQ_DRAIN_EN_MASK; 4797 Mvpp2Write (Port->Priv, MVPP2_TXQ_PREF_BUF_REG, RegVal); 4798 } 4799 4800 /* Cleanup all Tx Queues */ 4801 VOID 4802 Mvpp2CleanupTxqs ( 4803 IN PP2DXE_PORT *Port 4804 ) 4805 { 4806 MVPP2_TX_QUEUE *Txq; 4807 INT32 Txp, Queue; 4808 UINT32 RegVal; 4809 4810 RegVal = Mvpp2Read (Port->Priv, MVPP2_TX_PORT_FLUSH_REG); 4811 4812 /* Reset Tx Ports and delete Tx Queues */ 4813 for (Txp = 0; Txp < Port->TxpNum; Txp++) { 4814 RegVal |= MVPP2_TX_PORT_FLUSH_MASK (Port->Id); 4815 Mvpp2Write (Port->Priv, MVPP2_TX_PORT_FLUSH_REG, RegVal); 4816 4817 for (Queue = 0; Queue < TxqNumber; Queue++) { 4818 Txq = &Port->Txqs[Txp * TxqNumber + Queue]; 4819 Mvpp2TxpClean (Port, Txp, Txq); 4820 Mvpp2TxqHwDeinit (Port, Txq); 4821 } 4822 4823 RegVal &= ~MVPP2_TX_PORT_FLUSH_MASK (Port->Id); 4824 Mvpp2Write (Port->Priv, MVPP2_TX_PORT_FLUSH_REG, RegVal); 4825 } 4826 } 4827 4828 /* Cleanup all Rx Queues */ 4829 VOID 4830 Mvpp2CleanupRxqs ( 4831 IN PP2DXE_PORT *Port 4832 ) 4833 { 4834 INT32 Queue; 4835 4836 for (Queue = 0; Queue < RxqNumber; Queue++) { 4837 Mvpp2RxqHwDeinit (Port, &Port->Rxqs[Queue]); 4838 } 4839 } 4840