/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "common.h"
#include "regs.h"
#include "sge_defs.h"
#include "firmware_exports.h"

/**
 * t3_wait_op_done_val - wait until an operation is completed
 * @adapter: the adapter performing the operation
 * @reg: the register to check for completion
 * @mask: a single-bit field within @reg that indicates completion
 * @polarity: the value of the field when the operation is completed
 * @attempts: number of check iterations
 * @delay: delay in usecs between iterations
 * @valp: where to store the value of the register at completion time
 *
 * Wait until an operation is completed by checking a bit in a register
 * up to @attempts times. If @valp is not NULL the value of the register
 * at the time it indicated completion is stored there. Returns 0 if the
 * operation completes and -EAGAIN otherwise.
 */
int t3_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
			int polarity, int attempts, int delay, u32 *valp)
{
	while (1) {
		u32 val = t3_read_reg(adapter, reg);

		if (!!(val & mask) == polarity) {
			if (valp)
				*valp = val;
			return 0;
		}
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			udelay(delay);
	}
}
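
/*
 * Most callers below only need success/failure, not the final register
 * value, and use t3_wait_op_done() instead. That helper is, in all
 * likelihood, a thin inline wrapper over the function above declared in
 * common.h -- a minimal sketch, assuming that declaration:
 *
 *	static inline int t3_wait_op_done(struct adapter *adapter, int reg,
 *					  u32 mask, int polarity,
 *					  int attempts, int delay)
 *	{
 *		return t3_wait_op_done_val(adapter, reg, mask, polarity,
 *					   attempts, delay, NULL);
 *	}
 */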

/**
 * t3_write_regs - write a bunch of registers
 * @adapter: the adapter to program
 * @p: an array of register address/register value pairs
 * @n: the number of address/value pairs
 * @offset: register address offset
 *
 * Takes an array of register address/register value pairs and writes each
 * value to the corresponding register. Register addresses are adjusted
 * by the supplied offset.
 */
void t3_write_regs(struct adapter *adapter, const struct addr_val_pair *p,
		   int n, unsigned int offset)
{
	while (n--) {
		t3_write_reg(adapter, p->reg_addr + offset, p->val);
		p++;
	}
}
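
/*
 * Usage sketch (hypothetical table; these register names appear elsewhere
 * in this file but this is not a required init sequence):
 *
 *	static const struct addr_val_pair mac_init_regs[] = {
 *		{ A_XGM_RX_CTRL, 0 },
 *		{ A_XGM_RX_HASH_LOW, 0 },
 *	};
 *
 *	t3_write_regs(adapter, mac_init_regs, ARRAY_SIZE(mac_init_regs),
 *		      mac->offset);
 */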

/**
 * t3_set_reg_field - set a register field to a value
 * @adapter: the adapter to program
 * @addr: the register address
 * @mask: specifies the portion of the register to modify
 * @val: the new value for the register field
 *
 * Sets a register field specified by the supplied mask to the
 * given value.
 */
void t3_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
		      u32 val)
{
	u32 v = t3_read_reg(adapter, addr) & ~mask;

	t3_write_reg(adapter, addr, v | val);
	t3_read_reg(adapter, addr);	/* flush */
}
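
/*
 * Example from later in this file: t3_set_reg_field(adapter, A_MI1_CFG,
 * V_ST(M_ST), V_ST(1)) rewrites only the ST field of MI1_CFG and leaves
 * every other bit untouched; the trailing read-back flushes the write.
 */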

/**
 * t3_read_indirect - read indirectly addressed registers
 * @adap: the adapter
 * @addr_reg: register holding the indirect address
 * @data_reg: register holding the value of the indirect register
 * @vals: where the read register values are stored
 * @nregs: how many indirect registers to read
 * @start_idx: index of first indirect register to read
 *
 * Reads registers that are accessed indirectly through an address/data
 * register pair.
 */
static void t3_read_indirect(struct adapter *adap, unsigned int addr_reg,
			     unsigned int data_reg, u32 *vals,
			     unsigned int nregs, unsigned int start_idx)
{
	while (nregs--) {
		t3_write_reg(adap, addr_reg, start_idx);
		*vals++ = t3_read_reg(adap, data_reg);
		start_idx++;
	}
}

/**
 * t3_mc7_bd_read - read from MC7 through backdoor accesses
 * @mc7: identifies MC7 to read from
 * @start: index of first 64-bit word to read
 * @n: number of 64-bit words to read
 * @buf: where to store the read result
 *
 * Read n 64-bit words from MC7 starting at word start, using backdoor
 * accesses.
 */
int t3_mc7_bd_read(struct mc7 *mc7, unsigned int start, unsigned int n,
		   u64 *buf)
{
	static const int shift[] = { 0, 0, 16, 24 };
	static const int step[] = { 0, 32, 16, 8 };

	unsigned int size64 = mc7->size / 8;	/* # of 64-bit words */
	struct adapter *adap = mc7->adapter;

	if (start >= size64 || start + n > size64)
		return -EINVAL;

	start *= (8 << mc7->width);
	while (n--) {
		int i;
		u64 val64 = 0;

		for (i = (1 << mc7->width) - 1; i >= 0; --i) {
			int attempts = 10;
			u32 val;

			t3_write_reg(adap, mc7->offset + A_MC7_BD_ADDR, start);
			t3_write_reg(adap, mc7->offset + A_MC7_BD_OP, 0);
			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_OP);
			while ((val & F_BUSY) && attempts--)
				val = t3_read_reg(adap,
						  mc7->offset + A_MC7_BD_OP);
			if (val & F_BUSY)
				return -EIO;

			val = t3_read_reg(adap, mc7->offset + A_MC7_BD_DATA1);
			if (mc7->width == 0) {
				val64 = t3_read_reg(adap,
						    mc7->offset +
						    A_MC7_BD_DATA0);
				val64 |= (u64) val << 32;
			} else {
				if (mc7->width > 1)
					val >>= shift[mc7->width];
				val64 |= (u64) val << (step[mc7->width] * i);
			}
			start += 8;
		}
		*buf++ = val64;
	}
	return 0;
}

/*
 * Initialize MI1.
 */
static void mi1_init(struct adapter *adap, const struct adapter_info *ai)
{
	u32 clkdiv = adap->params.vpd.cclk / (2 * adap->params.vpd.mdc) - 1;
	u32 val = F_PREEN | V_CLKDIV(clkdiv);

	t3_write_reg(adap, A_MI1_CFG, val);
}

#define MDIO_ATTEMPTS 20

/*
 * MI1 read/write operations for clause 22 PHYs.
 */
static int t3_mi1_read(struct net_device *dev, int phy_addr, int mmd_addr,
		       u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(2));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	if (!ret)
		ret = t3_read_reg(adapter, A_MI1_DATA);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int t3_mi1_write(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u32 addr = V_REGADDR(reg_addr) | V_PHYADDR(phy_addr);

	mutex_lock(&adapter->mdio_lock);
	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), V_ST(1));
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, val);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
	ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0, MDIO_ATTEMPTS, 10);
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ops = {
	.read = t3_mi1_read,
	.write = t3_mi1_write,
	.mode_support = MDIO_SUPPORTS_C22
};

/*
 * Performs the address cycle for clause 45 PHYs.
 * Must be called with the MDIO_LOCK held.
 */
static int mi1_wr_addr(struct adapter *adapter, int phy_addr, int mmd_addr,
		       int reg_addr)
{
	u32 addr = V_REGADDR(mmd_addr) | V_PHYADDR(phy_addr);

	t3_set_reg_field(adapter, A_MI1_CFG, V_ST(M_ST), 0);
	t3_write_reg(adapter, A_MI1_ADDR, addr);
	t3_write_reg(adapter, A_MI1_DATA, reg_addr);
	t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(0));
	return t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
			       MDIO_ATTEMPTS, 10);
}

/*
 * MI1 read/write operations for indirect-addressed PHYs.
 */
static int mi1_ext_read(struct net_device *dev, int phy_addr, int mmd_addr,
			u16 reg_addr)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(3));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
		if (!ret)
			ret = t3_read_reg(adapter, A_MI1_DATA);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static int mi1_ext_write(struct net_device *dev, int phy_addr, int mmd_addr,
			 u16 reg_addr, u16 val)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	mutex_lock(&adapter->mdio_lock);
	ret = mi1_wr_addr(adapter, phy_addr, mmd_addr, reg_addr);
	if (!ret) {
		t3_write_reg(adapter, A_MI1_DATA, val);
		t3_write_reg(adapter, A_MI1_OP, V_MDI_OP(1));
		ret = t3_wait_op_done(adapter, A_MI1_OP, F_BUSY, 0,
				      MDIO_ATTEMPTS, 10);
	}
	mutex_unlock(&adapter->mdio_lock);
	return ret;
}

static const struct mdio_ops mi1_mdio_ext_ops = {
	.read = mi1_ext_read,
	.write = mi1_ext_write,
	.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22
};
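
/*
 * MDI_OP encodings as used in the two op tables above: V_MDI_OP(0)
 * issues the clause 45 address cycle, V_MDI_OP(1) a write (both clause
 * 22 and, following an address cycle, clause 45), V_MDI_OP(2) a clause
 * 22 read, and V_MDI_OP(3) a clause 45 read after an address cycle.
 */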

/**
 * t3_mdio_change_bits - modify the value of a PHY register
 * @phy: the PHY to operate on
 * @mmd: the device address
 * @reg: the register address
 * @clear: what part of the register value to mask off
 * @set: what part of the register value to set
 *
 * Changes the value of a PHY register by applying a mask to its current
 * value and ORing the result with a new value.
 */
int t3_mdio_change_bits(struct cphy *phy, int mmd, int reg, unsigned int clear,
			unsigned int set)
{
	int ret;
	unsigned int val;

	ret = t3_mdio_read(phy, mmd, reg, &val);
	if (!ret) {
		val &= ~clear;
		ret = t3_mdio_write(phy, mmd, reg, val | set);
	}
	return ret;
}

/**
 * t3_phy_reset - reset a PHY block
 * @phy: the PHY to operate on
 * @mmd: the device address of the PHY block to reset
 * @wait: how long to wait for the reset to complete in 1ms increments
 *
 * Resets a PHY block and optionally waits for the reset to complete.
 * @mmd should be 0 for 10/100/1000 PHYs and the device address to reset
 * for 10G PHYs.
 */
int t3_phy_reset(struct cphy *phy, int mmd, int wait)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_change_bits(phy, mmd, MDIO_CTRL1, MDIO_CTRL1_LPOWER,
				  MDIO_CTRL1_RESET);
	if (err || !wait)
		return err;

	do {
		err = t3_mdio_read(phy, mmd, MDIO_CTRL1, &ctl);
		if (err)
			return err;
		ctl &= MDIO_CTRL1_RESET;
		if (ctl)
			msleep(1);
	} while (ctl && --wait);

	return ctl ? -1 : 0;
}

/**
 * t3_phy_advertise - set the PHY advertisement registers for autoneg
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a 10/100/1000 PHY's advertisement registers to advertise the
 * requested capabilities.
 */
int t3_phy_advertise(struct cphy *phy, unsigned int advert)
{
	int err;
	unsigned int val = 0;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_CTRL1000, &val);
	if (err)
		return err;

	val &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000HALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000FULL;

	err = t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_CTRL1000, val);
	if (err)
		return err;

	val = 1;
	if (advert & ADVERTISED_10baseT_Half)
		val |= ADVERTISE_10HALF;
	if (advert & ADVERTISED_10baseT_Full)
		val |= ADVERTISE_10FULL;
	if (advert & ADVERTISED_100baseT_Half)
		val |= ADVERTISE_100HALF;
	if (advert & ADVERTISED_100baseT_Full)
		val |= ADVERTISE_100FULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_PAUSE_CAP;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_PAUSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}

/**
 * t3_phy_advertise_fiber - set fiber PHY advertisement register
 * @phy: the PHY to operate on
 * @advert: bitmap of capabilities the PHY should advertise
 *
 * Sets a fiber PHY's advertisement register to advertise the
 * requested capabilities.
 */
int t3_phy_advertise_fiber(struct cphy *phy, unsigned int advert)
{
	unsigned int val = 0;

	if (advert & ADVERTISED_1000baseT_Half)
		val |= ADVERTISE_1000XHALF;
	if (advert & ADVERTISED_1000baseT_Full)
		val |= ADVERTISE_1000XFULL;
	if (advert & ADVERTISED_Pause)
		val |= ADVERTISE_1000XPAUSE;
	if (advert & ADVERTISED_Asym_Pause)
		val |= ADVERTISE_1000XPSE_ASYM;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_ADVERTISE, val);
}

/**
 * t3_set_phy_speed_duplex - force PHY speed and duplex
 * @phy: the PHY to operate on
 * @speed: requested PHY speed
 * @duplex: requested PHY duplex
 *
 * Force a 10/100/1000 PHY's speed and duplex. This also disables
 * auto-negotiation except for GigE, where auto-negotiation is mandatory.
 */
int t3_set_phy_speed_duplex(struct cphy *phy, int speed, int duplex)
{
	int err;
	unsigned int ctl;

	err = t3_mdio_read(phy, MDIO_DEVAD_NONE, MII_BMCR, &ctl);
	if (err)
		return err;

	if (speed >= 0) {
		ctl &= ~(BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
		if (speed == SPEED_100)
			ctl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			ctl |= BMCR_SPEED1000;
	}
	if (duplex >= 0) {
		ctl &= ~(BMCR_FULLDPLX | BMCR_ANENABLE);
		if (duplex == DUPLEX_FULL)
			ctl |= BMCR_FULLDPLX;
	}
	if (ctl & BMCR_SPEED1000)	/* auto-negotiation required for GigE */
		ctl |= BMCR_ANENABLE;
	return t3_mdio_write(phy, MDIO_DEVAD_NONE, MII_BMCR, ctl);
}

int t3_phy_lasi_intr_enable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
			     MDIO_PMA_LASI_LSALARM);
}

int t3_phy_lasi_intr_disable(struct cphy *phy)
{
	return t3_mdio_write(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0);
}

int t3_phy_lasi_intr_clear(struct cphy *phy)
{
	u32 val;

	return t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
}

int t3_phy_lasi_intr_handler(struct cphy *phy)
{
	unsigned int status;
	int err = t3_mdio_read(phy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT,
			       &status);

	if (err)
		return err;
	return (status & MDIO_PMA_LASI_LSALARM) ? cphy_cause_link_change : 0;
}

static const struct adapter_info t3_adap_info[] = {
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio PE9000"},
	{1, 1, 0,
	 F_GPIO2_OEN | F_GPIO4_OEN |
	 F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
	 &mi1_mdio_ops, "Chelsio T302"},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
	 F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310"},
	{1, 1, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
	 F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
	 F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9, S_GPIO3 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T320"},
	{},
	{},
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio T310" },
	{1, 0, 0,
	 F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
	 F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL,
	 { S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
	 &mi1_mdio_ext_ops, "Chelsio N320E-G2" },
};

/*
 * Return the adapter_info structure with a given index. Out-of-range indices
 * return NULL.
 */
const struct adapter_info *t3_get_adapter_info(unsigned int id)
{
	return id < ARRAY_SIZE(t3_adap_info) ? &t3_adap_info[id] : NULL;
}

struct port_type_info {
	int (*phy_prep)(struct cphy *phy, struct adapter *adapter,
			int phy_addr, const struct mdio_ops *ops);
};

static const struct port_type_info port_types[] = {
	{ NULL },
	{ t3_ael1002_phy_prep },
	{ t3_vsc8211_phy_prep },
	{ NULL },
	{ t3_xaui_direct_phy_prep },
	{ t3_ael2005_phy_prep },
	{ t3_qt2045_phy_prep },
	{ t3_ael1006_phy_prep },
	{ NULL },
	{ t3_aq100x_phy_prep },
	{ t3_ael2020_phy_prep },
};

#define VPD_ENTRY(name, len) \
	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]

/*
 * Partial EEPROM Vital Product Data structure. Includes only the ID and
 * VPD-R sections.
 */
struct t3_vpd {
	u8 id_tag;
	u8 id_len[2];
	u8 id_data[16];
	u8 vpdr_tag;
	u8 vpdr_len[2];
	VPD_ENTRY(pn, 16);		/* part number */
	VPD_ENTRY(ec, 16);		/* EC level */
	VPD_ENTRY(sn, SERNUM_LEN);	/* serial number */
	VPD_ENTRY(na, 12);		/* MAC address base */
	VPD_ENTRY(cclk, 6);		/* core clock */
	VPD_ENTRY(mclk, 6);		/* mem clock */
	VPD_ENTRY(uclk, 6);		/* uP clk */
	VPD_ENTRY(mdc, 6);		/* MDIO clk */
	VPD_ENTRY(mt, 2);		/* mem timing */
	VPD_ENTRY(xaui0cfg, 6);		/* XAUI0 config */
	VPD_ENTRY(xaui1cfg, 6);		/* XAUI1 config */
	VPD_ENTRY(port0, 2);		/* PHY0 complex */
	VPD_ENTRY(port1, 2);		/* PHY1 complex */
	VPD_ENTRY(port2, 2);		/* PHY2 complex */
	VPD_ENTRY(port3, 2);		/* PHY3 complex */
	VPD_ENTRY(rv, 1);		/* csum */
	u32 pad;			/* for multiple-of-4 sizing and alignment */
};
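
/*
 * For example, VPD_ENTRY(sn, SERNUM_LEN) above expands to
 *
 *	u8 sn_kword[2]; u8 sn_len; u8 sn_data[SERNUM_LEN];
 *
 * i.e., the two-byte VPD keyword, a length byte, and the payload, which
 * mirrors the VPD-R keyword layout as stored in the EEPROM.
 */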

#define EEPROM_MAX_POLL   40
#define EEPROM_STAT_ADDR  0x4000
#define VPD_BASE          0xc00

/**
 * t3_seeprom_read - read a VPD EEPROM location
 * @adapter: adapter to read
 * @addr: EEPROM address
 * @data: where to store the read data
 *
 * Read a 32-bit word from a location in VPD EEPROM using the card's PCI
 * VPD ROM capability. A zero is written to the flag bit when the
 * address is written to the control register. The hardware device will
 * set the flag to 1 when 4 bytes have been read into the data register.
 */
int t3_seeprom_read(struct adapter *adapter, u32 addr, __le32 *data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	u32 v;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR, addr);
	do {
		udelay(10);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while (!(val & PCI_VPD_ADDR_F) && --attempts);

	if (!(val & PCI_VPD_ADDR_F)) {
		CH_ERR(adapter, "reading EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	pci_read_config_dword(adapter->pdev, base + PCI_VPD_DATA, &v);
	*data = cpu_to_le32(v);
	return 0;
}

/**
 * t3_seeprom_write - write a VPD EEPROM location
 * @adapter: adapter to write
 * @addr: EEPROM address
 * @data: value to write
 *
 * Write a 32-bit word to a location in VPD EEPROM using the card's PCI
 * VPD ROM capability.
 */
int t3_seeprom_write(struct adapter *adapter, u32 addr, __le32 data)
{
	u16 val;
	int attempts = EEPROM_MAX_POLL;
	unsigned int base = adapter->params.pci.vpd_cap_addr;

	if ((addr >= EEPROMSIZE && addr != EEPROM_STAT_ADDR) || (addr & 3))
		return -EINVAL;

	pci_write_config_dword(adapter->pdev, base + PCI_VPD_DATA,
			       le32_to_cpu(data));
	pci_write_config_word(adapter->pdev, base + PCI_VPD_ADDR,
			      addr | PCI_VPD_ADDR_F);
	do {
		msleep(1);
		pci_read_config_word(adapter->pdev, base + PCI_VPD_ADDR, &val);
	} while ((val & PCI_VPD_ADDR_F) && --attempts);

	if (val & PCI_VPD_ADDR_F) {
		CH_ERR(adapter, "write to EEPROM address 0x%x failed\n", addr);
		return -EIO;
	}
	return 0;
}

/**
 * t3_seeprom_wp - enable/disable EEPROM write protection
 * @adapter: the adapter
 * @enable: 1 to enable write protection, 0 to disable it
 *
 * Enables or disables write protection on the serial EEPROM.
 */
int t3_seeprom_wp(struct adapter *adapter, int enable)
{
	return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}

/*
 * Convert a character holding a hex digit to a number.
 */
static unsigned int hex2int(unsigned char c)
{
	return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
}
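
/*
 * E.g. hex2int('7') == 7 and hex2int('a') == hex2int('A') == 10. There
 * is no validation here; callers must pass a genuine hex digit.
 */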

/**
 * get_vpd_params - read VPD parameters from VPD EEPROM
 * @adapter: adapter to read
 * @p: where to store the parameters
 *
 * Reads card parameters stored in VPD EEPROM.
 */
static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
	int i, addr, ret;
	struct t3_vpd vpd;

	/*
	 * Card information is normally at VPD_BASE but some early cards had
	 * it at 0.
	 */
	ret = t3_seeprom_read(adapter, VPD_BASE, (__le32 *)&vpd);
	if (ret)
		return ret;
	addr = vpd.id_tag == 0x82 ? VPD_BASE : 0;

	for (i = 0; i < sizeof(vpd); i += 4) {
		ret = t3_seeprom_read(adapter, addr + i,
				      (__le32 *)((u8 *)&vpd + i));
		if (ret)
			return ret;
	}

	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
	p->mclk = simple_strtoul(vpd.mclk_data, NULL, 10);
	p->uclk = simple_strtoul(vpd.uclk_data, NULL, 10);
	p->mdc = simple_strtoul(vpd.mdc_data, NULL, 10);
	p->mem_timing = simple_strtoul(vpd.mt_data, NULL, 10);
	memcpy(p->sn, vpd.sn_data, SERNUM_LEN);

	/* Old eeproms didn't have port information */
	if (adapter->params.rev == 0 && !vpd.port0_data[0]) {
		p->port_type[0] = uses_xaui(adapter) ? 1 : 2;
		p->port_type[1] = uses_xaui(adapter) ? 6 : 2;
	} else {
		p->port_type[0] = hex2int(vpd.port0_data[0]);
		p->port_type[1] = hex2int(vpd.port1_data[0]);
		p->xauicfg[0] = simple_strtoul(vpd.xaui0cfg_data, NULL, 16);
		p->xauicfg[1] = simple_strtoul(vpd.xaui1cfg_data, NULL, 16);
	}

	for (i = 0; i < 6; i++)
		p->eth_base[i] = hex2int(vpd.na_data[2 * i]) * 16 +
				 hex2int(vpd.na_data[2 * i + 1]);
	return 0;
}

/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 5,		/* max retries for SF1 operations */
	SF_SEC_SIZE = 64 * 1024,	/* serial flash sector size */
	SF_SIZE = SF_SEC_SIZE * 8,	/* serial flash size */

	/* flash command opcodes */
	SF_PROG_PAGE = 2,		/* program page */
	SF_WR_DISABLE = 4,		/* disable writes */
	SF_RD_STATUS = 5,		/* read status register */
	SF_WR_ENABLE = 6,		/* enable writes */
	SF_RD_DATA_FAST = 0xb,		/* read flash */
	SF_ERASE_SECTOR = 0xd8,		/* erase sector */

	FW_FLASH_BOOT_ADDR = 0x70000,	/* start address of FW in flash */
	FW_VERS_ADDR = 0x7fffc,		/* flash address holding FW version */
	FW_MIN_SIZE = 8			/* at least version and csum */
};
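
/*
 * Flash addresses map to sector numbers by dividing by SF_SEC_SIZE, so
 * with 64 KB sectors the firmware at FW_FLASH_BOOT_ADDR (0x70000) lives
 * in sector 0x70000 >> 16 == 7; t3_load_fw() below relies on exactly
 * this shift when erasing the firmware sector.
 */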

/**
 * sf1_read - read data from the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to read
 * @cont: whether another operation will be chained
 * @valp: where to store the read data
 *
 * Reads up to 4 bytes of data from the serial flash. The location of
 * the read needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
		    u32 *valp)
{
	int ret;

	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_OP, V_CONT(cont) | V_BYTECNT(byte_cnt - 1));
	ret = t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
	if (!ret)
		*valp = t3_read_reg(adapter, A_SF_DATA);
	return ret;
}

/**
 * sf1_write - write data to the serial flash
 * @adapter: the adapter
 * @byte_cnt: number of bytes to write
 * @cont: whether another operation will be chained
 * @val: value to write
 *
 * Writes up to 4 bytes of data to the serial flash. The location of
 * the write needs to be specified prior to calling this by issuing the
 * appropriate commands to the serial flash.
 */
static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
		     u32 val)
{
	if (!byte_cnt || byte_cnt > 4)
		return -EINVAL;
	if (t3_read_reg(adapter, A_SF_OP) & F_BUSY)
		return -EBUSY;
	t3_write_reg(adapter, A_SF_DATA, val);
	t3_write_reg(adapter, A_SF_OP,
		     V_CONT(cont) | V_BYTECNT(byte_cnt - 1) | V_OP(1));
	return t3_wait_op_done(adapter, A_SF_OP, F_BUSY, 0, SF_ATTEMPTS, 10);
}

/**
 * flash_wait_op - wait for a flash operation to complete
 * @adapter: the adapter
 * @attempts: max number of polls of the status register
 * @delay: delay between polls in ms
 *
 * Wait for a flash operation to complete by polling the status register.
 */
static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
{
	int ret;
	u32 status;

	while (1) {
		if ((ret = sf1_write(adapter, 1, 1, SF_RD_STATUS)) != 0 ||
		    (ret = sf1_read(adapter, 1, 0, &status)) != 0)
			return ret;
		if (!(status & 1))
			return 0;
		if (--attempts == 0)
			return -EAGAIN;
		if (delay)
			msleep(delay);
	}
}

/**
 * t3_read_flash - read words from serial flash
 * @adapter: the adapter
 * @addr: the start address for the read
 * @nwords: how many 32-bit words to read
 * @data: where to store the read data
 * @byte_oriented: whether to store data as bytes or as words
 *
 * Read the specified number of 32-bit words from the serial flash.
 * If @byte_oriented is set the read data is stored as a byte array
 * (i.e., big-endian), otherwise as 32-bit words in the platform's
 * natural endianness.
 */
int t3_read_flash(struct adapter *adapter, unsigned int addr,
		  unsigned int nwords, u32 *data, int byte_oriented)
{
	int ret;

	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
		return -EINVAL;

	addr = swab32(addr) | SF_RD_DATA_FAST;

	if ((ret = sf1_write(adapter, 4, 1, addr)) != 0 ||
	    (ret = sf1_read(adapter, 1, 1, data)) != 0)
		return ret;

	for (; nwords; nwords--, data++) {
		ret = sf1_read(adapter, 4, nwords > 1, data);
		if (ret)
			return ret;
		if (byte_oriented)
			*data = htonl(*data);
	}
	return 0;
}

/**
 * t3_write_flash - write up to a page of data to the serial flash
 * @adapter: the adapter
 * @addr: the start address to write
 * @n: length of data to write
 * @data: the data to write
 *
 * Writes up to a page of data (256 bytes) to the serial flash starting
 * at the given address.
 */
static int t3_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];
	unsigned int i, c, left, val, offset = addr & 0xff;

	if (addr + n > SF_SIZE || offset + n > 256)
		return -EINVAL;

	val = swab32(addr) | SF_PROG_PAGE;

	if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, val)) != 0)
		return ret;

	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, val);
		if (ret)
			return ret;
	}
	if ((ret = flash_wait_op(adapter, 5, 1)) != 0)
		return ret;

	/* Read the page to verify the write succeeded */
	ret = t3_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	if (memcmp(data - n, (u8 *)buf + offset, n))
		return -EIO;
	return 0;
}

/**
 * t3_get_tp_version - read the TP SRAM version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the protocol SRAM version word from SRAM.
 */
int t3_get_tp_version(struct adapter *adapter, u32 *vers)
{
	int ret;

	/* Get version loaded in SRAM */
	t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
	ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
			      1, 1, 5, 1);
	if (ret)
		return ret;

	*vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);

	return 0;
}

/**
 * t3_check_tpsram_version - check the TP SRAM version
 * @adapter: the adapter
 *
 * Checks that the protocol SRAM version read from the adapter matches
 * the version the driver was compiled for.
 */
int t3_check_tpsram_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int major, minor;

	if (adapter->params.rev == T3_REV_A)
		return 0;

	ret = t3_get_tp_version(adapter, &vers);
	if (ret)
		return ret;

	major = G_TP_VERSION_MAJOR(vers);
	minor = G_TP_VERSION_MINOR(vers);

	if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
		return 0;
	else {
		CH_ERR(adapter, "found wrong TP version (%u.%u), "
		       "driver compiled for version %d.%d\n", major, minor,
		       TP_VERSION_MAJOR, TP_VERSION_MINOR);
	}
	return -EINVAL;
}

/**
 * t3_check_tpsram - check if the provided protocol SRAM is compatible with this driver
 * @adapter: the adapter
 * @tp_sram: the firmware image to write
 * @size: image size
 *
 * Checks if an adapter's TP SRAM image is compatible with the driver.
 * Returns 0 if the versions are compatible, a negative error otherwise.
 */
int t3_check_tpsram(struct adapter *adapter, const u8 *tp_sram,
		    unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)tp_sram;

	/* Verify checksum */
	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	return 0;
}
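
/*
 * Both this check and the firmware check in t3_load_fw() below expect
 * images whose 32-bit big-endian words sum (with unsigned wraparound)
 * to 0xffffffff. A host-side sketch of how such an image can be sealed
 * (hypothetical tool code, not part of the driver): sum every word but
 * the last and store the complement in the final word:
 *
 *	u32 i, sum = 0;
 *	for (i = 0; i < nwords - 1; i++)
 *		sum += ntohl(words[i]);
 *	words[nwords - 1] = htonl(0xffffffff - sum);
 */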

enum fw_version_type {
	FW_VERSION_N3,
	FW_VERSION_T3
};

/**
 * t3_get_fw_version - read the firmware version
 * @adapter: the adapter
 * @vers: where to place the version
 *
 * Reads the FW version from flash.
 */
int t3_get_fw_version(struct adapter *adapter, u32 *vers)
{
	return t3_read_flash(adapter, FW_VERS_ADDR, 1, vers, 0);
}

/**
 * t3_check_fw_version - check if the FW is compatible with this driver
 * @adapter: the adapter
 *
 * Checks if an adapter's FW is compatible with the driver. Returns 0
 * if the versions are compatible, a negative error otherwise.
 */
int t3_check_fw_version(struct adapter *adapter)
{
	int ret;
	u32 vers;
	unsigned int type, major, minor;

	ret = t3_get_fw_version(adapter, &vers);
	if (ret)
		return ret;

	type = G_FW_VERSION_TYPE(vers);
	major = G_FW_VERSION_MAJOR(vers);
	minor = G_FW_VERSION_MINOR(vers);

	if (type == FW_VERSION_T3 && major == FW_VERSION_MAJOR &&
	    minor == FW_VERSION_MINOR)
		return 0;
	else if (major != FW_VERSION_MAJOR || minor < FW_VERSION_MINOR)
		CH_WARN(adapter, "found old FW minor version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
	else {
		CH_WARN(adapter, "found newer FW version (%u.%u), "
			"driver compiled for version %u.%u\n", major, minor,
			FW_VERSION_MAJOR, FW_VERSION_MINOR);
		return 0;
	}
	return -EINVAL;
}

/**
 * t3_flash_erase_sectors - erase a range of flash sectors
 * @adapter: the adapter
 * @start: the first sector to erase
 * @end: the last sector to erase
 *
 * Erases the sectors in the given range.
 */
static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
{
	while (start <= end) {
		int ret;

		if ((ret = sf1_write(adapter, 1, 0, SF_WR_ENABLE)) != 0 ||
		    (ret = sf1_write(adapter, 4, 0,
				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
		    (ret = flash_wait_op(adapter, 5, 500)) != 0)
			return ret;
		start++;
	}
	return 0;
}

/**
 * t3_load_fw - download firmware
 * @adapter: the adapter
 * @fw_data: the firmware image to write
 * @size: image size
 *
 * Write the supplied firmware image to the card's serial flash.
 * The FW image has the following sections: @size - 8 bytes of code and
 * data, followed by 4 bytes of FW version, followed by the 32-bit
 * 1's complement checksum of the whole image.
 */
int t3_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size)
{
	u32 csum;
	unsigned int i;
	const __be32 *p = (const __be32 *)fw_data;
	int ret, addr, fw_sector = FW_FLASH_BOOT_ADDR >> 16;

	if ((size & 3) || size < FW_MIN_SIZE)
		return -EINVAL;
	if (size > FW_VERS_ADDR + 8 - FW_FLASH_BOOT_ADDR)
		return -EFBIG;

	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	if (csum != 0xffffffff) {
		CH_ERR(adapter, "corrupted firmware image, checksum %u\n",
		       csum);
		return -EINVAL;
	}

	ret = t3_flash_erase_sectors(adapter, fw_sector, fw_sector);
	if (ret)
		goto out;

	size -= 8;		/* trim off version and checksum */
	for (addr = FW_FLASH_BOOT_ADDR; size;) {
		unsigned int chunk_size = min(size, 256U);

		ret = t3_write_flash(adapter, addr, chunk_size, fw_data);
		if (ret)
			goto out;

		addr += chunk_size;
		fw_data += chunk_size;
		size -= chunk_size;
	}

	ret = t3_write_flash(adapter, FW_VERS_ADDR, 4, fw_data);
out:
	if (ret)
		CH_ERR(adapter, "firmware download failed, error %d\n", ret);
	return ret;
}

#define CIM_CTL_BASE 0x2000

/**
 * t3_cim_ctl_blk_read - read a block from CIM control region
 * @adap: the adapter
 * @addr: the start address within the CIM control region
 * @n: number of words to read
 * @valp: where to store the result
 *
 * Reads a block of 4-byte words from the CIM control region.
 */
int t3_cim_ctl_blk_read(struct adapter *adap, unsigned int addr,
			unsigned int n, unsigned int *valp)
{
	int ret = 0;

	if (t3_read_reg(adap, A_CIM_HOST_ACC_CTRL) & F_HOSTBUSY)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		t3_write_reg(adap, A_CIM_HOST_ACC_CTRL, CIM_CTL_BASE + addr);
		ret = t3_wait_op_done(adap, A_CIM_HOST_ACC_CTRL, F_HOSTBUSY,
				      0, 5, 2);
		if (!ret)
			*valp++ = t3_read_reg(adap, A_CIM_HOST_ACC_DATA);
	}
	return ret;
}

static void t3_gate_rx_traffic(struct cmac *mac, u32 *rx_cfg,
			       u32 *rx_hash_high, u32 *rx_hash_low)
{
	/* stop Rx unicast traffic */
	t3_mac_disable_exact_filters(mac);

	/* stop broadcast, multicast, promiscuous mode traffic */
	*rx_cfg = t3_read_reg(mac->adapter, A_XGM_RX_CFG);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 F_DISBCAST);

	*rx_hash_high = t3_read_reg(mac->adapter, A_XGM_RX_HASH_HIGH);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, 0);

	*rx_hash_low = t3_read_reg(mac->adapter, A_XGM_RX_HASH_LOW);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, 0);

	/* Leave time to drain max RX fifo */
	msleep(1);
}

static void t3_open_rx_traffic(struct cmac *mac, u32 rx_cfg,
			       u32 rx_hash_high, u32 rx_hash_low)
{
	t3_mac_enable_exact_filters(mac);
	t3_set_reg_field(mac->adapter, A_XGM_RX_CFG,
			 F_ENHASHMCAST | F_DISBCAST | F_COPYALLFRAMES,
			 rx_cfg);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_HIGH, rx_hash_high);
	t3_write_reg(mac->adapter, A_XGM_RX_HASH_LOW, rx_hash_low);
}

/**
 * t3_link_changed - handle interface link changes
 * @adapter: the adapter
 * @port_id: the port index that changed link state
 *
 * Called when a port's link settings change to propagate the new values
 * to the associated PHY and MAC. After performing the common tasks it
 * invokes an OS-specific handler.
 */
void t3_link_changed(struct adapter *adapter, int port_id)
{
	int link_ok, speed, duplex, fc;
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cphy *phy = &pi->phy;
	struct cmac *mac = &pi->mac;
	struct link_config *lc = &pi->link_config;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (!lc->link_ok && link_ok) {
		u32 rx_cfg, rx_hash_high, rx_hash_low;
		u32 status;

		t3_xgm_intr_enable(adapter, port_id);
		t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);
		t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
		t3_mac_enable(mac, MAC_DIRECTION_RX);

		status = t3_read_reg(adapter, A_XGM_INT_STATUS + mac->offset);
		if (status & F_LINKFAULTCHANGE) {
			mac->stats.link_faults++;
			pi->link_fault = 1;
		}
		t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);
	}

	if (lc->requested_fc & PAUSE_AUTONEG)
		fc &= lc->requested_fc;
	else
		fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	if (link_ok == lc->link_ok && speed == lc->speed &&
	    duplex == lc->duplex && fc == lc->fc)
		return;	/* nothing changed */

	if (link_ok != lc->link_ok && adapter->params.rev > 0 &&
	    uses_xaui(adapter)) {
		if (link_ok)
			t3b_pcs_reset(mac);
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
			     link_ok ? F_TXACTENABLE | F_RXEN : 0);
	}
	lc->link_ok = link_ok;
	lc->speed = speed < 0 ? SPEED_INVALID : speed;
	lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;

	if (link_ok && speed >= 0 && lc->autoneg == AUTONEG_ENABLE) {
		/* Set MAC speed, duplex, and flow control to match PHY. */
		t3_mac_set_speed_duplex_fc(mac, speed, duplex, fc);
		lc->fc = fc;
	}

	t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
			   speed, duplex, fc);
}

void t3_link_fault(struct adapter *adapter, int port_id)
{
	struct port_info *pi = adap2pinfo(adapter, port_id);
	struct cmac *mac = &pi->mac;
	struct cphy *phy = &pi->phy;
	struct link_config *lc = &pi->link_config;
	int link_ok, speed, duplex, fc, link_fault;
	u32 rx_cfg, rx_hash_high, rx_hash_low;

	t3_gate_rx_traffic(mac, &rx_cfg, &rx_hash_high, &rx_hash_low);

	if (adapter->params.rev > 0 && uses_xaui(adapter))
		t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset, 0);

	t3_write_reg(adapter, A_XGM_RX_CTRL + mac->offset, 0);
	t3_mac_enable(mac, MAC_DIRECTION_RX);

	t3_open_rx_traffic(mac, rx_cfg, rx_hash_high, rx_hash_low);

	link_fault = t3_read_reg(adapter,
				 A_XGM_INT_STATUS + mac->offset);
	link_fault &= F_LINKFAULTCHANGE;

	link_ok = lc->link_ok;
	speed = lc->speed;
	duplex = lc->duplex;
	fc = lc->fc;

	phy->ops->get_link_status(phy, &link_ok, &speed, &duplex, &fc);

	if (link_fault) {
		lc->link_ok = 0;
		lc->speed = SPEED_INVALID;
		lc->duplex = DUPLEX_INVALID;

		t3_os_link_fault(adapter, port_id, 0);

		/* Account link faults only when the phy reports a link up */
		if (link_ok)
			mac->stats.link_faults++;
	} else {
		if (link_ok)
			t3_write_reg(adapter, A_XGM_XAUI_ACT_CTRL + mac->offset,
				     F_TXACTENABLE | F_RXEN);

		pi->link_fault = 0;
		lc->link_ok = (unsigned char)link_ok;
		lc->speed = speed < 0 ? SPEED_INVALID : speed;
		lc->duplex = duplex < 0 ? DUPLEX_INVALID : duplex;
		t3_os_link_fault(adapter, port_id, link_ok);
	}
}

/**
 * t3_link_start - apply link configuration to MAC/PHY
 * @phy: the PHY to setup
 * @mac: the MAC to setup
 * @lc: the requested link configuration
 *
 * Set up a port's MAC and PHY according to a desired link configuration.
 * - If the PHY can auto-negotiate first decide what to advertise, then
 *   enable/disable auto-negotiation as desired, and reset.
 * - If the PHY does not auto-negotiate just reset it.
 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
 *   otherwise do it later based on the outcome of auto-negotiation.
 */
int t3_link_start(struct cphy *phy, struct cmac *mac, struct link_config *lc)
{
	unsigned int fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);

	lc->link_ok = 0;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising &= ~(ADVERTISED_Asym_Pause | ADVERTISED_Pause);
		if (fc) {
			lc->advertising |= ADVERTISED_Asym_Pause;
			if (fc & PAUSE_RX)
				lc->advertising |= ADVERTISED_Pause;
		}
		phy->ops->advertise(phy, lc->advertising);

		if (lc->autoneg == AUTONEG_DISABLE) {
			lc->speed = lc->requested_speed;
			lc->duplex = lc->requested_duplex;
			lc->fc = (unsigned char)fc;
			t3_mac_set_speed_duplex_fc(mac, lc->speed, lc->duplex,
						   fc);
			/* Also disables autoneg */
			phy->ops->set_speed_duplex(phy, lc->speed, lc->duplex);
		} else
			phy->ops->autoneg_enable(phy);
	} else {
		t3_mac_set_speed_duplex_fc(mac, -1, -1, fc);
		lc->fc = (unsigned char)fc;
		phy->ops->reset(phy, 0);
	}
	return 0;
}

/**
 * t3_set_vlan_accel - control HW VLAN extraction
 * @adapter: the adapter
 * @ports: bitmap of adapter ports to operate on
 * @on: enable (1) or disable (0) HW VLAN extraction
 *
 * Enables or disables HW extraction of VLAN tags for the given ports.
 */
void t3_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on)
{
	t3_set_reg_field(adapter, A_TP_OUT_CONFIG,
			 ports << S_VLANEXTRACTIONENABLE,
			 on ? (ports << S_VLANEXTRACTIONENABLE) : 0);
}

struct intr_info {
	unsigned int mask;	/* bits to check in interrupt status */
	const char *msg;	/* message to print or NULL */
	short stat_idx;		/* stat counter to increment or -1 */
	unsigned short fatal;	/* whether the condition reported is fatal */
};

/**
 * t3_handle_intr_status - table driven interrupt handler
 * @adapter: the adapter that generated the interrupt
 * @reg: the interrupt status register to process
 * @mask: a mask to apply to the interrupt status
 * @acts: table of interrupt actions
 * @stats: statistics counters tracking interrupt occurrences
 *
 * A table driven interrupt handler that applies a set of masks to an
 * interrupt status word and performs the corresponding actions if the
 * interrupts described by the mask have occurred. The actions include
 * optionally printing a warning or alert message, and optionally
 * incrementing a stat counter. The table is terminated by an entry
 * specifying mask 0. Returns the number of fatal interrupt conditions.
 */
static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
				 unsigned int mask,
				 const struct intr_info *acts,
				 unsigned long *stats)
{
	int fatal = 0;
	unsigned int status = t3_read_reg(adapter, reg) & mask;

	for (; acts->mask; ++acts) {
		if (!(status & acts->mask))
			continue;
		if (acts->fatal) {
			fatal++;
			CH_ALERT(adapter, "%s (0x%x)\n",
				 acts->msg, status & acts->mask);
		} else if (acts->msg)
			CH_WARN(adapter, "%s (0x%x)\n",
				acts->msg, status & acts->mask);
		if (acts->stat_idx >= 0)
			stats[acts->stat_idx]++;
	}
	if (status)	/* clear processed interrupts */
		t3_write_reg(adapter, reg, status);
	return fatal;
}

#define SGE_INTR_MASK (F_RSPQDISABLED | \
		       F_UC_REQ_FRAMINGERROR | F_R_REQ_FRAMINGERROR | \
		       F_CPPARITYERROR | F_OCPARITYERROR | F_RCPARITYERROR | \
		       F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
		       V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
		       F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
		       F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
		       F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
		       F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
		       F_LOPIODRBDROPERR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
		       F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
		       F_NFASRCHFAIL)
#define MC7_INTR_MASK (F_AE | F_UE | F_CE | V_PE(M_PE))
#define XGM_INTR_MASK (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
		       V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR) | \
		       F_TXFIFO_UNDERRUN)
#define PCIX_INTR_MASK (F_MSTDETPARERR | F_SIGTARABT | F_RCVTARABT | \
			F_RCVMSTABT | F_SIGSYSERR | F_DETPARERR | \
			F_SPLCMPDIS | F_UNXSPLCMP | F_RCVSPLCMPERR | \
			F_DETCORECCERR | F_DETUNCECCERR | F_PIOPARERR | \
			V_WFPARERR(M_WFPARERR) | V_RFPARERR(M_RFPARERR) | \
			V_CFPARERR(M_CFPARERR) /* | V_MSIXPARERR(M_MSIXPARERR) */)
#define PCIE_INTR_MASK (F_UNXSPLCPLERRR | F_UNXSPLCPLERRC | F_PCIE_PIOPARERR | \
			F_PCIE_WFPARERR | F_PCIE_RFPARERR | F_PCIE_CFPARERR | \
			/* V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR) | */ \
			F_RETRYBUFPARERR | F_RETRYLUTPARERR | F_RXPARERR | \
			F_TXPARERR | V_BISTERR(M_BISTERR))
#define ULPRX_INTR_MASK (F_PARERRDATA | F_PARERRPCMD | F_ARBPF1PERR | \
			 F_ARBPF0PERR | F_ARBFPERR | F_PCMDMUXPERR | \
			 F_DATASELFRAMEERR1 | F_DATASELFRAMEERR0)
#define ULPTX_INTR_MASK 0xfc
#define CPLSW_INTR_MASK (F_CIM_OP_MAP_PERR | F_TP_FRAMING_ERROR | \
			 F_SGE_FRAMING_ERROR | F_CIM_FRAMING_ERROR | \
			 F_ZERO_SWITCH_ERROR)
#define CIM_INTR_MASK (F_BLKWRPLINT | F_BLKRDPLINT | F_BLKWRCTLINT | \
		       F_BLKRDCTLINT | F_BLKWRFLASHINT | F_BLKRDFLASHINT | \
		       F_SGLWRFLASHINT | F_WRBLKFLASHINT | F_BLKWRBOOTINT | \
		       F_FLASHRANGEINT | F_SDRAMRANGEINT | F_RSVDSPACEINT | \
		       F_DRAMPARERR | F_ICACHEPARERR | F_DCACHEPARERR | \
		       F_OBQSGEPARERR | F_OBQULPHIPARERR | F_OBQULPLOPARERR | \
		       F_IBQSGELOPARERR | F_IBQSGEHIPARERR | F_IBQULPPARERR | \
		       F_IBQTPPARERR | F_ITAGPARERR | F_DTAGPARERR)
#define PMTX_INTR_MASK (F_ZERO_C_CMD_ERROR | ICSPI_FRM_ERR | OESPI_FRM_ERR | \
			V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR) | \
			V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR))
#define PMRX_INTR_MASK (F_ZERO_E_CMD_ERROR | IESPI_FRM_ERR | OCSPI_FRM_ERR | \
			V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR) | \
			V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR))
#define MPS_INTR_MASK (V_TX0TPPARERRENB(M_TX0TPPARERRENB) | \
		       V_TX1TPPARERRENB(M_TX1TPPARERRENB) | \
		       V_RXTPPARERRENB(M_RXTPPARERRENB) | \
		       V_MCAPARERRENB(M_MCAPARERRENB))
#define XGM_EXTRA_INTR_MASK (F_LINKFAULTCHANGE)
#define PL_INTR_MASK (F_T3DBG | F_XGMAC0_0 | F_XGMAC0_1 | F_MC5A | F_PM1_TX | \
		      F_PM1_RX | F_ULP2_TX | F_ULP2_RX | F_TP1 | F_CIM | \
		      F_MC7_CM | F_MC7_PMTX | F_MC7_PMRX | F_SGE3 | F_PCIM0 | \
		      F_MPS0 | F_CPL_SWITCH)
/*
 * Interrupt handler for the PCIX1 module.
 */
static void pci_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcix1_intr_info[] = {
		{F_MSTDETPARERR, "PCI master detected parity error", -1, 1},
		{F_SIGTARABT, "PCI signaled target abort", -1, 1},
		{F_RCVTARABT, "PCI received target abort", -1, 1},
		{F_RCVMSTABT, "PCI received master abort", -1, 1},
		{F_SIGSYSERR, "PCI signaled system error", -1, 1},
		{F_DETPARERR, "PCI detected parity error", -1, 1},
		{F_SPLCMPDIS, "PCI split completion discarded", -1, 1},
		{F_UNXSPLCMP, "PCI unexpected split completion error", -1, 1},
		{F_RCVSPLCMPERR, "PCI received split completion error", -1, 1},
		{F_DETCORECCERR, "PCI correctable ECC error",
		 STAT_PCI_CORR_ECC, 0},
		{F_DETUNCECCERR, "PCI uncorrectable ECC error", -1, 1},
		{F_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{V_WFPARERR(M_WFPARERR), "PCI write FIFO parity error", -1, 1},
		{V_RFPARERR(M_RFPARERR), "PCI read FIFO parity error", -1, 1},
		{V_CFPARERR(M_CFPARERR), "PCI command FIFO parity error",
		 -1, 1},
		{V_MSIXPARERR(M_MSIXPARERR), "PCI MSI-X table/PBA parity "
		 "error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PCIX_INT_CAUSE, PCIX_INTR_MASK,
				  pcix1_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * Interrupt handler for the PCIE module.
 */
static void pcie_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pcie_intr_info[] = {
		{F_PEXERR, "PCI PEX error", -1, 1},
		{F_UNXSPLCPLERRR,
		 "PCI unexpected split completion DMA read error", -1, 1},
		{F_UNXSPLCPLERRC,
		 "PCI unexpected split completion DMA command error", -1, 1},
		{F_PCIE_PIOPARERR, "PCI PIO FIFO parity error", -1, 1},
		{F_PCIE_WFPARERR, "PCI write FIFO parity error", -1, 1},
		{F_PCIE_RFPARERR, "PCI read FIFO parity error", -1, 1},
		{F_PCIE_CFPARERR, "PCI command FIFO parity error", -1, 1},
		{V_PCIE_MSIXPARERR(M_PCIE_MSIXPARERR),
		 "PCI MSI-X table/PBA parity error", -1, 1},
		{F_RETRYBUFPARERR, "PCI retry buffer parity error", -1, 1},
		{F_RETRYLUTPARERR, "PCI retry LUT parity error", -1, 1},
		{F_RXPARERR, "PCI Rx parity error", -1, 1},
		{F_TXPARERR, "PCI Tx parity error", -1, 1},
		{V_BISTERR(M_BISTERR), "PCI BIST error", -1, 1},
		{0}
	};

	if (t3_read_reg(adapter, A_PCIE_INT_CAUSE) & F_PEXERR)
		CH_ALERT(adapter, "PEX error code 0x%x\n",
			 t3_read_reg(adapter, A_PCIE_PEX_ERR));

	if (t3_handle_intr_status(adapter, A_PCIE_INT_CAUSE, PCIE_INTR_MASK,
				  pcie_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

/*
 * TP interrupt handler.
 */
static void tp_intr_handler(struct adapter *adapter)
{
	static const struct intr_info tp_intr_info[] = {
		{0xffffff, "TP parity error", -1, 1},
		{0x1000000, "TP out of Rx pages", -1, 1},
		{0x2000000, "TP out of Tx pages", -1, 1},
		{0}
	};

	static const struct intr_info tp_intr_info_t3c[] = {
		{0x1fffffff, "TP parity error", -1, 1},
		{F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
		{F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_TP_INT_CAUSE, 0xffffffff,
				  adapter->params.rev < T3_REV_C ?
				  tp_intr_info : tp_intr_info_t3c, NULL))
		t3_fatal_err(adapter);
}

/*
 * CIM interrupt handler.
 */
static void cim_intr_handler(struct adapter *adapter)
{
	static const struct intr_info cim_intr_info[] = {
		{F_RSVDSPACEINT, "CIM reserved space write", -1, 1},
		{F_SDRAMRANGEINT, "CIM SDRAM address out of range", -1, 1},
		{F_FLASHRANGEINT, "CIM flash address out of range", -1, 1},
		{F_BLKWRBOOTINT, "CIM block write to boot space", -1, 1},
		{F_WRBLKFLASHINT, "CIM write to cached flash space", -1, 1},
		{F_SGLWRFLASHINT, "CIM single write to flash space", -1, 1},
		{F_BLKRDFLASHINT, "CIM block read from flash space", -1, 1},
		{F_BLKWRFLASHINT, "CIM block write to flash space", -1, 1},
		{F_BLKRDCTLINT, "CIM block read from CTL space", -1, 1},
		{F_BLKWRCTLINT, "CIM block write to CTL space", -1, 1},
		{F_BLKRDPLINT, "CIM block read from PL space", -1, 1},
		{F_BLKWRPLINT, "CIM block write to PL space", -1, 1},
		{F_DRAMPARERR, "CIM DRAM parity error", -1, 1},
		{F_ICACHEPARERR, "CIM icache parity error", -1, 1},
		{F_DCACHEPARERR, "CIM dcache parity error", -1, 1},
		{F_OBQSGEPARERR, "CIM OBQ SGE parity error", -1, 1},
		{F_OBQULPHIPARERR, "CIM OBQ ULPHI parity error", -1, 1},
		{F_OBQULPLOPARERR, "CIM OBQ ULPLO parity error", -1, 1},
		{F_IBQSGELOPARERR, "CIM IBQ SGELO parity error", -1, 1},
		{F_IBQSGEHIPARERR, "CIM IBQ SGEHI parity error", -1, 1},
		{F_IBQULPPARERR, "CIM IBQ ULP parity error", -1, 1},
		{F_IBQTPPARERR, "CIM IBQ TP parity error", -1, 1},
		{F_ITAGPARERR, "CIM itag parity error", -1, 1},
		{F_DTAGPARERR, "CIM dtag parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_CIM_HOST_INT_CAUSE, 0xffffffff,
				  cim_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP RX interrupt handler.
 */
static void ulprx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulprx_intr_info[] = {
		{F_PARERRDATA, "ULP RX data parity error", -1, 1},
		{F_PARERRPCMD, "ULP RX command parity error", -1, 1},
		{F_ARBPF1PERR, "ULP RX ArbPF1 parity error", -1, 1},
		{F_ARBPF0PERR, "ULP RX ArbPF0 parity error", -1, 1},
		{F_ARBFPERR, "ULP RX ArbF parity error", -1, 1},
		{F_PCMDMUXPERR, "ULP RX PCMDMUX parity error", -1, 1},
		{F_DATASELFRAMEERR1, "ULP RX frame error", -1, 1},
		{F_DATASELFRAMEERR0, "ULP RX frame error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPRX_INT_CAUSE, 0xffffffff,
				  ulprx_intr_info, NULL))
		t3_fatal_err(adapter);
}

/*
 * ULP TX interrupt handler.
 */
static void ulptx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info ulptx_intr_info[] = {
		{F_PBL_BOUND_ERR_CH0, "ULP TX channel 0 PBL out of bounds",
		 STAT_ULP_CH0_PBL_OOB, 0},
		{F_PBL_BOUND_ERR_CH1, "ULP TX channel 1 PBL out of bounds",
		 STAT_ULP_CH1_PBL_OOB, 0},
		{0xfc, "ULP TX parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_ULPTX_INT_CAUSE, 0xffffffff,
				  ulptx_intr_info, adapter->irq_stats))
		t3_fatal_err(adapter);
}

#define ICSPI_FRM_ERR (F_ICSPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_ICSPI1_FIFO2X_RX_FRAMING_ERROR | F_ICSPI0_RX_FRAMING_ERROR | \
	F_ICSPI1_RX_FRAMING_ERROR | F_ICSPI0_TX_FRAMING_ERROR | \
	F_ICSPI1_TX_FRAMING_ERROR)
#define OESPI_FRM_ERR (F_OESPI0_RX_FRAMING_ERROR | \
	F_OESPI1_RX_FRAMING_ERROR | F_OESPI0_TX_FRAMING_ERROR | \
	F_OESPI1_TX_FRAMING_ERROR | F_OESPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OESPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM TX interrupt handler.
 */
static void pmtx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmtx_intr_info[] = {
		{F_ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1},
		{ICSPI_FRM_ERR, "PMTX ispi framing error", -1, 1},
		{OESPI_FRM_ERR, "PMTX ospi framing error", -1, 1},
		{V_ICSPI_PAR_ERROR(M_ICSPI_PAR_ERROR),
		 "PMTX ispi parity error", -1, 1},
		{V_OESPI_PAR_ERROR(M_OESPI_PAR_ERROR),
		 "PMTX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_TX_INT_CAUSE, 0xffffffff,
				  pmtx_intr_info, NULL))
		t3_fatal_err(adapter);
}

#define IESPI_FRM_ERR (F_IESPI0_FIFO2X_RX_FRAMING_ERROR | \
	F_IESPI1_FIFO2X_RX_FRAMING_ERROR | F_IESPI0_RX_FRAMING_ERROR | \
	F_IESPI1_RX_FRAMING_ERROR | F_IESPI0_TX_FRAMING_ERROR | \
	F_IESPI1_TX_FRAMING_ERROR)
#define OCSPI_FRM_ERR (F_OCSPI0_RX_FRAMING_ERROR | \
	F_OCSPI1_RX_FRAMING_ERROR | F_OCSPI0_TX_FRAMING_ERROR | \
	F_OCSPI1_TX_FRAMING_ERROR | F_OCSPI0_OFIFO2X_TX_FRAMING_ERROR | \
	F_OCSPI1_OFIFO2X_TX_FRAMING_ERROR)

/*
 * PM RX interrupt handler.
 */
static void pmrx_intr_handler(struct adapter *adapter)
{
	static const struct intr_info pmrx_intr_info[] = {
		{F_ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1},
		{IESPI_FRM_ERR, "PMRX ispi framing error", -1, 1},
		{OCSPI_FRM_ERR, "PMRX ospi framing error", -1, 1},
		{V_IESPI_PAR_ERROR(M_IESPI_PAR_ERROR),
		 "PMRX ispi parity error", -1, 1},
		{V_OCSPI_PAR_ERROR(M_OCSPI_PAR_ERROR),
		 "PMRX ospi parity error", -1, 1},
		{0}
	};

	if (t3_handle_intr_status(adapter, A_PM1_RX_INT_CAUSE, 0xffffffff,
				  pmrx_intr_info, NULL))
		t3_fatal_err(adapter);
}
1721
1722/*
1723 * CPL switch interrupt handler.
1724 */
1725static void cplsw_intr_handler(struct adapter *adapter)
1726{
1727 static const struct intr_info cplsw_intr_info[] = {
1728 {F_CIM_OP_MAP_PERR, "CPL switch CIM parity error", -1, 1},
1729 {F_CIM_OVFL_ERROR, "CPL switch CIM overflow", -1, 1},
1730 {F_TP_FRAMING_ERROR, "CPL switch TP framing error", -1, 1},
1731 {F_SGE_FRAMING_ERROR, "CPL switch SGE framing error", -1, 1},
1732 {F_CIM_FRAMING_ERROR, "CPL switch CIM framing error", -1, 1},
1733 {F_ZERO_SWITCH_ERROR, "CPL switch no-switch error", -1, 1},
1734 {0}
1735 };
1736
1737 if (t3_handle_intr_status(adapter, A_CPL_INTR_CAUSE, 0xffffffff,
1738 cplsw_intr_info, NULL))
1739 t3_fatal_err(adapter);
1740}
1741
1742/*
1743 * MPS interrupt handler.
1744 */
1745static void mps_intr_handler(struct adapter *adapter)
1746{
1747 static const struct intr_info mps_intr_info[] = {
1748 {0x1ff, "MPS parity error", -1, 1},
1749 {0}
1750 };
1751
1752 if (t3_handle_intr_status(adapter, A_MPS_INT_CAUSE, 0xffffffff,
1753 mps_intr_info, NULL))
1754 t3_fatal_err(adapter);
1755}
1756
1757#define MC7_INTR_FATAL (F_UE | V_PE(M_PE) | F_AE)
1758
1759/*
1760 * MC7 interrupt handler.
1761 */
1762static void mc7_intr_handler(struct mc7 *mc7)
1763{
1764 struct adapter *adapter = mc7->adapter;
1765 u32 cause = t3_read_reg(adapter, mc7->offset + A_MC7_INT_CAUSE);
1766
1767 if (cause & F_CE) {
1768 mc7->stats.corr_err++;
1769 CH_WARN(adapter, "%s MC7 correctable error at addr 0x%x, "
1770 "data 0x%x 0x%x 0x%x\n", mc7->name,
1771 t3_read_reg(adapter, mc7->offset + A_MC7_CE_ADDR),
1772 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA0),
1773 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA1),
1774 t3_read_reg(adapter, mc7->offset + A_MC7_CE_DATA2));
1775 }
1776
1777 if (cause & F_UE) {
1778 mc7->stats.uncorr_err++;
1779 CH_ALERT(adapter, "%s MC7 uncorrectable error at addr 0x%x, "
1780 "data 0x%x 0x%x 0x%x\n", mc7->name,
1781 t3_read_reg(adapter, mc7->offset + A_MC7_UE_ADDR),
1782 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA0),
1783 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA1),
1784 t3_read_reg(adapter, mc7->offset + A_MC7_UE_DATA2));
1785 }
1786
1787 if (G_PE(cause)) {
1788 mc7->stats.parity_err++;
1789 CH_ALERT(adapter, "%s MC7 parity error 0x%x\n",
1790 mc7->name, G_PE(cause));
1791 }
1792
1793 if (cause & F_AE) {
1794 u32 addr = 0;
1795
1796 if (adapter->params.rev > 0)
1797 addr = t3_read_reg(adapter,
1798 mc7->offset + A_MC7_ERR_ADDR);
1799 mc7->stats.addr_err++;
1800 CH_ALERT(adapter, "%s MC7 address error: 0x%x\n",
1801 mc7->name, addr);
1802 }
1803
1804 if (cause & MC7_INTR_FATAL)
1805 t3_fatal_err(adapter);
1806
1807 t3_write_reg(adapter, mc7->offset + A_MC7_INT_CAUSE, cause);
1808}
1809
1810#define XGM_INTR_FATAL (V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR) | \
1811 V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR))
1812/*
1813 * XGMAC interrupt handler.
1814 */
1815static int mac_intr_handler(struct adapter *adap, unsigned int idx)
1816{
1817 struct cmac *mac = &adap2pinfo(adap, idx)->mac;
1818 /*
1819 * We mask out interrupt causes for which we're not taking interrupts.
1820 * This allows us to use polling logic to monitor some of the other
1821 * conditions when taking interrupts would impose too much load on the
1822 * system.
1823 */
1824 u32 cause = t3_read_reg(adap, A_XGM_INT_CAUSE + mac->offset) &
1825 ~F_RXFIFO_OVERFLOW;
1826
1827 if (cause & V_TXFIFO_PRTY_ERR(M_TXFIFO_PRTY_ERR)) {
1828 mac->stats.tx_fifo_parity_err++;
1829 CH_ALERT(adap, "port%d: MAC TX FIFO parity error\n", idx);
1830 }
1831 if (cause & V_RXFIFO_PRTY_ERR(M_RXFIFO_PRTY_ERR)) {
1832 mac->stats.rx_fifo_parity_err++;
1833 CH_ALERT(adap, "port%d: MAC RX FIFO parity error\n", idx);
1834 }
1835 if (cause & F_TXFIFO_UNDERRUN)
1836 mac->stats.tx_fifo_urun++;
1837 if (cause & F_RXFIFO_OVERFLOW)
1838 mac->stats.rx_fifo_ovfl++;
1839 if (cause & V_SERDES_LOS(M_SERDES_LOS))
1840 mac->stats.serdes_signal_loss++;
1841 if (cause & F_XAUIPCSCTCERR)
1842 mac->stats.xaui_pcs_ctc_err++;
1843 if (cause & F_XAUIPCSALIGNCHANGE)
1844 mac->stats.xaui_pcs_align_change++;
1845 if (cause & F_XGM_INT) {
1846 t3_set_reg_field(adap,
1847 A_XGM_INT_ENABLE + mac->offset,
1848 F_XGM_INT, 0);
1849 mac->stats.link_faults++;
1850
1851 t3_os_link_fault_handler(adap, idx);
1852 }
1853
1854 t3_write_reg(adap, A_XGM_INT_CAUSE + mac->offset, cause);
1855
1856 if (cause & XGM_INTR_FATAL)
1857 t3_fatal_err(adap);
1858
1859 return cause != 0;
1860}
1861
1862/*
1863 * Interrupt handler for PHY events.
1864 */
1865int t3_phy_intr_handler(struct adapter *adapter)
1866{
1867 u32 i, cause = t3_read_reg(adapter, A_T3DBG_INT_CAUSE);
1868
1869 for_each_port(adapter, i) {
1870 struct port_info *p = adap2pinfo(adapter, i);
1871
1872 if (!(p->phy.caps & SUPPORTED_IRQ))
1873 continue;
1874
1875 if (cause & (1 << adapter_info(adapter)->gpio_intr[i])) {
1876 int phy_cause = p->phy.ops->intr_handler(&p->phy);
1877
1878 if (phy_cause & cphy_cause_link_change)
1879 t3_link_changed(adapter, i);
1880 if (phy_cause & cphy_cause_fifo_error)
1881 p->phy.fifo_errors++;
1882 if (phy_cause & cphy_cause_module_change)
1883 t3_os_phymod_changed(adapter, i);
1884 }
1885 }
1886
1887 t3_write_reg(adapter, A_T3DBG_INT_CAUSE, cause);
1888 return 0;
1889}
1890
1891/*
1892 * T3 slow path (non-data) interrupt handler.
1893 */
1894int t3_slow_intr_handler(struct adapter *adapter)
1895{
1896 u32 cause = t3_read_reg(adapter, A_PL_INT_CAUSE0);
1897
1898 cause &= adapter->slow_intr_mask;
1899 if (!cause)
1900 return 0;
1901 if (cause & F_PCIM0) {
1902 if (is_pcie(adapter))
1903 pcie_intr_handler(adapter);
1904 else
1905 pci_intr_handler(adapter);
1906 }
1907 if (cause & F_SGE3)
1908 t3_sge_err_intr_handler(adapter);
1909 if (cause & F_MC7_PMRX)
1910 mc7_intr_handler(&adapter->pmrx);
1911 if (cause & F_MC7_PMTX)
1912 mc7_intr_handler(&adapter->pmtx);
1913 if (cause & F_MC7_CM)
1914 mc7_intr_handler(&adapter->cm);
1915 if (cause & F_CIM)
1916 cim_intr_handler(adapter);
1917 if (cause & F_TP1)
1918 tp_intr_handler(adapter);
1919 if (cause & F_ULP2_RX)
1920 ulprx_intr_handler(adapter);
1921 if (cause & F_ULP2_TX)
1922 ulptx_intr_handler(adapter);
1923 if (cause & F_PM1_RX)
1924 pmrx_intr_handler(adapter);
1925 if (cause & F_PM1_TX)
1926 pmtx_intr_handler(adapter);
1927 if (cause & F_CPL_SWITCH)
1928 cplsw_intr_handler(adapter);
1929 if (cause & F_MPS0)
1930 mps_intr_handler(adapter);
1931 if (cause & F_MC5A)
1932 t3_mc5_intr_handler(&adapter->mc5);
1933 if (cause & F_XGMAC0_0)
1934 mac_intr_handler(adapter, 0);
1935 if (cause & F_XGMAC0_1)
1936 mac_intr_handler(adapter, 1);
1937 if (cause & F_T3DBG)
1938 t3_os_ext_intr_handler(adapter);
1939
1940 /* Clear the interrupts just processed. */
1941 t3_write_reg(adapter, A_PL_INT_CAUSE0, cause);
1942 t3_read_reg(adapter, A_PL_INT_CAUSE0); /* flush */
1943 return 1;
1944}
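
/*
 * Illustrative sketch (not part of the driver): the slow-path handler is
 * meant to be called from interrupt context and reports whether it found
 * work, so a hypothetical ISR wrapper could look like:
 *
 *	static irqreturn_t my_isr(int irq, void *cookie)
 *	{
 *		struct adapter *adapter = cookie;
 *
 *		return t3_slow_intr_handler(adapter) ? IRQ_HANDLED : IRQ_NONE;
 *	}
 *
 * The driver's real ISRs (in sge.c) combine this with the data-path
 * interrupt processing.
 */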

static unsigned int calc_gpio_intr(struct adapter *adap)
{
	unsigned int i, gpi_intr = 0;

	for_each_port(adap, i)
		if ((adap2pinfo(adap, i)->phy.caps & SUPPORTED_IRQ) &&
		    adapter_info(adap)->gpio_intr[i])
			gpi_intr |= 1 << adapter_info(adap)->gpio_intr[i];
	return gpi_intr;
}

/**
 * t3_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable interrupts by setting the interrupt enable registers of the
 * various HW modules and then enabling the top-level interrupt
 * concentrator.
 */
void t3_intr_enable(struct adapter *adapter)
{
	static const struct addr_val_pair intr_en_avp[] = {
		{A_SG_INT_ENABLE, SGE_INTR_MASK},
		{A_MC7_INT_ENABLE, MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC7_INT_ENABLE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		 MC7_INTR_MASK},
		{A_MC5_DB_INT_ENABLE, MC5_INTR_MASK},
		{A_ULPRX_INT_ENABLE, ULPRX_INTR_MASK},
		{A_PM1_TX_INT_ENABLE, PMTX_INTR_MASK},
		{A_PM1_RX_INT_ENABLE, PMRX_INTR_MASK},
		{A_CIM_HOST_INT_ENABLE, CIM_INTR_MASK},
		{A_MPS_INT_ENABLE, MPS_INTR_MASK},
	};

	adapter->slow_intr_mask = PL_INTR_MASK;

	t3_write_regs(adapter, intr_en_avp, ARRAY_SIZE(intr_en_avp), 0);
	t3_write_reg(adapter, A_TP_INT_ENABLE,
		     adapter->params.rev >= T3_REV_C ? 0x2bfffff : 0x3bfffff);

	if (adapter->params.rev > 0) {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE,
			     CPLSW_INTR_MASK | F_CIM_OVFL_ERROR);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE,
			     ULPTX_INTR_MASK | F_PBL_BOUND_ERR_CH0 |
			     F_PBL_BOUND_ERR_CH1);
	} else {
		t3_write_reg(adapter, A_CPL_INTR_ENABLE, CPLSW_INTR_MASK);
		t3_write_reg(adapter, A_ULPTX_INT_ENABLE, ULPTX_INTR_MASK);
	}

	t3_write_reg(adapter, A_T3DBG_INT_ENABLE, calc_gpio_intr(adapter));

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_INT_ENABLE, PCIE_INTR_MASK);
	else
		t3_write_reg(adapter, A_PCIX_INT_ENABLE, PCIX_INTR_MASK);
	t3_write_reg(adapter, A_PL_INT_ENABLE0, adapter->slow_intr_mask);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
}

/**
 * t3_intr_disable - disable a card's interrupts
 * @adapter: the adapter whose interrupts should be disabled
 *
 * Disable interrupts.  We only disable the top-level interrupt
 * concentrator and the SGE data interrupts.
 */
void t3_intr_disable(struct adapter *adapter)
{
	t3_write_reg(adapter, A_PL_INT_ENABLE0, 0);
	t3_read_reg(adapter, A_PL_INT_ENABLE0);	/* flush */
	adapter->slow_intr_mask = 0;
}

/**
 * t3_intr_clear - clear all interrupts
 * @adapter: the adapter whose interrupts should be cleared
 *
 * Clears all interrupts.
 */
void t3_intr_clear(struct adapter *adapter)
{
	static const unsigned int cause_reg_addr[] = {
		A_SG_INT_CAUSE,
		A_SG_RSPQ_FL_STATUS,
		A_PCIX_INT_CAUSE,
		A_MC7_INT_CAUSE,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_PMTX_BASE_ADDR,
		A_MC7_INT_CAUSE - MC7_PMRX_BASE_ADDR + MC7_CM_BASE_ADDR,
		A_CIM_HOST_INT_CAUSE,
		A_TP_INT_CAUSE,
		A_MC5_DB_INT_CAUSE,
		A_ULPRX_INT_CAUSE,
		A_ULPTX_INT_CAUSE,
		A_CPL_INTR_CAUSE,
		A_PM1_TX_INT_CAUSE,
		A_PM1_RX_INT_CAUSE,
		A_MPS_INT_CAUSE,
		A_T3DBG_INT_CAUSE,
	};
	unsigned int i;

	/* Clear PHY and MAC interrupts for each port. */
	for_each_port(adapter, i)
		t3_port_intr_clear(adapter, i);

	for (i = 0; i < ARRAY_SIZE(cause_reg_addr); ++i)
		t3_write_reg(adapter, cause_reg_addr[i], 0xffffffff);

	if (is_pcie(adapter))
		t3_write_reg(adapter, A_PCIE_PEX_ERR, 0xffffffff);
	t3_write_reg(adapter, A_PL_INT_CAUSE0, 0xffffffff);
	t3_read_reg(adapter, A_PL_INT_CAUSE0);	/* flush */
}

void t3_xgm_intr_enable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_ENABLE + pi->mac.offset,
		     XGM_EXTRA_INTR_MASK);
}

void t3_xgm_intr_disable(struct adapter *adapter, int idx)
{
	struct port_info *pi = adap2pinfo(adapter, idx);

	t3_write_reg(adapter, A_XGM_XGM_INT_DISABLE + pi->mac.offset,
		     0x7ff);
}

/**
 * t3_port_intr_enable - enable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be enabled
 *
 * Enable port-specific (i.e., MAC and PHY) interrupts for the given
 * adapter port.
 */
void t3_port_intr_enable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), XGM_INTR_MASK);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_enable(phy);
}

/**
 * t3_port_intr_disable - disable port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts should be disabled
 *
 * Disable port-specific (i.e., MAC and PHY) interrupts for the given
 * adapter port.
 */
void t3_port_intr_disable(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx), 0);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_ENABLE, idx)); /* flush */
	phy->ops->intr_disable(phy);
}

/**
 * t3_port_intr_clear - clear port-specific interrupts
 * @adapter: associated adapter
 * @idx: index of port whose interrupts to clear
 *
 * Clear port-specific (i.e., MAC and PHY) interrupts for the given
 * adapter port.
 */
void t3_port_intr_clear(struct adapter *adapter, int idx)
{
	struct cphy *phy = &adap2pinfo(adapter, idx)->phy;

	t3_write_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx), 0xffffffff);
	t3_read_reg(adapter, XGM_REG(A_XGM_INT_CAUSE, idx)); /* flush */
	phy->ops->intr_clear(phy);
}

#define SG_CONTEXT_CMD_ATTEMPTS 100

/**
 * t3_sge_write_context - write an SGE context
 * @adapter: the adapter
 * @id: the context id
 * @type: the context type
 *
 * Program an SGE context with the values already loaded in the
 * CONTEXT_DATA0..3 registers.
 */
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
				unsigned int type)
{
	if (type == F_RESPONSEQ) {
		/*
		 * Can't write the Response Queue Context bits for
		 * Interrupt Armed or the Reserve bits after the chip
		 * has been initialized out of reset.  Writing to these
		 * bits can confuse the hardware.
		 */
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	} else {
		t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
		t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
	}
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
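
/*
 * Note on the mechanism: the SG_CONTEXT_MASK0..3 registers select which
 * bits of the corresponding SG_CONTEXT_DATA0..3 registers are actually
 * written into the context, so a single field can be updated without
 * disturbing the rest.  t3_sge_enable_ecntxt() below, for example,
 * flips only the EC_VALID bit:
 *
 *	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
 *	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
 */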

/**
 * clear_sge_ctxt - completely clear an SGE context
 * @adap: the adapter
 * @id: the context id
 * @type: the context type
 *
 * Completely clear an SGE context.  Used predominantly at post-reset
 * initialization.  Note in particular that we don't skip writing to any
 * "sensitive bits" in the contexts the way that t3_sge_write_context()
 * does ...
 */
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
			  unsigned int type)
{
	t3_write_reg(adap, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
	t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
	t3_write_reg(adap, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
	return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 * t3_sge_init_ecntxt - initialize an SGE egress context
 * @adapter: the adapter to configure
 * @id: the context id
 * @gts_enable: whether to enable GTS for the context
 * @type: the egress context type
 * @respq: associated response queue
 * @base_addr: base address of queue
 * @size: number of queue entries
 * @token: uP token
 * @gen: initial generation value for the context
 * @cidx: consumer pointer
 *
 * Initialize an SGE egress context and make it ready for use.  If the
 * platform allows concurrent context operations, the caller is
 * responsible for appropriate locking.
 */
int t3_sge_init_ecntxt(struct adapter *adapter, unsigned int id, int gts_enable,
		       enum sge_context_type type, int respq, u64 base_addr,
		       unsigned int size, unsigned int token, int gen,
		       unsigned int cidx)
{
	unsigned int credits = type == SGE_CNTXT_OFLD ? 0 : FW_WR_NUM;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_EC_INDEX(cidx) |
		     V_EC_CREDITS(credits) | V_EC_GTS(gts_enable));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, V_EC_SIZE(size) |
		     V_EC_BASE_LO(base_addr & 0xffff));
	base_addr >>= 16;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_EC_BASE_HI(base_addr & 0xf) | V_EC_RESPQ(respq) |
		     V_EC_TYPE(type) | V_EC_GEN(gen) | V_EC_UP_TOKEN(token) |
		     F_EC_VALID);
	return t3_sge_write_context(adapter, id, F_EGRESS);
}
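
/*
 * Illustrative example: the 4K-aligned queue address is programmed in
 * pieces.  For base_addr = 0x123456789000, the page number after the
 * shift by 12 is 0x123456789; V_EC_BASE_LO takes its low 16 bits
 * (0x6789), CONTEXT_DATA2 takes the next 32 bits (0x12345), and
 * V_EC_BASE_HI takes the remaining high bits (0 here).
 */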

/**
 * t3_sge_init_flcntxt - initialize an SGE free-buffer list context
 * @adapter: the adapter to configure
 * @id: the context id
 * @gts_enable: whether to enable GTS for the context
 * @base_addr: base address of queue
 * @size: number of queue entries
 * @bsize: size of each buffer for this queue
 * @cong_thres: threshold to signal congestion to upstream producers
 * @gen: initial generation value for the context
 * @cidx: consumer pointer
 *
 * Initialize an SGE free list context and make it ready for use.  The
 * caller is responsible for ensuring only one context operation occurs
 * at a time.
 */
int t3_sge_init_flcntxt(struct adapter *adapter, unsigned int id,
			int gts_enable, u64 base_addr, unsigned int size,
			unsigned int bsize, unsigned int cong_thres, int gen,
			unsigned int cidx)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1,
		     V_FL_BASE_HI((u32) base_addr) |
		     V_FL_INDEX_LO(cidx & M_FL_INDEX_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, V_FL_SIZE(size) |
		     V_FL_GEN(gen) | V_FL_INDEX_HI(cidx >> 12) |
		     V_FL_ENTRY_SIZE_LO(bsize & M_FL_ENTRY_SIZE_LO));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3,
		     V_FL_ENTRY_SIZE_HI(bsize >> (32 - S_FL_ENTRY_SIZE_LO)) |
		     V_FL_CONG_THRES(cong_thres) | V_FL_GTS(gts_enable));
	return t3_sge_write_context(adapter, id, F_FREELIST);
}

/**
 * t3_sge_init_rspcntxt - initialize an SGE response queue context
 * @adapter: the adapter to configure
 * @id: the context id
 * @irq_vec_idx: MSI-X interrupt vector index, 0 if no MSI-X, -1 if no IRQ
 * @base_addr: base address of queue
 * @size: number of queue entries
 * @fl_thres: threshold for selecting the normal or jumbo free list
 * @gen: initial generation value for the context
 * @cidx: consumer pointer
 *
 * Initialize an SGE response queue context and make it ready for use.
 * The caller is responsible for ensuring only one context operation
 * occurs at a time.
 */
int t3_sge_init_rspcntxt(struct adapter *adapter, unsigned int id,
			 int irq_vec_idx, u64 base_addr, unsigned int size,
			 unsigned int fl_thres, int gen, unsigned int cidx)
{
	unsigned int intr = 0;

	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size) |
		     V_CQ_INDEX(cidx));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	if (irq_vec_idx >= 0)
		intr = V_RQ_MSI_VEC(irq_vec_idx) | F_RQ_INTR_EN;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | intr | V_RQ_GEN(gen));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, fl_thres);
	return t3_sge_write_context(adapter, id, F_RESPONSEQ);
}

/**
 * t3_sge_init_cqcntxt - initialize an SGE completion queue context
 * @adapter: the adapter to configure
 * @id: the context id
 * @base_addr: base address of queue
 * @size: number of queue entries
 * @rspq: response queue for async notifications
 * @ovfl_mode: CQ overflow mode
 * @credits: completion queue credits
 * @credit_thres: the credit threshold
 *
 * Initialize an SGE completion queue context and make it ready for use.
 * The caller is responsible for ensuring only one context operation
 * occurs at a time.
 */
int t3_sge_init_cqcntxt(struct adapter *adapter, unsigned int id, u64 base_addr,
			unsigned int size, int rspq, int ovfl_mode,
			unsigned int credits, unsigned int credit_thres)
{
	if (base_addr & 0xfff)	/* must be 4K aligned */
		return -EINVAL;
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	base_addr >>= 12;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, V_CQ_SIZE(size));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA1, base_addr);
	base_addr >>= 32;
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2,
		     V_CQ_BASE_HI((u32) base_addr) | V_CQ_RSPQ(rspq) |
		     V_CQ_GEN(1) | V_CQ_OVERFLOW_MODE(ovfl_mode) |
		     V_CQ_ERR(ovfl_mode));
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_CQ_CREDITS(credits) |
		     V_CQ_CREDIT_THRES(credit_thres));
	return t3_sge_write_context(adapter, id, F_CQ);
}

/**
 * t3_sge_enable_ecntxt - enable/disable an SGE egress context
 * @adapter: the adapter
 * @id: the egress context id
 * @enable: enable (1) or disable (0) the context
 *
 * Enable or disable an SGE egress context.  The caller is responsible for
 * ensuring only one context operation occurs at a time.
 */
int t3_sge_enable_ecntxt(struct adapter *adapter, unsigned int id, int enable)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, F_EC_VALID);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA3, V_EC_VALID(enable));
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_EGRESS | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 * t3_sge_disable_fl - disable an SGE free-buffer list
 * @adapter: the adapter
 * @id: the free list context id
 *
 * Disable an SGE free-buffer list.  The caller is responsible for
 * ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_fl(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, V_FL_SIZE(M_FL_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_FREELIST | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 * t3_sge_disable_rspcntxt - disable an SGE response queue
 * @adapter: the adapter
 * @id: the response queue context id
 *
 * Disable an SGE response queue.  The caller is responsible for
 * ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_rspcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_RESPONSEQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 * t3_sge_disable_cqcntxt - disable an SGE completion queue
 * @adapter: the adapter
 * @id: the completion queue context id
 *
 * Disable an SGE completion queue.  The caller is responsible for
 * ensuring only one context operation occurs at a time.
 */
int t3_sge_disable_cqcntxt(struct adapter *adapter, unsigned int id)
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_MASK0, V_CQ_SIZE(M_CQ_SIZE));
	t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, 0);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(1) | F_CQ | V_CONTEXT(id));
	return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
			       0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}

/**
 * t3_sge_cqcntxt_op - perform an operation on a completion queue context
 * @adapter: the adapter
 * @id: the context id
 * @op: the operation to perform
 * @credits: the number of CQ credits to supply with the operation
 *
 * Perform the selected operation on an SGE completion queue context.
 * The caller is responsible for ensuring only one context operation
 * occurs at a time.
 */
int t3_sge_cqcntxt_op(struct adapter *adapter, unsigned int id, unsigned int op,
		      unsigned int credits)
{
	u32 val;

	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_DATA0, credits << 16);
	t3_write_reg(adapter, A_SG_CONTEXT_CMD, V_CONTEXT_CMD_OPCODE(op) |
		     V_CONTEXT(id) | F_CQ);
	if (t3_wait_op_done_val(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
				0, SG_CONTEXT_CMD_ATTEMPTS, 1, &val))
		return -EIO;

	if (op >= 2 && op < 7) {
		if (adapter->params.rev > 0)
			return G_CQ_INDEX(val);

		t3_write_reg(adapter, A_SG_CONTEXT_CMD,
			     V_CONTEXT_CMD_OPCODE(0) | F_CQ | V_CONTEXT(id));
		if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD,
				    F_CONTEXT_CMD_BUSY, 0,
				    SG_CONTEXT_CMD_ATTEMPTS, 1))
			return -EIO;
		return G_CQ_INDEX(t3_read_reg(adapter, A_SG_CONTEXT_DATA0));
	}
	return 0;
}

/**
 * t3_sge_read_context - read an SGE context
 * @type: the context type
 * @adapter: the adapter
 * @id: the context id
 * @data: holds the retrieved context
 *
 * Read an SGE context of the specified type.  The caller is responsible for ensuring
 * only one context operation occurs at a time.
 */
static int t3_sge_read_context(unsigned int type, struct adapter *adapter,
			       unsigned int id, u32 data[4])
{
	if (t3_read_reg(adapter, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	t3_write_reg(adapter, A_SG_CONTEXT_CMD,
		     V_CONTEXT_CMD_OPCODE(0) | type | V_CONTEXT(id));
	if (t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY, 0,
			    SG_CONTEXT_CMD_ATTEMPTS, 1))
		return -EIO;
	data[0] = t3_read_reg(adapter, A_SG_CONTEXT_DATA0);
	data[1] = t3_read_reg(adapter, A_SG_CONTEXT_DATA1);
	data[2] = t3_read_reg(adapter, A_SG_CONTEXT_DATA2);
	data[3] = t3_read_reg(adapter, A_SG_CONTEXT_DATA3);
	return 0;
}

/**
 * t3_sge_read_ecntxt - read an SGE egress context
 * @adapter: the adapter
 * @id: the context id
 * @data: holds the retrieved context
 *
 * Read an SGE egress context.  The caller is responsible for ensuring
 * only one context operation occurs at a time.
 */
int t3_sge_read_ecntxt(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= 65536)
		return -EINVAL;
	return t3_sge_read_context(F_EGRESS, adapter, id, data);
}

/**
 * t3_sge_read_cq - read an SGE CQ context
 * @adapter: the adapter
 * @id: the context id
 * @data: holds the retrieved context
 *
 * Read an SGE CQ context.  The caller is responsible for ensuring
 * only one context operation occurs at a time.
 */
int t3_sge_read_cq(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= 65536)
		return -EINVAL;
	return t3_sge_read_context(F_CQ, adapter, id, data);
}

/**
 * t3_sge_read_fl - read an SGE free-list context
 * @adapter: the adapter
 * @id: the context id
 * @data: holds the retrieved context
 *
 * Read an SGE free-list context.  The caller is responsible for ensuring
 * only one context operation occurs at a time.
 */
int t3_sge_read_fl(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= SGE_QSETS * 2)
		return -EINVAL;
	return t3_sge_read_context(F_FREELIST, adapter, id, data);
}

/**
 * t3_sge_read_rspq - read an SGE response queue context
 * @adapter: the adapter
 * @id: the context id
 * @data: holds the retrieved context
 *
 * Read an SGE response queue context.  The caller is responsible for
 * ensuring only one context operation occurs at a time.
 */
int t3_sge_read_rspq(struct adapter *adapter, unsigned int id, u32 data[4])
{
	if (id >= SGE_QSETS)
		return -EINVAL;
	return t3_sge_read_context(F_RESPONSEQ, adapter, id, data);
}

/**
 * t3_config_rss - configure Rx packet steering
 * @adapter: the adapter
 * @rss_config: RSS settings (written to TP_RSS_CONFIG)
 * @cpus: values for the CPU lookup table (0xff terminated)
 * @rspq: values for the response queue lookup table (0xffff terminated)
 *
 * Programs the receive packet steering logic.  @cpus and @rspq provide
 * the values for the CPU and response queue lookup tables.  If they
 * provide fewer values than the size of the tables, the supplied values
 * are used repeatedly until the tables are fully populated.
 */
void t3_config_rss(struct adapter *adapter, unsigned int rss_config,
		   const u8 *cpus, const u16 *rspq)
{
	int i, j, cpu_idx = 0, q_idx = 0;

	if (cpus)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			u32 val = i << 16;

			for (j = 0; j < 2; ++j) {
				val |= (cpus[cpu_idx++] & 0x3f) << (8 * j);
				if (cpus[cpu_idx] == 0xff)
					cpu_idx = 0;
			}
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE, val);
		}

	if (rspq)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     (i << 16) | rspq[q_idx++]);
			if (rspq[q_idx] == 0xffff)
				q_idx = 0;
		}

	t3_write_reg(adapter, A_TP_RSS_CONFIG, rss_config);
}
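
/*
 * Illustrative example, with hypothetical tables:
 *
 *	static const u8 cpus[] = { 0, 1, 2, 3, 0xff };
 *	static const u16 rspq[] = { 0, 1, 0xffff };
 *
 *	t3_config_rss(adapter, rss_config, cpus, rspq);
 *
 * The four CPU values and two response queue values are recycled in
 * round-robin fashion until all RSS_TABLE_SIZE entries are written.
 */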

/**
 * t3_read_rss - read the contents of the RSS tables
 * @adapter: the adapter
 * @lkup: holds the contents of the RSS lookup table
 * @map: holds the contents of the RSS map table
 *
 * Reads the contents of the receive packet steering tables.
 */
int t3_read_rss(struct adapter *adapter, u8 *lkup, u16 *map)
{
	int i;
	u32 val;

	if (lkup)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_LKP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_LKP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*lkup++ = val;
			*lkup++ = (val >> 8);
		}

	if (map)
		for (i = 0; i < RSS_TABLE_SIZE; ++i) {
			t3_write_reg(adapter, A_TP_RSS_MAP_TABLE,
				     0xffff0000 | i);
			val = t3_read_reg(adapter, A_TP_RSS_MAP_TABLE);
			if (!(val & 0x80000000))
				return -EAGAIN;
			*map++ = val;
		}
	return 0;
}

/**
 * t3_tp_set_offload_mode - put TP in NIC/offload mode
 * @adap: the adapter
 * @enable: 1 to select offload mode, 0 for regular NIC
 *
 * Switches TP to NIC/offload mode.
 */
void t3_tp_set_offload_mode(struct adapter *adap, int enable)
{
	if (is_offload(adap) || !enable)
		t3_set_reg_field(adap, A_TP_IN_CONFIG, F_NICMODE,
				 V_NICMODE(!enable));
}

/**
 * pm_num_pages - calculate the number of pages of the payload memory
 * @mem_size: the size of the payload memory
 * @pg_size: the size of each payload memory page
 *
 * Calculate the number of pages, each of the given size, that fit in a
 * memory of the specified size, respecting the HW requirement that the
 * number of pages must be a multiple of 24.
 */
static inline unsigned int pm_num_pages(unsigned int mem_size,
					unsigned int pg_size)
{
	unsigned int n = mem_size / pg_size;

	return n - n % 24;
}
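
/*
 * Worked example: for a 64 MiB payload memory with 16 KiB pages,
 * n = 67108864 / 16384 = 4096; since 4096 % 24 == 16, pm_num_pages()
 * returns 4096 - 16 = 4080 pages.
 */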

#define mem_region(adap, start, size, reg) \
	t3_write_reg((adap), A_ ## reg, (start)); \
	start += size

/**
 * partition_mem - partition memory and configure TP memory settings
 * @adap: the adapter
 * @p: the TP parameters
 *
 * Partitions context and payload memory and configures TP's memory
 * registers.
 */
static void partition_mem(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m, pstructs, tids = t3_mc5_size(&adap->mc5);
	unsigned int timers = 0, timers_shift = 22;

	if (adap->params.rev > 0) {
		if (tids <= 16 * 1024) {
			timers = 1;
			timers_shift = 16;
		} else if (tids <= 64 * 1024) {
			timers = 2;
			timers_shift = 18;
		} else if (tids <= 256 * 1024) {
			timers = 3;
			timers_shift = 20;
		}
	}

	t3_write_reg(adap, A_TP_PMM_SIZE,
		     p->chan_rx_size | (p->chan_tx_size >> 16));

	t3_write_reg(adap, A_TP_PMM_TX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_TX_PAGE_SIZE, p->tx_pg_size);
	t3_write_reg(adap, A_TP_PMM_TX_MAX_PAGE, p->tx_num_pgs);
	t3_set_reg_field(adap, A_TP_PARA_REG3, V_TXDATAACKIDX(M_TXDATAACKIDX),
			 V_TXDATAACKIDX(fls(p->tx_pg_size) - 12));

	t3_write_reg(adap, A_TP_PMM_RX_BASE, 0);
	t3_write_reg(adap, A_TP_PMM_RX_PAGE_SIZE, p->rx_pg_size);
	t3_write_reg(adap, A_TP_PMM_RX_MAX_PAGE, p->rx_num_pgs);

	pstructs = p->rx_num_pgs + p->tx_num_pgs;
	/* Add a bit of headroom and make it a multiple of 24 */
	pstructs += 48;
	pstructs -= pstructs % 24;
	t3_write_reg(adap, A_TP_CMM_MM_MAX_PSTRUCT, pstructs);

	m = tids * TCB_SIZE;
	mem_region(adap, m, (64 << 10) * 64, SG_EGR_CNTX_BADDR);
	mem_region(adap, m, (64 << 10) * 64, SG_CQ_CONTEXT_BADDR);
	t3_write_reg(adap, A_TP_CMM_TIMER_BASE, V_CMTIMERMAXNUM(timers) | m);
	m += ((p->ntimer_qs - 1) << timers_shift) + (1 << 22);
	mem_region(adap, m, pstructs * 64, TP_CMM_MM_BASE);
	mem_region(adap, m, 64 * (pstructs / 24), TP_CMM_MM_PS_FLST_BASE);
	mem_region(adap, m, 64 * (p->rx_num_pgs / 24), TP_CMM_MM_RX_FLST_BASE);
	mem_region(adap, m, 64 * (p->tx_num_pgs / 24), TP_CMM_MM_TX_FLST_BASE);

	m = (m + 4095) & ~0xfff;
	t3_write_reg(adap, A_CIM_SDRAM_BASE_ADDR, m);
	t3_write_reg(adap, A_CIM_SDRAM_ADDR_SIZE, p->cm_size - m);

	tids = (p->cm_size - m - (3 << 20)) / 3072 - 32;
	m = t3_mc5_size(&adap->mc5) - adap->params.mc5.nservers -
	    adap->params.mc5.nfilters - adap->params.mc5.nroutes;
	if (tids < m)
		adap->params.mc5.nservers += m - tids;
}

static inline void tp_wr_indirect(struct adapter *adap, unsigned int addr,
				  u32 val)
{
	t3_write_reg(adap, A_TP_PIO_ADDR, addr);
	t3_write_reg(adap, A_TP_PIO_DATA, val);
}

static void tp_config(struct adapter *adap, const struct tp_params *p)
{
	t3_write_reg(adap, A_TP_GLOBAL_CONFIG, F_TXPACINGENABLE | F_PATHMTU |
		     F_IPCHECKSUMOFFLOAD | F_UDPCHECKSUMOFFLOAD |
		     F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
	t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
		     F_MTUENABLE | V_WINDOWSCALEMODE(1) |
		     V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
	t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
		     V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
		     V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
		     F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
	t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
			 F_IPV6ENABLE | F_NICMODE);
	t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
	t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
	t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
			 adap->params.rev > 0 ? F_ENABLEESND :
			 F_T3A_ENABLEESND);

	t3_set_reg_field(adap, A_TP_PC_CONFIG,
			 F_ENABLEEPCMDAFULL,
			 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
			 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
	t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL,
			 F_ENABLEIPV6RSS | F_ENABLENONOFDTNLSYN |
			 F_ENABLEARPMISS | F_DISBLEDAPARBIT0);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
	t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);

	if (adap->params.rev > 0) {
		tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
		t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
				 F_TXPACEAUTO);
		t3_set_reg_field(adap, A_TP_PC_CONFIG, F_LOCKTID, F_LOCKTID);
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEAUTOSTRICT);
	} else
		t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);

	if (adap->params.rev == T3_REV_C)
		t3_set_reg_field(adap, A_TP_PC_CONFIG,
				 V_TABLELATENCYDELTA(M_TABLELATENCYDELTA),
				 V_TABLELATENCYDELTA(4));

	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
	t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
	t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
	t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
}

/* Desired TP timer resolution in usec */
#define TP_TMR_RES 50

/* TCP timer values in ms */
#define TP_DACK_TIMER 50
#define TP_RTO_MIN    250

/**
 * tp_set_timers - set TP timing parameters
 * @adap: the adapter to set
 * @core_clk: the core clock frequency in Hz
 *
 * Set TP's timing parameters, such as the various timer resolutions and
 * the TCP timer values.
 */
static void tp_set_timers(struct adapter *adap, unsigned int core_clk)
{
	unsigned int tre = fls(core_clk / (1000000 / TP_TMR_RES)) - 1;
	unsigned int dack_re = fls(core_clk / 5000) - 1;	/* 200us */
	unsigned int tstamp_re = fls(core_clk / 1000);	/* 1ms, at least */
	unsigned int tps = core_clk >> tre;

	t3_write_reg(adap, A_TP_TIMER_RESOLUTION, V_TIMERRESOLUTION(tre) |
		     V_DELAYEDACKRESOLUTION(dack_re) |
		     V_TIMESTAMPRESOLUTION(tstamp_re));
	t3_write_reg(adap, A_TP_DACK_TIMER,
		     (core_clk >> dack_re) / (1000 / TP_DACK_TIMER));
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG0, 0x3020100);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG1, 0x7060504);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG2, 0xb0a0908);
	t3_write_reg(adap, A_TP_TCP_BACKOFF_REG3, 0xf0e0d0c);
	t3_write_reg(adap, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) |
		     V_RXTSHIFTMAXR1(4) | V_RXTSHIFTMAXR2(15) |
		     V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
		     V_KEEPALIVEMAX(9));

#define SECONDS * tps

	t3_write_reg(adap, A_TP_MSL, adap->params.rev > 0 ? 0 : 2 SECONDS);
	t3_write_reg(adap, A_TP_RXT_MIN, tps / (1000 / TP_RTO_MIN));
	t3_write_reg(adap, A_TP_RXT_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MIN, 5 SECONDS);
	t3_write_reg(adap, A_TP_PERS_MAX, 64 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_IDLE, 7200 SECONDS);
	t3_write_reg(adap, A_TP_KEEP_INTVL, 75 SECONDS);
	t3_write_reg(adap, A_TP_INIT_SRTT, 3 SECONDS);
	t3_write_reg(adap, A_TP_FINWAIT2_TIMER, 600 SECONDS);

#undef SECONDS
}
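
/*
 * Worked example, assuming a 200 MHz core clock: core_clk / (1000000 /
 * TP_TMR_RES) = 200000000 / 20000 = 10000 and fls(10000) = 14, so
 * tre = 13 and tps = 200000000 >> 13 = 24414 ticks/sec, i.e. one tick
 * is 2^13 / 200e6 ~= 41 us, the coarsest power-of-two tick that still
 * meets the 50 us target resolution.  A_TP_RXT_MIN then gets
 * tps / (1000 / TP_RTO_MIN) = 24414 / 4 = 6103 ticks for the 250 ms
 * minimum retransmit timeout.
 */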

/**
 * t3_tp_set_coalescing_size - set receive coalescing size
 * @adap: the adapter
 * @size: the receive coalescing size
 * @psh: whether a set PSH bit should deliver coalesced data
 *
 * Set the receive coalescing size and PSH bit handling.
 */
int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
{
	u32 val;

	if (size > MAX_RX_COALESCING_LEN)
		return -EINVAL;

	val = t3_read_reg(adap, A_TP_PARA_REG3);
	val &= ~(F_RXCOALESCEENABLE | F_RXCOALESCEPSHEN);

	if (size) {
		val |= F_RXCOALESCEENABLE;
		if (psh)
			val |= F_RXCOALESCEPSHEN;
		size = min(MAX_RX_COALESCING_LEN, size);
		t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
			     V_MAXRXDATA(MAX_RX_COALESCING_LEN));
	}
	t3_write_reg(adap, A_TP_PARA_REG3, val);
	return 0;
}

/**
 * t3_tp_set_max_rxsize - set the max receive size
 * @adap: the adapter
 * @size: the max receive size
 *
 * Set TP's max receive size.  This is the limit that applies when
 * receive coalescing is disabled.
 */
void t3_tp_set_max_rxsize(struct adapter *adap, unsigned int size)
{
	t3_write_reg(adap, A_TP_PARA_REG7,
		     V_PMMAXXFERLEN0(size) | V_PMMAXXFERLEN1(size));
}

static void init_mtus(unsigned short mtus[])
{
	/*
	 * See draft-mathis-plpmtud-00.txt for the values.  The min is 88 so
	 * it can accommodate maximum-size TCP/IP headers when SACK and
	 * timestamps are enabled and still have at least 8 bytes of payload.
	 */
	mtus[0] = 88;
	mtus[1] = 88;
	mtus[2] = 256;
	mtus[3] = 512;
	mtus[4] = 576;
	mtus[5] = 1024;
	mtus[6] = 1280;
	mtus[7] = 1492;
	mtus[8] = 1500;
	mtus[9] = 2002;
	mtus[10] = 2048;
	mtus[11] = 4096;
	mtus[12] = 4352;
	mtus[13] = 8192;
	mtus[14] = 9000;
	mtus[15] = 9600;
}

/*
 * Initial congestion control parameters.
 */
static void init_cong_ctrl(unsigned short *a, unsigned short *b)
{
	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
	a[9] = 2;
	a[10] = 3;
	a[11] = 4;
	a[12] = 5;
	a[13] = 6;
	a[14] = 7;
	a[15] = 8;
	a[16] = 9;
	a[17] = 10;
	a[18] = 14;
	a[19] = 17;
	a[20] = 21;
	a[21] = 25;
	a[22] = 30;
	a[23] = 35;
	a[24] = 45;
	a[25] = 60;
	a[26] = 80;
	a[27] = 100;
	a[28] = 200;
	a[29] = 300;
	a[30] = 400;
	a[31] = 500;

	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
	b[9] = b[10] = 1;
	b[11] = b[12] = 2;
	b[13] = b[14] = b[15] = b[16] = 3;
	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
	b[28] = b[29] = 6;
	b[30] = b[31] = 7;
}

/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 * t3_load_mtus - write the MTU and congestion control HW tables
 * @adap: the adapter
 * @mtus: the unrestricted values for the MTU table
 * @alpha: the values for the congestion control alpha parameter
 * @beta: the values for the congestion control beta parameter
 * @mtu_cap: the maximum permitted effective MTU
 *
 * Write the MTU table with the supplied MTUs, capping each at @mtu_cap.
 * Update the high-speed congestion control table with the supplied alpha,
 * beta, and MTUs.
 */
void t3_load_mtus(struct adapter *adap, unsigned short mtus[NMTUS],
		  unsigned short alpha[NCCTRL_WIN],
		  unsigned short beta[NCCTRL_WIN], unsigned short mtu_cap)
{
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = min(mtus[i], mtu_cap);
		unsigned int log2 = fls(mtu);

		if (!(mtu & ((1 << log2) >> 2)))	/* round */
			log2--;
		t3_write_reg(adap, A_TP_MTU_TABLE,
			     (i << 24) | (log2 << 16) | mtu);

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			t3_write_reg(adap, A_TP_CCTRL_TABLE, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
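
/*
 * Worked example: for mtu = 1500 with alpha[w] = 2 and avg_pkts[w] = 896,
 * the additive increment is max(((1500 - 40) * 2) / 896, CC_MIN_INCR) =
 * max(3, 2U) = 3.
 */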

/**
 * t3_read_hw_mtus - returns the values in the HW MTU table
 * @adap: the adapter
 * @mtus: where to store the HW MTU values
 *
 * Reads the HW MTU table.
 */
void t3_read_hw_mtus(struct adapter *adap, unsigned short mtus[NMTUS])
{
	int i;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int val;

		t3_write_reg(adap, A_TP_MTU_TABLE, 0xff000000 | i);
		val = t3_read_reg(adap, A_TP_MTU_TABLE);
		mtus[i] = val & 0x3fff;
	}
}

/**
 * t3_get_cong_cntl_tab - reads the congestion control table
 * @adap: the adapter
 * @incr: where to store the alpha values
 *
 * Reads the additive increments programmed into the HW congestion
 * control table.
 */
void t3_get_cong_cntl_tab(struct adapter *adap,
			  unsigned short incr[NMTUS][NCCTRL_WIN])
{
	unsigned int mtu, w;

	for (mtu = 0; mtu < NMTUS; ++mtu)
		for (w = 0; w < NCCTRL_WIN; ++w) {
			t3_write_reg(adap, A_TP_CCTRL_TABLE,
				     0xffff0000 | (mtu << 5) | w);
			incr[mtu][w] = t3_read_reg(adap, A_TP_CCTRL_TABLE) &
				       0x1fff;
		}
}

/**
 * t3_tp_get_mib_stats - read TP's MIB counters
 * @adap: the adapter
 * @tps: holds the returned counter values
 *
 * Returns the values of TP's MIB counters.
 */
void t3_tp_get_mib_stats(struct adapter *adap, struct tp_mib_stats *tps)
{
	t3_read_indirect(adap, A_TP_MIB_INDEX, A_TP_MIB_RDATA, (u32 *) tps,
			 sizeof(*tps) / sizeof(u32), 0);
}

#define ulp_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPRX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPRX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1); \
	start += len

#define ulptx_region(adap, name, start, len) \
	t3_write_reg((adap), A_ULPTX_ ## name ## _LLIMIT, (start)); \
	t3_write_reg((adap), A_ULPTX_ ## name ## _ULIMIT, \
		     (start) + (len) - 1)

static void ulp_config(struct adapter *adap, const struct tp_params *p)
{
	unsigned int m = p->chan_rx_size;

	ulp_region(adap, ISCSI, m, p->chan_rx_size / 8);
	ulp_region(adap, TDDP, m, p->chan_rx_size / 8);
	ulptx_region(adap, TPT, m, p->chan_rx_size / 4);
	ulp_region(adap, STAG, m, p->chan_rx_size / 4);
	ulp_region(adap, RQ, m, p->chan_rx_size / 4);
	ulptx_region(adap, PBL, m, p->chan_rx_size / 4);
	ulp_region(adap, PBL, m, p->chan_rx_size / 4);
	t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
}
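
/*
 * Layout note: ulp_region() advances the running offset, while
 * ulptx_region() leaves it unchanged, so each ULP TX window (TPT, PBL)
 * overlays the ULP RX window programmed right after it; the TX and RX
 * limits live in separate register spaces.  The regions that do advance
 * the offset consume exactly p->chan_rx_size bytes:
 * 1/8 (ISCSI) + 1/8 (TDDP) + 1/4 (STAG) + 1/4 (RQ) + 1/4 (PBL).
 */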

/**
 * t3_set_proto_sram - set the contents of the protocol SRAM
 * @adap: the adapter
 * @data: the protocol image
 *
 * Write the contents of the protocol SRAM.
 */
int t3_set_proto_sram(struct adapter *adap, const u8 *data)
{
	int i;
	const __be32 *buf = (const __be32 *)data;

	for (i = 0; i < PROTO_SRAM_LINES; i++) {
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, be32_to_cpu(*buf++));
		t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, be32_to_cpu(*buf++));

		t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
		if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
			return -EIO;
	}
	t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);

	return 0;
}
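
/*
 * Write protocol used above, for reference: each protocol SRAM line is
 * five 32-bit big-endian words staged into EMBED_OP_FIELD5..1, then
 * committed by writing the line index (shifted left by one) with bit 31
 * set into EMBED_OP_FIELD0; t3_wait_op_done() polls bit 0 of that
 * register until the hardware reports the line as accepted.
 */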

void t3_config_trace_filter(struct adapter *adapter,
			    const struct trace_params *tp, int filter_index,
			    int invert, int enable)
{
	u32 addr, key[4], mask[4];

	key[0] = tp->sport | (tp->sip << 16);
	key[1] = (tp->sip >> 16) | (tp->dport << 16);
	key[2] = tp->dip;
	key[3] = tp->proto | (tp->vlan << 8) | (tp->intf << 20);

	mask[0] = tp->sport_mask | (tp->sip_mask << 16);
	mask[1] = (tp->sip_mask >> 16) | (tp->dport_mask << 16);
	mask[2] = tp->dip_mask;
	mask[3] = tp->proto_mask | (tp->vlan_mask << 8) | (tp->intf_mask << 20);

	if (invert)
		key[3] |= (1 << 29);
	if (enable)
		key[3] |= (1 << 28);

	addr = filter_index ? A_TP_RX_TRC_KEY0 : A_TP_TX_TRC_KEY0;
	tp_wr_indirect(adapter, addr++, key[0]);
	tp_wr_indirect(adapter, addr++, mask[0]);
	tp_wr_indirect(adapter, addr++, key[1]);
	tp_wr_indirect(adapter, addr++, mask[1]);
	tp_wr_indirect(adapter, addr++, key[2]);
	tp_wr_indirect(adapter, addr++, mask[2]);
	tp_wr_indirect(adapter, addr++, key[3]);
	tp_wr_indirect(adapter, addr, mask[3]);
	t3_read_reg(adapter, A_TP_PIO_DATA);
}

/**
 * t3_config_sched - configure a HW traffic scheduler
 * @adap: the adapter
 * @kbps: target rate in Kbps
 * @sched: the scheduler index
 *
 * Configure a HW traffic scheduler for the given target rate.
 */
int t3_config_sched(struct adapter *adap, unsigned int kbps, int sched)
{
	unsigned int v, tps, cpt, bpt, delta, mindelta = ~0;
	unsigned int clk = adap->params.vpd.cclk * 1000;
	unsigned int selected_cpt = 0, selected_bpt = 0;

	if (kbps > 0) {
		kbps *= 125;	/* -> bytes */
		for (cpt = 1; cpt <= 255; cpt++) {
			tps = clk / cpt;
			bpt = (kbps + tps / 2) / tps;
			if (bpt > 0 && bpt <= 255) {
				v = bpt * tps;
				delta = v >= kbps ? v - kbps : kbps - v;
				if (delta <= mindelta) {
					mindelta = delta;
					selected_cpt = cpt;
					selected_bpt = bpt;
				}
			} else if (selected_cpt)
				break;
		}
		if (!selected_cpt)
			return -EINVAL;
	}
	t3_write_reg(adap, A_TP_TM_PIO_ADDR,
		     A_TP_TX_MOD_Q1_Q0_RATE_LIMIT - sched / 2);
	v = t3_read_reg(adap, A_TP_TM_PIO_DATA);
	if (sched & 1)
		v = (v & 0xffff) | (selected_cpt << 16) | (selected_bpt << 24);
	else
		v = (v & 0xffff0000) | selected_cpt | (selected_bpt << 8);
	t3_write_reg(adap, A_TP_TM_PIO_DATA, v);
	return 0;
}
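
/*
 * Worked example, assuming a 200 MHz core clock (clk = 200000000) and
 * kbps = 10000: the target becomes 10000 * 125 = 1250000 bytes/sec and
 * the search settles on cpt = 160, bpt = 1, since tps = clk / 160 =
 * 1250000 ticks/sec and one byte per tick hits the target exactly
 * (delta = 0).
 */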
3212
3213static int tp_init(struct adapter *adap, const struct tp_params *p)
3214{
3215 int busy = 0;
3216
3217 tp_config(adap, p);
3218 t3_set_vlan_accel(adap, 3, 0);
3219
3220 if (is_offload(adap)) {
3221 tp_set_timers(adap, adap->params.vpd.cclk * 1000);
3222 t3_write_reg(adap, A_TP_RESET, F_FLSTINITENABLE);
3223 busy = t3_wait_op_done(adap, A_TP_RESET, F_FLSTINITENABLE,
3224 0, 1000, 5);
3225 if (busy)
3226 CH_ERR(adap, "TP initialization timed out\n");
3227 }
3228
3229 if (!busy)
3230 t3_write_reg(adap, A_TP_RESET, F_TPRESET);
3231 return busy;
3232}
3233
3234int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
3235{
3236 if (port_mask & ~((1 << adap->params.nports) - 1))
3237 return -EINVAL;
3238 t3_set_reg_field(adap, A_MPS_CFG, F_PORT1ACTIVE | F_PORT0ACTIVE,
3239 port_mask << S_PORT0ACTIVE);
3240 return 0;
3241}
3242
3243/*
3244 * Perform the bits of HW initialization that are dependent on the Tx
3245 * channels being used.
3246 */
3247static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
3248{
3249 int i;
3250
3251 if (chan_map != 3) { /* one channel */
3252 t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
3253 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
3254 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
3255 (chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
3256 F_TPTXPORT1EN | F_PORT1ACTIVE));
3257 t3_write_reg(adap, A_PM1_TX_CFG,
3258 chan_map == 1 ? 0xffffffff : 0);
3259 } else { /* two channels */
3260 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
3261 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
3262 t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
3263 V_D1_WEIGHT(16) | V_D0_WEIGHT(16));
3264 t3_write_reg(adap, A_MPS_CFG, F_TPTXPORT0EN | F_TPTXPORT1EN |
3265 F_TPRXPORTEN | F_PORT0ACTIVE | F_PORT1ACTIVE |
3266 F_ENFORCEPKT);
3267 t3_write_reg(adap, A_PM1_TX_CFG, 0x80008000);
3268 t3_set_reg_field(adap, A_TP_PC_CONFIG, 0, F_TXTOSQUEUEMAPMODE);
3269 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_REQ_MAP,
3270 V_TX_MOD_QUEUE_REQ_MAP(0xaa));
3271 for (i = 0; i < 16; i++)
3272 t3_write_reg(adap, A_TP_TX_MOD_QUE_TABLE,
3273 (i << 16) | 0x1010);
3274 }
3275}
3276
3277static int calibrate_xgm(struct adapter *adapter)
3278{
3279 if (uses_xaui(adapter)) {
3280 unsigned int v, i;
3281
3282 for (i = 0; i < 5; ++i) {
3283 t3_write_reg(adapter, A_XGM_XAUI_IMP, 0);
3284 t3_read_reg(adapter, A_XGM_XAUI_IMP);
3285 msleep(1);
3286 v = t3_read_reg(adapter, A_XGM_XAUI_IMP);
3287 if (!(v & (F_XGM_CALFAULT | F_CALBUSY))) {
3288 t3_write_reg(adapter, A_XGM_XAUI_IMP,
3289 V_XAUIIMP(G_CALIMP(v) >> 2));
3290 return 0;
3291 }
3292 }
3293 CH_ERR(adapter, "MAC calibration failed\n");
3294 return -1;
3295 } else {
3296 t3_write_reg(adapter, A_XGM_RGMII_IMP,
3297 V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3298 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3299 F_XGM_IMPSETUPDATE);
3300 }
3301 return 0;
3302}
3303
3304static void calibrate_xgm_t3b(struct adapter *adapter)
3305{
3306 if (!uses_xaui(adapter)) {
3307 t3_write_reg(adapter, A_XGM_RGMII_IMP, F_CALRESET |
3308 F_CALUPDATE | V_RGMIIIMPPD(2) | V_RGMIIIMPPU(3));
3309 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALRESET, 0);
3310 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0,
3311 F_XGM_IMPSETUPDATE);
3312 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_XGM_IMPSETUPDATE,
3313 0);
3314 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, F_CALUPDATE, 0);
3315 t3_set_reg_field(adapter, A_XGM_RGMII_IMP, 0, F_CALUPDATE);
3316 }
3317}
3318
struct mc7_timing_params {
	unsigned char ActToPreDly;
	unsigned char ActToRdWrDly;
	unsigned char PreCyc;
	unsigned char RefCyc[5];
	unsigned char BkCyc;
	unsigned char WrToRdDly;
	unsigned char RdToWrDly;
};

/*
 * Write a value to a register and check that the write completed.  These
 * writes normally complete in a cycle or two, so one read should suffice.
 * The very first read exists to flush the posted write to the device.
 */
static int wrreg_wait(struct adapter *adapter, unsigned int addr, u32 val)
{
	t3_write_reg(adapter, addr, val);
	t3_read_reg(adapter, addr);	/* flush */
	if (!(t3_read_reg(adapter, addr) & F_BUSY))
		return 0;
	CH_ERR(adapter, "write to MC7 register 0x%x timed out\n", addr);
	return -EIO;
}

static int mc7_init(struct mc7 *mc7, unsigned int mc7_clock, int mem_type)
{
	static const unsigned int mc7_mode[] = {
		0x632, 0x642, 0x652, 0x432, 0x442
	};
	static const struct mc7_timing_params mc7_timings[] = {
		{12, 3, 4, {20, 28, 34, 52, 0}, 15, 6, 4},
		{12, 4, 5, {20, 28, 34, 52, 0}, 16, 7, 4},
		{12, 5, 6, {20, 28, 34, 52, 0}, 17, 8, 4},
		{9, 3, 4, {15, 21, 26, 39, 0}, 12, 6, 4},
		{9, 4, 5, {15, 21, 26, 39, 0}, 13, 7, 4}
	};

	u32 val;
	unsigned int width, density, slow, attempts;
	struct adapter *adapter = mc7->adapter;
	const struct mc7_timing_params *p = &mc7_timings[mem_type];

	if (!mc7->size)
		return 0;

	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	slow = val & F_SLOW;
	width = G_WIDTH(val);
	density = G_DEN(val);

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG, val | F_IFEN);
	val = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */
	msleep(1);

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_CAL, F_SGL_CAL_EN);
		t3_read_reg(adapter, mc7->offset + A_MC7_CAL);
		msleep(1);
		if (t3_read_reg(adapter, mc7->offset + A_MC7_CAL) &
		    (F_BUSY | F_SGL_CAL_EN | F_CAL_FAULT)) {
			CH_ERR(adapter, "%s MC7 calibration timed out\n",
			       mc7->name);
			goto out_fail;
		}
	}

	t3_write_reg(adapter, mc7->offset + A_MC7_PARM,
		     V_ACTTOPREDLY(p->ActToPreDly) |
		     V_ACTTORDWRDLY(p->ActToRdWrDly) | V_PRECYC(p->PreCyc) |
		     V_REFCYC(p->RefCyc[density]) | V_BKCYC(p->BkCyc) |
		     V_WRTORDDLY(p->WrToRdDly) | V_RDTOWRDLY(p->RdToWrDly));

	t3_write_reg(adapter, mc7->offset + A_MC7_CFG,
		     val | F_CLKEN | F_TERM150);
	t3_read_reg(adapter, mc7->offset + A_MC7_CFG);	/* flush */

	if (!slow)
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLENB,
				 F_DLLENB);
	udelay(1);

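	/*
	 * The register writes below follow the usual JEDEC DDR power-up
	 * sequence: precharge-all, program the extended and normal mode
	 * registers, reset the DLL, then precharge and refresh before the
	 * final mode-register loads.  'val' is the EXT_MODE1 value; the
	 * 3-versus-6 choice for slow parts presumably selects different
	 * drive/DLL settings, though that is not documented here.
	 */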
	val = slow ? 3 : 6;
	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE2, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE3, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

	if (!slow) {
		t3_write_reg(adapter, mc7->offset + A_MC7_MODE, 0x100);
		t3_set_reg_field(adapter, mc7->offset + A_MC7_DLL, F_DLLRST, 0);
		udelay(5);
	}

	if (wrreg_wait(adapter, mc7->offset + A_MC7_PRE, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_REF, 0) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_MODE,
		       mc7_mode[mem_type]) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val | 0x380) ||
	    wrreg_wait(adapter, mc7->offset + A_MC7_EXT_MODE1, val))
		goto out_fail;

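	/*
	 * Program the periodic-refresh divider.  Multiplying the KHz clock
	 * by 7812.5 (done as * 7812 plus half) yields the number of
	 * memory-clock cycles in 7.8125 us scaled by 10^6, and the division
	 * below restores the plain cycle count.  7.8125 us is the usual DDR
	 * average refresh interval (64 ms / 8192 rows).  For example, a
	 * 200000 KHz clock gives 200000 * 7812.5 / 10^6 = 1562 cycles
	 * between refreshes.
	 */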
	/* clock value is in KHz */
	mc7_clock = mc7_clock * 7812 + mc7_clock / 2;	/* ns */
	mc7_clock /= 1000000;	/* KHz->MHz, ns->us */

	t3_write_reg(adapter, mc7->offset + A_MC7_REF,
		     F_PERREFEN | V_PREREFDIV(mc7_clock));
	t3_read_reg(adapter, mc7->offset + A_MC7_REF);	/* flush */

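	/*
	 * Enable ECC generation/checking, then sweep the whole memory with
	 * the BIST engine so every location (and its ECC bits) starts out
	 * with a known value; V_OP(1) appears to select a write pass.
	 */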
	t3_write_reg(adapter, mc7->offset + A_MC7_ECC, F_ECCGENEN | F_ECCCHKEN);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_DATA, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_BEG, 0);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_ADDR_END,
		     (mc7->size << width) - 1);
	t3_write_reg(adapter, mc7->offset + A_MC7_BIST_OP, V_OP(1));
	t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);	/* flush */

	attempts = 50;
	do {
		msleep(250);
		val = t3_read_reg(adapter, mc7->offset + A_MC7_BIST_OP);
	} while ((val & F_BUSY) && --attempts);
	if (val & F_BUSY) {
		CH_ERR(adapter, "%s MC7 BIST timed out\n", mc7->name);
		goto out_fail;
	}

	/* Enable normal memory accesses. */
	t3_set_reg_field(adapter, mc7->offset + A_MC7_CFG, 0, F_RDY);
	return 0;

out_fail:
	return -1;
}

static void config_pcie(struct adapter *adap)
{
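	/*
	 * Ack-latency and replay-timer limits, indexed by
	 * [log2(link width)][max-payload-size code], as computed below.
	 * The units are presumably PCIe symbol times, but only the relative
	 * scaling matters here.
	 */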
	static const u16 ack_lat[4][6] = {
		{237, 416, 559, 1071, 2095, 4143},
		{128, 217, 289, 545, 1057, 2081},
		{73, 118, 154, 282, 538, 1050},
		{67, 107, 86, 150, 278, 534}
	};
	static const u16 rpl_tmr[4][6] = {
		{711, 1248, 1677, 3213, 6285, 12429},
		{384, 651, 867, 1635, 3171, 6243},
		{219, 354, 462, 846, 1614, 3150},
		{201, 321, 258, 450, 834, 1602}
	};

	u16 val, devid;
	unsigned int log2_width, pldsize;
	unsigned int fst_trn_rx, fst_trn_tx, acklat, rpllmt;

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_DEVCTL,
			     &val);
	pldsize = (val & PCI_EXP_DEVCTL_PAYLOAD) >> 5;

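	/*
	 * Config word 0x2 is the PCI device ID.  Parts reporting ID 0x37 are
	 * forced down to the minimum payload and read-request sizes; the
	 * reason for this quirk is not documented here.
	 */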
	pci_read_config_word(adap->pdev, 0x2, &devid);
	if (devid == 0x37) {
		pci_write_config_word(adap->pdev,
				      adap->params.pci.pcie_cap_addr +
				      PCI_EXP_DEVCTL,
				      val & ~PCI_EXP_DEVCTL_READRQ &
				      ~PCI_EXP_DEVCTL_PAYLOAD);
		pldsize = 0;
	}

	pci_read_config_word(adap->pdev,
			     adap->params.pci.pcie_cap_addr + PCI_EXP_LNKCTL,
			     &val);

	fst_trn_tx = G_NUMFSTTRNSEQ(t3_read_reg(adap, A_PCIE_PEX_CTRL0));
	fst_trn_rx = adap->params.rev == 0 ? fst_trn_tx :
	    G_NUMFSTTRNSEQRX(t3_read_reg(adap, A_PCIE_MODE));
	log2_width = fls(adap->params.pci.width) - 1;
	acklat = ack_lat[log2_width][pldsize];
	if (val & 1)		/* check LOsEnable */
		acklat += fst_trn_tx * 4;
	rpllmt = rpl_tmr[log2_width][pldsize] + fst_trn_rx * 4;

	if (adap->params.rev == 0)
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1,
				 V_T3A_ACKLAT(M_T3A_ACKLAT),
				 V_T3A_ACKLAT(acklat));
	else
		t3_set_reg_field(adap, A_PCIE_PEX_CTRL1, V_ACKLAT(M_ACKLAT),
				 V_ACKLAT(acklat));

	t3_set_reg_field(adap, A_PCIE_PEX_CTRL0, V_REPLAYLMT(M_REPLAYLMT),
			 V_REPLAYLMT(rpllmt));

	t3_write_reg(adap, A_PCIE_PEX_ERR, 0xffffffff);
	t3_set_reg_field(adap, A_PCIE_CFG, 0,
			 F_ENABLELINKDWNDRST | F_ENABLELINKDOWNRST |
			 F_PCIE_DMASTOPEN | F_PCIE_CLIDECEN);
}

/*
 * Initialize and configure T3 HW modules.  This performs the
 * initialization steps that need to be done once after a card is reset.
 * MAC and PHY initialization is handled separately whenever a port is
 * enabled.
 *
 * fw_params are passed to FW and their value is platform dependent.  Only the
 * top 8 bits are available for use, the rest must be 0.
 */
int t3_init_hw(struct adapter *adapter, u32 fw_params)
{
	int err = -EIO, attempts, i;
	const struct vpd_params *vpd = &adapter->params.vpd;

	if (adapter->params.rev > 0)
		calibrate_xgm_t3b(adapter);
	else if (calibrate_xgm(adapter))
		goto out_err;

	if (vpd->mclk) {
		partition_mem(adapter, &adapter->params.tp);

		if (mc7_init(&adapter->pmrx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->pmtx, vpd->mclk, vpd->mem_timing) ||
		    mc7_init(&adapter->cm, vpd->mclk, vpd->mem_timing) ||
		    t3_mc5_init(&adapter->mc5, adapter->params.mc5.nservers,
				adapter->params.mc5.nfilters,
				adapter->params.mc5.nroutes))
			goto out_err;

		for (i = 0; i < 32; i++)
			if (clear_sge_ctxt(adapter, i, F_CQ))
				goto out_err;
	}

	if (tp_init(adapter, &adapter->params.tp))
		goto out_err;

	t3_tp_set_coalescing_size(adapter,
				  min(adapter->params.sge.max_pkt_size,
				      MAX_RX_COALESCING_LEN), 1);
	t3_tp_set_max_rxsize(adapter,
			     min(adapter->params.sge.max_pkt_size, 16384U));
	ulp_config(adapter, &adapter->params.tp);

	if (is_pcie(adapter))
		config_pcie(adapter);
	else
		t3_set_reg_field(adapter, A_PCIX_CFG, 0,
				 F_DMASTOPEN | F_CLIDECEN);

	if (adapter->params.rev == T3_REV_C)
		t3_set_reg_field(adapter, A_ULPTX_CONFIG, 0,
				 F_CFG_CQE_SOP_MASK);

	t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
	t3_write_reg(adapter, A_PM1_RX_MODE, 0);
	t3_write_reg(adapter, A_PM1_TX_MODE, 0);
	chan_init_hw(adapter, adapter->params.chan_map);
	t3_sge_init(adapter, &adapter->params.sge);

	t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));

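	/*
	 * Hand the firmware parameters to the embedded uP and point it at
	 * the firmware image in flash.  The loop below then waits for the
	 * uP to clear A_CIM_HOST_ACC_DATA, which signals that it has
	 * finished initializing.
	 */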
	t3_write_reg(adapter, A_CIM_HOST_ACC_DATA, vpd->uclk | fw_params);
	t3_write_reg(adapter, A_CIM_BOOT_CFG,
		     V_BOOTADDR(FW_FLASH_BOOT_ADDR >> 2));
	t3_read_reg(adapter, A_CIM_BOOT_CFG);	/* flush */

	attempts = 100;
	do {			/* wait for uP to initialize */
		msleep(20);
	} while (t3_read_reg(adapter, A_CIM_HOST_ACC_DATA) && --attempts);
	if (!attempts) {
		CH_ERR(adapter, "uP initialization timed out\n");
		goto out_err;
	}

	err = 0;
out_err:
	return err;
}

/**
 * get_pci_mode - determine a card's PCI mode
 * @adapter: the adapter
 * @p: where to store the PCI settings
 *
 * Determines a card's PCI mode and associated parameters, such as speed
 * and width.
 */
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	static unsigned short speed_map[] = { 33, 66, 100, 133 };
	u32 pci_mode, pcie_cap;

	pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
	if (pcie_cap) {
		u16 val;

		p->variant = PCI_VARIANT_PCIE;
		p->pcie_cap_addr = pcie_cap;
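		/* The negotiated link width is in bits 9:4 of PCI_EXP_LNKSTA. */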
		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
				     &val);
		p->width = (val >> 4) & 0x3f;
		return;
	}

	pci_mode = t3_read_reg(adapter, A_PCIX_MODE);
	p->speed = speed_map[G_PCLKRANGE(pci_mode)];
	p->width = (pci_mode & F_64BIT) ? 64 : 32;
	pci_mode = G_PCIXINITPAT(pci_mode);
	if (pci_mode == 0)
		p->variant = PCI_VARIANT_PCI;
	else if (pci_mode < 4)
		p->variant = PCI_VARIANT_PCIX_MODE1_PARITY;
	else if (pci_mode < 8)
		p->variant = PCI_VARIANT_PCIX_MODE1_ECC;
	else
		p->variant = PCI_VARIANT_PCIX_266_MODE2;
}

/**
 * init_link_config - initialize a link's SW state
 * @lc: structure holding the link state
 * @caps: the link's capability bitmap (SUPPORTED_* values)
 *
 * Initializes the SW state maintained for each link, including the link's
 * capabilities and default speed/duplex/flow-control/autonegotiation
 * settings.
 */
static void init_link_config(struct link_config *lc, unsigned int caps)
{
	lc->supported = caps;
	lc->requested_speed = lc->speed = SPEED_INVALID;
	lc->requested_duplex = lc->duplex = DUPLEX_INVALID;
	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
	if (lc->supported & SUPPORTED_Autoneg) {
		lc->advertising = lc->supported;
		lc->autoneg = AUTONEG_ENABLE;
		lc->requested_fc |= PAUSE_AUTONEG;
	} else {
		lc->advertising = 0;
		lc->autoneg = AUTONEG_DISABLE;
	}
}

/**
 * mc7_calc_size - calculate MC7 memory size
 * @cfg: the MC7 configuration
 *
 * Calculates the size of an MC7 memory in bytes from the value of its
 * configuration register.
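 *
 * The size in MB works out to (256 << density) * banks / (org << width),
 * all taken from encoded fields of @cfg.  As an illustration with made-up
 * field values: density 1, two banks, org 1 and width code 2 give
 * (512 * 2) / 4 = 256 MB.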
 */
static unsigned int mc7_calc_size(u32 cfg)
{
	unsigned int width = G_WIDTH(cfg);
	unsigned int banks = !!(cfg & F_BKS) + 1;
	unsigned int org = !!(cfg & F_ORG) + 1;
	unsigned int density = G_DEN(cfg);
	unsigned int MBs = ((256 << density) * banks) / (org << width);

	return MBs << 20;
}

static void mc7_prep(struct adapter *adapter, struct mc7 *mc7,
		     unsigned int base_addr, const char *name)
{
	u32 cfg;

	mc7->adapter = adapter;
	mc7->name = name;
	mc7->offset = base_addr - MC7_PMRX_BASE_ADDR;
	cfg = t3_read_reg(adapter, mc7->offset + A_MC7_CFG);
	mc7->size = G_DEN(cfg) == M_DEN ? 0 : mc7_calc_size(cfg);
	mc7->width = G_WIDTH(cfg);
}

void mac_prep(struct cmac *mac, struct adapter *adapter, int index)
{
	u16 devid;

	mac->adapter = adapter;
	pci_read_config_word(adapter->pdev, 0x2, &devid);

	if (devid == 0x37 && !adapter->params.vpd.xauicfg[1])
		index = 0;
	mac->offset = (XGMAC0_1_BASE_ADDR - XGMAC0_0_BASE_ADDR) * index;
	mac->nucast = 1;

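	/*
	 * Rev-0 boards with XAUI need their SERDES set up by the driver;
	 * the two magic constants below presumably select 10G vs. 1G SERDES
	 * settings, and RGMII is turned off on the port since XAUI is in
	 * use.
	 */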
	if (adapter->params.rev == 0 && uses_xaui(adapter)) {
		t3_write_reg(adapter, A_XGM_SERDES_CTRL + mac->offset,
			     is_10G(adapter) ? 0x2901c04 : 0x2301c04);
		t3_set_reg_field(adapter, A_XGM_PORT_CFG + mac->offset,
				 F_ENRGMII, 0);
	}
}

void early_hw_init(struct adapter *adapter, const struct adapter_info *ai)
{
	u32 val = V_PORTSPEED(is_10G(adapter) ? 3 : 2);

	mi1_init(adapter, ai);
	t3_write_reg(adapter, A_I2C_CFG,	/* set for 80KHz */
		     V_I2C_CLKDIV(adapter->params.vpd.cclk / 80 - 1));
	t3_write_reg(adapter, A_T3DBG_GPIO_EN,
		     ai->gpio_out | F_GPIO0_OEN | F_GPIO0_OUT_VAL);
	t3_write_reg(adapter, A_MC5_DB_SERVER_INDEX, 0);
	t3_write_reg(adapter, A_SG_OCO_BASE, V_BASE1(0xfff));

	if (adapter->params.rev == 0 || !uses_xaui(adapter))
		val |= F_ENRGMII;

	/* Enable MAC clocks so we can access the registers */
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);

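	/*
	 * Now that the MAC clocks are running, toggle F_CLKDIVRESET_ on both
	 * MAC instances (the trailing underscore suggests an active-low
	 * reset, so setting the bit releases it); the second write reaches
	 * XGMAC1 through XGM_REG().
	 */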
	val |= F_CLKDIVRESET_;
	t3_write_reg(adapter, A_XGM_PORT_CFG, val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
	t3_write_reg(adapter, XGM_REG(A_XGM_PORT_CFG, 1), val);
	t3_read_reg(adapter, A_XGM_PORT_CFG);
}

/*
 * Reset the adapter.
 * Older PCIe cards lose their config space during reset, PCI-X
 * ones don't.
 */
int t3_reset_adapter(struct adapter *adapter)
{
	int i, save_and_restore_pcie =
	    adapter->params.rev < T3_REV_B2 && is_pcie(adapter);
	uint16_t devid = 0;

	if (save_and_restore_pcie)
		pci_save_state(adapter->pdev);
	t3_write_reg(adapter, A_PL_RST, F_CRSTWRM | F_CRSTWRMMODE);

	/*
	 * Give the device some time to reset fully: poll the vendor ID
	 * until it reads back as Chelsio's 0x1425, which indicates that
	 * config space is accessible again.
	 * XXX The delay parameters could use tuning.
	 */
	for (i = 0; i < 10; i++) {
		msleep(50);
		pci_read_config_word(adapter->pdev, 0x00, &devid);
		if (devid == 0x1425)
			break;
	}

	if (devid != 0x1425)
		return -1;

	if (save_and_restore_pcie)
		pci_restore_state(adapter->pdev);
	return 0;
}

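/*
 * Seed parity-protected on-chip memories with known-good values: clear a
 * range of SGE egress and response-queue contexts and write zeros through
 * the CIM IBQ debug interface, so that later reads don't trip parity errors
 * on never-written locations.
 */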
static int init_parity(struct adapter *adap)
{
	int i, err, addr;

	if (t3_read_reg(adap, A_SG_CONTEXT_CMD) & F_CONTEXT_CMD_BUSY)
		return -EBUSY;

	for (err = i = 0; !err && i < 16; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0xfff0; !err && i <= 0xffff; i++)
		err = clear_sge_ctxt(adap, i, F_EGRESS);
	for (i = 0; !err && i < SGE_QSETS; i++)
		err = clear_sge_ctxt(adap, i, F_RESPONSEQ);
	if (err)
		return err;

	t3_write_reg(adap, A_CIM_IBQ_DBG_DATA, 0);
	for (i = 0; i < 4; i++)
		for (addr = 0; addr <= M_IBQDBGADDR; addr++) {
			t3_write_reg(adap, A_CIM_IBQ_DBG_CFG, F_IBQDBGEN |
				     F_IBQDBGWR | V_IBQDBGQID(i) |
				     V_IBQDBGADDR(addr));
			err = t3_wait_op_done(adap, A_CIM_IBQ_DBG_CFG,
					      F_IBQDBGBUSY, 0, 2, 1);
			if (err)
				return err;
		}
	return 0;
}

/*
 * Initialize adapter SW state for the various HW modules, set initial values
 * for some adapter tunables, take PHYs out of reset, and initialize the MDIO
 * interface.
 */
int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
		    int reset)
{
	int ret;
	unsigned int i, j = -1;

	get_pci_mode(adapter, &adapter->params.pci);

	adapter->params.info = ai;
	adapter->params.nports = ai->nports0 + ai->nports1;
	adapter->params.chan_map = (!!ai->nports0) | (!!ai->nports1 << 1);
	adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
	/*
	 * We used to only run the "adapter check task" once a second if
	 * we had PHYs which didn't support interrupts (we would check
	 * their link status once a second).  Now we check other conditions
	 * in that routine which could potentially impose a very high
	 * interrupt load on the system.  As such, we now always scan the
	 * adapter state once a second ...
	 */
	adapter->params.linkpoll_period = 10;
	adapter->params.stats_update_period = is_10G(adapter) ?
	    MAC_STATS_ACCUM_SECS : (MAC_STATS_ACCUM_SECS * 10);
	adapter->params.pci.vpd_cap_addr =
	    pci_find_capability(adapter->pdev, PCI_CAP_ID_VPD);
	ret = get_vpd_params(adapter, &adapter->params.vpd);
	if (ret < 0)
		return ret;

	if (reset && t3_reset_adapter(adapter))
		return -1;

	t3_sge_prep(adapter, &adapter->params.sge);

	if (adapter->params.vpd.mclk) {
		struct tp_params *p = &adapter->params.tp;

		mc7_prep(adapter, &adapter->pmrx, MC7_PMRX_BASE_ADDR, "PMRX");
		mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
		mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");

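		/*
		 * Derive per-channel payload-memory sizes and page-pool
		 * geometry from the MC7 sizes probed above; Tx memory is
		 * split across the active channels.
		 */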
		p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
		p->pmrx_size = t3_mc7_size(&adapter->pmrx);
		p->pmtx_size = t3_mc7_size(&adapter->pmtx);
		p->cm_size = t3_mc7_size(&adapter->cm);
		p->chan_rx_size = p->pmrx_size / 2;	/* only 1 Rx channel */
		p->chan_tx_size = p->pmtx_size / p->nchan;
		p->rx_pg_size = 64 * 1024;
		p->tx_pg_size = is_10G(adapter) ? 64 * 1024 : 16 * 1024;
		p->rx_num_pgs = pm_num_pages(p->chan_rx_size, p->rx_pg_size);
		p->tx_num_pgs = pm_num_pages(p->chan_tx_size, p->tx_pg_size);
		p->ntimer_qs = p->cm_size >= (128 << 20) ||
		    adapter->params.rev > 0 ? 12 : 6;
	}

	adapter->params.offload = t3_mc7_size(&adapter->pmrx) &&
				  t3_mc7_size(&adapter->pmtx) &&
				  t3_mc7_size(&adapter->cm);

	if (is_offload(adapter)) {
		adapter->params.mc5.nservers = DEFAULT_NSERVERS;
		adapter->params.mc5.nfilters = adapter->params.rev > 0 ?
		    DEFAULT_NFILTERS : 0;
		adapter->params.mc5.nroutes = 0;
		t3_mc5_prep(adapter, &adapter->mc5, MC5_MODE_144_BIT);

		init_mtus(adapter->params.mtus);
		init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
	}

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		u8 hw_addr[6];
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		if (!pti->phy_prep) {
			CH_ALERT(adapter, "Invalid port type index %d\n",
				 adapter->params.vpd.port_type[j]);
			return -EINVAL;
		}

		p->phy.mdio.dev = adapter->port[i];
		ret = pti->phy_prep(&p->phy, adapter, ai->phy_base_addr + j,
				    ai->mdio_ops);
		if (ret)
			return ret;
		mac_prep(&p->mac, adapter, j);

		/*
		 * The VPD EEPROM stores the base Ethernet address for the
		 * card.  A port's address is derived from the base by adding
		 * the port's index to the base's low octet.
		 */
		memcpy(hw_addr, adapter->params.vpd.eth_base, 5);
		hw_addr[5] = adapter->params.vpd.eth_base[5] + i;

		memcpy(adapter->port[i]->dev_addr, hw_addr, ETH_ALEN);
		memcpy(adapter->port[i]->perm_addr, hw_addr, ETH_ALEN);
		init_link_config(&p->link_config, p->phy.caps);
		p->phy.ops->power_down(&p->phy, 1);

		/*
		 * If the PHY doesn't support interrupts for link status
		 * changes, schedule a scan of the adapter links at least
		 * once a second.
		 */
		if (!(p->phy.caps & SUPPORTED_IRQ) &&
		    adapter->params.linkpoll_period > 10)
			adapter->params.linkpoll_period = 10;
	}

	return 0;
}

void t3_led_ready(struct adapter *adapter)
{
	t3_set_reg_field(adapter, A_T3DBG_GPIO_EN, F_GPIO0_OUT_VAL,
			 F_GPIO0_OUT_VAL);
}

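/*
 * Redo the HW initialization steps that don't require re-reading the VPD,
 * reusing the parameters captured by an earlier t3_prep_adapter().  This
 * appears intended for paths such as PCI error recovery, where the chip has
 * been reset but the driver's software state is still valid.
 */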
int t3_replay_prep_adapter(struct adapter *adapter)
{
	const struct adapter_info *ai = adapter->params.info;
	unsigned int i, j = -1;
	int ret;

	early_hw_init(adapter, ai);
	ret = init_parity(adapter);
	if (ret)
		return ret;

	for_each_port(adapter, i) {
		const struct port_type_info *pti;
		struct port_info *p = adap2pinfo(adapter, i);

		while (!adapter->params.vpd.port_type[++j])
			;

		pti = &port_types[adapter->params.vpd.port_type[j]];
		ret = pti->phy_prep(&p->phy, adapter, p->phy.mdio.prtad, NULL);
		if (ret)
			return ret;
		p->phy.ops->power_down(&p->phy, 1);
	}

	return 0;
}
