blob: 8acc655ec1e82e911c88cea2cbe8340cb2ab89d2 [file] [log] [blame]
/* bnx2.c: Broadcom NX2 network driver.
*
* Copyright (c) 2004, 2005 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
* Written by: Michael Chan (mchan@broadcom.com)
*/
#include "bnx2.h"
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.2.19"
#define DRV_MODULE_RELDATE "May 23, 2005"
#define RUN_AT(x) (jiffies + (x))
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT (5*HZ)
static char version[] __devinitdata =
"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
/* Board types supported by this driver.  Used as the driver_data value
 * in bnx2_pci_tbl[] and as an index into board_info[] below, so the
 * two tables must stay in the same order.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
} board_t;
/* indexed by board_t, above */
/* Human-readable board names, indexed by board_t above. */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ 0 },
};
/* PCI IDs handled by this driver.  HP OEM subsystem IDs are matched
 * before the PCI_ANY_ID wildcard entries so the branded name is used;
 * the last column is the board_t passed as driver_data.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ 0, }
};
/* Table of supported NVRAM/flash parts.  Field layout follows
 * struct flash_spec (declared in bnx2.h — confirm field order there);
 * the first entry of each row presumably encodes the strapping value
 * used to identify the part, followed by controller configuration
 * words, page geometry, addressing mask, total size, and a name.
 */
static struct flash_spec flash_table[] =
{
	/* Slow EEPROM */
	{0x00000000, 0x40030380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Fast EEPROM */
	{0x02000000, 0x62008380, 0x009f0081, 0xa184a053, 0xaf000400,
	 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x02000003, 0x6e008173, 0x00570081, 0x68848353, 0xaf000400,
	 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x01000003, 0x5f008081, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x00000001, 0x47008081, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x00000003, 0x4f008081, 0x00050081, 0x03840253, 0xaf020406,
	 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
};
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
/* Indirect register read: select the target offset through the PCICFG
 * register window, then read the windowed data register.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	return val;
}
/* Indirect register write: the window address register must be written
 * before the data register; the order of the two REG_WR calls matters.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
}
/* Write one 32-bit word into on-chip context memory at
 * cid_addr + offset, via the CTX data-address/data register pair.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	u32 ctx_addr = cid_addr + offset;

	REG_WR(bp, BNX2_CTX_DATA_ADR, ctx_addr);
	REG_WR(bp, BNX2_CTX_DATA, val);
}
/* Read a PHY register over MDIO.
 *
 * If hardware auto-polling of the PHY is enabled, it is temporarily
 * turned off around the manual MDIO transaction and restored before
 * returning.  The read command is polled for completion up to
 * 50 x 10us; on timeout *val is zeroed and -EBUSY is returned,
 * otherwise *val holds the 16-bit register data and 0 is returned.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Disable auto-polling; the read-back flushes the write
		 * before the settling delay. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and issue the MDIO read command (PHY address in
	 * bits 21+, register in bits 16+). */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for the BUSY bit to clear, then re-read to pick up the
	 * returned data bits. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		/* Transaction never completed. */
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
/* Write a PHY register over MDIO.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * transaction and restored afterwards.  The write command is polled for
 * completion up to 50 x 10us; returns 0 on success or -EBUSY if the
 * BUSY bit never cleared.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Disable auto-polling for the duration of the write. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Compose and issue the MDIO write command with the 16-bit data
	 * in the low bits. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
        	ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
/* Mask device interrupts.  The trailing read flushes the posted write
 * so the mask takes effect before the caller proceeds.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
/* Unmask device interrupts, acknowledging up to the last status block
 * index seen, then kick the host coalescing block (COAL_NOW) so any
 * pending events generate an interrupt immediately.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	val = REG_RD(bp, BNX2_HC_COMMAND);
	REG_WR(bp, BNX2_HC_COMMAND, val | BNX2_HC_COMMAND_COAL_NOW);
}
/* Disable interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so a racing ISR sees interrupts as disabled;
 * bnx2_netif_start() decrements it again.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the transmit queue.  trans_start is refreshed so
 * the stopped queue does not trigger a spurious tx watchdog timeout.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		netif_poll_disable(bp->dev);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies;	/* prevent tx timeout */
	}
}
/* Undo bnx2_netif_stop().  Interrupts are only re-enabled when the
 * intr_sem nesting count drops to zero, so paired stop/start calls
 * may nest safely.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			netif_poll_enable(bp->dev);
			bnx2_enable_int(bp);
		}
	}
}
/* Free everything allocated by bnx2_alloc_mem(): the DMA-consistent
 * status/statistics blocks and tx/rx descriptor rings, plus the
 * kmalloc'd shadow buffer-tracking arrays.  Safe on a partially
 * allocated state — every pointer is checked or freed via kfree()
 * (which tolerates NULL) and then reset to NULL so a second call
 * is harmless.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	if (bp->stats_blk) {
		pci_free_consistent(bp->pdev, sizeof(struct statistics_block),
				    bp->stats_blk, bp->stats_blk_mapping);
		bp->stats_blk = NULL;
	}
	if (bp->status_blk) {
		pci_free_consistent(bp->pdev, sizeof(struct status_block),
				    bp->status_blk, bp->status_blk_mapping);
		bp->status_blk = NULL;
	}
	if (bp->tx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct tx_bd) * TX_DESC_CNT,
				    bp->tx_desc_ring, bp->tx_desc_mapping);
		bp->tx_desc_ring = NULL;
	}
	/* kfree(NULL) is a no-op, so no guard is needed here. */
	kfree(bp->tx_buf_ring);
	bp->tx_buf_ring = NULL;
	if (bp->rx_desc_ring) {
		pci_free_consistent(bp->pdev,
				    sizeof(struct rx_bd) * RX_DESC_CNT,
				    bp->rx_desc_ring, bp->rx_desc_mapping);
		bp->rx_desc_ring = NULL;
	}
	kfree(bp->rx_buf_ring);
	bp->rx_buf_ring = NULL;
}
/* Allocate all ring and block memory used by the device:
 * kmalloc'd shadow arrays (tx/rx_buf_ring) tracking the skbs behind
 * each descriptor, DMA-consistent tx/rx descriptor rings, and the
 * DMA-consistent status and statistics blocks shared with the chip.
 *
 * Returns 0 on success or -ENOMEM; on any failure everything already
 * allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	bp->tx_buf_ring = kmalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	memset(bp->tx_buf_ring, 0, sizeof(struct sw_bd) * TX_DESC_CNT);
	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
					        sizeof(struct tx_bd) *
						TX_DESC_CNT,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->rx_buf_ring = kmalloc(sizeof(struct sw_bd) * RX_DESC_CNT,
				  GFP_KERNEL);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT);
	bp->rx_desc_ring = pci_alloc_consistent(bp->pdev,
					        sizeof(struct rx_bd) *
						RX_DESC_CNT,
						&bp->rx_desc_mapping);
	if (bp->rx_desc_ring == NULL)
		goto alloc_mem_err;

	bp->status_blk = pci_alloc_consistent(bp->pdev,
					      sizeof(struct status_block),
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, sizeof(struct status_block));

	bp->stats_blk = pci_alloc_consistent(bp->pdev,
					     sizeof(struct statistics_block),
					     &bp->stats_blk_mapping);
	if (bp->stats_blk == NULL)
		goto alloc_mem_err;

	memset(bp->stats_blk, 0, sizeof(struct statistics_block));

	return 0;

alloc_mem_err:
	/* Unwind partial allocations. */
	bnx2_free_mem(bp);
	return -ENOMEM;
}
/* Log the current link state (speed, duplex, flow control) and update
 * the carrier state accordingly.  Output is built with several
 * consecutive printk() calls forming a single logical line.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
/* Determine the effective flow-control configuration (bp->flow_ctrl).
 *
 * If speed or flow control is not autonegotiated, the requested value
 * is used directly (full duplex only).  Otherwise the result is
 * resolved from the local and link-partner pause advertisements per
 * the table in IEEE 802.3ab-1999 (28B-3).  SerDes 1000X pause bits are
 * first translated to the equivalent copper PAUSE_CAP/PAUSE_ASYM bits
 * so one resolution path handles both PHY types.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful on full-duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		/* Map 1000X pause bits onto the copper-style bits. */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
	                if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
/* Record link parameters for a SerDes PHY that has just linked up.
 * Speed is always 1000 Mbps; duplex comes from BMCR, and is refined
 * from the negotiated 1000X ability bits when autoneg is enabled.
 * Always returns 0.
 */
static int
bnx2_serdes_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bp->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF;

	/* Forced mode: BMCR duplex is authoritative. */
	if (!(bmcr & BMCR_ANENABLE))
		return 0;

	/* Autoneg: resolve duplex from the common 1000X abilities. */
	bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
	bnx2_read_phy(bp, MII_LPA, &remote_adv);
	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL))
		bp->duplex = (common & ADVERTISE_1000XFULL) ?
			DUPLEX_FULL : DUPLEX_HALF;

	return 0;
}
/* Record link parameters for a copper PHY that has linked up.
 *
 * With autoneg enabled, the highest common ability is resolved first
 * from the 1000BASE-T registers (MII_CTRL1000 vs MII_STAT1000, whose
 * link-partner bits sit two positions higher, hence the >> 2), then
 * from the 10/100 advertisement registers.  With autoneg disabled,
 * speed and duplex are taken directly from the forced BMCR bits.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Align partner's 1000BASE-T bits with our advertised bits. */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
			bnx2_read_phy(bp, MII_LPA, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability found. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced speed/duplex straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
/* Program the EMAC to match the resolved link parameters:
 * inter-packet gap / slot time, port mode (MII vs GMII), duplex, and
 * rx/tx PAUSE enables, then acknowledge the link-change attention.
 * Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths; 1000HD needs a larger value (0x26ff). */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {

		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK);

	if (bp->link_up) {
		/* MII for 10/100, GMII for gigabit. */
		if (bp->line_speed != SPEED_1000)
			val |= BNX2_EMAC_MODE_PORT_MII;
		else
			val |= BNX2_EMAC_MODE_PORT_GMII;
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
/* Re-evaluate link state after a link event.
 *
 * Reads BMSR twice (link status is latched-low, so the first read
 * clears a stale latch).  On 5706 SerDes the EMAC link status is used
 * instead of BMSR.  On link up the per-PHY-type resolution helpers are
 * run and flow control resolved; on link down autoneg is re-enabled on
 * SerDes if it was forced off.  Reports only on state change, then
 * reprograms the MAC.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK) {
		/* In MAC loopback the link is always considered up. */
		bp->link_up = 1;
		return 0;
	}

	link_up = bp->link_up;

	/* Two reads: first clears the latched-low link bit. */
	bnx2_read_phy(bp, MII_BMSR, &bmsr);
	bnx2_read_phy(bp, MII_BMSR, &bmsr);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		/* 5706 SerDes: trust the EMAC link indication instead. */
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & PHY_SERDES_FLAG) {
			bnx2_serdes_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
			(bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			/* Re-enable autoneg so the link can come back. */
			bnx2_read_phy(bp, MII_BMCR, &bmcr);
			if (!(bmcr & BMCR_ANENABLE)) {
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANENABLE);
			}
		}

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
/* Issue a PHY soft reset via BMCR and poll (up to 100 x 10us) for the
 * self-clearing RESET bit to drop.  Returns 0 on success, -EBUSY if
 * the reset never completed.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
#define PHY_RESET_MAX_WAIT 100
	u32 reg;
	int i;

	bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);

	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, MII_BMCR, &reg);
		if (!(reg & BMCR_RESET)) {
			/* Small settling delay after reset completes. */
			udelay(20);
			break;
		}
	}

	return (i == PHY_RESET_MAX_WAIT) ? -EBUSY : 0;
}
/* Translate the requested flow-control mode (bp->req_flow_ctrl) into
 * the pause advertisement bits to program into the PHY, using the
 * 1000X bit definitions for SerDes and the standard copper bits
 * otherwise.  Returns 0 when no flow control is requested.
 */
static u32
bnx2_phy_get_pause_adv(struct bnx2 *bp)
{
	u32 flow = bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX);
	int serdes = (bp->phy_flags & PHY_SERDES_FLAG) != 0;

	if (flow == (FLOW_CTRL_RX | FLOW_CTRL_TX))
		return serdes ? ADVERTISE_1000XPAUSE : ADVERTISE_PAUSE_CAP;

	if (flow & FLOW_CTRL_TX)
		return serdes ? ADVERTISE_1000XPSE_ASYM : ADVERTISE_PAUSE_ASYM;

	if (flow & FLOW_CTRL_RX)
		return serdes ?
			(ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM) :
			(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);

	return 0;
}
/* Apply the requested speed/duplex/pause settings to a SerDes PHY.
 *
 * Forced mode: build a new BMCR (autoneg off, 1000 Mbps, requested
 * duplex); if it differs from the current value, first withdraw the
 * 1000X advertisements and restart autoneg so the peer sees the link
 * drop, then write the forced BMCR.
 *
 * Autoneg mode: build the new advertisement (1000XFULL plus pause
 * bits); if it or the autoneg enable differs from the PHY's current
 * state, force the link down briefly (loopback + ~11ms wait) and
 * restart autoneg, arming serdes_an_pending for the timer-based
 * autoneg timeout.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;

		bnx2_read_phy(bp, MII_BMCR, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;
		if (bp->req_duplex == DUPLEX_FULL) {
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if (new_bmcr != bmcr) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_read_phy(bp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					ADVERTISE_1000XHALF);
				bnx2_write_phy(bp, MII_ADVERTISE, adv);
				bnx2_write_phy(bp, MII_BMCR, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
			}
			bnx2_write_phy(bp, MII_BMCR, new_bmcr);
		}
		return 0;
	}

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, MII_ADVERTISE, &adv);
	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			int i;

			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 110; i++) {
				udelay(100);
			}
		}

		bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
		bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Arm the autoneg timeout counted down by the driver timer. */
		bp->serdes_an_pending = SERDES_AN_TIMEOUT / bp->timer_interval;
	}

	return 0;
}
/* Convenience masks: ethtool ADVERTISED_* speed sets per media type,
 * and the MII ADVERTISE_* register bit sets for 10/100 and 1000 Mbps.
 */
#define ETHTOOL_ALL_FIBRE_SPEED \
	(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED \
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
/* Apply the requested speed/duplex/pause settings to a copper PHY.
 *
 * Autoneg mode: rebuild the 10/100 and 1000BASE-T advertisement
 * registers from bp->advertising plus the pause bits; restart autoneg
 * only if something actually changed, otherwise just re-resolve flow
 * control in case it flipped between auto and forced.
 *
 * Forced mode: build the forced BMCR; if it changed, drop the link
 * via loopback (polling BMSR until the latched link bit clears, max
 * ~62ms), write the new BMCR, and — since the link may not bounce —
 * apply the new speed/duplex immediately when it was up.
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, MII_BMCR, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool advertising mask to MII register bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;
		int i = 0;

		/* Double-read: BMSR link status is latched-low. */
		bnx2_read_phy(bp, MII_BMSR, &bmsr);
		bnx2_read_phy(bp, MII_BMSR, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
			do {
				udelay(100);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				bnx2_read_phy(bp, MII_BMSR, &bmsr);
				i++;
			} while ((bmsr & BMSR_LSTATUS) && (i < 620));
		}

		bnx2_write_phy(bp, MII_BMCR, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	}
	return 0;
}
/* Dispatch PHY configuration to the SerDes or copper helper.
 * A no-op (returning 0) when the device is in MAC loopback, since no
 * real PHY programming is wanted then.
 */
static int
bnx2_setup_phy(struct bnx2 *bp)
{
	if (bp->loopback == MAC_LOOPBACK)
		return 0;

	if (bp->phy_flags & PHY_SERDES_FLAG)
		return bnx2_setup_serdes_phy(bp);

	return bnx2_setup_copper_phy(bp);
}
/* One-time SerDes PHY initialization.
 *
 * Clears the parallel-detect flag, applies a 5706-specific register
 * write (BNX2_MISC_UNUSED0), and programs shadow PHY registers 0x18
 * and 0x1c depending on whether jumbo MTU (> 1500) is in use — the
 * jumbo path sets the extended packet length bit, the standard path
 * clears it.  Always returns 0.
 */
static int
bnx2_init_serdes_phy(struct bnx2 *bp)
{
	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706) {
		/* 5706-specific workaround/setup value; see chip errata. */
        	REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
	}

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
/* One-time copper PHY initialization.
 *
 * Marks the PHY as needing the CRC workaround and applies the
 * corresponding shadow-register write sequence (registers 0x15/0x17/
 * 0x18 — vendor-documented fixup values).  Then programs the extended
 * packet length bit in registers 0x18/0x10 according to whether jumbo
 * MTU (> 1500) is in use.  Always returns 0.
 *
 * Note: the original code tested PHY_CRC_FIX_FLAG immediately after
 * setting it unconditionally, so the test was always true and has
 * been removed.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	bp->phy_flags |= PHY_CRC_FIX_FLAG;

	/* CRC workaround shadow-register sequence. */
	bnx2_write_phy(bp, 0x18, 0x0c00);
	bnx2_write_phy(bp, 0x17, 0x000a);
	bnx2_write_phy(bp, 0x15, 0x310b);
	bnx2_write_phy(bp, 0x17, 0x201f);
	bnx2_write_phy(bp, 0x15, 0x9506);
	bnx2_write_phy(bp, 0x17, 0x401f);
	bnx2_write_phy(bp, 0x15, 0x14e2);
	bnx2_write_phy(bp, 0x18, 0x0400);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	return 0;
}
/* Full PHY bring-up: select link-ready interrupt mode, enable the
 * EMAC link attention, reset the PHY, read and store the 32-bit PHY
 * ID from PHYSID1/PHYSID2, run the media-specific init, and finally
 * apply the current configuration via bnx2_setup_phy().
 * Returns the media-specific init helper's result.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	bnx2_reset_phy(bp);

	/* Assemble the PHY ID: PHYSID1 high 16 bits, PHYSID2 low. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		rc = bnx2_init_serdes_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

	bnx2_setup_phy(bp);

	return rc;
}
/* Put the EMAC into MAC loopback: clear the port mode, set the
 * loopback and force-link bits, and mark the link as up.
 * Always returns 0.
 */
static int
bnx2_set_mac_loopback(struct bnx2 *bp)
{
	u32 mode = REG_RD(bp, BNX2_EMAC_MODE);

	mode &= ~BNX2_EMAC_MODE_PORT;
	mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
	REG_WR(bp, BNX2_EMAC_MODE, mode);

	bp->link_up = 1;
	return 0;
}
/* Post a message to the bootcode firmware via the shared-memory
 * mailbox and wait for its acknowledgement.
 *
 * A rolling sequence number is OR'd into msg_data and matched against
 * the firmware's ack field, polling every 5us for up to
 * FW_ACK_TIME_OUT_MS.  On timeout (except for WAIT0 messages) the
 * timeout is reported back to the firmware, fw_timed_out is latched
 * (causing all later calls to fail fast), and -EBUSY is returned.
 * Returns 0 on success.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data)
{
	int i;
	u32 val;

	if (bp->fw_timed_out)
		return -EBUSY;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, HOST_VIEW_SHMEM_BASE + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS * 1000)/5; i++) {
		udelay(5);

		val = REG_RD_IND(bp, HOST_VIEW_SHMEM_BASE + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}

	/* If we timed out, inform the firmware that this is the case. */
	if (((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) &&
		((msg_data & BNX2_DRV_MSG_DATA) != BNX2_DRV_MSG_DATA_WAIT0)) {

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, HOST_VIEW_SHMEM_BASE + BNX2_DRV_MB, msg_data);

		bp->fw_timed_out = 1;

		return -EBUSY;
	}

	return 0;
}
/* Zero out the on-chip context memory for all 96 connection IDs.
 *
 * On 5706 A0 silicon some physical CID addresses are remapped (the
 * vcid & 0x8 case) to work around a chip issue; on other revisions
 * the virtual and physical context addresses are identical.  Each
 * context page is mapped in via CTX_PAGE_TBL, cleared word by word,
 * then mapped to its final virtual address.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				/* A0 remapping of the upper half of each
				 * 16-CID group. */
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
	    		vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

		/* Zero out the context. */
		for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
			CTX_WR(bp, 0x00, offset, 0);
		}

		REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
		REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
	}
}
/* Work around bad RX buffer memory blocks.
 *
 * Drains the RX mbuf allocator, keeping only the buffers whose
 * address does not have bit 9 set (bit 9 marks a bad memory block),
 * then frees the good ones back to the pool — leaving the bad blocks
 * permanently allocated and therefore never handed to the chip.
 * Returns 0 on success or -ENOMEM if the tracking array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* Up to 512 buffer handles can be tracked. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];

		/* Free command format: handle replicated with flag bit. */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
static void
bnx2_set_mac_addr(struct bnx2 *bp)
{
u32 val;
u8 *mac_addr = bp->dev->dev_addr;
val = (mac_addr[0] << 8) | mac_addr[1];
REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
(mac_addr[4] << 8) | mac_addr[5];
REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
}
/* Allocate and DMA-map a new receive skb for ring slot `index`.
 *
 * The skb data pointer is aligned to an 8-byte boundary, the buffer is
 * mapped for device reads, and the descriptor's 64-bit DMA address is
 * split into its hi/lo halves.  rx_prod_bseq is advanced by the buffer
 * size (the chip's byte-sequence producer counter).  Returns 0 on
 * success or -ENOMEM if no skb could be allocated.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[index];
	unsigned long align;

	skb = dev_alloc_skb(bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to 8 bytes for the hardware. */
	if (unlikely((align = (unsigned long) skb->data & 0x7))) {
		skb_reserve(skb, 8 - align);
	}

	skb->dev = bp->dev;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
/* Handle a link-state attention.  The attention bit and its ack bit in
 * the status block differ when the state changed; the corresponding
 * PCICFG set/clear command re-synchronizes the ack, then the link is
 * re-evaluated.
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	u32 attn = bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE;
	u32 ack = bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE;

	if (attn == ack)
		return;

	/* Acknowledge the attention in the direction it changed. */
	if (attn)
		REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
		       STATUS_ATTN_BITS_LINK_STATE);
	else
		REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
		       STATUS_ATTN_BITS_LINK_STATE);

	bnx2_set_link(bp);
}
/* Reclaim completed transmit descriptors.
 *
 * Walks the tx ring from the software consumer index to the hardware
 * consumer index published in the status block, unmapping and freeing
 * each completed skb (head fragment plus page fragments).  The hw
 * index is bumped past the ring's link descriptor slot when it lands
 * there.  For TSO packets, a packet whose final BD has not yet been
 * completed is left for the next pass.  Finally the freed-BD count is
 * added to tx_avail_bd and a stopped queue is woken if enough BDs are
 * now available.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->status_blk->status_tx_quick_consumer_index0;
	/* Skip the link-BD slot at the end of each ring page. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;
#ifdef BCM_TSO
		/* partial BD completions possible with TSO packets */
		if (skb_shinfo(skb)->tso_size) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed ring-distance test: stop if the packet's
			 * last BD is still beyond hw_cons. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}
#endif
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each page fragment's BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
				       	mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb_irq(skb);

		/* Re-read hw_cons in case more completions arrived. */
		hw_cons = bp->status_blk->status_tx_quick_consumer_index0;
		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	atomic_add(tx_free_bd, &bp->tx_avail_bd);

	if (unlikely(netif_queue_stopped(bp->dev))) {
		unsigned long flags;

		/* Re-check under tx_lock to avoid racing the xmit path. */
		spin_lock_irqsave(&bp->tx_lock, flags);
		if ((netif_queue_stopped(bp->dev)) &&
			(atomic_read(&bp->tx_avail_bd) > MAX_SKB_FRAGS)) {

			netif_wake_queue(bp->dev);
		}
		spin_unlock_irqrestore(&bp->tx_lock, flags);
	}

	bp->tx_cons = sw_cons;
}
/* Recycle an rx buffer: move the skb and its DMA mapping from the
 * consumer slot to the producer slot without reallocating.  The first
 * 8 bytes of the descriptor (the hi/lo DMA address words) are copied
 * over, the mapping is re-synced for the device, and rx_prod_bseq is
 * advanced as if a fresh buffer had been posted.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf = &bp->rx_buf_ring[cons];
	struct sw_bd *prod_rx_buf = &bp->rx_buf_ring[prod];
	struct rx_bd *cons_bd = &bp->rx_desc_ring[cons];
	struct rx_bd *prod_bd = &bp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	/* Copy only the DMA address words of the descriptor. */
	memcpy(prod_bd, cons_bd, 8);

	bp->rx_prod_bseq += bp->rx_buf_use_size;
}
/* Receive up to `budget` packets from the rx ring (NAPI poll body).
 *
 * For each completed buffer: parse the inline l2_fhdr the chip writes
 * at the head of the buffer, drop error frames by recycling them,
 * copy small frames into a fresh skb when jumbo MTU is active,
 * otherwise hand the full buffer up and post a replacement.  Checksum
 * offload results and hardware VLAN tags are applied before
 * netif_receive_skb().  Finally the producer index and byte sequence
 * are written to the chip's rx mailbox.  Returns the number of packets
 * processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->status_blk->status_rx_quick_consumer_index0;
	/* Skip the link-BD slot at the end of each ring page. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u16 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;
		pci_dma_sync_single_for_cpu(bp->pdev,
			pci_unmap_addr(rx_buf, mapping),
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr to the packet data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;	/* strip CRC */

		if (rx_hdr->l2_fhdr_errors &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = dev_alloc_skb(len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			memcpy(new_skb->data,
				skb->data + bp->rx_offset - 2,
				len + 2);

			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);
			new_skb->dev = bp->dev;

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* Replacement posted; hand the full buffer up. */
			pci_unmap_single(bp->pdev,
				pci_unmap_addr(rx_buf, mapping),
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* No replacement available: recycle and drop. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(htons(skb->protocol) != 0x8100)) {

			/* Oversized non-VLAN frame: drop. */
			dev_kfree_skb_irq(skb);
			goto next_rx;

		}

		status = rx_hdr->l2_fhdr_status;
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			u16 cksum = rx_hdr->l2_fhdr_tcp_udp_xsum;

			/* 0xffff means the hardware verified the checksum. */
			if (cksum == 0xffff)
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		rx_buf->skb = NULL;

		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Publish the new producer index and byte sequence to the chip. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = dev->priv;

	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		return IRQ_RETVAL(1);
	}

	/* Schedule the NAPI poll handler. */
	if (netif_rx_schedule_prep(dev)) {
		__netif_rx_schedule(dev);
	}

	return IRQ_RETVAL(1);
}
/* INTx (possibly shared line) interrupt handler. */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = dev->priv;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	/* Not our interrupt: status index unchanged and INTA not
	 * asserted.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) ||
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_RETVAL(0);

	/* Mask further interrupts until the NAPI poll completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		return IRQ_RETVAL(1);
	}

	if (netif_rx_schedule_prep(dev)) {
		__netif_rx_schedule(dev);
	}

	return IRQ_RETVAL(1);
}
/* NAPI poll routine: service link attentions, TX completions and RX
 * work, then re-enable the device interrupt once all work is done.
 *
 * Returns 0 (and completes NAPI) when finished, 1 when RX work remains
 * and the poll should run again.
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = dev->priv;
	int rx_done = 1;

	/* Snapshot the status index this poll services before reading
	 * the individual status fields; rmb() orders the reads.
	 */
	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	/* A link attention is pending when the attention bit disagrees
	 * with its acknowledgement bit.
	 */
	if ((bp->status_blk->status_attn_bits &
		STATUS_ATTN_BITS_LINK_STATE) !=
		(bp->status_blk->status_attn_bits_ack &
		STATUS_ATTN_BITS_LINK_STATE)) {

		unsigned long flags;

		/* PHY access is serialized by phy_lock. */
		spin_lock_irqsave(&bp->phy_lock, flags);
		bnx2_phy_int(bp);
		spin_unlock_irqrestore(&bp->phy_lock, flags);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_cons) {
		bnx2_tx_int(bp);
	}

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->rx_cons) {
		int orig_budget = *budget;
		int work_done;

		/* Never exceed this device's remaining quota. */
		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;

		/* Budget exhausted: more RX work may remain. */
		if (work_done >= orig_budget) {
			rx_done = 0;
		}
	}

	if (rx_done) {
		/* All caught up: leave polling and re-enable the
		 * interrupt, acking up to the serviced status index.
		 */
		netif_rx_complete(dev);
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			bp->last_status_idx);
		return 0;
	}

	return 1;
}
/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
 * from set_multicast.
 *
 * Program the receive filters (promiscuous / all-multicast / multicast
 * hash) and the RPM sort-user0 rule from dev->flags and the device's
 * multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = dev->priv;
	u32 rx_mode, sort_mode;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&bp->phy_lock, flags);

	/* Start from the current mode with the bits this function
	 * controls cleared.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags on received frames unless a VLAN group is
	 * registered (hardware stripping in use).
	 */
	if (!bp->vlgrp) {
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
	}
#else
	rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			/* Hash each address into one of 256 bits: the
			 * low CRC byte's top 3 bits select the hash
			 * register, its low 5 bits select the bit.
			 */
			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Only touch the EMAC RX mode register when it changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Reprogram sort-user0: clear, load the rule, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_irqrestore(&bp->phy_lock, flags);
}
/* Download an RV2P firmware image (rv2p_code_len bytes, consumed as
 * 8-byte instruction words) into the selected RV2P processor and leave
 * it in reset; the un-stall happens later.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int proc1 = (rv2p_proc == RV2P_PROC1);
	int i;

	for (i = 0; i < rv2p_code_len; i += 8) {
		u32 instr_idx = i / 8;

		/* Each instruction is written high word first, then low
		 * word, then committed at its index.
		 */
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, rv2p_code[0]);
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, rv2p_code[1]);
		rv2p_code += 2;

		if (proc1)
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD,
			       instr_idx | BNX2_RV2P_PROC1_ADDR_CMD_RDWR);
		else
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD,
			       instr_idx | BNX2_RV2P_PROC2_ADDR_CMD_RDWR);
	}

	/* Reset the processor, un-stall is done later. */
	if (proc1)
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	else
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
}
/* Copy one firmware section into the processor's scratchpad via
 * indirect register writes.  sect_addr is the section's MIPS-view
 * address; a NULL section pointer means the section is empty.
 */
static void
load_cpu_fw_section(struct bnx2 *bp, struct cpu_reg *cpu_reg,
	u32 sect_addr, u32 *sect, u32 sect_len)
{
	u32 offset;
	int j;

	if (sect == NULL)
		return;

	offset = cpu_reg->spad_base + (sect_addr - cpu_reg->mips_view_base);
	for (j = 0; j < (sect_len / 4); j++, offset += 4) {
		REG_WR_IND(bp, offset, sect[j]);
	}
}

/* Halt one on-chip MIPS processor, load all of its firmware sections
 * into the scratchpad, program the start address and restart it.
 */
static void
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 val;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text, Data, SBSS, BSS and Read-Only sections; the
	 * per-section copy logic was previously duplicated five times.
	 */
	load_cpu_fw_section(bp, cpu_reg, fw->text_addr, fw->text,
		fw->text_len);
	load_cpu_fw_section(bp, cpu_reg, fw->data_addr, fw->data,
		fw->data_len);
	load_cpu_fw_section(bp, cpu_reg, fw->sbss_addr, fw->sbss,
		fw->sbss_len);
	load_cpu_fw_section(bp, cpu_reg, fw->bss_addr, fw->bss, fw->bss_len);
	load_cpu_fw_section(bp, cpu_reg, fw->rodata_addr, fw->rodata,
		fw->rodata_len);

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);
}
static void
bnx2_init_cpus(struct bnx2 *bp)
{
struct cpu_reg cpu_reg;
struct fw_info fw;
/* Initialize the RV2P processor. */
load_rv2p_fw(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), RV2P_PROC1);
load_rv2p_fw(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), RV2P_PROC2);
/* Initialize the RX Processor. */
cpu_reg.mode = BNX2_RXP_CPU_MODE;
cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
cpu_reg.state = BNX2_RXP_CPU_STATE;
cpu_reg.state_value_clear = 0xffffff;
cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
cpu_reg.spad_base = BNX2_RXP_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
fw.start_addr = bnx2_RXP_b06FwStartAddr;
fw.text_addr = bnx2_RXP_b06FwTextAddr;
fw.text_len = bnx2_RXP_b06FwTextLen;
fw.text_index = 0;
fw.text = bnx2_RXP_b06FwText;
fw.data_addr = bnx2_RXP_b06FwDataAddr;
fw.data_len = bnx2_RXP_b06FwDataLen;
fw.data_index = 0;
fw.data = bnx2_RXP_b06FwData;
fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
fw.sbss_len = bnx2_RXP_b06FwSbssLen;
fw.sbss_index = 0;
fw.sbss = bnx2_RXP_b06FwSbss;
fw.bss_addr = bnx2_RXP_b06FwBssAddr;
fw.bss_len = bnx2_RXP_b06FwBssLen;
fw.bss_index = 0;
fw.bss = bnx2_RXP_b06FwBss;
fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
fw.rodata_len = bnx2_RXP_b06FwRodataLen;
fw.rodata_index = 0;
fw.rodata = bnx2_RXP_b06FwRodata;
load_cpu_fw(bp, &cpu_reg, &fw);
/* Initialize the TX Processor. */
cpu_reg.mode = BNX2_TXP_CPU_MODE;
cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
cpu_reg.state = BNX2_TXP_CPU_STATE;
cpu_reg.state_value_clear = 0xffffff;
cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
cpu_reg.spad_base = BNX2_TXP_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
fw.start_addr = bnx2_TXP_b06FwStartAddr;
fw.text_addr = bnx2_TXP_b06FwTextAddr;
fw.text_len = bnx2_TXP_b06FwTextLen;
fw.text_index = 0;
fw.text = bnx2_TXP_b06FwText;
fw.data_addr = bnx2_TXP_b06FwDataAddr;
fw.data_len = bnx2_TXP_b06FwDataLen;
fw.data_index = 0;
fw.data = bnx2_TXP_b06FwData;
fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
fw.sbss_len = bnx2_TXP_b06FwSbssLen;
fw.sbss_index = 0;
fw.sbss = bnx2_TXP_b06FwSbss;
fw.bss_addr = bnx2_TXP_b06FwBssAddr;
fw.bss_len = bnx2_TXP_b06FwBssLen;
fw.bss_index = 0;
fw.bss = bnx2_TXP_b06FwBss;
fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
fw.rodata_len = bnx2_TXP_b06FwRodataLen;
fw.rodata_index = 0;
fw.rodata = bnx2_TXP_b06FwRodata;
load_cpu_fw(bp, &cpu_reg, &fw);
/* Initialize the TX Patch-up Processor. */
cpu_reg.mode = BNX2_TPAT_CPU_MODE;
cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
cpu_reg.state = BNX2_TPAT_CPU_STATE;
cpu_reg.state_value_clear = 0xffffff;
cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
fw.start_addr = bnx2_TPAT_b06FwStartAddr;
fw.text_addr = bnx2_TPAT_b06FwTextAddr;
fw.text_len = bnx2_TPAT_b06FwTextLen;
fw.text_index = 0;
fw.text = bnx2_TPAT_b06FwText;
fw.data_addr = bnx2_TPAT_b06FwDataAddr;
fw.data_len = bnx2_TPAT_b06FwDataLen;
fw.data_index = 0;
fw.data = bnx2_TPAT_b06FwData;
fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
fw.sbss_index = 0;
fw.sbss = bnx2_TPAT_b06FwSbss;
fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
fw.bss_len = bnx2_TPAT_b06FwBssLen;
fw.bss_index = 0;
fw.bss = bnx2_TPAT_b06FwBss;
fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
fw.rodata_index = 0;
fw.rodata = bnx2_TPAT_b06FwRodata;
load_cpu_fw(bp, &cpu_reg, &fw);
/* Initialize the Completion Processor. */
cpu_reg.mode = BNX2_COM_CPU_MODE;
cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
cpu_reg.state = BNX2_COM_CPU_STATE;
cpu_reg.state_value_clear = 0xffffff;
cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
cpu_reg.spad_base = BNX2_COM_SCRATCH;
cpu_reg.mips_view_base = 0x8000000;
fw.ver_major = bnx2_COM_b06FwReleaseMajor;
fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
fw.ver_fix = bnx2_COM_b06FwReleaseFix;
fw.start_addr = bnx2_COM_b06FwStartAddr;
fw.text_addr = bnx2_COM_b06FwTextAddr;
fw.text_len = bnx2_COM_b06FwTextLen;
fw.text_index = 0;
fw.text = bnx2_COM_b06FwText;
fw.data_addr = bnx2_COM_b06FwDataAddr;
fw.data_len = bnx2_COM_b06FwDataLen;
fw.data_index = 0;
fw.data = bnx2_COM_b06FwData;
fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
fw.sbss_len = bnx2_COM_b06FwSbssLen;
fw.sbss_index = 0;
fw.sbss = bnx2_COM_b06FwSbss;
fw.bss_addr = bnx2_COM_b06FwBssAddr;
fw.bss_len = bnx2_COM_b06FwBssLen;
fw.bss_index = 0;
fw.bss = bnx2_COM_b06FwBss;
fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
fw.rodata_len = bnx2_COM_b06FwRodataLen;
fw.rodata_index = 0;
fw.rodata = bnx2_COM_b06FwRodata;
load_cpu_fw(bp, &cpu_reg, &fw);
}
/* Transition the chip between PCI power states.
 *
 * state 0 selects D0 (fully on); state 3 selects D3hot, optionally
 * arming magic-packet Wake-on-LAN when bp->wol is set.  Any other
 * value returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, int state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case 0: {
		u32 val;

		/* Clear the power-state field (back to D0) and clear
		 * any pending PME status.
		 */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack wake-packet indications and turn magic-packet
		 * detection off while running.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case 3: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily advertise only 10/100 autoneg for
			 * the wake-up link, then restore the configured
			 * settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_FORCE_LINK |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
					0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
				BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule: accept broadcast + multicast while
			 * suspended (clear, load, then enable).
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
				BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
				BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
				BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending. */
		bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;

		/* NOTE(review): 5706 A0/A1 are put into D3hot only when
		 * WOL is armed — presumably an early-silicon erratum;
		 * confirm against the errata sheet.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;	/* D3hot */
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
/* Request the NVRAM software arbitration lock and poll until the
 * arbiter grants it.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	int retries;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);

	for (retries = 0; retries < NVRAM_TIMEOUT_COUNT; retries++) {
		if (REG_RD(bp, BNX2_NVM_SW_ARB) & BNX2_NVM_SW_ARB_ARB_ARB2)
			return 0;
		udelay(5);
	}

	return -EBUSY;
}
/* Relinquish the NVRAM software arbitration lock and wait for the
 * arbiter to confirm the grant bit is clear.  Returns 0 on success,
 * -EBUSY on timeout.
 */
static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int retries;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (retries = 0; retries < NVRAM_TIMEOUT_COUNT; retries++) {
		if (!(REG_RD(bp, BNX2_NVM_SW_ARB) & BNX2_NVM_SW_ARB_ARB_ARB2))
			return 0;
		udelay(5);
	}

	return -EBUSY;
}
/* Unlock NVRAM write-protect.  Unbuffered flash additionally needs an
 * explicit WREN command issued and polled to completion.  Returns 0 on
 * success, -EBUSY on timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 misc_cfg = REG_RD(bp, BNX2_MISC_CFG);

	REG_WR(bp, BNX2_MISC_CFG, misc_cfg | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (!bp->flash_info->buffered) {
		int retries;

		/* Clear DONE, then issue the write-enable command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (retries = 0; retries < NVRAM_TIMEOUT_COUNT; retries++) {
			udelay(5);
			if (REG_RD(bp, BNX2_NVM_COMMAND) &
			    BNX2_NVM_COMMAND_DONE)
				return 0;
		}
		return -EBUSY;
	}
	return 0;
}
/* Re-lock NVRAM write-protect by clearing the write-enable bits. */
static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_MISC_CFG,
	       REG_RD(bp, BNX2_MISC_CFG) & ~BNX2_MISC_CFG_NVM_WR_EN);
}
/* Turn on NVRAM interface access. */
static void
bnx2_enable_nvram_access(struct bnx2 *bp)
{
	u32 ena = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);

	/* Enable both bits, even on read. */
	ena |= BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN;
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, ena);
}
/* Turn off NVRAM interface access. */
static void
bnx2_disable_nvram_access(struct bnx2 *bp)
{
	u32 ena = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);

	/* Disable both bits, even after read. */
	ena &= ~(BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
	REG_WR(bp, BNX2_NVM_ACCESS_ENABLE, ena);
}
/* Erase the flash page containing offset.  Buffered flash self-erases
 * on write, so nothing is done for it.  Returns 0 on success, -EBUSY
 * on timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	int retries;

	if (bp->flash_info->buffered)
		/* Buffered flash, no erase needed */
		return 0;

	/* Clear DONE, program the page address, then issue the erase
	 * command.
	 */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_ERASE |
	       BNX2_NVM_COMMAND_WR | BNX2_NVM_COMMAND_DOIT);

	/* Wait for completion. */
	for (retries = 0; retries < NVRAM_TIMEOUT_COUNT; retries++) {
		udelay(5);
		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			return 0;
	}

	return -EBUSY;
}
/* Read one 32-bit word from NVRAM at offset into ret_val (raw bytes).
 *
 * cmd_flags carries the BNX2_NVM_COMMAND_FIRST/LAST framing bits
 * supplied by the caller.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash: page-select bits
	 * above page_bits, byte offset within the page below.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* The data register holds the word big-endian;
			 * convert and hand back raw bytes.
			 */
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);

			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
/* Write one 32-bit word (raw bytes in val) to NVRAM at offset.
 *
 * cmd_flags carries the BNX2_NVM_COMMAND_FIRST/LAST framing bits
 * supplied by the caller.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
	u32 data;
	int retries;

	/* Calculate an offset of a buffered flash: page-select bits
	 * above page_bits, byte offset within the page below.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* The NVM data register takes the word big-endian. */
	memcpy(&data, val, 4);
	REG_WR(bp, BNX2_NVM_WRITE, cpu_to_be32(data));

	/* Program the address, then kick off the write. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (retries = 0; retries < NVRAM_TIMEOUT_COUNT; retries++) {
		udelay(5);
		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			return 0;
	}

	return -EBUSY;
}
/* Identify the attached flash/EEPROM and point bp->flash_info at the
 * matching flash_table entry, reconfiguring the NVM interface from the
 * table on first use.
 *
 * Returns 0 on success, -ENODEV for an unrecognized part, or the error
 * from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {
		/* Flash interface has been reconfigured: match the
		 * full CFG1 value against the table.
		 */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if (val == flash->config1) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		/* Not yet been reconfigured: match the strap pins and
		 * program the interface from the table entry.
		 */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & FLASH_STRAP_MASK) == flash->strapping) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither scan found a match. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT "Unknown flash/EEPROM type.\n");
		rc = -ENODEV;
	}

	return rc;
}
/* Read buf_size bytes from NVRAM at offset into ret_buf.  Reads that
 * are not dword aligned at either end are handled by reading whole
 * dwords and copying out only the requested bytes.
 *
 * Returns 0 on success or a negative errno.  Fix: the NVRAM lock and
 * access enables are now always released before returning; the old
 * code returned directly on a mid-read error with the lock held.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
	int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/* Leading partial dword: read the containing dword and copy
	 * out the bytes starting at (offset & 3).
	 */
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
		if (rc)
			goto read_end;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}

	/* Round a trailing partial dword up; 'extra' bytes of the last
	 * dword read are discarded.
	 */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Bulk middle dwords straight into the caller's buffer. */
		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			goto read_end;

		/* Last dword via a bounce buffer so extra bytes are
		 * dropped.
		 */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

read_end:
	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
/* Write buf_size bytes from data_buf to NVRAM at offset.
 *
 * Unaligned head/tail bytes are handled by reading back the
 * surrounding dwords and merging.  For unbuffered flash, each affected
 * page is read out in full, erased, and rewritten with the new data
 * patched in.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fix: the middle write loop previously advanced the flash_buffer byte
 * index 'i' by 1 per dword instead of 4, so the data_end..page_end
 * write-back restored the wrong bytes and corrupted the page tail on
 * unbuffered flash.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
	int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4];
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Align the start down to a dword boundary, preserving the
	 * bytes in front of the caller's data.
	 */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Align the end up likewise, preserving the trailing bytes. */
	if (len32 & 3) {
		if ((len32 > 4) || !align_start) {
			align_end = 4 - (len32 & 3);
			len32 += align_end;
			if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
				end, 4))) {
				return rc;
			}
		}
	}

	/* Build a dword-aligned scratch copy with the preserved bytes
	 * patched in at both ends.
	 */
	if (align_start || align_end) {
		buf = kmalloc(len32, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(buf, start, 4);
		}
		if (align_end) {
			memcpy(buf + len32 - 4, end, 4);
		}
		memcpy(buf + align_start, data_buf, buf_size);
	}

	/* Write page by page. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;
		/* NOTE(review): assumes every unbuffered entry in
		 * flash_table has page_size <= 264 — confirm. */
		u8 flash_buffer[264];

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Erase the page */
		if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
			goto nvram_write_end;

		/* Re-enable the write again for the actual write */
		bnx2_enable_nvram_write(bp);

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end.
		 * 'i' advances in bytes (4 per dword) so the flash_buffer
		 * write-back below stays in sync (was i++, see above).
		 */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end - 4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	if (align_start || align_end)
		kfree(buf);
	return rc;
}
/* Soft-reset the chip core after coordinating with the bootcode
 * firmware.  reset_code is the BNX2_DRV_MSG_CODE_* reason passed to
 * the firmware before and after the reset.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, HOST_VIEW_SHMEM_BASE + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	bp->fw_timed_out = 0;

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* NOTE(review): extra settling delay on 5706 A0/A1 — presumably
	 * an early-silicon erratum; confirm. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	/* Reset request/busy bits still set: the reset never finished. */
	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	bp->fw_timed_out = 0;

	/* Wait for the firmware to finish its initialization. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower. The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
static int
bnx2_init_chip(struct bnx2 *bp)
{
u32 val;
/* Make sure the interrupt is not active. */
REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
DMA_READ_CHANS << 12 |
DMA_WRITE_CHANS << 16;
val |= (0x2 << 20) | (1 << 11);
if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz = 133))
val |= (1 << 23);
if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
(CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;