20
#include <kernel/irq.h>
22
#include <drivers/pci/pci.h>
23
#include <drivers/pci/pci_driver.h>
25
#include <net/l0/net_entry.h>
26
#include <net/skbuff.h>
27
#include <net/netdevice.h>
28
#include <net/l2/ethernet.h>
29
#include <net/inetdevice.h>
31
#include <net/util/show_packet.h>
34
#include "dp83865_phy.h"
36
#include <framework/mod/options.h>
41
#define E1000_LOG_RX_BUFFERS OPTION_GET(NUMBER,log_rx_desc_quantity)
43
#define E1000_LOG_TX_BUFFERS OPTION_GET(NUMBER,log_tx_desc_quantity)
45
#define E1000_CARD_QUANTITY OPTION_GET(NUMBER,card_quantity)
46
#define E1000_PHY_ID OPTION_GET(NUMBER,phy_id)
53
#define TX_RING_SIZE (1 << (E1000_LOG_TX_BUFFERS))
54
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
55
#define TX_RING_LEN_BITS ((E1000_LOG_TX_BUFFERS) << 12)
57
#define RX_RING_SIZE (1 << (E1000_LOG_RX_BUFFERS))
58
#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
59
#define RX_RING_LEN_BITS ((E1000_LOG_RX_BUFFERS) << 4)
62
static const uint8_t l_base_mac_addr[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0x02 };
63
static int card_number = 0;
65
struct l_e1000_dma_area {
66
struct l_e1000_init_block init_block __attribute__((aligned(64)));
67
struct l_e1000_rx_desc rx_ring[RX_RING_SIZE] __attribute__((aligned(16)));
68
struct l_e1000_tx_desc tx_ring[TX_RING_SIZE] __attribute__((aligned(16)));
72
unsigned char full_duplex;
73
unsigned char supports_gmii;
74
unsigned char phy_id_mask;
75
unsigned char reg_num_mask;
79
static struct l_e1000_dma_area e1000_dma_area[E1000_CARD_QUANTITY] __attribute__((aligned(64)));
82
struct net_device *netdev;
84
struct sk_buff *volatile rx_skbs[RX_RING_SIZE];
85
struct sk_buff *volatile tx_skbs[TX_RING_SIZE];
87
struct l_e1000_init_block *init_block;
88
struct l_e1000_rx_desc *rx_ring;
89
struct l_e1000_tx_desc *tx_ring;
91
uintptr_t base_ioaddr;
97
static inline uint32_t e1000_read_e_csr(struct l_e1000_priv *ep) {
98
assert(ep->base_ioaddr);
99
return e2k_read32(ep->base_ioaddr + L_E1000_E_CSR);
102
static inline void e1000_write_e_csr(struct l_e1000_priv *ep, int val) {
103
assert(ep->base_ioaddr);
104
e2k_write32(val, ep->base_ioaddr + L_E1000_E_CSR);
107
static inline uint32_t e1000_read_mgio_csr(struct l_e1000_priv *ep) {
108
assert(ep->base_ioaddr);
109
return e2k_read32(ep->base_ioaddr + L_E1000_MGIO_CSR);
112
static inline void e1000_write_mgio_csr(struct l_e1000_priv *ep, int val) {
113
assert(ep->base_ioaddr);
114
e2k_write32(val, ep->base_ioaddr + L_E1000_MGIO_CSR);
117
static inline uint32_t e1000_read_mgio_data(struct l_e1000_priv *ep) {
118
assert(ep->base_ioaddr);
119
return e2k_read32(ep->base_ioaddr + L_E1000_MGIO_DATA);
122
static inline void e1000_write_mgio_data(struct l_e1000_priv *ep, int val) {
123
assert(ep->base_ioaddr);
124
e2k_write32(val, ep->base_ioaddr + L_E1000_MGIO_DATA);
127
static inline void e1000_write_e_base_addr(struct l_e1000_priv *ep, int val) {
128
assert(ep->base_ioaddr);
129
e2k_write32(val, ep->base_ioaddr + L_E1000_E_BASE_ADDR);
132
static inline void e1000_write_dma_base_addr(struct l_e1000_priv *ep, int val) {
133
assert(ep->base_ioaddr);
134
e2k_write32(val, ep->base_ioaddr + L_E1000_DMA_BASE_ADDR);
137
static inline uint32_t e1000_read_psf_csr(struct l_e1000_priv *ep) {
138
assert(ep->base_ioaddr);
139
return e2k_read32(ep->base_ioaddr + L_E1000_PSF_CSR);
142
static inline uint32_t e1000_read_psf_data(struct l_e1000_priv *ep) {
143
assert(ep->base_ioaddr);
144
return e2k_read32(ep->base_ioaddr + L_E1000_PSF_DATA);
147
static int e1000_mii_readreg(struct net_device *dev, int phy_id, int reg_num) {
148
struct l_e1000_priv *ep = netdev_priv(dev);
150
uint16_t val_out = 0;
154
rd |= 0x2 << MGIO_CS_OFF;
155
rd |= 0x1 << MGIO_ST_OF_F_OFF;
156
rd |= 0x2 << MGIO_OP_CODE_OFF;
157
rd |= (phy_id & 0x1f) << MGIO_PHY_AD_OFF;
158
rd |= (reg_num & 0x1f) << MGIO_REG_AD_OFF;
160
e1000_write_mgio_data(ep, rd);
163
for (i = 0; i != 1000; i++) {
164
if (e1000_read_mgio_csr(ep) & MGIO_CSR_RRDY) {
165
rd = (uint16_t)e1000_read_mgio_data(ep);
167
val_out = rd & 0xffff;
168
log_debug("reg 0x%x >>> 0x%x", reg_num, val_out);
174
log_error("mdio_read: Unable to read from MGIO_DATA reg\n");
179
static void e1000_mii_writereg(struct net_device *dev, int phy_id, int reg_num, int val) {
180
struct l_e1000_priv *ep = netdev_priv(dev);
185
wr |= 0x2 << MGIO_CS_OFF;
186
wr |= 0x1 << MGIO_ST_OF_F_OFF;
187
wr |= 0x1 << MGIO_OP_CODE_OFF;
188
wr |= (phy_id & 0x1f) << MGIO_PHY_AD_OFF;
189
wr |= (reg_num & 0x1f) << MGIO_REG_AD_OFF;
192
log_debug("reg 0x%x <<< 0x%x", reg_num, val);
193
e1000_write_mgio_data(ep, wr);
195
for (i = 0; i != 1000; i++) {
196
if (e1000_read_mgio_csr(ep) & MGIO_CSR_RRDY) {
201
log_error("Unable to write MGIO_DATA reg: val = 0x%x", wr);
206
/* Write @val into MMD register @mmd_reg of MMD device @mmd_dev via the
 * IEEE 802.3 Clause 45-over-Clause 22 indirect mechanism:
 * reg 0xD = MMD access control, reg 0xE = MMD address/data.
 * NOTE(review): the return type line was lost in extraction; `static void`
 * is assumed — confirm against the upstream driver. */
static void
e1000_phy_mmd_write(void *eth1000_ptr, uint8_t mmd_dev, uint16_t mmd_reg, uint16_t val) {
	/* Select address function, latch the register address. */
	e1000_mii_writereg(eth1000_ptr, 0, 0xD, mmd_dev & 0x1F);
	e1000_mii_writereg(eth1000_ptr, 0, 0xE, mmd_reg);
	/* Switch to data function (01 in bits 15:14), then write the value. */
	e1000_mii_writereg(eth1000_ptr, 0, 0xD, (0x01 << 14) | (mmd_dev & 0x1F));
	e1000_mii_writereg(eth1000_ptr, 0, 0xE, val);
}
#define SPEED_1000 1000
217
/* Derive speed/duplex from MII/GMII autonegotiation results, optionally cap
 * them to the build-time assigned speed, and program the result into MGIO_CSR.
 * NOTE(review): this span is corrupted — local declarations (lpa, lpa2, media,
 * val), several brace closers and the interleaved bare numbers (residue of the
 * original line numbering) are extraction damage; do not treat it as
 * compilable as-is. */
static void l_e1000_set_phy_mode(struct net_device *dev) {
218
/* Build-time speed cap. */
static const int assigned_speed = SPEED_1000;
219
struct l_e1000_priv *ep = netdev_priv(dev);
222
unsigned int advertise;
225
unsigned int advertise2;
226
int speed = SPEED_1000;
228
/* Link-partner ability and our advertisement, per IEEE MII registers. */
lpa = e1000_mii_readreg(dev, ep->mii_if.phy_id, MII_LPA);
229
advertise = e1000_mii_readreg(dev, ep->mii_if.phy_id, MII_ADVERTISE);
230
log_debug("MII lpa 0x%x advertise 0x%x", lpa, advertise);
231
if (ep->mii_if.supports_gmii) {
232
lpa2 = e1000_mii_readreg(dev, ep->mii_if.phy_id, MII_STAT1000);
233
advertise2 = e1000_mii_readreg(dev, ep->mii_if.phy_id, MII_CTRL1000);
234
log_debug(" GMII status 0x%x control 0x%x", lpa2, advertise2);
235
if ((advertise2 & (ADVERTISE_1000HALF | ADVERTISE_1000FULL)) &&
236
(lpa2 & (LPA_1000FULL | LPA_1000HALF))) {
241
/* Resolve the common subset of MII abilities. */
media = mii_nway_result(lpa & advertise);
242
if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) {
248
if (speed != assigned_speed) {
249
log_debug("decrease speed to %d due to module param", SPEED_1000);
250
speed = assigned_speed;
251
/* Capped below gigabit: withdraw 1000 Mbit advertisement. */
if (ep->mii_if.supports_gmii && (speed < SPEED_1000)) {
252
lpa2 &= ~(LPA_1000FULL | LPA_1000HALF);
253
e1000_mii_writereg(dev, ep->mii_if.phy_id, MII_STAT1000, lpa2);
254
advertise2 &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
255
e1000_mii_writereg(dev, ep->mii_if.phy_id, MII_CTRL1000, advertise2);
257
if (speed == SPEED_10) {
258
lpa &= ~(LPA_100FULL | LPA_100BASE4 | LPA_100HALF);
259
e1000_mii_writereg(dev, ep->mii_if.phy_id, MII_LPA, lpa);
261
advertise &= ~(ADVERTISE_1000XFULL |
262
ADVERTISE_1000XHALF |
263
ADVERTISE_1000XPSE_ASYM |
264
ADVERTISE_1000XPAUSE);
265
advertise |= (ADVERTISE_10FULL | ADVERTISE_10HALF);
266
e1000_mii_writereg(dev, ep->mii_if.phy_id, MII_ADVERTISE, advertise);
269
log_debug("e1000_set_phy_mode will set %d Mbits %s-duplex mode",
270
speed, (ep->mii_if.full_duplex) ? "full" : "half");
271
val = e1000_read_mgio_csr(ep);
272
log_debug("mgio_csr before set : 0x%x", val);
273
/* HARD = take manual control of the speed/duplex bits. */
val |= MGIO_CSR_HARD;
274
e1000_write_mgio_csr(ep, val);
275
log_debug("mgio_csr after writing HARD = 1 in : 0x%x", e1000_read_mgio_csr(ep));
276
val &= ~(MGIO_CSR_FETH | MGIO_CSR_GETH | MGIO_CSR_FDUP);
277
if (ep->mii_if.full_duplex) {
278
val |= MGIO_CSR_FDUP;
280
if (speed >= SPEED_1000) {
281
val |= MGIO_CSR_GETH;
282
} else if (speed >= SPEED_100) {
283
val |= MGIO_CSR_FETH;
286
e1000_write_mgio_csr(ep, val);
287
log_debug("mgio_csr after setting %d Mbits %s-duplex mode : 0x%x",
288
speed, (ep->mii_if.full_duplex) ? "full" : "half", e1000_read_mgio_csr(ep));
291
static struct sk_buff *l_e1000_new_rx_buff(struct l_e1000_priv *ep, int i) {
293
struct l_e1000_rx_desc *rdesc;
295
log_debug("i %d", i);
296
skb = skb_alloc(ETH_FRAME_LEN + ETH_FCS_LEN);
298
log_error("couldn't skb_alloc(i %d)", i);
301
log_debug("skb = %p", skb);
303
rdesc = &ep->rx_ring[i];
304
rdesc->base = htole32((uintptr_t)skb_get_data_pointner(skb->data));
305
rdesc->buf_length = htole16(-(ETH_FRAME_LEN + ETH_FCS_LEN));
307
rdesc->status = htole16(RD_OWN);
309
log_debug("rxdesc[%d](base=0x%x,len=%d)", i,
310
le32toh(rdesc->base), le32toh(rdesc->buf_length));
316
/* Populate RX/TX rings and the shared init block, then hand the init block's
 * physical address to the controller via E_BASE_ADDR / DMA_BASE_ADDR.
 * NOTE(review): this span is corrupted — the declaration of i, brace closers,
 * error-path returns and the final return were lost in extraction; the
 * interleaved bare numbers are residue of the original line numbering. */
static int e1000_init_block(struct net_device *dev) {
317
struct l_e1000_priv *ep = netdev_priv(dev);
318
uint32_t init_block_addr_part;
323
/* Pre-post one skb per RX descriptor. */
for (i = 0; i < RX_RING_SIZE; i++) {
324
ep->rx_skbs[i] = l_e1000_new_rx_buff(ep, i);
325
if (NULL == ep->rx_skbs[i]) {
326
log_error("Coudn't alloc sk_buff");
331
for (i = 0; i < TX_RING_SIZE; i++) {
332
ep->tx_skbs[i] = NULL;
335
/* Only one TX descriptor is actually used (see l_e1000_xmit). */
ep->tx_ring->status = 0;
337
ep->tx_ring->base = 0;
338
ep->tx_ring->buf_length = 0;
342
/* FULL|PROM: full-duplex, promiscuous receive mode. */
ep->init_block->mode = htole16(FULL|PROM);
343
ep->init_block->laddrf = 0x0;
344
for (i = 0; i < 6; i++) {
345
ep->init_block->paddr[i] = dev->dev_addr[i];
348
/* Ring base addresses with ring-size exponent in the low bits. */
ep->init_block->rdra = htole32((uint32_t)((uintptr_t)(&ep->rx_ring[0])));
349
ep->init_block->rdra |= htole32(E1000_LOG_RX_BUFFERS);
350
ep->init_block->tdra = htole32((uint32_t)((uintptr_t)(&ep->tx_ring[0])));
351
ep->init_block->tdra |= htole32(E1000_LOG_TX_BUFFERS);
352
log_debug("Rx Desc Ring DMA Addr 0x%x", le32toh(ep->init_block->rdra));
353
log_debug("Tx Desc Ring DMA Addr 0x%x", le32toh(ep->init_block->tdra));
356
/* Split the 64-bit init-block address into low/high 32-bit halves. */
init_block_addr_part = (uint32_t)((uintptr_t)ep->init_block & 0xffffffff);
357
e1000_write_e_base_addr(ep, init_block_addr_part);
358
log_debug("Init Block Low DMA addr: 0x%x", init_block_addr_part);
360
init_block_addr_part = (uint32_t)(((uintptr_t)(ep->init_block) >> 32) & 0xffffffff);
361
e1000_write_dma_base_addr(ep, init_block_addr_part);
362
log_debug("Init Block High DMA addr: 0x%x", init_block_addr_part);
368
/* Stop the controller, reload the init block, restart it and re-program the
 * PHY mode, logging the expected E_CSR value after each step.
 * NOTE(review): this span is corrupted — the declaration of i, loop bodies,
 * brace closers and returns were lost in extraction; the interleaved bare
 * numbers are residue of the original line numbering. */
static int l_e1000_reset(struct net_device *dev) {
369
struct l_e1000_priv *ep = netdev_priv(dev);
373
e1000_write_e_csr(ep, E_CSR_STOP);
375
/* Poll until the STOP bit is acknowledged. */
for (i = 0; i < 1000; i++) {
376
if (e1000_read_e_csr(ep) & E_CSR_STOP) {
381
e1000_init_block(dev);
383
e1000_write_e_csr(ep, E_CSR_INIT);
384
l_e1000_set_phy_mode(dev);
390
/* IDON = init done. */
if (e1000_read_e_csr(ep) & E_CSR_IDON) {
395
log_debug("e_csr register after initialization: 0x%x, must be 0x%x",
396
e1000_read_e_csr(ep), (E_CSR_IDON | E_CSR_INTR | E_CSR_INIT));
398
/* Writing IDON clears it (write-1-to-clear). */
e1000_write_e_csr(ep, E_CSR_IDON);
399
log_debug("e_csr register after clear IDON bit: 0x%x, must be 0x%x",
400
e1000_read_e_csr(ep), (E_CSR_INIT));
402
/* Enable interrupts and start RX/TX. */
e1000_write_e_csr(ep, E_CSR_INEA | E_CSR_STRT);
403
log_debug("e_csr register after setting STRT bit: 0x%x, must be 0x%x",
404
e1000_read_e_csr(ep),
405
(E_CSR_INEA | E_CSR_RXON | E_CSR_TXON | E_CSR_STRT | E_CSR_INIT));
410
/* One-time hardware bring-up for card @number: wire the per-card DMA area
 * into the private struct, soft-reset the PHY via MGIO_CSR, assign the MAC
 * address, record MII capabilities, dump a few PHY registers and tune the
 * DP83865 via MMD writes.
 * NOTE(review): this span is corrupted — declarations (i, val, fdx, mii,
 * gmii), the initial read of soft_reset (it appears |='d before being
 * initialized at the first visible use), brace closers and the return were
 * lost in extraction; bare numbers are residue of the original numbering. */
static int l_e1000_hw_init(struct pci_slot_dev *pci_dev, struct net_device *dev, int number) {
411
struct l_e1000_priv *ep;
412
struct l_e1000_dma_area *m;
418
ep = netdev_priv(dev);
420
/* Statically allocated, 64-byte aligned DMA area for this card. */
m = &e1000_dma_area[number];
422
ep->init_block = &m->init_block;
423
ep->tx_ring = &m->tx_ring[0];
424
ep->rx_ring = &m->rx_ring[0];
426
ep->base_ioaddr = dev->base_addr;
432
unsigned int soft_reset;
434
e1000_write_e_csr(ep, E_CSR_STOP);
437
/* Pulse PHY soft reset: assert SRST, then clear it.
 * NOTE(review): soft_reset is |='d here without a visible initializer —
 * presumably a read of MGIO_CSR was dropped above; confirm upstream. */
soft_reset |= (L_E1000_RSET_POLARITY | MGIO_CSR_SRST);
438
e1000_write_mgio_csr(ep, soft_reset);
439
soft_reset = e1000_read_mgio_csr(ep);
440
soft_reset &= ~(MGIO_CSR_SRST);
441
e1000_write_mgio_csr(ep, soft_reset);
445
/* MAC = fixed base address with the card number folded into the last octet. */
for (i = 0; i < 6; i++) {
446
dev->dev_addr[i] = l_base_mac_addr[i];
448
dev->dev_addr[5] = l_base_mac_addr[5] + number;
450
fdx = 1; mii = 1; gmii = 1;
451
ep->mii_if.full_duplex = fdx;
452
ep->mii_if.supports_gmii = gmii;
453
ep->mii_if.phy_id_mask = 0x1f;
454
ep->mii_if.reg_num_mask = 0x1f;
455
ep->mii_if.phy_id = E1000_PHY_ID;
471
/* Debug dump of selected PHY registers after reset. */
val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0);
472
log_info("PHY reg # 0x0 after reset : 0x%x\n", val);
473
val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0x1);
474
log_info("PHY reg # 0x1 after reset : 0x%x\n", val);
475
/* Register 0x1 (status) is read twice on purpose: latched bits clear on read. */
val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0x1);
476
log_info("PHY reg # 0x1 after reset : 0x%x\n", val);
477
val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0x10);
478
log_info("PHY reg # 0x10 after reset : 0x%x\n", val);
479
val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0x11);
480
log_info("PHY reg # 0x11 after reset : 0x%x\n", val);
481
val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0x14);
482
log_info("PHY reg # 0x14 after reset : 0x%x\n", val);
483
val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0x15);
484
log_info("PHY reg # 0x15 after reset : 0x%x\n", val);
495
/* MMD tuning writes for the PHY — values taken as-is from vendor setup. */
e1000_phy_mmd_write(dev, 0x0, 0x4, 0x0006);
496
e1000_phy_mmd_write(dev, 0x0, 0x3, 0x1A80);
502
log_info("move e1000 link status select to default 0 link \n");
503
val = e1000_read_mgio_csr(ep);
504
val &= ~MGIO_CSR_LSTS;
505
val |= MGIO_CSR_SLSP;
506
e1000_write_mgio_csr(ep, val);
507
log_info(" write mgio csr #0x04 (BIST_CFG2): 0x%x\n", val);
513
static int l_e1000_open(struct net_device *dev) {
514
log_debug("mac addr: %x:%x:%x:%x:%x:%x\n",
515
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
516
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
521
static int l_e1000_xmit(struct net_device *dev, struct sk_buff *skb) {
522
struct l_e1000_priv *ep = netdev_priv(dev);
526
len = max(skb->len, ETH_ZLEN);
528
show_packet(skb_get_data_pointner(skb->data), len, "transmit");
532
while(ep->tx_skbs[0] != NULL) {
535
ep->tx_ring->base = (uint32_t)(uintptr_t)skb_get_data_pointner(skb->data);
536
ep->tx_ring->buf_length = htole16(-len);
537
ep->tx_ring->misc = 0x00000000;
538
status = TD_OWN | TD_ENP | TD_STP;
539
ep->tx_ring->status = htole16(status);
541
e1000_write_e_csr(ep, E_CSR_INEA | E_CSR_TDMD);
544
ep->tx_skbs[0] = skb;
549
static int l_e1000_set_mac_address(struct net_device *dev, const void *addr) {
550
memcpy(&dev->dev_addr[0], addr, sizeof(dev->dev_addr));
551
log_debug("mac addr: %x:%x:%x:%x:%x:%x\n",
552
dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
553
dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
557
/* net_driver .stop hook.
 * NOTE(review): the original body was lost in extraction; this minimal stub
 * returns success so the ops table stays valid — restore the real shutdown
 * (e.g. writing E_CSR_STOP) from the upstream driver. */
static int l_e1000_stop(struct net_device *dev) {
	return 0;
}
static const struct net_driver l_e1000_drv_ops = {
562
.xmit = l_e1000_xmit,
563
.start = l_e1000_open,
564
.stop = l_e1000_stop,
565
.set_macaddr = l_e1000_set_mac_address
568
/* Drain completed RX descriptors: check status bits, hand good frames up the
 * stack and re-arm each descriptor with a fresh skb.
 * NOTE(review): this span is corrupted — declarations (entry, status, skb,
 * msg_len), the error-branch bodies, netif_rx-style delivery and returns were
 * lost in extraction; bare numbers are residue of the original numbering.
 * NOTE(review): `le16toh(...) >= 0` looks intended to test the RD_OWN sign
 * bit, which only works if status is treated as a signed 16-bit value —
 * confirm the dropped declaration's type upstream. */
static int l_e1000_rx(struct l_e1000_priv *ep) {
569
uint8_t * buf = NULL;
573
entry = ep->cur_rx & RX_RING_MOD_MASK;
576
/* Loop while the descriptor is owned by the host (RD_OWN clear). */
while ((le16toh(ep->rx_ring[entry].status)) >= 0) {
582
status = le16toh(ep->rx_ring[entry].status);
583
skb = ep->rx_skbs[entry];
585
/* A good frame has exactly start-of-packet + end-of-packet set. */
if ((status & 0xff00) != (RD_ENP|RD_STP)) {
587
if (status & RD_ENP) {
591
if (status & RD_FRAM) {
594
if (status & RD_OFLO) {
597
if (status & RD_CRC) {
600
if (status & RD_BUFF) {
605
buf = (void *)(uintptr_t)le32toh(ep->rx_ring[entry].base);
606
/* msg_length includes the FCS; strip it. */
msg_len = (le16toh(ep->rx_ring[entry].msg_length) & 0xfff) - ETH_FCS_LEN;
607
show_packet(buf, msg_len, " --- BUF received packet print:");
608
skb->dev = ep->netdev;
614
/* Re-arm the descriptor with a fresh buffer. */
ep->rx_skbs[entry] = l_e1000_new_rx_buff(ep, entry);
616
entry = ep->cur_rx & RX_RING_MOD_MASK;
621
/* Top-level IRQ handler: acknowledge the asserted E_CSR bits, reap the TX
 * descriptor on TINT, log error conditions and dispatch RX work on RINT.
 * NOTE(review): this span is corrupted — the csr0 declaration, early-return
 * for shared interrupts, brace closers, the RINT body (presumably calling
 * l_e1000_rx) and the final return were lost in extraction; bare numbers are
 * residue of the original numbering. */
static irq_return_t l_e1000_interrupt(unsigned int irq_num, void *dev_id) {
622
struct net_device *dev = dev_id;
623
struct l_e1000_priv *ep;
626
ep = netdev_priv(dev);
627
csr0 = e1000_read_e_csr(ep);
628
/* Not our interrupt (shared line). */
if (!(csr0 & E_CSR_INTR)) {
632
log_debug("irq csr0(%x)", csr0);
634
/* Keep only the per-event bits and ack them (write-1-to-clear). */
csr0 &= (E_CSR_BABL | E_CSR_CERR | E_CSR_MISS | E_CSR_MERR | E_CSR_RINT | E_CSR_TINT);
635
e1000_write_e_csr(ep, csr0 | E_CSR_IDON);
637
/* Transmit complete: free the in-flight skb. */
if (csr0 & E_CSR_TINT) {
638
int status = le16toh(ep->tx_ring->status);
639
if (status & TD_ERR) {
640
int err_status = le32toh(ep->tx_ring->misc);
641
log_error("Tx error status=%04x err_status=%08x", status, err_status);
644
ep->tx_ring->status = 0;
645
assert(ep->tx_skbs[0]);
646
skb_free(ep->tx_skbs[0]);
648
/* Unblocks the busy-wait in l_e1000_xmit. */
ep->tx_skbs[0] = NULL;
651
if (csr0 & E_CSR_MERR) {
652
log_debug("irq MERR");
654
if (csr0 & E_CSR_BABL) {
655
log_debug("irq BABL");
658
if (csr0 & E_CSR_MISS) {
663
/* NOTE(review): message says BABL but this is the MISS branch —
 * looks like a copy/paste slip in the original. */
log_debug("irq BABL");
666
if (csr0 & E_CSR_RINT) {
670
/* Re-enable interrupts on the way out. */
e1000_write_e_csr(ep, E_CSR_INEA);
675
/* PCI probe: allocate an ethernet net_device, map BAR0, attach the IRQ
 * handler, enable bus mastering, run hardware init and register the device.
 * NOTE(review): this span is corrupted — declarations (number, res), NULL and
 * error checks after etherdev_alloc/mmap/irq_attach, and brace closers were
 * lost in extraction; bare numbers are residue of the original numbering. */
static int l_e1000_init(struct pci_slot_dev *pci_dev) {
677
struct net_device *nic;
678
struct l_e1000_priv *nic_priv;
681
/* Refuse to probe more cards than we have static DMA areas for. */
if (card_number == E1000_CARD_QUANTITY) {
684
number = card_number;
687
nic = (struct net_device *) etherdev_alloc(sizeof(struct l_e1000_priv));
692
nic->drv_ops = &l_e1000_drv_ops;
693
nic->irq = pci_dev->irq;
694
/* Map BAR0 registers uncached into the kernel address space. */
nic->base_addr = (uintptr_t) mmap_device_memory(
695
(void *) (uintptr_t) (pci_dev->bar[0] & PCI_BASE_ADDR_IO_MASK),
696
L_E1000_TOTAL_PCI_IO_SIZE,
697
PROT_WRITE | PROT_READ | PROT_NOCACHE,
699
pci_dev->bar[0] & PCI_BASE_ADDR_IO_MASK);
701
log_debug("bar (%p)\n base (%x) \n poor bar(%x)",
702
(void *) (uintptr_t) (pci_dev->bar[0] & PCI_BASE_ADDR_IO_MASK),
704
(uint64_t)(pci_dev->bar[0] & PCI_BASE_ADDR_IO_MASK));
706
nic_priv = netdev_priv(nic);
707
nic_priv = memset(nic_priv, 0, sizeof(*nic_priv));
709
res = irq_attach(pci_dev->irq, l_e1000_interrupt, IF_SHARESUP, nic, "l_e1000");
714
pci_set_master(pci_dev);
715
l_e1000_hw_init(pci_dev, nic, number);
717
return inetdev_register_dev(nic);
720
static const struct pci_id l_e1000_id_table[] = {
721
{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MCST_ELBRUS_E1000 },
725
PCI_DRIVER_TABLE("l_e1000", l_e1000_init, l_e1000_id_table);