/**
 * @file
 *
 * @date Jan 9, 2020
 * @author Anton Bondarev
 */
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#include <endian.h>

#include <util/log.h>
#include <util/math.h>

#include <asm/io.h>

#include <kernel/irq.h>

#include <drivers/pci/pci.h>
#include <drivers/pci/pci_driver.h>

#include <net/l0/net_entry.h>
#include <net/skbuff.h>
#include <net/netdevice.h>
#include <net/l2/ethernet.h>
#include <net/inetdevice.h>
#include <net/mii.h>
#include <net/util/show_packet.h>

#include "l_e1000.h"
#include "dp83865_phy.h"

#include <framework/mod/options.h>

/** Number of receive descriptors per card. */
#define E1000_LOG_RX_BUFFERS       OPTION_GET(NUMBER,log_rx_desc_quantity)
/** Number of transmit descriptors per card. */
#define E1000_LOG_TX_BUFFERS       OPTION_GET(NUMBER,log_tx_desc_quantity)

#define E1000_CARD_QUANTITY        OPTION_GET(NUMBER,card_quantity)
#define E1000_PHY_ID               OPTION_GET(NUMBER,phy_id)

/*
 * Set the number of Tx and Rx buffers, using Log_2(# buffers).
 * Reasonable default values are 16 Tx buffers, and 256 Rx buffers.
 * That translates to 4 (16 == 2^4) and 8 (256 == 2^8).
 */
#define TX_RING_SIZE            (1 << (E1000_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK        (TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS        ((E1000_LOG_TX_BUFFERS) << 12)

#define RX_RING_SIZE            (1 << (E1000_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK        (RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS        ((E1000_LOG_RX_BUFFERS) << 4)

static const uint8_t l_base_mac_addr[6] = { 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0x02 };
static int card_number = 0;

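/*
 * Per-card DMA area: the init block and the Rx/Tx descriptor rings live in
 * a statically allocated array indexed by card number; the init block
 * address is later written to the E_BASE_ADDR/DMA_BASE_ADDR registers.
 */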
struct l_e1000_dma_area {
	struct l_e1000_init_block init_block __attribute__((aligned(64)));
	struct l_e1000_rx_desc    rx_ring[RX_RING_SIZE] __attribute__((aligned(16)));
	struct l_e1000_tx_desc    tx_ring[TX_RING_SIZE] __attribute__((aligned(16)));
};

struct mii_if {
	unsigned char full_duplex;
	unsigned char supports_gmii;
	unsigned char phy_id_mask;
	unsigned char reg_num_mask;
	unsigned char phy_id;
};

static struct l_e1000_dma_area e1000_dma_area[E1000_CARD_QUANTITY] __attribute__((aligned(64)));

struct l_e1000_priv {
	struct net_device *netdev;

	struct sk_buff *volatile rx_skbs[RX_RING_SIZE];
	struct sk_buff *volatile tx_skbs[TX_RING_SIZE];

	struct l_e1000_init_block *init_block;
	struct l_e1000_rx_desc    *rx_ring;
	struct l_e1000_tx_desc    *tx_ring;

	uintptr_t base_ioaddr; /* iomapped device regs */

	struct mii_if mii_if;
	int cur_rx;
};

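/*
 * Accessors for the memory-mapped controller registers: each read/write goes
 * through e2k_read32()/e2k_write32() at base_ioaddr plus a register offset.
 */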
static inline uint32_t e1000_read_e_csr(struct l_e1000_priv *ep) {
	assert(ep->base_ioaddr);
	return e2k_read32(ep->base_ioaddr + L_E1000_E_CSR);
}

static inline void e1000_write_e_csr(struct l_e1000_priv *ep, int val) {
	assert(ep->base_ioaddr);
	e2k_write32(val, ep->base_ioaddr + L_E1000_E_CSR);
}

static inline uint32_t e1000_read_mgio_csr(struct l_e1000_priv *ep) {
	assert(ep->base_ioaddr);
	return e2k_read32(ep->base_ioaddr + L_E1000_MGIO_CSR);
}

static inline void e1000_write_mgio_csr(struct l_e1000_priv *ep, int val) {
	assert(ep->base_ioaddr);
	e2k_write32(val, ep->base_ioaddr + L_E1000_MGIO_CSR);
}

static inline uint32_t e1000_read_mgio_data(struct l_e1000_priv *ep) {
	assert(ep->base_ioaddr);
	return e2k_read32(ep->base_ioaddr + L_E1000_MGIO_DATA);
}

static inline void e1000_write_mgio_data(struct l_e1000_priv *ep, int val) {
	assert(ep->base_ioaddr);
	e2k_write32(val, ep->base_ioaddr + L_E1000_MGIO_DATA);
}

static inline void e1000_write_e_base_addr(struct l_e1000_priv *ep, int val) {
	assert(ep->base_ioaddr);
	e2k_write32(val, ep->base_ioaddr + L_E1000_E_BASE_ADDR);
}

static inline void e1000_write_dma_base_addr(struct l_e1000_priv *ep, int val) {
	assert(ep->base_ioaddr);
	e2k_write32(val, ep->base_ioaddr + L_E1000_DMA_BASE_ADDR);
}

static inline uint32_t e1000_read_psf_csr(struct l_e1000_priv *ep) {
	assert(ep->base_ioaddr);
	return e2k_read32(ep->base_ioaddr + L_E1000_PSF_CSR);
}

static inline uint32_t e1000_read_psf_data(struct l_e1000_priv *ep) {
	assert(ep->base_ioaddr);
	return e2k_read32(ep->base_ioaddr + L_E1000_PSF_DATA);
}

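/*
 * MDIO access to the PHY: the request is packed into MGIO_DATA and the code
 * then polls MGIO_CSR for the RRDY bit (up to 1000 * 100 us) before taking
 * the result from MGIO_DATA.
 */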
static int e1000_mii_readreg(struct net_device *dev, int phy_id, int reg_num) {
	struct l_e1000_priv *ep = netdev_priv(dev);
	uint32_t rd;
	uint16_t val_out = 0;
	int i = 0;

	rd = 0;
	rd |= 0x2 << MGIO_CS_OFF;
	rd |= 0x1 << MGIO_ST_OF_F_OFF;
	rd |= 0x2 << MGIO_OP_CODE_OFF; /* Read */
	rd |= (phy_id  & 0x1f) << MGIO_PHY_AD_OFF;
	rd |= (reg_num & 0x1f) << MGIO_REG_AD_OFF;

	e1000_write_mgio_data(ep, rd);
	rd = 0;

	for (i = 0; i != 1000; i++) {
		if (e1000_read_mgio_csr(ep) & MGIO_CSR_RRDY) {
			rd = (uint16_t)e1000_read_mgio_data(ep);

			val_out = rd & 0xffff;
			log_debug("reg 0x%x >>> 0x%x", reg_num, val_out);
			return val_out;
		}
		usleep(100);
	}

	log_error("mdio_read: Unable to read from MGIO_DATA reg\n");

	return val_out;
}

static void e1000_mii_writereg(struct net_device *dev, int phy_id, int reg_num, int val) {
	struct l_e1000_priv *ep = netdev_priv(dev);
	uint32_t wr;
	int i = 0;

	wr = 0;
	wr |= 0x2 << MGIO_CS_OFF;
	wr |= 0x1 << MGIO_ST_OF_F_OFF;
	wr |= 0x1 << MGIO_OP_CODE_OFF; /* Write */
	wr |= (phy_id  & 0x1f) << MGIO_PHY_AD_OFF;
	wr |= (reg_num & 0x1f) << MGIO_REG_AD_OFF;
	wr |= val & 0xffff;

	log_debug("reg 0x%x <<< 0x%x", reg_num, val);
	e1000_write_mgio_data(ep, wr);

	for (i = 0; i != 1000; i++) {
		if (e1000_read_mgio_csr(ep) & MGIO_CSR_RRDY) {
			return;
		}
		usleep(100);
	}
	log_error("Unable to write MGIO_DATA reg: val = 0x%x", wr);
	return;
}

static inline void
e1000_phy_mmd_write(void *eth1000_ptr, uint8_t mmd_dev, uint16_t mmd_reg, uint16_t val) {
	e1000_mii_writereg(eth1000_ptr, 0, 0xD, mmd_dev & 0x1F);
	e1000_mii_writereg(eth1000_ptr, 0, 0xE, mmd_reg);
	e1000_mii_writereg(eth1000_ptr, 0, 0xD, (0x01 << 14) | (mmd_dev & 0x1F));
	e1000_mii_writereg(eth1000_ptr, 0, 0xE, val);
}

#define SPEED_10   10
#define SPEED_100  100
#define SPEED_1000 1000

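/*
 * Select the link speed from the MII/GMII negotiation registers, fall back
 * to the statically assigned speed if they disagree, and program MGIO_CSR
 * (HARD, FDUP, GETH/FETH) to match the chosen speed and duplex.
 */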
static void l_e1000_set_phy_mode(struct net_device *dev) {
	static const int assigned_speed = SPEED_1000;
	struct l_e1000_priv *ep = netdev_priv(dev);
	unsigned int val;
	unsigned int lpa;
	unsigned int advertise;
	unsigned int media;
	unsigned int lpa2;
	unsigned int advertise2;
	int speed = SPEED_1000;

	lpa = e1000_mii_readreg(dev, ep->mii_if.phy_id, MII_LPA);
	advertise = e1000_mii_readreg(dev, ep->mii_if.phy_id, MII_ADVERTISE);
	log_debug("MII lpa 0x%x advertise 0x%x", lpa, advertise);
	if (ep->mii_if.supports_gmii) {
		lpa2 = e1000_mii_readreg(dev, ep->mii_if.phy_id, MII_STAT1000);
		advertise2 = e1000_mii_readreg(dev, ep->mii_if.phy_id, MII_CTRL1000);
		log_debug(" GMII status 0x%x control 0x%x", lpa2, advertise2);
		if ((advertise2 & (ADVERTISE_1000HALF | ADVERTISE_1000FULL)) &&
				(lpa2 & (LPA_1000FULL | LPA_1000HALF))) {
			speed = SPEED_1000;
		}
	}
	if (speed == 0) {
		media = mii_nway_result(lpa & advertise);
		if (media & (ADVERTISE_100FULL | ADVERTISE_100HALF)) {
			speed = SPEED_100;
		} else {
			speed = SPEED_10;
		}
	}
	if (speed != assigned_speed) {
		log_debug("decrease speed to %d due to assigned speed", assigned_speed);
		speed = assigned_speed;
		if (ep->mii_if.supports_gmii && (speed < SPEED_1000)) {
			lpa2 &= ~(LPA_1000FULL | LPA_1000HALF);
			e1000_mii_writereg(dev, ep->mii_if.phy_id, MII_STAT1000, lpa2);
			advertise2 &= ~(ADVERTISE_1000HALF | ADVERTISE_1000FULL);
			e1000_mii_writereg(dev, ep->mii_if.phy_id, MII_CTRL1000, advertise2);
		}
		if (speed == SPEED_10) {
			lpa &= ~(LPA_100FULL | LPA_100BASE4 | LPA_100HALF);
			e1000_mii_writereg(dev, ep->mii_if.phy_id, MII_LPA, lpa);

			advertise &= ~(ADVERTISE_1000XFULL |
				ADVERTISE_1000XHALF |
				ADVERTISE_1000XPSE_ASYM |
				ADVERTISE_1000XPAUSE);
			advertise |= (ADVERTISE_10FULL | ADVERTISE_10HALF);
			e1000_mii_writereg(dev, ep->mii_if.phy_id, MII_ADVERTISE, advertise);
		}
	}
	log_debug("e1000_set_phy_mode will set %d Mbits %s-duplex mode",
		speed, (ep->mii_if.full_duplex) ? "full" : "half");
	val = e1000_read_mgio_csr(ep);
	log_debug("mgio_csr before set : 0x%x", val);
	val |= MGIO_CSR_HARD;
	e1000_write_mgio_csr(ep, val);
	log_debug("mgio_csr after writing HARD = 1 in : 0x%x", e1000_read_mgio_csr(ep));
	val &= ~(MGIO_CSR_FETH | MGIO_CSR_GETH | MGIO_CSR_FDUP);
	if (ep->mii_if.full_duplex) {
		val |= MGIO_CSR_FDUP;
	}
	if (speed >= SPEED_1000) {
		val |= MGIO_CSR_GETH;
	} else if (speed >= SPEED_100) {
		val |= MGIO_CSR_FETH;
	}

	e1000_write_mgio_csr(ep, val);
	log_debug("mgio_csr after setting %d Mbits %s-duplex mode : 0x%x",
		speed, (ep->mii_if.full_duplex) ? "full" : "half", e1000_read_mgio_csr(ep));
}

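/*
 * Allocate an sk_buff for Rx ring slot i and hand it to the controller: the
 * descriptor gets the buffer address and negative length, and the OWN bit is
 * set last behind a write barrier.
 */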
static struct sk_buff *l_e1000_new_rx_buff(struct l_e1000_priv *ep, int i) {
	struct sk_buff *skb;
	struct l_e1000_rx_desc *rdesc;

	log_debug("i %d", i);
	skb = skb_alloc(ETH_FRAME_LEN + ETH_FCS_LEN);
	if (skb == NULL) {
		log_error("couldn't skb_alloc(i %d)", i);
		return NULL;
	}
	log_debug("skb = %p", skb);

	rdesc = &ep->rx_ring[i];
	rdesc->base = htole32((uintptr_t)skb_get_data_pointner(skb->data));
	rdesc->buf_length = htole16(-(ETH_FRAME_LEN + ETH_FCS_LEN));
	wmb();
	rdesc->status = htole16(RD_OWN);
	wmb();
	log_debug("rxdesc[%d](base=0x%x,len=%d)", i,
			le32toh(rdesc->base), le32toh(rdesc->buf_length));

	return skb;
}

/* Initialize the E1000 Rx and Tx rings. */
static int e1000_init_block(struct net_device *dev) {
	struct l_e1000_priv *ep = netdev_priv(dev);
	uint32_t init_block_addr_part;
	int i;

	ep->cur_rx = 0;

	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_skbs[i] = l_e1000_new_rx_buff(ep, i);
		if (NULL == ep->rx_skbs[i]) {
			log_error("Couldn't alloc sk_buff");
			return -ENOMEM;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbs[i] = NULL;
	}

	ep->tx_ring->status = 0;
	wmb();
	ep->tx_ring->base = 0;
	ep->tx_ring->buf_length = 0;

	/* Setup init block */
	/************************************************************************/
	ep->init_block->mode = htole16(FULL|PROM);
	ep->init_block->laddrf = 0x0;
	for (i = 0; i < 6; i++) {
		ep->init_block->paddr[i] = dev->dev_addr[i];
	}

	ep->init_block->rdra = htole32((uint32_t)((uintptr_t)(&ep->rx_ring[0])));
	ep->init_block->rdra |= htole32(E1000_LOG_RX_BUFFERS);
	ep->init_block->tdra = htole32((uint32_t)((uintptr_t)(&ep->tx_ring[0])));
	ep->init_block->tdra |= htole32(E1000_LOG_TX_BUFFERS);
	log_debug("Rx Desc Ring DMA Addr 0x%x", le32toh(ep->init_block->rdra));
	log_debug("Tx Desc Ring DMA Addr 0x%x", le32toh(ep->init_block->tdra));
	/***********************************************************************/
	/* low 32 bits */
	init_block_addr_part = (uint32_t)((uintptr_t)ep->init_block & 0xffffffff);
	e1000_write_e_base_addr(ep, init_block_addr_part);
	log_debug("Init Block Low  DMA addr: 0x%x", init_block_addr_part);
	/* high 32 bits */
	init_block_addr_part = (uint32_t)(((uintptr_t)(ep->init_block) >> 32) & 0xffffffff);
	e1000_write_dma_base_addr(ep, init_block_addr_part);
	log_debug("Init Block High DMA addr: 0x%x", init_block_addr_part);
	/************************************************************************/

	return 0;
}

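/*
 * Stop the controller, rebuild the init block and descriptor rings, then run
 * the INIT/START sequence on E_CSR, waiting for IDON and finally enabling
 * interrupts with INEA | STRT.
 */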
static int l_e1000_reset(struct net_device *dev) {
	struct l_e1000_priv *ep = netdev_priv(dev);
	int i;

	/* Reset the controller */
	e1000_write_e_csr(ep, E_CSR_STOP);
	/* wait for stop */
	for (i = 0; i < 1000; i++) {
		if (e1000_read_e_csr(ep) & E_CSR_STOP) {
			break;
		}
	}

	e1000_init_block(dev);

	e1000_write_e_csr(ep, E_CSR_INIT);
	l_e1000_set_phy_mode(dev);

	sleep(5);

	i = 0;
	while (i++ < 1000) {
		if (e1000_read_e_csr(ep) & E_CSR_IDON) {
			break;
		}
	}

	log_debug("e_csr register after initialization: 0x%x, must be 0x%x",
			e1000_read_e_csr(ep), (E_CSR_IDON | E_CSR_INTR | E_CSR_INIT));

	e1000_write_e_csr(ep, E_CSR_IDON);
	log_debug("e_csr register after clear IDON bit: 0x%x, must be 0x%x",
			e1000_read_e_csr(ep), (E_CSR_INIT));

	e1000_write_e_csr(ep, E_CSR_INEA | E_CSR_STRT);
	log_debug("e_csr register after setting STRT bit: 0x%x, must be 0x%x",
			e1000_read_e_csr(ep),
			(E_CSR_INEA | E_CSR_RXON | E_CSR_TXON | E_CSR_STRT | E_CSR_INIT));

	return 0;
}

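/*
 * One-time per-card setup: attach the statically allocated DMA area, derive
 * the MAC address from l_base_mac_addr and the card number, fill in the MII
 * state and dump a few PHY registers for diagnostics.
 */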
static int l_e1000_hw_init(struct pci_slot_dev *pci_dev, struct net_device *dev, int number) {
	struct l_e1000_priv *ep;
	struct l_e1000_dma_area *m;

	int fdx, mii, gmii;
	uint32_t val = 0;
	int i;

	ep = netdev_priv(dev);

	m = &e1000_dma_area[number];

	ep->init_block = &m->init_block;
	ep->tx_ring = &m->tx_ring[0];
	ep->rx_ring = &m->rx_ring[0];

	ep->base_ioaddr = dev->base_addr;
	ep->netdev = dev;

#if 0 /* soft reset */
	/* Setup STOP bit; Force e1000 resetting  */
	{
		unsigned int soft_reset;

		e1000_write_e_csr(ep, E_CSR_STOP);
		/* PHY Resetting */
		soft_reset = 0;
		soft_reset |= (L_E1000_RSET_POLARITY | MGIO_CSR_SRST);
		e1000_write_mgio_csr(ep, soft_reset); /* startup software reset */
		soft_reset = e1000_read_mgio_csr(ep);
		soft_reset &= ~(MGIO_CSR_SRST);
		e1000_write_mgio_csr(ep, soft_reset); /* stop software reset */
	}
#endif /* soft reset */

	for (i = 0; i < 6; i++) {
		dev->dev_addr[i] = l_base_mac_addr[i];
	}
	dev->dev_addr[5] = l_base_mac_addr[5] + number;

	fdx = 1; mii = 1; gmii = 1;
	ep->mii_if.full_duplex = fdx;
	ep->mii_if.supports_gmii = gmii;
	ep->mii_if.phy_id_mask = 0x1f;
	ep->mii_if.reg_num_mask = 0x1f;
	ep->mii_if.phy_id = E1000_PHY_ID;

	/* Setup PHY MII/GMII enable */
/*	val = e1000_mii_readreg(dev, ep->mii_if.phy_id, PHY_AUX_CTRL);
	log_info("PHY reg # 0x12 (AUX_CTRL) : after reset : 0x%x\n", val);
	val &= ~(RGMII_EN_1 | RGMII_EN_0);
	e1000_mii_writereg(dev, ep->mii_if.phy_id, PHY_AUX_CTRL, val);
*/
	/* Setup PHY 10/100/1000 Link on 10M Link */
/*	val = e1000_mii_readreg(dev, ep->mii_if.phy_id, PHY_LED_CTRL);
	log_info("PHY reg # 0x13 (LED_CTRL) : after reset : 0x%x\n", val);
	val |= RED_LEN_EN;
	e1000_mii_writereg(dev, ep->mii_if.phy_id, PHY_LED_CTRL, val);
	val = e1000_mii_readreg(dev, ep->mii_if.phy_id, PHY_LED_CTRL);
	log_info("PHY reg # 0x13 (LED_CTRL) : after led is : 0x%x\n", val);
*/
	val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0);
	log_info("PHY reg # 0x0  after reset : 0x%x\n", val);
	val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0x1);
	log_info("PHY reg # 0x1  after reset : 0x%x\n", val);
	val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0x1);
	log_info("PHY reg # 0x1  after reset : 0x%x\n", val);
	val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0x10);
	log_info("PHY reg # 0x10  after reset : 0x%x\n", val);
	val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0x11);
	log_info("PHY reg # 0x11  after reset : 0x%x\n", val);
	val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0x14);
	log_info("PHY reg # 0x14  after reset : 0x%x\n", val);
	val = e1000_mii_readreg(dev, ep->mii_if.phy_id, 0x15);
	log_info("PHY reg # 0x15  after reset : 0x%x\n", val);
/*
	val = e1000_mii_readreg(dev, ep->mii_if.phy_id, PHY_BIST_CFG2);
	log_info("PHY reg # 0x1a (BIST_CFG2): 0x%x\n", val);
	val |= LINK_SEL;
	e1000_mii_writereg(dev, ep->mii_if.phy_id, PHY_BIST_CFG2, val);
	val = e1000_mii_readreg(dev, ep->mii_if.phy_id, PHY_BIST_CFG2);
	log_info(" read mgio csr #0x04 (BIST_CFG2) : 0x%x\n", val);
*/
/* Microchip phy special regs */
#if 0
	e1000_phy_mmd_write(dev, 0x0, 0x4, 0x0006);
	e1000_phy_mmd_write(dev, 0x0, 0x3, 0x1A80);
#endif
/* End Microchip phy special regs */
	/* move e1000 link status select to default 0 link */
/* Marvell specific */
#if 0
	log_info("move e1000 link status select to default 0 link \n");
	val = e1000_read_mgio_csr(ep);
	val &= ~MGIO_CSR_LSTS;
	val |= MGIO_CSR_SLSP;
	e1000_write_mgio_csr(ep, val);
	log_info(" write mgio csr #0x04 (BIST_CFG2): 0x%x\n", val);
#endif /* End Marvell specific */

	return 0;
}

static int l_e1000_open(struct net_device *dev) {
	log_debug("mac addr: %x:%x:%x:%x:%x:%x\n",
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
	l_e1000_reset(dev);
	return 0;
}

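/*
 * Transmit path: only one packet is in flight at a time. The caller busy-waits
 * until the previous sk_buff has been released by the Tx interrupt, then fills
 * the single Tx descriptor and kicks the controller with TDMD.
 */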
static int l_e1000_xmit(struct net_device *dev, struct sk_buff *skb) {
	struct l_e1000_priv *ep = netdev_priv(dev);
	uint16_t status;
	int len;

	len = max(skb->len, ETH_ZLEN);

	show_packet(skb_get_data_pointner(skb->data), len, "transmit");

	/* wait until the previous buffer has been freed by the Tx interrupt */
	while (ep->tx_skbs[0] != NULL) {
	}

	/* TODO now we send only one packet at a time; publish the skb before
	 * handing the descriptor to the hardware so the Tx interrupt can find it */
	ep->tx_skbs[0] = skb;

	ep->tx_ring->base = (uint32_t)(uintptr_t)skb_get_data_pointner(skb->data);
	ep->tx_ring->buf_length = htole16(-len);
	ep->tx_ring->misc = 0x00000000;
	status = TD_OWN | TD_ENP | TD_STP;
	ep->tx_ring->status = htole16(status);
	wmb();
	e1000_write_e_csr(ep, E_CSR_INEA | E_CSR_TDMD);

	return 0;
}

static int l_e1000_set_mac_address(struct net_device *dev, const void *addr) {
	memcpy(&dev->dev_addr[0], addr, sizeof(dev->dev_addr));
	log_debug("mac addr: %x:%x:%x:%x:%x:%x\n",
			dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
			dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
	return 0;
}

static int l_e1000_stop(struct net_device *dev) {
	return 0;
}

static const struct net_driver l_e1000_drv_ops = {
	.xmit = l_e1000_xmit,
	.start = l_e1000_open,
	.stop = l_e1000_stop,
	.set_macaddr = l_e1000_set_mac_address
};

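/*
 * Drain the Rx ring: every descriptor returned by the controller is either
 * passed to the network stack via netif_rx() or dropped on error, and a
 * freshly allocated buffer is installed in its place.
 */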
static int l_e1000_rx(struct l_e1000_priv *ep) {
	uint8_t *buf = NULL;
	int msg_len;
	int entry;

	entry = ep->cur_rx & RX_RING_MOD_MASK;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((le16toh(ep->rx_ring[entry].status)) >= 0) {
		struct sk_buff *skb;
		int status;

		ep->cur_rx++;

		status = le16toh(ep->rx_ring[entry].status);
		skb = ep->rx_skbs[entry];

		if ((status & 0xff00) != (RD_ENP | RD_STP)) { /* There was an error. */
			msg_len = 0;
			if (status & RD_ENP) {
				log_debug(" ENP");
			}
			/* No detailed rx_errors counter to increment at the end of a packet. */
			if (status & RD_FRAM) {
				log_debug(" FRAM");
			}
			if (status & RD_OFLO) {
				log_debug(" OFLO ");
			}
			if (status & RD_CRC) {
				log_debug(" CRC ");
			}
			if (status & RD_BUFF) {
				log_debug(" BUFF ");
			}
			skb_free(skb);
		} else {
			buf = (void *)(uintptr_t)le32toh(ep->rx_ring[entry].base);
			msg_len = (le16toh(ep->rx_ring[entry].msg_length) & 0xfff) - ETH_FCS_LEN;
			show_packet(buf, msg_len, " --- BUF received packet print:");
			skb->dev = ep->netdev;
			skb->len = msg_len;

			netif_rx(skb);
		}

		ep->rx_skbs[entry] = l_e1000_new_rx_buff(ep, entry);

		entry = ep->cur_rx & RX_RING_MOD_MASK;
	}
	return 0;
}

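/*
 * Interrupt handler: acknowledge the pending E_CSR bits, release the Tx
 * sk_buff on TINT, treat MISS as RINT (the controller does not raise RINT
 * while all Rx buffers are full) and re-enable interrupts before returning.
 */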
static irq_return_t l_e1000_interrupt(unsigned int irq_num, void *dev_id) {
	struct net_device *dev = dev_id;
	struct l_e1000_priv *ep;
	uint16_t csr0;

	ep = netdev_priv(dev);
	csr0 = e1000_read_e_csr(ep);
	if (!(csr0 & E_CSR_INTR)) {
		return IRQ_NONE; /* Not our interrupt */
	}

	log_debug("irq csr0(%x)", csr0);

	csr0 &= (E_CSR_BABL | E_CSR_CERR | E_CSR_MISS | E_CSR_MERR | E_CSR_RINT | E_CSR_TINT);
	e1000_write_e_csr(ep, csr0 | E_CSR_IDON);

	if (csr0 & E_CSR_TINT) {
		int status = le16toh(ep->tx_ring->status);
		if (status & TD_ERR) {
			int err_status = le32toh(ep->tx_ring->misc);
			log_error("Tx error status=%04x err_status=%08x", status, err_status);
		}

		ep->tx_ring->status = 0;
		assert(ep->tx_skbs[0]);
		skb_free(ep->tx_skbs[0]);

		ep->tx_skbs[0] = NULL;
	}

	if (csr0 & E_CSR_MERR) {
		log_debug("irq MERR");
	}
	if (csr0 & E_CSR_BABL) {
		log_debug("irq BABL");
	}

	if (csr0 & E_CSR_MISS) {
		/* The device doesn't set RINT when the buffers are full
		 * and MISS is set, so we do it manually.
		 */
		csr0 |= E_CSR_RINT;
		log_debug("irq MISS");
	}

	if (csr0 & E_CSR_RINT) {
		l_e1000_rx(ep);
	}

	e1000_write_e_csr(ep, E_CSR_INEA);

	return IRQ_HANDLED;
}

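/*
 * PCI probe entry point: map BAR0, attach the (shared) IRQ handler, enable
 * bus mastering, run the hardware init and register the Ethernet device.
 */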
static int l_e1000_init(struct pci_slot_dev *pci_dev) {
	int res;
	struct net_device *nic;
	struct l_e1000_priv *nic_priv;
	int number;

	if (card_number == E1000_CARD_QUANTITY) {
		return 0;
	}
	number = card_number;
	card_number++;

	nic = (struct net_device *) etherdev_alloc(sizeof(struct l_e1000_priv));
	if (nic == NULL) {
		return -ENOMEM;
	}

	nic->drv_ops = &l_e1000_drv_ops;
	nic->irq = pci_dev->irq;
	nic->base_addr = (uintptr_t) mmap_device_memory(
			(void *) (uintptr_t) (pci_dev->bar[0] & PCI_BASE_ADDR_IO_MASK),
			L_E1000_TOTAL_PCI_IO_SIZE,
			PROT_WRITE | PROT_READ | PROT_NOCACHE,
			MAP_FIXED,
			pci_dev->bar[0] & PCI_BASE_ADDR_IO_MASK);

	log_debug("bar (%p) base (0x%lx) raw bar (0x%lx)",
			(void *) (uintptr_t) (pci_dev->bar[0] & PCI_BASE_ADDR_IO_MASK),
			(unsigned long) nic->base_addr,
			(unsigned long) (pci_dev->bar[0] & PCI_BASE_ADDR_IO_MASK));

	nic_priv = netdev_priv(nic);
	memset(nic_priv, 0, sizeof(*nic_priv));

	res = irq_attach(pci_dev->irq, l_e1000_interrupt, IF_SHARESUP, nic, "l_e1000");
	if (res < 0) {
		return res;
	}

	pci_set_master(pci_dev);
	l_e1000_hw_init(pci_dev, nic, number);

	return inetdev_register_dev(nic);
}

static const struct pci_id l_e1000_id_table[] = {
	{ PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MCST_ELBRUS_E1000 },
	{ 0x1FFF, 0x8016 },
};

PCI_DRIVER_TABLE("l_e1000", l_e1000_init, l_e1000_id_table);