/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

FILE_LICENCE ( GPL2_ONLY );

#include <strings.h>
#include <errno.h>
#include <gpxe/malloc.h>
#include <gpxe/umalloc.h>
#include <byteswap.h>
#include <unistd.h>
#include <gpxe/io.h>
#include <gpxe/pci.h>
#include <gpxe/ethernet.h>
#include <gpxe/netdevice.h>
#include <gpxe/iobuf.h>
#include "mtnic.h"


/*

    mtnic.c - gPXE driver for Mellanox 10Gig ConnectX EN

*/

/********************************************************************
*
*	MTNIC allocation functions
*
*********************************************************************/
/**
* mtnic_alloc_aligned
*
* @v	unsigned int size	size
* @v	void **va		virtual address
* @v	unsigned long *pa	physical address
* @v	u32 alignment		alignment
*
* Allocate an aligned buffer, returning its virtual address in 'va'
* and its aligned physical (bus) address in 'pa'
*/
static int
mtnic_alloc_aligned(unsigned int size, void **va, unsigned long *pa, unsigned int alignment)
{
	*va = alloc_memblock(size, alignment);
	if (!*va) {
		return -EADDRINUSE;
	}
	*pa = (u32)virt_to_bus(*va);
	return 0;
}


/**
 *
 * mtnic alloc command interface
 *
 */
static int
mtnic_alloc_cmdif(struct mtnic *mtnic)
{
	u32 bar = mtnic_pci_dev.dev.bar[0];
	int err;

	mtnic->hcr = ioremap(bar + MTNIC_HCR_BASE, MTNIC_HCR_SIZE);
	if ( !mtnic->hcr ) {
		DBG("Couldn't map command register\n");
		return -EADDRINUSE;
	}
	err = mtnic_alloc_aligned(PAGE_SIZE, (void *)&mtnic->cmd.buf,
				  &mtnic->cmd.mapping, PAGE_SIZE);
	if (err) {
		DBG("Error allocating buffer for command interface\n");
		iounmap(mtnic->hcr);
		return -EADDRINUSE;
	}
	return 0;
}

/**
 * Free RX io buffers
 */
static void
mtnic_free_io_buffers(struct mtnic_ring *ring)
{
	int index;

	for (; ring->cons <= ring->prod; ++ring->cons) {
		index = ring->cons & ring->size_mask;
		if ( ring->iobuf[index] ) {
			free_iob(ring->iobuf[index]);
		}
	}
}



/**
 *
 * mtnic alloc and attach io buffers
 *
 */
static int
mtnic_alloc_iobuf(struct mtnic_port *priv, struct mtnic_ring *ring,
		  unsigned int size)
{
	struct mtnic_rx_desc *rx_desc_ptr = ring->buf;
	u32 index;

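	/* prod and cons are free-running indices, masked with size_mask
	 * on access; their difference is the number of buffers currently
	 * posted to the ring */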
	while ((u32)(ring->prod - ring->cons) < UNITS_BUFFER_SIZE) {
		index = ring->prod & ring->size_mask;
		ring->iobuf[index] = alloc_iob(size);
		if (!ring->iobuf[index]) {
			if (ring->prod <= (ring->cons + 1)) {
				DBG ( "Dropping packet, buffer is full\n" );
			}
			break;
		}

		/* Attach io_buffer to descriptor */
		rx_desc_ptr = ring->buf +
			      (sizeof(struct mtnic_rx_desc) * index);
		rx_desc_ptr->data.count = cpu_to_be32(size);
		rx_desc_ptr->data.mem_type = priv->mtnic->fw.mem_type_snoop_be;
		rx_desc_ptr->data.addr_l = cpu_to_be32(
						      virt_to_bus(ring->iobuf[index]->data));

		++ring->prod;
	}

	/* Update RX producer index (PI) */
	ring->db->count = cpu_to_be32(ring->prod & 0xffff);
	return 0;
}


/**
 * mtnic alloc ring
 *
 * 	Alloc and configure TX or RX ring
 *
 */
static int
mtnic_alloc_ring(struct mtnic_port *priv, struct mtnic_ring *ring,
		 u32 size, u16 stride, u16 cq, u8 is_rx)
{
	unsigned int i;
	int err;
	struct mtnic_rx_desc *rx_desc;
	struct mtnic_tx_desc *tx_desc;

	ring->size = size; /* Number of descriptors */
	ring->size_mask = size - 1;
	ring->stride = stride; /* Size of each entry */
	ring->cq = cq; /* CQ number associated with this ring */
	ring->cons = 0;
	ring->prod = 0;

	/* Alloc descriptors buffer */
	ring->buf_size = ring->size * ((is_rx) ? sizeof(struct mtnic_rx_desc) :
				       sizeof(struct mtnic_tx_desc));
	err = mtnic_alloc_aligned(ring->buf_size, (void *)&ring->buf,
				  &ring->dma, PAGE_SIZE);
	if (err) {
		DBG("Failed allocating descriptor ring of size %x\n",
		    ring->buf_size);
		return -EADDRINUSE;
	}
	memset(ring->buf, 0, ring->buf_size);

	DBG("Allocated %s ring (addr:%p) - buf:%p size:%x "
	    "buf_size:%x dma:%lx\n",
	    is_rx ? "Rx" : "Tx", ring, ring->buf, ring->size,
	    ring->buf_size, ring->dma);


	if (is_rx) { /* RX ring */
		/* Alloc doorbell */
		err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
					  (void *)&ring->db, &ring->db_dma, 32);
		if (err) {
			DBG("Failed allocating Rx ring doorbell record\n");
			free_memblock(ring->buf, ring->buf_size);
			return -EADDRINUSE;
		}

		/* ==- Configure Descriptor -== */
		/* Init ctrl seg of rx desc */
		for (i = 0; i < UNITS_BUFFER_SIZE; ++i) {
			rx_desc = ring->buf +
				  (sizeof(struct mtnic_rx_desc) * i);
			/* Pre-link descriptor */
			rx_desc->next = cpu_to_be16(i + 1);
		}
		/* The last ctrl descriptor is '0' and points to the first one */

		/* Alloc IO_BUFFERS */
		err = mtnic_alloc_iobuf ( priv, ring, DEF_IOBUF_SIZE );
		if (err) {
			DBG("ERROR Allocating io buffer\n");
			free_memblock(ring->buf, ring->buf_size);
			return -EADDRINUSE;
		}

	} else { /* TX ring */
		/* Set initial ownership of all Tx Desc' to SW (1) */
		for (i = 0; i < ring->size; i++) {
			tx_desc = ring->buf + ring->stride * i;
			tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_BIT_DESC_OWN);
		}
		/* Doorbell: the value written to the send doorbell on each
		 * transmit (per-port Tx offset reported by the firmware) */
		ring->db_offset = cpu_to_be32(
					     ((u32) priv->mtnic->fw.tx_offset[priv->port]) << 8);

		/* Map Tx+CQ doorbells */
		DBG("Mapping TxCQ doorbell at offset:0x%x\n",
		    priv->mtnic->fw.txcq_db_offset);
		ring->txcq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
					priv->mtnic->fw.txcq_db_offset, PAGE_SIZE);
		if (!ring->txcq_db) {
			DBG("Couldn't map txcq doorbell, aborting...\n");
			free_memblock(ring->buf, ring->buf_size);
			return -EADDRINUSE;
		}
	}

	return 0;
}



/**
 * mtnic alloc CQ
 *
 *	Alloc and configure CQ.
 *
 */
static int
mtnic_alloc_cq(struct net_device *dev, int num, struct mtnic_cq *cq,
	       u8 is_rx, u32 size, u32 offset_ind)
{
	int err;
	unsigned int i;

	cq->num = num;
	cq->dev = dev;
	cq->size = size;
	cq->last = 0;
	cq->is_rx = is_rx;
	cq->offset_ind = offset_ind;

	/* Alloc doorbell */
	err = mtnic_alloc_aligned(sizeof(struct mtnic_cq_db_record),
				  (void *)&cq->db, &cq->db_dma, 32);
	if (err) {
		DBG("Failed allocating CQ doorbell record\n");
		return -EADDRINUSE;
	}
	memset(cq->db, 0, sizeof(struct mtnic_cq_db_record));

	/* Alloc CQEs buffer */
	cq->buf_size = size * sizeof(struct mtnic_cqe);
	err = mtnic_alloc_aligned(cq->buf_size,
				  (void *)&cq->buf, &cq->dma, PAGE_SIZE);
	if (err) {
		DBG("Failed allocating CQ buffer\n");
		free_memblock(cq->db, sizeof(struct mtnic_cq_db_record));
		return -EADDRINUSE;
	}
	memset(cq->buf, 0, cq->buf_size);
	DBG("Allocated CQ (addr:%p) - size:%x buf:%p buf_size:%x "
	    "dma:%lx db:%p db_dma:%lx\n"
	    "cqn offset:%x\n", cq, cq->size, cq->buf,
	    cq->buf_size, cq->dma, cq->db,
	    cq->db_dma, offset_ind);


	/* Set ownership of all CQEs to HW */
	DBG("Setting HW ownership for CQ:%d\n", num);
	for (i = 0; i < cq->size; i++) {
		/* Initial HW ownership is 1 */
		cq->buf[i].op_tr_own = MTNIC_BIT_CQ_OWN;
	}
	return 0;
}



/**
 * mtnic_alloc_resources
 *
 * 	Alloc and configure CQs, Tx, Rx
 */
unsigned int
mtnic_alloc_resources(struct net_device *dev)
{
	struct mtnic_port *priv = netdev_priv(dev);
	int err;
	int cq_ind = 0;
	int cq_offset = priv->mtnic->fw.cq_offset;

	/* Alloc 1st CQ */
	err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 1 /* RX */,
			     UNITS_BUFFER_SIZE, cq_offset + cq_ind);
	if (err) {
		DBG("Failed allocating Rx CQ\n");
		return -EADDRINUSE;
	}


	/* Alloc RX */
	err = mtnic_alloc_ring(priv, &priv->rx_ring, UNITS_BUFFER_SIZE,
			       sizeof(struct mtnic_rx_desc), cq_ind, /* RX */ 1);
	if (err) {
		DBG("Failed allocating Rx Ring\n");
		goto cq0_error;
	}


	++cq_ind;

	/* Alloc 2nd CQ */
	err = mtnic_alloc_cq(dev, cq_ind, &priv->cq[cq_ind], 0 /* TX */,
			     UNITS_BUFFER_SIZE, cq_offset + cq_ind);
	if (err) {
		DBG("Failed allocating Tx CQ\n");
		goto rx_error;
	}

	/* Alloc TX */
	err = mtnic_alloc_ring(priv, &priv->tx_ring, UNITS_BUFFER_SIZE,
			       sizeof(struct mtnic_tx_desc), cq_ind, /* TX */ 0);
	if (err) {
		DBG("Failed allocating Tx ring\n");
		goto cq1_error;
	}

	return 0;

cq1_error:
	free_memblock(priv->cq[1].buf, priv->cq[1].buf_size);
	free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));

rx_error:
	free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size);
	free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
	mtnic_free_io_buffers(&priv->rx_ring);
cq0_error:
	free_memblock(priv->cq[0].buf, priv->cq[0].buf_size);
	free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));

	return -EADDRINUSE;
}


/**
 *  mtnic alloc_eq
 *
 * Note: EQ is not used by the driver but must be allocated
 */
static int
mtnic_alloc_eq(struct mtnic *mtnic)
{
	int err;
	unsigned int i;
	struct mtnic_eqe *eqe_desc = NULL;

	/* Allocating doorbell */
	mtnic->eq_db = ioremap(mtnic_pci_dev.dev.bar[2] +
			       mtnic->fw.eq_db_offset, sizeof(u32));
	if (!mtnic->eq_db) {
		DBG("Couldn't map EQ doorbell, aborting...\n");
		return -EADDRINUSE;
	}

	/* Allocating buffer */
	mtnic->eq.size = NUM_EQES;
	mtnic->eq.buf_size = mtnic->eq.size * sizeof(struct mtnic_eqe);
	err = mtnic_alloc_aligned(mtnic->eq.buf_size, (void *)&mtnic->eq.buf,
				  &mtnic->eq.dma, PAGE_SIZE);
	if (err) {
		DBG("Failed allocating EQ buffer\n");
		iounmap(mtnic->eq_db);
		return -EADDRINUSE;
	}
	memset(mtnic->eq.buf, 0, mtnic->eq.buf_size);

	/* Set initial ownership of all EQEs to HW */
	for (i = 0; i < mtnic->eq.size; i++) {
		eqe_desc = mtnic->eq.buf + (sizeof(struct mtnic_eqe) * i);
		eqe_desc->own |= MTNIC_BIT_EQE_OWN;
	}

	mdelay(20);
	return 0;
}



/********************************************************************
*
* Mtnic commands functions
* -=-=-=-=-=-=-=-=-=-=-=-=
*
*********************************************************************/
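/**
 * cmdif_go_bit
 *
 * Return the go-bit value from the HCR status register, but only once
 * the toggle (t) bit read back matches the t-bit we wrote with the
 * last command; a mismatch means the read raced with a hardware
 * update, so retry up to TBIT_RETRIES times.
 */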
static inline int
cmdif_go_bit(struct mtnic *mtnic)
{
	struct mtnic_if_cmd_reg *hcr = mtnic->hcr;
	u32 status;
	int i;

	for (i = 0; i < TBIT_RETRIES; i++) {
		status = be32_to_cpu(readl(&hcr->status_go_opcode));
		if ((status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_T_BIT)) ==
		    (mtnic->cmd.tbit << MTNIC_BC_OFF(MTNIC_MASK_CMD_REG_T_BIT))) {
			/* Read expected t-bit - now return go-bit value */
			return status & MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT);
		}
	}

	DBG("Invalid tbit after %d retries!\n", TBIT_RETRIES);
	return -EBUSY; /* Return busy... */
}
/* Base Command interface */
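/*
 * Command flow: write the parameters, modifier and token into the HCR,
 * flip the toggle bit, then set the go bit together with the opcode.
 * Firmware clears the go bit when the command completes, and the top
 * byte of the status/go/opcode word carries the command status.
 * Immediate ("imm") arguments travel in the parameter registers
 * themselves; otherwise the pre-allocated command mailbox is used.
 */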
static int
mtnic_cmd(struct mtnic *mtnic, void *in_imm,
	  void *out_imm, u32 in_modifier, u16 op)
{

	struct mtnic_if_cmd_reg *hcr = mtnic->hcr;
	int err = 0;
	u32 out_param_h = 0;
	u32 out_param_l = 0;
	u32 in_param_h = 0;
	u32 in_param_l = 0;


	static u16 token = 0x8000;
	u32 status;
	unsigned int timeout = 0;

	token++;

	if ( cmdif_go_bit ( mtnic ) ) {
		DBG("GO BIT BUSY:%p.\n", hcr + 6);
		err = -EBUSY;
		goto out;
	}
	if (in_imm) {
		in_param_h = *((u32*)in_imm);
		in_param_l = *((u32*)in_imm + 1);
	} else {
		in_param_l = cpu_to_be32(mtnic->cmd.mapping);
	}
	out_param_l = cpu_to_be32(mtnic->cmd.mapping);

	/* writing to MCR */
	writel(in_param_h,          &hcr->in_param_h);
	writel(in_param_l,          &hcr->in_param_l);
	writel((u32) cpu_to_be32(in_modifier),  &hcr->input_modifier);
	writel(out_param_h,         &hcr->out_param_h);
	writel(out_param_l,         &hcr->out_param_l);
	writel((u32)cpu_to_be32(token << 16),   &hcr->token);
	wmb();

	/* flip toggle bit before each write to the HCR */
	mtnic->cmd.tbit = !mtnic->cmd.tbit;
	writel( ( u32 )
		cpu_to_be32(MTNIC_BC_MASK(MTNIC_MASK_CMD_REG_GO_BIT) |
			    ( mtnic->cmd.tbit << MTNIC_BC_OFF ( MTNIC_MASK_CMD_REG_T_BIT ) ) | op ),
		&hcr->status_go_opcode);

	while ( cmdif_go_bit ( mtnic ) && ( timeout <= GO_BIT_TIMEOUT ) ) {
		mdelay ( 1 );
		++timeout;
	}

	if ( cmdif_go_bit ( mtnic ) ) {
		DBG("Command opcode:0x%x token:0x%x TIMEOUT.\n", op, token);
		err = -EBUSY;
		goto out;
	}

	if (out_imm) {
		*((u32 *)out_imm) = readl(&hcr->out_param_h);
		*((u32 *)out_imm + 1) = readl(&hcr->out_param_l);
	}

	status = be32_to_cpu((u32)readl(&hcr->status_go_opcode)) >> 24;

	if (status) {
		DBG("Command opcode:0x%x token:0x%x returned:0x%x\n",
		    op, token, status);
		return status;
	}

out:
	return err;
}

/* MAP PAGES wrapper */
static int
mtnic_map_cmd(struct mtnic *mtnic, u16 op, struct mtnic_pages pages)
{
	unsigned int j;
	u32 addr;
	unsigned int len;
	u32 *page_arr = mtnic->cmd.buf;
	int nent = 0;
	int err = 0;

	memset(page_arr, 0, PAGE_SIZE);

	len = PAGE_SIZE * pages.num;
	pages.buf = (u32 *)umalloc(PAGE_SIZE * (pages.num + 1));
	if (!pages.buf) {
		DBG("Couldn't allocate memory for FW pages\n");
		return -EADDRINUSE;
	}
	/* Pick a page-aligned start address inside the over-sized
	 * allocation (one extra page was allocated above) */
	addr = PAGE_SIZE + ((virt_to_bus(pages.buf) & 0xfffff000) + PAGE_SIZE);
	DBG("Mapping pages: num: %x address: %p\n", pages.num, pages.buf);

	if (addr & (PAGE_MASK)) {
		DBG("Got FW area not aligned to %d (%llx/%x)\n",
		    PAGE_SIZE, (u64) addr, len);
		return -EADDRINUSE;
	}

	/* Function maps each PAGE separately */
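	/* Each mailbox entry is 16 bytes (four u32s), with the page's
	 * physical address in the last dword; once the mailbox fills up
	 * (MTNIC_MAILBOX_SIZE / 16 entries), issue the command and start
	 * a new batch */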
	for (j = 0; j < len; j += PAGE_SIZE) {
		page_arr[nent * 4 + 3] = cpu_to_be32(addr + j);
		if (++nent == MTNIC_MAILBOX_SIZE / 16) {
			err = mtnic_cmd(mtnic, NULL, NULL, nent, op);
			if (err)
				return -EIO;
			nent = 0;
		}
	}

	if (nent) {
		err = mtnic_cmd(mtnic, NULL, NULL, nent, op);
	}
	return err;
}



/*
 * Query FW
 */
static int
mtnic_QUERY_FW ( struct mtnic *mtnic )
{
	int err;
	struct mtnic_if_query_fw_out_mbox *cmd = mtnic->cmd.buf;

	err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_QUERY_FW);
	if (err)
		return -EIO;

	/* Get FW and interface versions */
	mtnic->fw_ver = ((u64) be16_to_cpu(cmd->rev_maj) << 32) |
			((u64) be16_to_cpu(cmd->rev_min) << 16) |
			(u64) be16_to_cpu(cmd->rev_smin);
	mtnic->fw.ifc_rev = be16_to_cpu(cmd->ifc_rev);

	/* Get offset for internal error reports (debug) */
	mtnic->fw.err_buf.offset = be64_to_cpu(cmd->err_buf_start);
	mtnic->fw.err_buf.size = be32_to_cpu(cmd->err_buf_size);

	DBG("Error buf offset is %llx\n", mtnic->fw.err_buf.offset);

	/* Get number of required FW (4k) pages */
	mtnic->fw.fw_pages.num = be16_to_cpu(cmd->fw_pages);

	return 0;
}


static int
mtnic_OPEN_NIC(struct mtnic *mtnic)
{
	struct mtnic_if_open_nic_in_mbox *open_nic = mtnic->cmd.buf;
	u32 extra_pages[2] = {0};
	int err;

	memset(open_nic, 0, sizeof *open_nic);

	/* port 1 */
	open_nic->log_rx_p1 = 0;
	open_nic->log_cq_p1 = 1;

	open_nic->log_tx_p1 = 0;
	open_nic->steer_p1 = MTNIC_IF_STEER_RSS;
	/* MAC + VLAN - leave reserved */

	/* port 2 */
	open_nic->log_rx_p2 = 0;
	open_nic->log_cq_p2 = 1;

	open_nic->log_tx_p2 = 0;
	open_nic->steer_p2 = MTNIC_IF_STEER_RSS;
	/* MAC + VLAN - leave reserved */

	err = mtnic_cmd(mtnic, NULL, extra_pages, 0, MTNIC_IF_CMD_OPEN_NIC);

	mtnic->fw.extra_pages.num = be32_to_cpu(*(extra_pages+1));
	DBG("Extra pages num is %x\n", mtnic->fw.extra_pages.num);
	return err;
}

static int
mtnic_CONFIG_RX(struct mtnic *mtnic)
{
	struct mtnic_if_config_rx_in_imm config_rx;

	memset(&config_rx, 0, sizeof config_rx);
	return mtnic_cmd(mtnic, &config_rx, NULL, 0, MTNIC_IF_CMD_CONFIG_RX);
}

static int
mtnic_CONFIG_TX(struct mtnic *mtnic)
{
	struct mtnic_if_config_send_in_imm config_tx;

	/* Zero the whole immediate argument so that reserved fields are
	 * not sent uninitialized */
	memset(&config_tx, 0, sizeof config_tx);
	config_tx.enph_gpf = 0;
	return mtnic_cmd(mtnic, &config_tx, NULL, 0, MTNIC_IF_CMD_CONFIG_TX);
}

static int
mtnic_HEART_BEAT(struct mtnic_port *priv, u32 *link_state)
{
	struct mtnic_if_heart_beat_out_imm heart_beat;

	int err;
	u32 flags;
	err = mtnic_cmd(priv->mtnic, NULL, &heart_beat, 0, MTNIC_IF_CMD_HEART_BEAT);
	if (!err) {
		flags = be32_to_cpu(heart_beat.flags);
		if (flags & MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR)) {
			DBG("Internal error detected\n");
			return -EIO;
		}
		*link_state = flags &
			      ~((u32) MTNIC_BC_MASK(MTNIC_MASK_HEAR_BEAT_INT_ERROR));
	}
	return err;
}


/*
 * Port commands
 */

static int
mtnic_SET_PORT_DEFAULT_RING(struct mtnic_port *priv, u8 port, u16 ring)
{
	struct mtnic_if_set_port_default_ring_in_imm def_ring;

	memset(&def_ring, 0, sizeof(def_ring));
	def_ring.ring = ring;
	return mtnic_cmd(priv->mtnic, &def_ring, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_DEFAULT_RING);
}

static int
mtnic_CONFIG_PORT_RSS_STEER(struct mtnic_port *priv, int port)
{
	memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE);
	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_PORT_RSS_STEER);
}

static int
mtnic_SET_PORT_RSS_INDIRECTION(struct mtnic_port *priv, int port)
{
	memset(priv->mtnic->cmd.buf, 0, PAGE_SIZE);
	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_RSS_INDIRECTION);
}


/*
 * Config commands
 */
static int
mtnic_CONFIG_CQ(struct mtnic_port *priv, int port,
		u16 cq_ind, struct mtnic_cq *cq)
{
	struct mtnic_if_config_cq_in_mbox *config_cq = priv->mtnic->cmd.buf;

	memset(config_cq, 0, sizeof *config_cq);
	config_cq->cq = cq_ind;
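	/* Ring and CQ sizes are passed to firmware as log2 of the entry
	 * count; fls(n - 1) computes that for the power-of-two
	 * UNITS_BUFFER_SIZE */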
	config_cq->size = fls(UNITS_BUFFER_SIZE - 1);
	config_cq->offset = ((cq->dma) & (PAGE_MASK)) >> 6;
	config_cq->db_record_addr_l = cpu_to_be32(cq->db_dma);
	config_cq->page_address[1] = cpu_to_be32(cq->dma);
	DBG("config cq address: %x dma_address: %lx "
	    "offset: %d size %d index: %d\n",
	    config_cq->page_address[1], cq->dma,
	    config_cq->offset, config_cq->size, config_cq->cq );

	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_CQ);
}


static int
mtnic_CONFIG_TX_RING(struct mtnic_port *priv, u8 port,
		     u16 ring_ind, struct mtnic_ring *ring)
{
	struct mtnic_if_config_send_ring_in_mbox *config_tx_ring = priv->mtnic->cmd.buf;

	memset(config_tx_ring, 0, sizeof *config_tx_ring);
	config_tx_ring->ring = cpu_to_be16(ring_ind);
	config_tx_ring->size = fls(UNITS_BUFFER_SIZE - 1);
	config_tx_ring->cq = cpu_to_be16(ring->cq);
	config_tx_ring->page_address[1] = cpu_to_be32(ring->dma);

	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_TX_RING);
}

static int
mtnic_CONFIG_RX_RING(struct mtnic_port *priv, u8 port,
		     u16 ring_ind, struct mtnic_ring *ring)
{
	struct mtnic_if_config_rx_ring_in_mbox *config_rx_ring = priv->mtnic->cmd.buf;

	memset(config_rx_ring, 0, sizeof *config_rx_ring);
	config_rx_ring->ring = ring_ind;
	MTNIC_BC_PUT(config_rx_ring->stride_size, fls(UNITS_BUFFER_SIZE - 1),
		     MTNIC_MASK_CONFIG_RX_RING_SIZE);
	MTNIC_BC_PUT(config_rx_ring->stride_size, 1,
		     MTNIC_MASK_CONFIG_RX_RING_STRIDE);
	config_rx_ring->cq = cpu_to_be16(ring->cq);
	config_rx_ring->db_record_addr_l = cpu_to_be32(ring->db_dma);

	DBG("Config RX ring starting at address:%lx\n", ring->dma);

	config_rx_ring->page_address[1] = cpu_to_be32(ring->dma);

	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_RX_RING);
}

static int
mtnic_CONFIG_EQ(struct mtnic *mtnic)
{
	struct mtnic_if_config_eq_in_mbox *eq = mtnic->cmd.buf;

	if (mtnic->eq.dma & (PAGE_MASK)) {
		DBG("Misaligned EQ buffer:%lx\n",
		    mtnic->eq.dma);
		return -EADDRINUSE;
	}

	memset(eq, 0, sizeof *eq);
	MTNIC_BC_PUT(eq->offset, mtnic->eq.dma >> 6, MTNIC_MASK_CONFIG_EQ_OFFSET);
	MTNIC_BC_PUT(eq->size, fls(mtnic->eq.size - 1) - 1, MTNIC_MASK_CONFIG_EQ_SIZE);
	MTNIC_BC_PUT(eq->int_vector, 0, MTNIC_MASK_CONFIG_EQ_INT_VEC);
	eq->page_address[1] = cpu_to_be32(mtnic->eq.dma);

	return mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_CONFIG_EQ);
}




static int
mtnic_SET_RX_RING_ADDR(struct mtnic_port *priv, u8 port, u64 *mac)
{
	struct mtnic_if_set_rx_ring_addr_in_imm ring_addr;
	u32 modifier = ((u32) port + 1) << 16;

	memset(&ring_addr, 0, sizeof(ring_addr));

	ring_addr.mac_31_0 = cpu_to_be32(*mac & 0xffffffff);
	ring_addr.mac_47_32 = cpu_to_be16((*mac >> 32) & 0xffff);
	ring_addr.flags_vlan_id |= cpu_to_be16(
					      MTNIC_BC_MASK(MTNIC_MASK_SET_RX_RING_ADDR_BY_MAC));

	return mtnic_cmd(priv->mtnic, &ring_addr, NULL, modifier, MTNIC_IF_CMD_SET_RX_RING_ADDR);
}

static int
mtnic_SET_PORT_STATE(struct mtnic_port *priv, u8 port, u8 state)
{
	struct mtnic_if_set_port_state_in_imm port_state;

	port_state.state = state ? cpu_to_be32(
					      MTNIC_BC_MASK(MTNIC_MASK_CONFIG_PORT_STATE)) : 0;
	port_state.reserved = 0;
	return mtnic_cmd(priv->mtnic, &port_state, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_STATE);
}

static int
mtnic_SET_PORT_MTU(struct mtnic_port *priv, u8 port, u16 mtu)
{
	struct mtnic_if_set_port_mtu_in_imm set_mtu;

	memset(&set_mtu, 0, sizeof(set_mtu));
	set_mtu.mtu = cpu_to_be16(mtu);
	return mtnic_cmd(priv->mtnic, &set_mtu, NULL, port + 1,
			 MTNIC_IF_CMD_SET_PORT_MTU);
}

/*
static int
mtnic_CONFIG_PORT_VLAN_FILTER(struct mtnic_port *priv, int port)
{
	struct mtnic_if_config_port_vlan_filter_in_mbox *vlan_filter = priv->mtnic->cmd.buf;

	// When no vlans are configured we disable the filter
	// (i.e., pass all vlans) because we ignore them anyhow
	memset(vlan_filter, 0xff, sizeof(*vlan_filter));
	return mtnic_cmd(priv->mtnic, NULL, NULL, port + 1,
			 MTNIC_IF_CMD_CONFIG_PORT_VLAN_FILTER);
}
*/


static int
mtnic_RELEASE_RESOURCE(struct mtnic_port *priv, u8 port, u8 type, u8 index)
{
	struct mtnic_if_release_resource_in_imm rel;

	memset(&rel, 0, sizeof rel);
	rel.index = index;
	rel.type = type;
	return mtnic_cmd ( priv->mtnic,
			   &rel, NULL, ( type == MTNIC_IF_RESOURCE_TYPE_EQ ) ?
			   0 : port + 1, MTNIC_IF_CMD_RELEASE_RESOURCE );
}


static int
mtnic_QUERY_CAP(struct mtnic *mtnic, u8 index, u8 mod, u64 *result)
{
	struct mtnic_if_query_cap_in_imm cap;
	u32 out_imm[2];
	int err;

	memset(&cap, 0, sizeof cap);
	cap.cap_index = index;
	cap.cap_modifier = mod;
	err = mtnic_cmd(mtnic, &cap, &out_imm, 0, MTNIC_IF_CMD_QUERY_CAP);

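	/* out_imm[] holds the {high, low} big-endian result words;
	 * byte-swap each and store the low word first to assemble the
	 * host-order u64 */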
	*((u32*)result) = be32_to_cpu(*(out_imm+1));
	*((u32*)result + 1) = be32_to_cpu(*out_imm);

	DBG("Called Query cap with index:0x%x mod:%d result:0x%llx"
	    " error:%d\n", index, mod, *result, err);
	return err;
}

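/* Convenience wrapper for mtnic_QUERY_CAP; note that it relies on
 * local 'err' and 'result' variables declared by the caller */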
#define DO_QUERY_CAP(cap, mod, var)				\
		err = mtnic_QUERY_CAP(mtnic, cap, mod, &result);\
		if (err)					\
			return err;				\
		(var) = result

static int
mtnic_query_num_ports(struct mtnic *mtnic)
{
	int err = 0;
	u64 result;

	DO_QUERY_CAP(MTNIC_IF_CAP_NUM_PORTS, 0, mtnic->fw.num_ports);

	return 0;
}

static int
mtnic_query_mac(struct mtnic *mtnic)
{
	int err = 0;
	int i;
	u64 result;

	for (i = 0; i < mtnic->fw.num_ports; i++) {
		DO_QUERY_CAP(MTNIC_IF_CAP_DEFAULT_MAC, i + 1, mtnic->fw.mac[i]);
	}

	return 0;
}

static int
mtnic_query_offsets(struct mtnic *mtnic)
{
	int err;
	int i;
	u64 result;

	DO_QUERY_CAP(MTNIC_IF_CAP_MEM_KEY,
		     MTNIC_IF_MEM_TYPE_SNOOP,
		     mtnic->fw.mem_type_snoop_be);
	mtnic->fw.mem_type_snoop_be = cpu_to_be32(mtnic->fw.mem_type_snoop_be);
	DO_QUERY_CAP(MTNIC_IF_CAP_TX_CQ_DB_OFFSET, 0, mtnic->fw.txcq_db_offset);
	DO_QUERY_CAP(MTNIC_IF_CAP_EQ_DB_OFFSET, 0, mtnic->fw.eq_db_offset);

	for (i = 0; i < mtnic->fw.num_ports; i++) {
		DO_QUERY_CAP(MTNIC_IF_CAP_CQ_OFFSET, i + 1, mtnic->fw.cq_offset);
		DO_QUERY_CAP(MTNIC_IF_CAP_TX_OFFSET, i + 1, mtnic->fw.tx_offset[i]);
		DO_QUERY_CAP(MTNIC_IF_CAP_RX_OFFSET, i + 1, mtnic->fw.rx_offset[i]);
		DBG("--> Port %d CQ offset:0x%x\n", i, mtnic->fw.cq_offset);
		DBG("--> Port %d Tx offset:0x%x\n", i, mtnic->fw.tx_offset[i]);
		DBG("--> Port %d Rx offset:0x%x\n", i, mtnic->fw.rx_offset[i]);
	}

	mdelay(20);
	return 0;
}




/********************************************************************
*
*	MTNIC initialization functions
*
*********************************************************************/

/**
 * Reset device
 */
void
mtnic_reset ( void )
{
	void *reset = ioremap ( mtnic_pci_dev.dev.bar[0] + MTNIC_RESET_OFFSET,
				4 );
	writel ( cpu_to_be32 ( 1 ), reset );
	iounmap ( reset );
}


/**
 * Restore PCI config
 */
static int
restore_config(void)
{
	int i;
	int rc;

	for (i = 0; i < 64; ++i) {
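		/* Note: dwords 22 and 23 (config space offsets 0x58-0x5f)
		 * are deliberately skipped; restoring them appears to be
		 * unsafe on this device */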
		if (i != 22 && i != 23) {
			rc = pci_write_config_dword(mtnic_pci_dev.dev.dev,
						    i << 2,
						    mtnic_pci_dev.dev.
						    dev_config_space[i]);
			if (rc)
				return rc;
		}
	}
	return 0;
}



/**
 * Init PCI configuration
 */
static int
mtnic_init_pci(struct pci_device *dev)
{
	int i;
	int err;

	/* save bars */
	DBG("bus=%d devfn=0x%x\n", dev->bus, dev->devfn);
	for (i = 0; i < 6; ++i) {
		mtnic_pci_dev.dev.bar[i] =
		pci_bar_start(dev, PCI_BASE_ADDRESS_0 + (i << 2));
		DBG("bar[%d]= 0x%08lx \n", i, mtnic_pci_dev.dev.bar[i]);
	}

	/* save config space */
	for (i = 0; i < 64; ++i) {
		err = pci_read_config_dword(dev, i << 2,
					    &mtnic_pci_dev.dev.
					    dev_config_space[i]);
		if (err) {
			DBG("Cannot save configuration space\n");
			return err;
		}
	}

	mtnic_pci_dev.dev.dev = dev;

	return 0;
}

/**
 *  Initialize hardware
 */
static inline
int mtnic_init_card(struct mtnic *mtnic)
{
	int err = 0;


	/* Alloc command interface */
	err = mtnic_alloc_cmdif ( mtnic );
	if (err) {
		DBG("Failed to init command interface, aborting\n");
		return -EADDRINUSE;
	}


	/**
	* Bring up HW
	*/
	err = mtnic_QUERY_FW ( mtnic );
	if (err) {
		DBG("QUERY_FW command failed, aborting\n");
		goto cmd_error;
	}
	DBG("Command interface revision:%d\n", mtnic->fw.ifc_rev);

	/* Allocate memory for FW and start it */
	err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_FW, mtnic->fw.fw_pages);
	if (err) {
		DBG("Error in MAP_FW\n");
		if (mtnic->fw.fw_pages.buf)
			ufree((intptr_t)mtnic->fw.fw_pages.buf);
		goto cmd_error;
	}

	/* Run firmware */
	err = mtnic_cmd(mtnic, NULL, NULL, 0, MTNIC_IF_CMD_RUN_FW);
	if (err) {
		DBG("Error in RUN_FW\n");
		goto map_fw_error;
	}

	DBG("FW version:%d.%d.%d\n",
	    (u16) (mtnic->fw_ver >> 32),
	    (u16) ((mtnic->fw_ver >> 16) & 0xffff),
	    (u16) (mtnic->fw_ver & 0xffff));


	/* Query num ports */
	err = mtnic_query_num_ports(mtnic);
	if (err) {
		DBG("Insufficient resources, aborting\n");
		goto map_fw_error;
	}

	/* Open NIC */
	err = mtnic_OPEN_NIC(mtnic);
	if (err) {
		DBG("Failed opening NIC, aborting\n");
		goto map_fw_error;
	}

	/* Allocate and map pages workspace */
	err = mtnic_map_cmd(mtnic, MTNIC_IF_CMD_MAP_PAGES, mtnic->fw.extra_pages);
	if (err) {
		DBG("Couldn't allocate %x FW extra pages, aborting\n",
		    mtnic->fw.extra_pages.num);
		if (mtnic->fw.extra_pages.buf)
			ufree((intptr_t)mtnic->fw.extra_pages.buf);
		goto map_fw_error;
	}


	/* Get device information */
	err = mtnic_query_mac(mtnic);
	if (err) {
		DBG("Insufficient resources in query mac, aborting\n");
		goto map_fw_error;
	}

	/* Get device offsets */
	err = mtnic_query_offsets(mtnic);
	if (err) {
		DBG("Failed retrieving resource offsets, aborting\n");
		goto map_extra_error;
	}


	/* Alloc EQ */
	err = mtnic_alloc_eq(mtnic);
	if (err) {
		DBG("Failed init shared resources. error: %d\n", err);
		goto map_extra_error;
	}

	/* Configure HW */
	err = mtnic_CONFIG_EQ(mtnic);
	if (err) {
		DBG("Failed configuring EQ\n");
		goto eq_error;
	}
	err = mtnic_CONFIG_RX(mtnic);
	if (err) {
		DBG("Failed Rx configuration\n");
		goto eq_error;
	}
	err = mtnic_CONFIG_TX(mtnic);
	if (err) {
		DBG("Failed Tx configuration\n");
		goto eq_error;
	}


	return 0;


eq_error:
	iounmap(mtnic->eq_db);
	free_memblock(mtnic->eq.buf, mtnic->eq.buf_size);
map_extra_error:
	ufree((intptr_t)mtnic->fw.extra_pages.buf);
map_fw_error:
	ufree((intptr_t)mtnic->fw.fw_pages.buf);

cmd_error:
	iounmap(mtnic->hcr);
	free_memblock(mtnic->cmd.buf, PAGE_SIZE);

	return -EADDRINUSE;
}



/*******************************************************************
*
* Process functions
*
*	process completions of TX and RX
*
********************************************************************/
void mtnic_process_tx_cq(struct mtnic_port *priv, struct net_device *dev,
			 struct mtnic_cq *cq)
{
	struct mtnic_cqe *cqe = cq->buf;
	struct mtnic_ring *ring = &priv->tx_ring;
	u16 index;


	index = cq->last & (cq->size-1);
	cqe = &cq->buf[index];

	/* Owner bit changes every round */
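	/* A CQE is software-owned when its ownership bit matches the
	 * parity of the current pass over the CQ (cq->last & cq->size);
	 * the XNOR test below is true exactly in that case */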
	while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) {
		netdev_tx_complete (dev, ring->iobuf[index]);
		++cq->last;
		index = cq->last & (cq->size-1);
		cqe = &cq->buf[index];
	}

	/* Update consumer index */
	cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->last;
}


int mtnic_process_rx_cq(struct mtnic_port *priv,
			struct net_device *dev,
			struct mtnic_cq *cq)
{
	struct mtnic_cqe *cqe;
	struct mtnic_ring *ring = &priv->rx_ring;
	int index;
	int err;
	struct io_buffer *rx_iob;
	unsigned int length;


	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->last & (cq->size-1);
	cqe = &cq->buf[index];

	/* Process all completed CQEs */
	while (XNOR(cqe->op_tr_own & MTNIC_BIT_CQ_OWN, cq->last & cq->size)) {
		/* Drop packet on bad receive or bad checksum */
		if ((cqe->op_tr_own & 0x1f) == MTNIC_OPCODE_ERROR) {
			DBG("CQE completed with vendor error\n");
			free_iob(ring->iobuf[index]);
			goto next;
		}
		if (cqe->enc_bf & MTNIC_BIT_BAD_FCS) {
			DBG("Dropping packet with bad FCS\n");
			free_iob(ring->iobuf[index]);
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		rx_iob = ring->iobuf[index];
		iob_put(rx_iob, length);

		/* Add this packet to the receive queue. */
		netdev_rx(dev, rx_iob);
		ring->iobuf[index] = NULL;

next:
		++cq->last;
		index = cq->last & (cq->size-1);
		cqe = &cq->buf[index];
	}

	/* Update consumer index */
	cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	wmb(); /* ensure HW sees CQ consumer before we post new buffers */
	ring->cons = cq->last;

	/* Refill the Rx ring once enough buffers have been consumed */
	if (ring->prod - ring->cons < (MAX_GAP_PROD_CONS)) {
		err = mtnic_alloc_iobuf(priv, &priv->rx_ring, DEF_IOBUF_SIZE);
		if (err) {
			DBG("ERROR Allocating io buffer\n");
			return -EADDRINUSE;
		}
	}

	return 0;
}




/********************************************************************
*
* net_device functions
*
*	open, poll, close, probe, disable, irq
*
*********************************************************************/
static int
mtnic_open(struct net_device *dev)
{
	struct mtnic_port *priv = netdev_priv(dev);

	int err = 0;
	struct mtnic_ring *ring;
	struct mtnic_cq *cq;
	int cq_ind = 0;
	u32 dev_link_state;
	int link_check;

	DBG("starting port:%d, MAC Address: 0x%12llx\n",
	    priv->port, priv->mtnic->fw.mac[priv->port]);

	/* Alloc and configure CQs, TX, RX */
	err = mtnic_alloc_resources ( dev );
	if (err) {
		DBG("Error allocating resources\n");
		return -EADDRINUSE;
	}

	/* Pass CQs configuration to HW */
	for (cq_ind = 0; cq_ind < NUM_CQS; ++cq_ind) {
		cq = &priv->cq[cq_ind];
		err = mtnic_CONFIG_CQ(priv, priv->port, cq_ind, cq);
		if (err) {
			DBG("Failed configuring CQ:%d error %d\n",
			    cq_ind, err);
			if (cq_ind)
				goto cq_error;
			else
				goto allocation_error;
		}
		/* Update consumer index */
		cq->db->update_ci = cpu_to_be32(cq->last & 0xffffff);
	}



	/* Pass Tx configuration to HW */
	ring = &priv->tx_ring;
	err = mtnic_CONFIG_TX_RING(priv, priv->port, 0, ring);
	if (err) {
		DBG("Failed configuring Tx ring:0\n");
		goto cq_error;
	}

	/* Pass RX configuration to HW */
	ring = &priv->rx_ring;
	err = mtnic_CONFIG_RX_RING(priv, priv->port, 0, ring);
	if (err) {
		DBG("Failed configuring Rx ring:0\n");
		goto tx_error;
	}

	/* Configure Rx steering */
	err = mtnic_CONFIG_PORT_RSS_STEER(priv, priv->port);
	if (!err)
		err = mtnic_SET_PORT_RSS_INDIRECTION(priv, priv->port);
	if (err) {
		DBG("Failed configuring RSS steering\n");
		goto rx_error;
	}


	/* Set the port default ring to ring 0 */
	err = mtnic_SET_PORT_DEFAULT_RING(priv, priv->port, 0);
	if (err) {
		DBG("Failed setting default ring\n");
		goto rx_error;
	}

	/* Set MAC address */
	err = mtnic_SET_RX_RING_ADDR(priv, priv->port, &priv->mtnic->fw.mac[priv->port]);
	if (err) {
		DBG("Failed setting default MAC address\n");
		goto rx_error;
	}

	/* Set MTU */
	err = mtnic_SET_PORT_MTU(priv, priv->port, DEF_MTU);
	if (err) {
		DBG("Failed setting MTU\n");
		goto rx_error;
	}

	/* Configure VLAN filter */
	/* Disabled: enabling this call stops the second port from
	   accepting packets
	err = mtnic_CONFIG_PORT_VLAN_FILTER(priv, priv->port);
	if (err) {
		DBG("Failed configuring VLAN filter\n");
		goto rx_error;
	}
	*/


	/* Bring up physical link */
	err = mtnic_SET_PORT_STATE(priv, priv->port, 1);
	if (err) {
		DBG("Failed bringing up port\n");
		goto rx_error;
	}

	/* PORT IS UP */
	priv->state = CARD_UP;


	/* Check that the link is up */
	DBG ( "Checking if link is up\n" );


	for ( link_check = 0; link_check < CHECK_LINK_TIMES; link_check ++ ) {
		/* Let link state stabilize if cable was connected */
		mdelay ( DELAY_LINK_CHECK );

		err = mtnic_HEART_BEAT(priv, &dev_link_state);
		if (err) {
			DBG("Failed getting device link state\n");
			return -ENETDOWN;
		}

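		/* Note: for port 0 this test never matches (priv->port is
		 * 0), so the loop simply waits out CHECK_LINK_TIMES; the
		 * final check below accepts link on either port */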
		if ( dev_link_state & priv->port ) {
			/* Link is up */
			break;
		}
	}


	if ( ! ( dev_link_state & 0x3 ) ) {
		DBG("Link down, check cables and restart\n");
		netdev_link_down ( dev );
		return -ENETDOWN;
	}

	DBG ( "Link is up!\n" );

	/* Mark as link up */
	netdev_link_up ( dev );

	return 0;

rx_error:
	err = mtnic_RELEASE_RESOURCE(priv, priv->port,
				     MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);
tx_error:
	err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
				      MTNIC_IF_RESOURCE_TYPE_TX_RING, 0);

cq_error:
	while (cq_ind) {
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, --cq_ind);
	}
	if (err)
		DBG("Error releasing resources\n");

allocation_error:

	free_memblock(priv->tx_ring.buf, priv->tx_ring.buf_size);
	iounmap(priv->tx_ring.txcq_db);
	free_memblock(priv->cq[1].buf, priv->cq[1].buf_size);
	free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
	free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size);
	free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
	free_memblock(priv->cq[0].buf, priv->cq[0].buf_size);
	free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));

	mtnic_free_io_buffers(&priv->rx_ring);

	return -ENETDOWN;
}




/** Check for receive and transmit completions, and periodically
 * verify the link with the HEART_BEAT command */
static void
mtnic_poll ( struct net_device *dev )
{
	struct mtnic_port *priv = netdev_priv(dev);
	struct mtnic_cq *cq;
	u32 dev_link_state;
	int err;
	unsigned int i;

	/* In case of an old error then return */
	if (priv->state != CARD_UP)
		return;

	/* We do not check the device on every _poll call,
	   since that would slow us down */
	if ((priv->poll_counter % ROUND_TO_CHECK) == 0) {
		/* Check device */
		err = mtnic_HEART_BEAT(priv, &dev_link_state);
		if (err) {
			DBG("Device has internal error\n");
			priv->state = CARD_LINK_DOWN;
			return;
		}
		if (!(dev_link_state & 0x3)) {
			DBG("Link down, check cables and restart\n");
			priv->state = CARD_LINK_DOWN;
			return;
		}
	}
	/* Polling CQs */
	for (i = 0; i < NUM_CQS; i++) {
		cq = &priv->cq[i]; /* Pass over both CQs */

		if (cq->is_rx) {
			err = mtnic_process_rx_cq(priv, cq->dev, cq);
			if (err) {
				priv->state = CARD_LINK_DOWN;
				DBG(" Error allocating RX buffers\n");
				return;
			}
		} else {
			mtnic_process_tx_cq(priv, cq->dev, cq);
		}
	}
	++priv->poll_counter;
}



static int
mtnic_transmit( struct net_device *dev, struct io_buffer *iobuf )
{

	struct mtnic_port *priv = netdev_priv(dev);
	struct mtnic_ring *ring;
	struct mtnic_tx_desc *tx_desc;
	struct mtnic_data_seg *data;
	u32 index;

	/* In case of an error then return */
	if (priv->state != CARD_UP)
		return -ENETDOWN;

	ring = &priv->tx_ring;

	index = ring->prod & ring->size_mask;
	if ((ring->prod - ring->cons) >= ring->size) {
		DBG("No space left for descriptors!!! cons: %x prod: %x\n",
		    ring->cons, ring->prod);
		mdelay(5);
		return -EAGAIN; /* no space left */
	}

	/* get current descriptor */
	tx_desc = ring->buf + (index * sizeof(struct mtnic_tx_desc));

	/* Prepare Data Seg */
	data = &tx_desc->data;
	data->addr_l = cpu_to_be32((u32)virt_to_bus(iobuf->data));
	data->count = cpu_to_be32(iob_len(iobuf));
	data->mem_type = priv->mtnic->fw.mem_type_snoop_be;

	/* Prepare ctrl segment */
	tx_desc->ctrl.size_vlan = cpu_to_be32(2);
	tx_desc->ctrl.flags = cpu_to_be32(MTNIC_BIT_TX_COMP |
					  MTNIC_BIT_NO_ICRC);
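	/* The ownership bit handed to HW alternates on every wrap of the
	 * producer index (ring->prod & ring->size), mirroring the CQ
	 * round-parity scheme */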
	tx_desc->ctrl.op_own = cpu_to_be32(MTNIC_OPCODE_SEND) |
			       ((ring->prod & ring->size) ?
				cpu_to_be32(MTNIC_BIT_DESC_OWN) : 0);

	/* Attach io_buffer */
	ring->iobuf[index] = iobuf;

	/* Update producer index */
	++ring->prod;

	/* Ring doorbell! */
	wmb();
	writel((u32) ring->db_offset, &ring->txcq_db->send_db);

	return 0;
}


static void
mtnic_close(struct net_device *dev)
{
	struct mtnic_port *priv = netdev_priv(dev);
	int err = 0;
	DBG("Close called for port:%d\n", priv->port);

	if ( ( priv->state == CARD_UP ) ||
	     ( priv->state == CARD_LINK_DOWN ) ) {

		/* Disable port */
		err |= mtnic_SET_PORT_STATE(priv, priv->port, 0);
		/*
		 * Stop HW associated with this port
		 */
		mdelay(5);

		/* Stop RX */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_RX_RING, 0);

		/* Stop TX */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_TX_RING, 0);

		/* Stop CQs */
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, 0);
		err |= mtnic_RELEASE_RESOURCE(priv, priv->port,
					      MTNIC_IF_RESOURCE_TYPE_CQ, 1);
		if (err) {
			DBG("Close reported error %d\n", err);
		}

		mdelay ( 10 );

		/* free memory */
		free_memblock(priv->tx_ring.buf, priv->tx_ring.buf_size);
		iounmap(priv->tx_ring.txcq_db);
		free_memblock(priv->cq[1].buf, priv->cq[1].buf_size);
		free_memblock(priv->cq[1].db, sizeof(struct mtnic_cq_db_record));
		free_memblock(priv->rx_ring.buf, priv->rx_ring.buf_size);
		free_memblock(priv->rx_ring.db, sizeof(struct mtnic_cq_db_record));
		free_memblock(priv->cq[0].buf, priv->cq[0].buf_size);
		free_memblock(priv->cq[0].db, sizeof(struct mtnic_cq_db_record));

		/* Free RX buffers */
		mtnic_free_io_buffers(&priv->rx_ring);

	}

	priv->state = CARD_INITIALIZED;
}


static void
mtnic_disable(struct pci_device *pci)
{

	int err;
	int i;
	struct mtnic *mtnic = pci_get_drvdata(pci);

	struct net_device *dev;
	struct mtnic_port *priv;

	for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) {

		dev = mtnic->netdev[i];

		priv = netdev_priv(dev);

		/* Just in case */
		if ( ( priv->state == CARD_UP ) ||
		     ( priv->state == CARD_LINK_DOWN ) )
			mtnic_close ( dev );
	}

	/* Releasing EQ */
	priv = netdev_priv ( mtnic->netdev[0] );
	err = mtnic_RELEASE_RESOURCE(priv, 1,
				     MTNIC_IF_RESOURCE_TYPE_EQ, 0);

	DBG("Calling MTNIC_CLOSE command\n");
	err |= mtnic_cmd(mtnic, NULL, NULL, 0,
			 MTNIC_IF_CMD_CLOSE_NIC);
	if (err) {
		DBG("Error releasing resources %d\n", err);
	}

	free_memblock(mtnic->cmd.buf, PAGE_SIZE);
	iounmap(mtnic->hcr);
	ufree((intptr_t)mtnic->fw.fw_pages.buf);
	ufree((intptr_t)mtnic->fw.extra_pages.buf);
	free_memblock(mtnic->eq.buf, mtnic->eq.buf_size);
	iounmap(mtnic->eq_db);

	for ( i = ( mtnic->fw.num_ports - 1 ); i >= 0; i-- ) {
		dev = mtnic->netdev[i];
		unregister_netdev ( dev );
		netdev_nullify ( dev );
		netdev_put ( dev );
	}

	free ( mtnic );

	mtnic_reset ();
	mdelay ( 1000 );
	/* Restore config, if we would like to retry booting */
	restore_config ();
}



static void
mtnic_irq(struct net_device *netdev __unused, int enable __unused)
{
	/* Not implemented */
}



/** mtnic net device operations */
static struct net_device_operations mtnic_operations = {
	.open       = mtnic_open,
	.close      = mtnic_close,
	.transmit   = mtnic_transmit,
	.poll       = mtnic_poll,
	.irq        = mtnic_irq,
};



static int
mtnic_probe(struct pci_device *pci,
	    const struct pci_device_id *id __unused)
{
	struct mtnic_port *priv;
	struct mtnic *mtnic;
	int err;
	u64 mac;
	int port_index;
	int mac_idx;


	adjust_pci_device(pci);

	err = mtnic_init_pci(pci);
	if (err) {
		DBG("Error in pci_init\n");
		return -EIO;
	}

	mtnic_reset();
	mdelay(1000);

	err = restore_config();
	if (err) {
		DBG("Error in restoring config\n");
		return err;
	}

	mtnic = zalloc ( sizeof ( *mtnic ) );
	if ( ! mtnic ) {
		DBG ( "Error Allocating mtnic buffer\n" );
		return -EADDRINUSE;
	}

	pci_set_drvdata(pci, mtnic);

	mtnic->pdev = pci;


	/* Initialize hardware */
	err = mtnic_init_card ( mtnic );
	if (err) {
		DBG("Error in init_card\n");
		goto err_init_card;
	}

	for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index ++ ) {
		/* Initializing net device */
		mtnic->netdev[port_index] = alloc_etherdev( sizeof ( struct mtnic_port ) );
		if ( mtnic->netdev[port_index] == NULL ) {
			DBG("Net device allocation failed\n");
			goto err_alloc_mtnic;
		}

		/*
		 * Initialize driver private data
		 */

		mtnic->netdev[port_index]->dev = &pci->dev;
		priv = netdev_priv ( mtnic->netdev[port_index] );
		memset ( priv, 0, sizeof ( struct mtnic_port ) );
		priv->mtnic = mtnic;
		priv->netdev = mtnic->netdev[port_index];

		/* Attach pci device */
		netdev_init(mtnic->netdev[port_index], &mtnic_operations);

		/* Set port number */
		priv->port = port_index;

		/* Set state */
		priv->state = CARD_DOWN;
	}


	for ( port_index = 0; port_index < mtnic->fw.num_ports; port_index ++ ) {
		priv = netdev_priv ( mtnic->netdev[port_index] );
		/* Program the MAC address */
		mac = priv->mtnic->fw.mac[port_index];
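		/* fw.mac[] holds the MAC as a 48-bit integer; unpack it
		 * most-significant byte first into hw_addr */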
		for (mac_idx = 0; mac_idx < MAC_ADDRESS_SIZE; ++mac_idx) {
			mtnic->netdev[port_index]->hw_addr[MAC_ADDRESS_SIZE - mac_idx - 1] = mac & 0xFF;
			mac = mac >> 8;
		}

		if ( register_netdev ( mtnic->netdev[port_index] ) ) {
			DBG("Netdev registration failed\n");
			priv->state = CARD_INITIALIZED;
			goto err_alloc_mtnic;
		}
	}


	return 0;

err_alloc_mtnic:
	free ( mtnic );
err_init_card:
	return -EIO;
}




static struct pci_device_id mtnic_nics[] = {
	PCI_ROM ( 0x15b3, 0x6368, "mt25448", "Mellanox ConnectX EN driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6372, "mt25458", "Mellanox ConnectX ENt driver", 0 ),
	PCI_ROM ( 0x15b3, 0x6750, "mt26448", "Mellanox ConnectX EN GEN2 driver", 0 ),
	PCI_ROM ( 0x15b3, 0x675a, "mt26458", "Mellanox ConnectX ENt GEN2 driver", 0 ),
};

struct pci_driver mtnic_driver __pci_driver = {
	.ids = mtnic_nics,
	.id_count = sizeof(mtnic_nics) / sizeof(mtnic_nics[0]),
	.probe = mtnic_probe,
	.remove = mtnic_disable,
};