// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 CGX driver
 *
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
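
/* CGX is the OcteonTX2 MAC block. Each CGX instance exposes up to
 * MAX_LMAC_PER_CGX logical MACs (LMACs); link management is delegated
 * to firmware, which the driver drives through a per-LMAC command
 * register and a firmware-interface interrupt (see cgx_fwi_* below).
 */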

#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>

#include "cgx.h"

#define DRV_NAME	"octeontx2-cgx"
#define DRV_STRING	"Marvell OcteonTX2 CGX/MAC Driver"

/**
 * struct lmac
 * @wq_cmd_cmplt:	waitq to keep the process blocked until cmd completion
 * @cmd_lock:		Lock to serialize the command interface
 * @resp:		command response
 * @event_cb:		callback for link change events
 * @cmd_pend:		flag set before new command is started
 *			flag cleared after command response is received
 * @cgx:		parent cgx port
 * @lmac_id:		lmac port id
 * @name:		lmac port name
 */
struct lmac {
	wait_queue_head_t wq_cmd_cmplt;
	struct mutex cmd_lock;
	u64 resp;
	struct cgx_event_cb event_cb;
	bool cmd_pend;
	struct cgx *cgx;
	u8 lmac_id;
	char *name;
};

struct cgx {
	void __iomem		*reg_base;
	struct pci_dev		*pdev;
	u8			cgx_id;
	u8			lmac_count;
	struct lmac		*lmac_idmap[MAX_LMAC_PER_CGX];
	struct list_head	cgx_list;
};

/* All probed CGX devices; cgx_id is assigned in probe order */
static LIST_HEAD(cgx_list);

/* Supported devices */
static const struct pci_device_id cgx_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) },
	{ 0, }  /* end of table */
};

MODULE_DEVICE_TABLE(pci, cgx_id_table);

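/* Per-LMAC CSRs sit in a 256KB (1 << 18) window inside the CGX BAR;
 * these accessors fold the LMAC id into the register offset.
 */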
static void cgx_write(struct cgx *cgx, u64 lmac, u64 offset, u64 val)
{
	writeq(val, cgx->reg_base + (lmac << 18) + offset);
}

static u64 cgx_read(struct cgx *cgx, u64 lmac, u64 offset)
{
	return readq(cgx->reg_base + (lmac << 18) + offset);
}

static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx)
{
	if (!cgx || lmac_id >= MAX_LMAC_PER_CGX)
		return NULL;

	return cgx->lmac_idmap[lmac_id];
}

int cgx_get_cgx_cnt(void)
{
	struct cgx *cgx_dev;
	int count = 0;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list)
		count++;

	return count;
}
EXPORT_SYMBOL(cgx_get_cgx_cnt);

int cgx_get_lmac_cnt(void *cgxd)
{
	struct cgx *cgx = cgxd;

	if (!cgx)
		return -ENODEV;

	return cgx->lmac_count;
}
EXPORT_SYMBOL(cgx_get_lmac_cnt);

void *cgx_get_pdata(int cgx_id)
{
	struct cgx *cgx_dev;

	list_for_each_entry(cgx_dev, &cgx_list, cgx_list) {
		if (cgx_dev->cgx_id == cgx_id)
			return cgx_dev;
	}
	return NULL;
}
EXPORT_SYMBOL(cgx_get_pdata);

/* CGX Firmware interface low level support */
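/* Command handshake with firmware, as implemented below:
 *
 *   1. The driver checks CMDREG_OWN in CGX_COMMAND_REG; the register is
 *      free only once firmware has handed ownership back (CGX_CMD_OWN_NS).
 *   2. It writes the request with ownership set to CGX_CMD_OWN_FIRMWARE
 *      and sleeps on wq_cmd_cmplt.
 *   3. Firmware posts the response in CGX_EVENT_REG and raises the
 *      firmware-interface interrupt; cgx_fwi_event_handler() copies the
 *      response into lmac->resp and wakes the waiter.
 */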
static int cgx_fwi_cmd_send(u64 req, u64 *resp, struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	struct device *dev;
	int err = 0;
	u64 cmd;

	/* Ensure no other command is in progress */
	err = mutex_lock_interruptible(&lmac->cmd_lock);
	if (err)
		return err;

	/* Ensure command register is free */
	cmd = cgx_read(cgx, lmac->lmac_id, CGX_COMMAND_REG);
	if (FIELD_GET(CMDREG_OWN, cmd) != CGX_CMD_OWN_NS) {
		err = -EBUSY;
		goto unlock;
	}

	/* Update ownership in command request */
	req = FIELD_SET(CMDREG_OWN, CGX_CMD_OWN_FIRMWARE, req);

	/* Mark this lmac as pending, before we start */
	lmac->cmd_pend = true;

	/* Start command in hardware */
	cgx_write(cgx, lmac->lmac_id, CGX_COMMAND_REG, req);

	/* Ensure command is completed without errors */
	if (!wait_event_timeout(lmac->wq_cmd_cmplt, !lmac->cmd_pend,
				msecs_to_jiffies(CGX_CMD_TIMEOUT))) {
		dev = &cgx->pdev->dev;
		dev_err(dev, "cgx port %d:%d cmd timeout\n",
			cgx->cgx_id, lmac->lmac_id);
		err = -EIO;
		goto unlock;
	}

	/* We have a valid command response; make sure the IRQ handler's
	 * write to lmac->resp is visible here (pairs with the smp_wmb()
	 * in cgx_fwi_event_handler()).
	 */
	smp_rmb();
	*resp = lmac->resp;

unlock:
	mutex_unlock(&lmac->cmd_lock);

	return err;
}

static inline int cgx_fwi_cmd_generic(u64 req, u64 *resp,
				      struct cgx *cgx, int lmac_id)
{
	struct lmac *lmac;
	int err;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	err = cgx_fwi_cmd_send(req, resp, lmac);

	/* Check for valid response */
	if (!err && FIELD_GET(EVTREG_STAT, *resp) == CGX_STAT_FAIL)
		return -EIO;

	return err;
}
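
/* A typical caller builds the request with FIELD_SET(CMDREG_ID, ...) and
 * parses fields out of the returned response; see cgx_fwi_read_version()
 * below for an example.
 */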

/* Hardware event handlers */
static inline void cgx_link_change_handler(u64 lstat,
					   struct lmac *lmac)
{
	struct cgx *cgx = lmac->cgx;
	struct cgx_link_event event;
	struct device *dev;

	dev = &cgx->pdev->dev;

	event.lstat.link_up = FIELD_GET(RESP_LINKSTAT_UP, lstat);
	event.lstat.full_duplex = FIELD_GET(RESP_LINKSTAT_FDUPLEX, lstat);
	event.lstat.speed = FIELD_GET(RESP_LINKSTAT_SPEED, lstat);
	event.lstat.err_type = FIELD_GET(RESP_LINKSTAT_ERRTYPE, lstat);

	event.cgx_id = cgx->cgx_id;
	event.lmac_id = lmac->lmac_id;

	if (!lmac->event_cb.notify_link_chg) {
		dev_dbg(dev, "cgx port %d:%d Link change handler null\n",
			cgx->cgx_id, lmac->lmac_id);
		if (event.lstat.err_type != CGX_ERR_NONE) {
			dev_err(dev, "cgx port %d:%d Link error %d\n",
				cgx->cgx_id, lmac->lmac_id,
				event.lstat.err_type);
		}
		dev_info(dev, "cgx port %d:%d Link status %s, speed %x\n",
			 cgx->cgx_id, lmac->lmac_id,
			 event.lstat.link_up ? "UP" : "DOWN",
			 event.lstat.speed);
		return;
	}

	if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data))
		dev_err(dev, "event notification failure\n");
}

static inline bool cgx_cmdresp_is_linkevent(u64 event)
{
	u8 id;

	id = FIELD_GET(EVTREG_ID, event);

	return id == CGX_CMD_LINK_BRING_UP || id == CGX_CMD_LINK_BRING_DOWN;
}

static inline bool cgx_event_is_linkevent(u64 event)
{
	return FIELD_GET(EVTREG_ID, event) == CGX_EVT_LINK_CHANGE;
}

static irqreturn_t cgx_fwi_event_handler(int irq, void *data)
{
	struct lmac *lmac = data;
	struct cgx *cgx;
	u64 event;

	cgx = lmac->cgx;

	event = cgx_read(cgx, lmac->lmac_id, CGX_EVENT_REG);

	if (!FIELD_GET(EVTREG_ACK, event))
		return IRQ_NONE;

	switch (FIELD_GET(EVTREG_EVT_TYPE, event)) {
	case CGX_EVT_CMD_RESP:
		/* Copy the response. Since only one command is active at a
		 * time, there is no way a response can get overwritten
		 */
		lmac->resp = event;
		/* Ensure response is updated before thread context starts */
		smp_wmb();

		/* There won't be separate events for link change initiated from
		 * software; hence report the command responses as events
		 */
		if (cgx_cmdresp_is_linkevent(event))
			cgx_link_change_handler(event, lmac);

		/* Release thread waiting for completion. The waiter sleeps
		 * uninterruptibly in wait_event_timeout(), so plain wake_up()
		 * is required; wake_up_interruptible() would never wake it.
		 */
		lmac->cmd_pend = false;
		wake_up(&lmac->wq_cmd_cmplt);
		break;
	case CGX_EVT_ASYNC:
		if (cgx_event_is_linkevent(event))
			cgx_link_change_handler(event, lmac);
		break;
	}

	/* Any new event or command response will be posted by firmware
	 * only after the current status is acked.
	 * Ack the interrupt register as well.
	 */
	cgx_write(lmac->cgx, lmac->lmac_id, CGX_EVENT_REG, 0);
	cgx_write(lmac->cgx, lmac->lmac_id, CGXX_CMRX_INT, FW_CGX_INT);

	return IRQ_HANDLED;
}

/* APIs for PHY management using CGX firmware interface */

/* callback registration for hardware events like link change */
int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id)
{
	struct cgx *cgx = cgxd;
	struct lmac *lmac;

	lmac = lmac_pdata(lmac_id, cgx);
	if (!lmac)
		return -ENODEV;

	lmac->event_cb = *cb;

	return 0;
}
EXPORT_SYMBOL(cgx_lmac_evh_register);
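
/* A minimal registration sketch (hypothetical consumer; the callback and
 * private-data names are illustrative only):
 *
 *	static int my_link_cb(struct cgx_link_event *event, void *data)
 *	{
 *		// react to event->lstat, event->cgx_id, event->lmac_id
 *		return 0;
 *	}
 *
 *	struct cgx_event_cb cb = {
 *		.notify_link_chg = my_link_cb,
 *		.data = my_priv,
 *	};
 *	void *cgxd = cgx_get_pdata(0);
 *
 *	if (cgxd)
 *		cgx_lmac_evh_register(&cb, cgxd, 0);
 */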

static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx)
{
	u64 req = 0;

	req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_FW_VER, req);
	return cgx_fwi_cmd_generic(req, resp, cgx, 0);
}
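
/* Require an exact firmware interface version match so that driver and
 * firmware agree on the command/response layout; probe fails on mismatch.
 */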
static int cgx_lmac_verify_fwi_version(struct cgx *cgx)
{
	struct device *dev = &cgx->pdev->dev;
	int major_ver, minor_ver;
	u64 resp;
	int err;

	if (!cgx->lmac_count)
		return 0;

	err = cgx_fwi_read_version(&resp, cgx);
	if (err)
		return err;

	major_ver = FIELD_GET(RESP_MAJOR_VER, resp);
	minor_ver = FIELD_GET(RESP_MINOR_VER, resp);
	dev_dbg(dev, "Firmware command interface version = %d.%d\n",
		major_ver, minor_ver);

	if (major_ver != CGX_FIRMWARE_MAJOR_VER ||
	    minor_ver != CGX_FIRMWARE_MINOR_VER)
		return -EIO;

	return 0;
}

static int cgx_lmac_init(struct cgx *cgx)
{
	struct lmac *lmac;
	int i, err;

	cgx->lmac_count = cgx_read(cgx, 0, CGXX_CMRX_RX_LMACS) & 0x7;
	if (cgx->lmac_count > MAX_LMAC_PER_CGX)
		cgx->lmac_count = MAX_LMAC_PER_CGX;

	for (i = 0; i < cgx->lmac_count; i++) {
		lmac = kzalloc(sizeof(*lmac), GFP_KERNEL);
		if (!lmac)
			return -ENOMEM;
		lmac->name = kzalloc(sizeof("cgx_fwi_xxx_yyy"), GFP_KERNEL);
		if (!lmac->name) {
			kfree(lmac);
			return -ENOMEM;
		}
		sprintf(lmac->name, "cgx_fwi_%d_%d", cgx->cgx_id, i);
		lmac->lmac_id = i;
		lmac->cgx = cgx;
		init_waitqueue_head(&lmac->wq_cmd_cmplt);
		mutex_init(&lmac->cmd_lock);
		/* Each LMAC owns a block of 9 MSIX vectors; CGX_LMAC_FWI is
		 * the firmware interface vector within that block.
		 */
		err = request_irq(pci_irq_vector(cgx->pdev,
						 CGX_LMAC_FWI + i * 9),
				  cgx_fwi_event_handler, 0, lmac->name, lmac);
		if (err) {
			kfree(lmac->name);
			kfree(lmac);
			return err;
		}

		/* Enable interrupt */
		cgx_write(cgx, lmac->lmac_id, CGXX_CMRX_INT_ENA_W1S,
			  FW_CGX_INT);

		/* Add reference */
		cgx->lmac_idmap[i] = lmac;
	}

	return cgx_lmac_verify_fwi_version(cgx);
}

static int cgx_lmac_exit(struct cgx *cgx)
{
	struct lmac *lmac;
	int i;

	/* Free all lmac related resources */
	for (i = 0; i < cgx->lmac_count; i++) {
		lmac = cgx->lmac_idmap[i];
		if (!lmac)
			continue;
		free_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), lmac);
		kfree(lmac->name);
		kfree(lmac);
	}

	return 0;
}

static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct cgx *cgx;
	int err, nvec;

	cgx = devm_kzalloc(dev, sizeof(*cgx), GFP_KERNEL);
	if (!cgx)
		return -ENOMEM;
	cgx->pdev = pdev;

	pci_set_drvdata(pdev, cgx);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	/* Map configuration registers */
	cgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!cgx->reg_base) {
		dev_err(dev, "CGX: Cannot map CSR memory space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nvec = CGX_NVEC;
	err = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_MSIX);
	if (err < 0 || err != nvec) {
		dev_err(dev, "Request for %d msix vectors failed, err %d\n",
			nvec, err);
		goto err_release_regions;
	}

	list_add(&cgx->cgx_list, &cgx_list);
	cgx->cgx_id = cgx_get_cgx_cnt() - 1;

	err = cgx_lmac_init(cgx);
	if (err)
		goto err_release_lmac;

	return 0;

err_release_lmac:
	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void cgx_remove(struct pci_dev *pdev)
{
	struct cgx *cgx = pci_get_drvdata(pdev);

	cgx_lmac_exit(cgx);
	list_del(&cgx->cgx_list);
	pci_free_irq_vectors(pdev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

struct pci_driver cgx_driver = {
	.name = DRV_NAME,
	.id_table = cgx_id_table,
	.probe = cgx_probe,
	.remove = cgx_remove,
};
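
/* Note: cgx_driver is deliberately non-static and this file never calls
 * pci_register_driver() itself, so registration is presumably done by
 * the parent AF (admin function) driver this file is linked into.
 */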