/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2021, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */
/*$FreeBSD$*/

/**
 * @file iavf_osdep.c
 * @brief OS compatibility layer
 *
 * Contains definitions for various functions used to provide an
 * OS-independent layer for sharing code between drivers on different
 * operating systems.
 */
#include <machine/stdarg.h>

#include "iavf_iflib.h"

/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/

/**
 * iavf_dmamap_cb - DMA mapping callback function
 * @arg: pointer to return the segment address
 * @segs: the segments array
 * @nseg: number of segments in the array
 * @error: error code
 *
 * Callback used by the bus DMA code to obtain the segment address.
 */
static void
iavf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg __unused,
	       int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

/**
 * iavf_allocate_virt_mem - Allocate virtual memory
 * @hw: hardware structure
 * @mem: structure describing the memory allocation
 * @size: size of the allocation
 *
 * OS compatibility function to allocate virtual memory.
 *
 * @returns zero on success, or a status code on failure.
 */
enum iavf_status
iavf_allocate_virt_mem(struct iavf_hw *hw __unused, struct iavf_virt_mem *mem,
		       u32 size)
{
	mem->va = malloc(size, M_IAVF, M_NOWAIT | M_ZERO);
	return (mem->va == NULL);
}

/**
 * iavf_free_virt_mem - Free virtual memory
 * @hw: hardware structure
 * @mem: structure describing the memory to free
 *
 * OS compatibility function to free virtual memory.
 *
 * @returns zero.
 */
enum iavf_status
iavf_free_virt_mem(struct iavf_hw *hw __unused, struct iavf_virt_mem *mem)
{
	free(mem->va, M_IAVF);
	mem->va = NULL;

	return (0);
}

/**
 * iavf_allocate_dma_mem - Allocate DMA memory
 * @hw: hardware structure
 * @mem: structure describing the memory allocation
 * @type: unused type parameter specifying the type of allocation
 * @size: size of the allocation
 * @alignment: alignment requirements for the allocation
 *
 * Allocates DMA memory by using bus_dma_tag_create to create a DMA tag, and
 * then bus_dmamem_alloc to allocate the associated memory.
 *
 * @returns zero on success, or a status code on failure.
 */
enum iavf_status
iavf_allocate_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem,
	enum iavf_memory_type type __unused, u64 size, u32 alignment)
{
	device_t	dev = ((struct iavf_osdep *)hw->back)->dev;
	int		err;

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       alignment, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW, /* flags */
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
			       &mem->tag);
	if (err != 0) {
		device_printf(dev,
		    "iavf_allocate_dma_mem: bus_dma_tag_create failed, "
		    "error %u\n", err);
		goto fail_0;
	}
	err = bus_dmamem_alloc(mem->tag, (void **)&mem->va,
			     BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
	if (err != 0) {
		device_printf(dev,
		    "iavf_allocate_dma_mem: bus_dmamem_alloc failed, "
		    "error %u\n", err);
		goto fail_1;
	}
	err = bus_dmamap_load(mem->tag, mem->map, mem->va,
			    size,
			    iavf_dmamap_cb,
			    &mem->pa,
			    BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(dev,
		    "iavf_allocate_dma_mem: bus_dmamap_load failed, "
		    "error %u\n", err);
		goto fail_2;
	}
	mem->nseg = 1;
	mem->size = size;
	bus_dmamap_sync(mem->tag, mem->map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (0);
fail_2:
	bus_dmamem_free(mem->tag, mem->va, mem->map);
fail_1:
	bus_dma_tag_destroy(mem->tag);
fail_0:
	mem->map = NULL;
	mem->tag = NULL;
	return (err);
}

/**
 * iavf_free_dma_mem - Free DMA memory allocation
 * @hw: hardware structure
 * @mem: pointer to memory structure previously allocated
 *
 * Releases DMA memory that was previously allocated by iavf_allocate_dma_mem.
 *
 * @returns zero.
 */
enum iavf_status
iavf_free_dma_mem(struct iavf_hw *hw __unused, struct iavf_dma_mem *mem)
{
	bus_dmamap_sync(mem->tag, mem->map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(mem->tag, mem->map);
	bus_dmamem_free(mem->tag, mem->va, mem->map);
	bus_dma_tag_destroy(mem->tag);
	return (0);
}
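
/*
 * Illustrative usage sketch, not part of the driver and never compiled
 * (guarded by "#if 0"): a hypothetical caller pairs iavf_allocate_dma_mem()
 * with iavf_free_dma_mem() for a page-sized, page-aligned descriptor area.
 * The helper name and sizes are examples only; the memory type argument is
 * ignored by this implementation, so a zero value is passed.
 */
#if 0
static enum iavf_status
iavf_example_dma_roundtrip(struct iavf_hw *hw, struct iavf_dma_mem *mem)
{
	enum iavf_status status;

	/* 4 KB allocation with 4 KB alignment; the type is unused here. */
	status = iavf_allocate_dma_mem(hw, mem,
	    (enum iavf_memory_type)0, 4096, 4096);
	if (status != 0)
		return (status);

	/* mem->va (kernel virtual) and mem->pa (bus address) are now valid. */

	iavf_free_dma_mem(hw, mem);
	return (0);
}
#endif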

/**
 * iavf_init_spinlock - Initialize a spinlock
 * @lock: OS compatibility lock structure
 *
 * Use the mutex layer to initialize a spin lock that can be used via the OS
 * compatibility layer accessors.
 *
 * @remark we pass MTX_DUPOK because the mutex name will not be unique. An
 * alternative would be to somehow generate a name, such as by passing in the
 * __FILE__ and __LINE__ values from a macro.
 */
void
iavf_init_spinlock(struct iavf_spinlock *lock)
{
	mtx_init(&lock->mutex, "mutex",
	    "iavf spinlock", MTX_DEF | MTX_DUPOK);
}

/**
 * iavf_acquire_spinlock - Acquire a spin lock
 * @lock: OS compatibility lock structure
 *
 * Acquire a spin lock using mtx_lock.
 */
void
iavf_acquire_spinlock(struct iavf_spinlock *lock)
{
	mtx_lock(&lock->mutex);
}

/**
 * iavf_release_spinlock - Release a spin lock
 * @lock: OS compatibility lock structure
 *
 * Release a spin lock using mtx_unlock.
 */
void
iavf_release_spinlock(struct iavf_spinlock *lock)
{
	mtx_unlock(&lock->mutex);
}

/**
 * iavf_destroy_spinlock - Destroy a spin lock
 * @lock: OS compatibility lock structure
 *
 * Destroy (deinitialize) a spin lock by calling mtx_destroy.
 *
 * @remark we only destroy the lock if it was initialized. This means that
 * calling iavf_destroy_spinlock on a lock that was already destroyed or was
 * never initialized is not considered a bug.
 */
void
iavf_destroy_spinlock(struct iavf_spinlock *lock)
{
	if (mtx_initialized(&lock->mutex))
		mtx_destroy(&lock->mutex);
}
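
/*
 * Illustrative usage sketch, never compiled (guarded by "#if 0"): the shared
 * code typically brackets a critical section with the wrappers above. The
 * helper and the on-stack lock are hypothetical examples only.
 */
#if 0
static void
iavf_example_locked_section(void)
{
	struct iavf_spinlock lock;

	iavf_init_spinlock(&lock);

	iavf_acquire_spinlock(&lock);
	/* ... work that must not run concurrently goes here ... */
	iavf_release_spinlock(&lock);

	iavf_destroy_spinlock(&lock);
}
#endif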

/**
 * iavf_debug_shared - Log a debug message if enabled
 * @hw: device hardware structure
 * @mask: bit indicating the type of the message
 * @fmt: printf format string
 *
 * Checks if the mask is enabled in the hw->debug_mask. If so, prints
 * a message to the console using vprintf().
 */
void
iavf_debug_shared(struct iavf_hw *hw, uint64_t mask, char *fmt, ...)
{
	va_list args;
	device_t dev;

	if (!(mask & hw->debug_mask))
		return;

	dev = ((struct iavf_osdep *)hw->back)->dev;

	/* Re-implement device_printf() */
	device_print_prettyname(dev);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}

/**
 * iavf_read_pci_cfg - Read a PCI config register
 * @hw: device hardware structure
 * @reg: the PCI register to read
 *
 * Calls pci_read_config to read the given PCI register from the PCI config
 * space.
 *
 * @returns the value of the register.
 */
u16
iavf_read_pci_cfg(struct iavf_hw *hw, u32 reg)
{
	u16 value;

	value = pci_read_config(((struct iavf_osdep *)hw->back)->dev,
	    reg, 2);

	return (value);
}

/**
 * iavf_write_pci_cfg - Write a PCI config register
 * @hw: device hardware structure
 * @reg: the PCI register to write
 * @value: the value to write
 *
 * Calls pci_write_config to write to a given PCI register in the PCI config
 * space.
 */
void
iavf_write_pci_cfg(struct iavf_hw *hw, u32 reg, u16 value)
{
	pci_write_config(((struct iavf_osdep *)hw->back)->dev,
	    reg, value, 2);
}
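
/*
 * Illustrative sketch, never compiled (guarded by "#if 0"): a 16-bit
 * read-modify-write of a standard PCI config register through the wrappers
 * above. The helper name is hypothetical, and the PCIR_COMMAND and
 * PCIM_CMD_BUSMASTEREN constants are assumed to be visible via
 * <dev/pci/pcireg.h>.
 */
#if 0
static void
iavf_example_enable_busmaster(struct iavf_hw *hw)
{
	u16 cmd;

	cmd = iavf_read_pci_cfg(hw, PCIR_COMMAND);
	cmd |= PCIM_CMD_BUSMASTEREN;
	iavf_write_pci_cfg(hw, PCIR_COMMAND, cmd);
}
#endif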

/**
 * iavf_rd32 - Read a 32bit hardware register value
 * @hw: the private hardware structure
 * @reg: register address to read
 *
 * Read the specified 32bit register value from BAR0 and return its contents.
 *
 * @returns the value of the 32bit register.
 */
inline uint32_t
iavf_rd32(struct iavf_hw *hw, uint32_t reg)
{
	struct iavf_osdep *osdep = (struct iavf_osdep *)hw->back;

	KASSERT(reg < osdep->mem_bus_space_size,
	    ("iavf: register offset %#jx too large (max is %#jx)",
	    (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));

	return (bus_space_read_4(osdep->mem_bus_space_tag,
	    osdep->mem_bus_space_handle, reg));
}

/**
 * iavf_wr32 - Write a 32bit hardware register
 * @hw: the private hardware structure
 * @reg: the register address to write to
 * @val: the 32bit value to write
 *
 * Write the specified 32bit value to a register address in BAR0.
 */
inline void
iavf_wr32(struct iavf_hw *hw, uint32_t reg, uint32_t val)
{
	struct iavf_osdep *osdep = (struct iavf_osdep *)hw->back;

	KASSERT(reg < osdep->mem_bus_space_size,
	    ("iavf: register offset %#jx too large (max is %#jx)",
	    (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));

	bus_space_write_4(osdep->mem_bus_space_tag,
	    osdep->mem_bus_space_handle, reg, val);
}

/**
 * iavf_flush - Flush register writes
 * @hw: private hardware structure
 *
 * Forces the completion of outstanding PCI register writes by reading from
 * a specific hardware register.
 */
inline void
iavf_flush(struct iavf_hw *hw)
{
	struct iavf_osdep *osdep = (struct iavf_osdep *)hw->back;

	rd32(hw, osdep->flush_reg);
}
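
/*
 * Illustrative sketch, never compiled (guarded by "#if 0"): a posted register
 * write followed by iavf_flush() so the write completes before the caller
 * proceeds. IAVF_EXAMPLE_REG is a hypothetical offset, not a real hardware
 * define, and the helper name is also hypothetical.
 */
#if 0
#define IAVF_EXAMPLE_REG	0x00000000

static void
iavf_example_posted_write(struct iavf_hw *hw, uint32_t val)
{
	iavf_wr32(hw, IAVF_EXAMPLE_REG, val);
	/* Read back through BAR0 so the write is not left posted. */
	iavf_flush(hw);
}
#endif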

/**
 * iavf_debug_core - Debug printf for core driver code
 * @dev: the device_t to log under
 * @enabled_mask: the mask of enabled messages
 * @mask: the mask of the requested message to print
 * @fmt: printf format string
 *
 * If enabled_mask has the bit from the mask set, print a message to the
 * console using the specified format. This is used to conditionally enable
 * log messages at run time by toggling the enabled_mask in the device
 * structure.
 */
void
iavf_debug_core(device_t dev, u32 enabled_mask, u32 mask, char *fmt, ...)
{
	va_list args;

	if (!(mask & enabled_mask))
		return;

	/* Re-implement device_printf() */
	device_print_prettyname(dev);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}
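
/*
 * Illustrative sketch, never compiled (guarded by "#if 0"): conditional
 * logging through iavf_debug_core(). IAVF_EXAMPLE_DBG_MASK and the helper
 * are hypothetical stand-ins for the driver's real debug mask bits and
 * callers.
 */
#if 0
#define IAVF_EXAMPLE_DBG_MASK	0x1

static void
iavf_example_debug(device_t dev, u32 enabled_mask)
{
	/* Printed only when IAVF_EXAMPLE_DBG_MASK is set in enabled_mask. */
	iavf_debug_core(dev, enabled_mask, IAVF_EXAMPLE_DBG_MASK,
	    "example message: %d\n", 42);
}
#endif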