/* SPDX-License-Identifier: BSD-3-Clause */
/*  Copyright (c) 2021, Intel Corporation
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions are met:
 *
 *   1. Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   2. Redistributions in binary form must reproduce the above copyright
 *      notice, this list of conditions and the following disclaimer in the
 *      documentation and/or other materials provided with the distribution.
 *
 *   3. Neither the name of the Intel Corporation nor the names of its
 *      contributors may be used to endorse or promote products derived from
 *      this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 *  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 *  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 *  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 *  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file iavf_osdep.c
 * @brief OS compatibility layer
 *
 * Contains definitions for various functions used to provide an OS
 * independent layer for sharing code between drivers on different operating
 * systems.
 */
#include <machine/stdarg.h>

#include "iavf_iflib.h"

/********************************************************************
 * Manage DMA'able memory.
 *******************************************************************/

/**
 * iavf_dmamap_cb - DMA mapping callback function
 * @arg: pointer to return the segment address
 * @segs: the segments array
 * @nseg: number of segments in the array
 * @error: error code
 *
 * Callback used by the bus DMA code to obtain the segment address.
 */
static void
iavf_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg __unused,
	       int error)
{
	if (error)
		return;
	*(bus_addr_t *)arg = segs->ds_addr;
}

/**
 * iavf_allocate_virt_mem - Allocate virtual memory
 * @hw: hardware structure
 * @mem: structure describing the memory allocation
 * @size: size of the allocation
 *
 * OS compatibility function to allocate virtual memory.
 *
 * @returns zero on success, or a non-zero value if the allocation failed.
 */
enum iavf_status
iavf_allocate_virt_mem(struct iavf_hw *hw __unused, struct iavf_virt_mem *mem,
		       u32 size)
{
	mem->va = malloc(size, M_IAVF, M_NOWAIT | M_ZERO);
	return (mem->va == NULL);
}

/**
 * iavf_free_virt_mem - Free virtual memory
 * @hw: hardware structure
 * @mem: structure describing the memory to free
 *
 * OS compatibility function to free virtual memory.
 *
 * @returns zero.
 */
enum iavf_status
iavf_free_virt_mem(struct iavf_hw *hw __unused, struct iavf_virt_mem *mem)
{
	free(mem->va, M_IAVF);
	mem->va = NULL;

	return (0);
}
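
/*
 * Usage sketch (illustrative only, not compiled): callers treat a non-zero
 * return from iavf_allocate_virt_mem() as an allocation failure and release
 * the buffer with iavf_free_virt_mem(). IAVF_ERR_NO_MEMORY is assumed here
 * to be one of the shared-code status values; the 256-byte size is arbitrary.
 *
 *	struct iavf_virt_mem buf = {0};
 *
 *	if (iavf_allocate_virt_mem(hw, &buf, 256))
 *		return (IAVF_ERR_NO_MEMORY);
 *	...
 *	iavf_free_virt_mem(hw, &buf);
 */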

/**
 * iavf_allocate_dma_mem - Allocate DMA memory
 * @hw: hardware structure
 * @mem: structure describing the memory allocation
 * @type: unused type parameter specifying the type of allocation
 * @size: size of the allocation
 * @alignment: alignment requirements for the allocation
 *
 * Allocates DMA memory by using bus_dma_tag_create to create a DMA tag, and
 * then bus_dmamem_alloc to allocate the associated memory.
 *
 * @returns zero on success, or a status code on failure.
 */
enum iavf_status
iavf_allocate_dma_mem(struct iavf_hw *hw, struct iavf_dma_mem *mem,
	enum iavf_memory_type type __unused, u64 size, u32 alignment)
{
	device_t	dev = ((struct iavf_osdep *)hw->back)->dev;
	int		err;

	err = bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       alignment, 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,	/* filter, filterarg */
			       size,	/* maxsize */
			       1,	/* nsegments */
			       size,	/* maxsegsize */
			       BUS_DMA_ALLOCNOW, /* flags */
			       NULL,	/* lockfunc */
			       NULL,	/* lockfuncarg */
			       &mem->tag);
	if (err != 0) {
		device_printf(dev,
		    "iavf_allocate_dma_mem: bus_dma_tag_create failed, "
		    "error %d\n", err);
		goto fail_0;
	}
	err = bus_dmamem_alloc(mem->tag, (void **)&mem->va,
			     BUS_DMA_NOWAIT | BUS_DMA_ZERO, &mem->map);
	if (err != 0) {
		device_printf(dev,
		    "iavf_allocate_dma_mem: bus_dmamem_alloc failed, "
		    "error %d\n", err);
		goto fail_1;
	}
	err = bus_dmamap_load(mem->tag, mem->map, mem->va,
			    size,
			    iavf_dmamap_cb,
			    &mem->pa,
			    BUS_DMA_NOWAIT);
	if (err != 0) {
		device_printf(dev,
		    "iavf_allocate_dma_mem: bus_dmamap_load failed, "
		    "error %d\n", err);
		goto fail_2;
	}
	mem->nseg = 1;
	mem->size = size;
	bus_dmamap_sync(mem->tag, mem->map,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (0);
fail_2:
	bus_dmamem_free(mem->tag, mem->va, mem->map);
fail_1:
	bus_dma_tag_destroy(mem->tag);
fail_0:
	mem->map = NULL;
	mem->tag = NULL;
	return (err);
}

/**
 * iavf_free_dma_mem - Free DMA memory allocation
 * @hw: hardware structure
 * @mem: pointer to memory structure previously allocated
 *
 * Releases DMA memory that was previously allocated by iavf_allocate_dma_mem.
 *
 * @returns zero.
 */
enum iavf_status
iavf_free_dma_mem(struct iavf_hw *hw __unused, struct iavf_dma_mem *mem)
{
	bus_dmamap_sync(mem->tag, mem->map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(mem->tag, mem->map);
	bus_dmamem_free(mem->tag, mem->va, mem->map);
	bus_dma_tag_destroy(mem->tag);
	return (0);
}
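
/*
 * Usage sketch (illustrative only, not compiled): a caller pairs
 * iavf_allocate_dma_mem() with iavf_free_dma_mem(), handing mem.pa to the
 * device and using mem.va for CPU access. The 4096-byte size/alignment and
 * the iavf_mem_reserved type value are assumptions for illustration; the
 * type argument is ignored by this implementation.
 *
 *	struct iavf_dma_mem mem = {0};
 *	enum iavf_status status;
 *
 *	status = iavf_allocate_dma_mem(hw, &mem, iavf_mem_reserved,
 *	    4096, 4096);
 *	if (status)
 *		return (status);
 *	...
 *	iavf_free_dma_mem(hw, &mem);
 */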

/**
 * iavf_init_spinlock - Initialize a spinlock
 * @lock: OS compatibility lock structure
 *
 * Use the mutex layer to initialize a spin lock that can be used via the OS
 * compatibility layer accessors. Note that despite the name, the underlying
 * lock is a default (MTX_DEF) mutex, not a spin mutex.
 *
 * @remark we pass MTX_DUPOK because the mutex name will not be unique. An
 * alternative would be to somehow generate a name, such as by passing in the
 * __FILE__ and __LINE__ values from a macro.
 */
void
iavf_init_spinlock(struct iavf_spinlock *lock)
{
	mtx_init(&lock->mutex, "mutex",
	    "iavf spinlock", MTX_DEF | MTX_DUPOK);
}

/**
 * iavf_acquire_spinlock - Acquire a spin lock
 * @lock: OS compatibility lock structure
 *
 * Acquire a spin lock using mtx_lock.
 */
void
iavf_acquire_spinlock(struct iavf_spinlock *lock)
{
	mtx_lock(&lock->mutex);
}

/**
 * iavf_release_spinlock - Release a spin lock
 * @lock: OS compatibility lock structure
 *
 * Release a spin lock using mtx_unlock.
 */
void
iavf_release_spinlock(struct iavf_spinlock *lock)
{
	mtx_unlock(&lock->mutex);
}

/**
 * iavf_destroy_spinlock - Destroy a spin lock
 * @lock: OS compatibility lock structure
 *
 * Destroy (deinitialize) a spin lock by calling mtx_destroy.
 *
 * @remark we only destroy the lock if it was initialized. This means that
 * calling iavf_destroy_spinlock on a lock that was already destroyed or was
 * never initialized is not considered a bug.
 */
void
iavf_destroy_spinlock(struct iavf_spinlock *lock)
{
	if (mtx_initialized(&lock->mutex))
		mtx_destroy(&lock->mutex);
}
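
/*
 * Usage sketch (illustrative only, not compiled): the expected lifecycle of
 * an OS compatibility lock as used by the shared code.
 *
 *	struct iavf_spinlock lock;
 *
 *	iavf_init_spinlock(&lock);
 *	iavf_acquire_spinlock(&lock);
 *	... critical section ...
 *	iavf_release_spinlock(&lock);
 *	iavf_destroy_spinlock(&lock);
 */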

/**
 * iavf_debug_shared - Log a debug message if enabled
 * @hw: device hardware structure
 * @mask: bit indicating the type of the message
 * @fmt: printf format string
 *
 * Checks whether the mask bit is enabled in hw->debug_mask. If so, prints
 * a message to the console using vprintf().
 */
void
iavf_debug_shared(struct iavf_hw *hw, uint64_t mask, char *fmt, ...)
{
	va_list args;
	device_t dev;

	if (!(mask & hw->debug_mask))
		return;

	dev = ((struct iavf_osdep *)hw->back)->dev;

	/* Re-implement device_printf() */
	device_print_prettyname(dev);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}
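
/*
 * Usage sketch (illustrative only, not compiled): a message is printed only
 * when its mask bit is set in hw->debug_mask. IAVF_DEBUG_INIT is assumed to
 * be one of the shared-code debug mask bits, and wait_ms is a placeholder
 * variable.
 *
 *	hw->debug_mask |= IAVF_DEBUG_INIT;
 *	iavf_debug_shared(hw, IAVF_DEBUG_INIT, "reset complete after %d ms\n",
 *	    wait_ms);
 */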

/**
 * iavf_read_pci_cfg - Read a PCI config register
 * @hw: device hardware structure
 * @reg: the PCI register to read
 *
 * Calls pci_read_config to read the given PCI register from the PCI config
 * space.
 *
 * @returns the value of the register.
 */
u16
iavf_read_pci_cfg(struct iavf_hw *hw, u32 reg)
{
	u16 value;

	value = pci_read_config(((struct iavf_osdep *)hw->back)->dev,
	    reg, 2);

	return (value);
}

/**
 * iavf_write_pci_cfg - Write a PCI config register
 * @hw: device hardware structure
 * @reg: the PCI register to write
 * @value: the value to write
 *
 * Calls pci_write_config to write to a given PCI register in the PCI config
 * space.
 */
void
iavf_write_pci_cfg(struct iavf_hw *hw, u32 reg, u16 value)
{
	pci_write_config(((struct iavf_osdep *)hw->back)->dev,
	    reg, value, 2);
}

/**
 * iavf_rd32 - Read a 32-bit hardware register value
 * @hw: the private hardware structure
 * @reg: register address to read
 *
 * Read the specified 32-bit register value from BAR0 and return its contents.
 *
 * @returns the value of the 32-bit register.
 */
inline uint32_t
iavf_rd32(struct iavf_hw *hw, uint32_t reg)
{
	struct iavf_osdep *osdep = (struct iavf_osdep *)hw->back;

	KASSERT(reg < osdep->mem_bus_space_size,
	    ("iavf: register offset %#jx too large (max is %#jx)",
	    (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));

	return (bus_space_read_4(osdep->mem_bus_space_tag,
	    osdep->mem_bus_space_handle, reg));
}

/**
 * iavf_wr32 - Write a 32-bit hardware register
 * @hw: the private hardware structure
 * @reg: the register address to write to
 * @val: the 32-bit value to write
 *
 * Write the specified 32-bit value to a register address in BAR0.
 */
inline void
iavf_wr32(struct iavf_hw *hw, uint32_t reg, uint32_t val)
{
	struct iavf_osdep *osdep = (struct iavf_osdep *)hw->back;

	KASSERT(reg < osdep->mem_bus_space_size,
	    ("iavf: register offset %#jx too large (max is %#jx)",
	    (uintmax_t)reg, (uintmax_t)osdep->mem_bus_space_size));

	bus_space_write_4(osdep->mem_bus_space_tag,
	    osdep->mem_bus_space_handle, reg, val);
}

/**
 * iavf_flush - Flush register writes
 * @hw: private hardware structure
 *
 * Forces the completion of outstanding PCI register writes by reading from
 * a specific hardware register.
 */
inline void
iavf_flush(struct iavf_hw *hw)
{
	struct iavf_osdep *osdep = (struct iavf_osdep *)hw->back;

	rd32(hw, osdep->flush_reg);
}
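
/*
 * Usage sketch (illustrative only, not compiled): BAR0 register writes are
 * posted, so a caller that needs a write to reach the hardware before
 * continuing follows it with a flush. The 0x0 offset below is a placeholder,
 * not a real register.
 *
 *	iavf_wr32(hw, 0x0, val);
 *	iavf_flush(hw);
 */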

/**
 * iavf_debug_core - Debug printf for core driver code
 * @dev: the device_t to log under
 * @enabled_mask: the mask of enabled messages
 * @mask: the mask of the requested message to print
 * @fmt: printf format string
 *
 * If enabled_mask has the bit from the mask set, print a message to the
 * console using the specified format. This is used to conditionally enable
 * log messages at run time by toggling the enabled_mask in the device
 * structure.
 */
void
iavf_debug_core(device_t dev, u32 enabled_mask, u32 mask, char *fmt, ...)
{
	va_list args;

	if (!(mask & enabled_mask))
		return;

	/* Re-implement device_printf() */
	device_print_prettyname(dev);
	va_start(args, fmt);
	vprintf(fmt, args);
	va_end(args);
}
405