/*
 * Copyright (c) 2024, Broadcom. All rights reserved. The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef __BNXT_RE_MEMORY_H__
#define __BNXT_RE_MEMORY_H__

#include <pthread.h>

#include "main.h"

struct bnxt_re_mem {
	void *va_head;
	void *va_tail;
	uint32_t head;
	uint32_t tail;
	uint32_t size;
	uint32_t pad;
};

#define BNXT_RE_QATTR_SQ_INDX 0
#define BNXT_RE_QATTR_RQ_INDX 1
struct bnxt_re_qattr {
	uint32_t esize;
	uint32_t slots;
	uint32_t nwr;
	uint32_t sz_ring;
	uint32_t sz_shad;
};
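
/*
 * Illustrative only, not part of this header: queue creation is
 * expected to describe each ring with a bnxt_re_qattr, roughly as
 * below. The concrete values and the sizing policy shown here are
 * assumptions; the real logic lives in the queue-creation path.
 * get_aligned() is defined later in this file.
 *
 *	struct bnxt_re_qattr qattr[2];
 *
 *	qattr[BNXT_RE_QATTR_SQ_INDX].esize = 16;   // bytes per slot
 *	qattr[BNXT_RE_QATTR_SQ_INDX].slots = 256;  // slots in the ring
 *	qattr[BNXT_RE_QATTR_SQ_INDX].nwr = 64;     // max work requests
 *	qattr[BNXT_RE_QATTR_SQ_INDX].sz_ring =
 *		get_aligned(256 * 16, 4096);       // page-aligned ring size
 */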

/* spin lock wrapper struct */
struct bnxt_spinlock {
	pthread_spinlock_t lock;
	int in_use;
	int need_lock;
};

struct bnxt_re_queue {
	struct bnxt_spinlock qlock;
	uint32_t flags;
	uint32_t *dbtail;
	void *va;
	uint32_t head;
	uint32_t depth; /* number of entries */
	void *pad;
	uint32_t pad_stride_log2;
	uint32_t tail;
	uint32_t max_slots;
	/* Represents the difference between the real queue depth allocated
	 * in HW and the user-requested queue depth, and is used to correctly
	 * flag the queue-full condition based on the user-supplied queue
	 * depth. This value can vary depending on the type of queue and any
	 * HW requirements that mandate keeping a fixed gap between the
	 * producer and the consumer indices in the queue.
	 */
	uint32_t diff;
	uint32_t stride;
	uint32_t msn;
	uint32_t msn_tbl_sz;
};

/* Round size up to the next multiple of al_size, which must be a power
 * of two; e.g. get_aligned(100, 64) == 128.
 */
static inline unsigned long get_aligned(uint32_t size, uint32_t al_size)
{
	return (unsigned long) (size + al_size - 1) & ~(al_size - 1);
}

/* Smallest power of two >= val; returns 2 (not 1) for val == 1. */
static inline unsigned long roundup_pow_of_two(unsigned long val)
{
	unsigned long roundup = 1;

	if (val == 1)
		return (roundup << 1);

	while (roundup < val)
		roundup <<= 1;

	return roundup;
}

#define iowrite64(dst, val) (*((volatile __u64 *) (dst)) = (val))
#define iowrite32(dst, val) (*((volatile __u32 *) (dst)) = (val))

/* Basic queue operations */
static inline void *bnxt_re_get_hwqe(struct bnxt_re_queue *que, uint32_t idx)
{
	idx += que->tail;
	if (idx >= que->depth)
		idx -= que->depth;
	return (void *)(que->va + (idx << 4));
}

static inline void *bnxt_re_get_hwqe_hdr(struct bnxt_re_queue *que)
{
	return (void *)(que->va + ((que->tail) << 4));
}

static inline uint32_t bnxt_re_is_que_full(struct bnxt_re_queue *que,
					   uint32_t slots)
{
	int32_t avail, head, tail;

	head = que->head;
	tail = que->tail;
	avail = head - tail;
	if (head <= tail)
		avail += que->depth;
	return avail <= (slots + que->diff);
}

static inline uint32_t bnxt_re_is_que_empty(struct bnxt_re_queue *que)
{
	return que->tail == que->head;
}
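
/*
 * Illustrative producer-side flow, a minimal sketch only: the WQE
 * contents and the doorbell write are elided, and "nslots" is a
 * hypothetical per-WR slot count. It shows how the helpers above are
 * meant to compose: check for space, obtain the slot addresses, then
 * advance the tail (which also maintains the epoch bit on rollover).
 *
 *	if (bnxt_re_is_que_full(que, nslots))
 *		return ENOMEM;           // no room for this WR
 *	hdr = bnxt_re_get_hwqe_hdr(que); // slot 0: the WQE header
 *	sqe = bnxt_re_get_hwqe(que, 1);  // slot 1: first payload slot
 *	... fill hdr and sqe ...
 *	bnxt_re_incr_tail(que, nslots);  // publish; may toggle epoch
 */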

static inline void bnxt_re_incr_tail(struct bnxt_re_queue *que, uint8_t cnt)
{
	que->tail += cnt;
	if (que->tail >= que->depth) {
		que->tail %= que->depth;
		/* Rolled over; toggle the tail epoch bit in flags */
		que->flags ^= 1UL << BNXT_RE_FLAG_EPOCH_TAIL_SHIFT;
	}
}

static inline void bnxt_re_incr_head(struct bnxt_re_queue *que, uint8_t cnt)
{
	que->head += cnt;
	if (que->head >= que->depth) {
		que->head %= que->depth;
		/* Rolled over; toggle the head epoch bit in flags */
		que->flags ^= 1UL << BNXT_RE_FLAG_EPOCH_HEAD_SHIFT;
	}
}

void bnxt_re_free_mem(struct bnxt_re_mem *mem);
void *bnxt_re_alloc_mem(size_t size, uint32_t pg_size);
void *bnxt_re_get_obj(struct bnxt_re_mem *mem, size_t req);
void *bnxt_re_get_ring(struct bnxt_re_mem *mem, size_t req);

#endif /* __BNXT_RE_MEMORY_H__ */
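
/*
 * Illustrative lifetime of the allocator API declared above, a sketch
 * under the assumption that bnxt_re_alloc_mem() hands back a struct
 * bnxt_re_mem and that ring and shadow objects are carved out of that
 * one allocation; "qattr" and "pg_size" are placeholders:
 *
 *	struct bnxt_re_mem *mem;
 *	void *ring, *shad;
 *
 *	mem = bnxt_re_alloc_mem(qattr.sz_ring + qattr.sz_shad, pg_size);
 *	if (!mem)
 *		return NULL;
 *	ring = bnxt_re_get_ring(mem, qattr.sz_ring); // HW-visible ring
 *	shad = bnxt_re_get_obj(mem, qattr.sz_shad);  // shadow bookkeeping
 *	...
 *	bnxt_re_free_mem(mem);                       // releases everything
 */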