#define _GNU_SOURCE
#include "main.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <malloc.h>
#include <assert.h>
#include <errno.h>
#include <limits.h>

#define SMP_CACHE_BYTES 64
#define cache_line_size() SMP_CACHE_BYTES
#define ____cacheline_aligned_in_smp __attribute__ ((aligned (SMP_CACHE_BYTES)))
#define unlikely(x) (__builtin_expect(!!(x), 0))
#define likely(x) (__builtin_expect(!!(x), 1))
#define ALIGN(x, a) (((x) + (a) - 1) / (a) * (a))
typedef pthread_spinlock_t spinlock_t;

typedef int gfp_t;

/* Userspace stand-ins for the kernel allocation API used by ptr_ring.h. */
static void *kmalloc(unsigned size, gfp_t gfp)
{
	return memalign(64, size);
}

static void *kzalloc(unsigned size, gfp_t gfp)
{
	void *p = memalign(64, size);
	if (!p)
		return p;
	memset(p, 0, size);

	return p;
}

static void kfree(void *p)
{
	if (p)
		free(p);
}

/* Pthread spinlocks stand in for the kernel spinlock variants. */
static void spin_lock_init(spinlock_t *lock)
{
	int r = pthread_spin_init(lock, 0);
	assert(!r);
}

static void spin_lock(spinlock_t *lock)
{
	int ret = pthread_spin_lock(lock);
	assert(!ret);
}

static void spin_unlock(spinlock_t *lock)
{
	int ret = pthread_spin_unlock(lock);
	assert(!ret);
}

static void spin_lock_bh(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_bh(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irq(spinlock_t *lock)
{
	spin_lock(lock);
}

static void spin_unlock_irq(spinlock_t *lock)
{
	spin_unlock(lock);
}

static void spin_lock_irqsave(spinlock_t *lock, unsigned long f)
{
	spin_lock(lock);
}

static void spin_unlock_irqrestore(spinlock_t *lock, unsigned long f)
{
	spin_unlock(lock);
}

#include "../../../include/linux/ptr_ring.h"

static unsigned long long headcnt, tailcnt;
static struct ptr_ring array ____cacheline_aligned_in_smp;

/* implemented by ring */
void alloc_ring(void)
{
	int ret = ptr_ring_init(&array, ring_size, 0);
	assert(!ret);
}

/* guest side */
int add_inbuf(unsigned len, void *buf, void *datap)
{
	int ret;

	ret = __ptr_ring_produce(&array, buf);
	if (ret >= 0) {
		ret = 0;
		headcnt++;
	}

	return ret;
}

/*
 * The ptr_ring API provides no way for the producer to find out whether a
 * given buffer was consumed.  Our tests merely require that a successful
 * get_buf implies that add_inbuf succeeded in the past and that add_inbuf
 * will succeed, so fake it accordingly.
 */
void *get_buf(unsigned *lenp, void **bufp)
{
	void *datap;

	if (tailcnt == headcnt || __ptr_ring_full(&array))
		datap = NULL;
	else {
		datap = "Buffer\n";
		++tailcnt;
	}

	return datap;
}

bool used_empty()
{
	return (tailcnt == headcnt || __ptr_ring_full(&array));
}

void disable_call()
{
	assert(0);
}

bool enable_call()
{
	assert(0);
}

void kick_available(void)
{
	assert(0);
}

/* host side */
void disable_kick()
{
	assert(0);
}

bool enable_kick()
{
	assert(0);
}

bool avail_empty()
{
	return !__ptr_ring_peek(&array);
}

bool use_buf(unsigned *lenp, void **bufp)
{
	void *ptr;

	ptr = __ptr_ring_consume(&array);

	return ptr;
}

void call_used(void)
{
	assert(0);
}
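
/*
 * Illustration only, not part of the build: a minimal, single-threaded
 * sketch of how the ptr_ring calls used above pair up.  The ring size and
 * buffer value are arbitrary, and the function name is made up for this
 * sketch; the actual multi-threaded driver is presumably the harness that
 * declares the hooks in main.h.
 */
#if 0
static void ptr_ring_smoke_test(void)
{
	struct ptr_ring ring;
	void *item;

	/* alloc_ring() does the same thing for the global ring. */
	assert(!ptr_ring_init(&ring, 16, 0));

	/* Producer side: add_inbuf() boils down to __ptr_ring_produce(). */
	assert(!__ptr_ring_produce(&ring, "Buffer\n"));
	assert(!__ptr_ring_empty(&ring));

	/* Consumer side: use_buf() boils down to __ptr_ring_consume(). */
	item = __ptr_ring_consume(&ring);
	assert(item);

	ptr_ring_cleanup(&ring, NULL);
}
#endif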