/******************************************************************************
 * ring.h
 *
 * Shared producer-consumer ring macros.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Tim Deegan and Andrew Warfield November 2004.
 */

#ifndef __XEN_PUBLIC_IO_RING_H__
#define __XEN_PUBLIC_IO_RING_H__

/*
 * When #include'ing this header, you need to provide the following
 * declarations upfront:
 * - standard integer types (uint8_t, uint16_t, etc)
 * They are provided by stdint.h of the standard headers.
 *
 * In addition, if you intend to use the FLEX macros, you also need to
 * provide the following, before invoking the FLEX macros:
 * - size_t
 * - memcpy
 * - grant_ref_t
 * These declarations are provided by string.h of the standard headers,
 * and grant_table.h from the Xen public headers.
 */

#include "../xen-compat.h"

#if __XEN_INTERFACE_VERSION__ < 0x00030208
#define xen_mb()  mb()
#define xen_rmb() rmb()
#define xen_wmb() wmb()
#endif

typedef unsigned int RING_IDX;

/* Round a 32-bit unsigned constant down to the nearest power of two. */
#define __RD2(_x)  (((_x) & 0x00000002) ? 0x2                  : ((_x) & 0x1))
#define __RD4(_x)  (((_x) & 0x0000000c) ? __RD2((_x)>>2)<<2    : __RD2(_x))
#define __RD8(_x)  (((_x) & 0x000000f0) ? __RD4((_x)>>4)<<4    : __RD4(_x))
#define __RD16(_x) (((_x) & 0x0000ff00) ? __RD8((_x)>>8)<<8    : __RD8(_x))
#define __RD32(_x) (((_x) & 0xffff0000) ? __RD16((_x)>>16)<<16 : __RD16(_x))

/*
 * Calculate size of a shared ring, given the total available space for the
 * ring and indexes (_sz), and the name tag of the request/response structure.
 * A ring contains as many entries as will fit, rounded down to the nearest
 * power of two (so we can mask with (size-1) to loop around).
 */
#define __CONST_RING_SIZE(_s, _sz) \
    (__RD32(((_sz) - offsetof(struct _s##_sring, ring)) / \
            sizeof(((struct _s##_sring *)0)->ring[0])))
/*
 * The same for passing in an actual pointer instead of a name tag.
 */
#define __RING_SIZE(_s, _sz) \
    (__RD32(((_sz) - (long)(_s)->ring + (long)(_s)) / sizeof((_s)->ring[0])))
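/*
 * A rough illustration of the arithmetic above (the entry size below is an
 * assumption for the example, not part of the interface): with a 4096-byte
 * shared area, the ring[] array of the sring structure defined further down
 * starts at byte offset 64, so with 64-byte entries
 * (4096 - 64) / 64 = 63 entries would fit, which __RD32() rounds down to
 * the nearest power of two, giving a ring of 32 entries.
 */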
/*
 * Macros to make the correct C datatypes for a new kind of ring.
 *
 * To make a new ring datatype, you need to have two message structures,
 * let's say request_t and response_t, already defined.
 *
 * In a header where you want the ring datatype declared, you then do:
 *
 *     DEFINE_RING_TYPES(mytag, request_t, response_t);
 *
 * These expand out to give you a set of types, as you can see below.
 * The most important of these are:
 *
 *     mytag_sring_t      - The shared ring.
 *     mytag_front_ring_t - The 'front' half of the ring.
 *     mytag_back_ring_t  - The 'back' half of the ring.
 *
 * To initialise a ring in your code you need to know the location and size
 * of the shared memory area (PAGE_SIZE, for instance). To initialise
 * the front half:
 *
 *     mytag_front_ring_t front_ring;
 *     SHARED_RING_INIT((mytag_sring_t *)shared_page);
 *     FRONT_RING_INIT(&front_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 *
 * Initialising the back follows similarly (note that only the front
 * initialises the shared ring):
 *
 *     mytag_back_ring_t back_ring;
 *     BACK_RING_INIT(&back_ring, (mytag_sring_t *)shared_page, PAGE_SIZE);
 */

#define DEFINE_RING_TYPES(__name, __req_t, __rsp_t)                     \
                                                                        \
/* Shared ring entry */                                                 \
union __name##_sring_entry {                                            \
    __req_t req;                                                        \
    __rsp_t rsp;                                                        \
};                                                                      \
                                                                        \
/* Shared ring page */                                                  \
struct __name##_sring {                                                 \
    RING_IDX req_prod, req_event;                                       \
    RING_IDX rsp_prod, rsp_event;                                       \
    union {                                                             \
        struct {                                                        \
            uint8_t smartpoll_active;                                   \
        } netif;                                                        \
        struct {                                                        \
            uint8_t msg;                                                \
        } tapif_user;                                                   \
        uint8_t pvt_pad[4];                                             \
    } pvt;                                                              \
    uint8_t __pad[44];                                                  \
    union __name##_sring_entry ring[1]; /* variable-length */           \
};                                                                      \
                                                                        \
/* "Front" end's private variables */                                   \
struct __name##_front_ring {                                            \
    RING_IDX req_prod_pvt;                                              \
    RING_IDX rsp_cons;                                                  \
    unsigned int nr_ents;                                               \
    struct __name##_sring *sring;                                       \
};                                                                      \
                                                                        \
/* "Back" end's private variables */                                    \
struct __name##_back_ring {                                             \
    RING_IDX rsp_prod_pvt;                                              \
    RING_IDX req_cons;                                                  \
    unsigned int nr_ents;                                               \
    struct __name##_sring *sring;                                       \
};                                                                      \
                                                                        \
/* Syntactic sugar */                                                   \
typedef struct __name##_sring __name##_sring_t;                         \
typedef struct __name##_front_ring __name##_front_ring_t;               \
typedef struct __name##_back_ring __name##_back_ring_t

/*
 * Macros for manipulating rings.
 *
 * FRONT_RING_whatever works on the "front end" of a ring: here
 * requests are pushed on to the ring and responses taken off it.
 *
 * BACK_RING_whatever works on the "back end" of a ring: here
 * requests are taken off the ring and responses put on.
 *
 * N.B. these macros do NO INTERLOCKS OR FLOW CONTROL.
 * This is OK in 1-for-1 request-response situations where the
 * requestor (front end) never has more than RING_SIZE() requests
 * outstanding, regardless of the status of any responses.
 */
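/*
 * As an illustrative sketch only (front_ring, my_request and notify_backend()
 * are assumptions of the example, not part of this interface, and request_t
 * is the message type from the DEFINE_RING_TYPES example above): a front end
 * might queue one request using the macros defined below as:
 *
 *     request_t *req;
 *     int notify;
 *
 *     if (!RING_FULL(&front_ring)) {
 *         req = RING_GET_REQUEST(&front_ring, front_ring.req_prod_pvt);
 *         *req = my_request;
 *         front_ring.req_prod_pvt++;
 *         RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&front_ring, notify);
 *         if (notify)
 *             notify_backend();
 *     }
 */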
/* Initialising empty rings */
#define SHARED_RING_INIT(_s) do {                                       \
    (_s)->req_prod  = (_s)->rsp_prod  = 0;                              \
    (_s)->req_event = (_s)->rsp_event = 1;                              \
    (void)memset((_s)->pvt.pvt_pad, 0, sizeof((_s)->pvt.pvt_pad));      \
    (void)memset((_s)->__pad, 0, sizeof((_s)->__pad));                  \
} while(0)

#define FRONT_RING_ATTACH(_r, _s, _i, __size) do {                      \
    (_r)->req_prod_pvt = (_i);                                          \
    (_r)->rsp_cons = (_i);                                              \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
    (_r)->sring = (_s);                                                 \
} while (0)

#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)

#define BACK_RING_ATTACH(_r, _s, _i, __size) do {                       \
    (_r)->rsp_prod_pvt = (_i);                                          \
    (_r)->req_cons = (_i);                                              \
    (_r)->nr_ents = __RING_SIZE(_s, __size);                            \
    (_r)->sring = (_s);                                                 \
} while (0)

#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)

/* How big is this ring? */
#define RING_SIZE(_r)                                                   \
    ((_r)->nr_ents)

/* Number of free requests (for use on front side only). */
#define RING_FREE_REQUESTS(_r)                                          \
    (RING_SIZE(_r) - ((_r)->req_prod_pvt - (_r)->rsp_cons))

/*
 * Test if there is an empty slot available on the front ring.
 * (This is only meaningful from the front.)
 */
#define RING_FULL(_r)                                                   \
    (RING_FREE_REQUESTS(_r) == 0)

/* Test if there are outstanding messages to be processed on a ring. */
#define RING_HAS_UNCONSUMED_RESPONSES(_r)                               \
    ((_r)->sring->rsp_prod - (_r)->rsp_cons)

#ifdef __GNUC__
#define RING_HAS_UNCONSUMED_REQUESTS(_r) ({                             \
    unsigned int req = (_r)->sring->req_prod - (_r)->req_cons;          \
    unsigned int rsp = RING_SIZE(_r) -                                  \
        ((_r)->req_cons - (_r)->rsp_prod_pvt);                          \
    req < rsp ? req : rsp;                                              \
})
#else
/* Same as above, but without the nice GCC ({ ... }) syntax. */
#define RING_HAS_UNCONSUMED_REQUESTS(_r)                                \
    ((((_r)->sring->req_prod - (_r)->req_cons) <                        \
      (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt))) ?        \
     ((_r)->sring->req_prod - (_r)->req_cons) :                         \
     (RING_SIZE(_r) - ((_r)->req_cons - (_r)->rsp_prod_pvt)))
#endif

/* Direct access to individual ring elements, by index. */
#define RING_GET_REQUEST(_r, _idx)                                      \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].req))

#define RING_GET_RESPONSE(_r, _idx)                                     \
    (&((_r)->sring->ring[((_idx) & (RING_SIZE(_r) - 1))].rsp))

/*
 * Get a local copy of a request/response.
 *
 * Use this in preference to RING_GET_{REQUEST,RESPONSE}() so all processing is
 * done on a local copy that cannot be modified by the other end.
 *
 * Note that https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58145 may cause this
 * to be ineffective where dest is a struct which consists of only bitfields.
 */
#define RING_COPY_(type, r, idx, dest) do {                             \
    /* Use volatile to force the copy into dest. */                     \
    *(dest) = *(volatile __typeof__(dest))RING_GET_##type(r, idx);      \
} while (0)

#define RING_COPY_REQUEST(r, idx, req)  RING_COPY_(REQUEST, r, idx, req)
#define RING_COPY_RESPONSE(r, idx, rsp) RING_COPY_(RESPONSE, r, idx, rsp)

/* Loop termination condition: Would the specified index overflow the ring? */
#define RING_REQUEST_CONS_OVERFLOW(_r, _cons)                           \
    (((_cons) - (_r)->rsp_prod_pvt) >= RING_SIZE(_r))
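/*
 * A minimal sketch of how a back end might consume requests defensively
 * (back_ring and process() are assumptions of the example, not part of this
 * interface, and request_t is the message type from the DEFINE_RING_TYPES
 * example above): each request is copied out with RING_COPY_REQUEST() and
 * the consumer index is bounds-checked before every iteration.
 *
 *     RING_IDX rc = back_ring.req_cons;
 *     request_t req;
 *
 *     while (RING_HAS_UNCONSUMED_REQUESTS(&back_ring) &&
 *            !RING_REQUEST_CONS_OVERFLOW(&back_ring, rc)) {
 *         RING_COPY_REQUEST(&back_ring, rc, &req);
 *         back_ring.req_cons = ++rc;
 *         process(&req);
 *     }
 */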
/* Ill-behaved frontend determination: Can there be this many requests? */
#define RING_REQUEST_PROD_OVERFLOW(_r, _prod)                           \
    (((_prod) - (_r)->rsp_prod_pvt) > RING_SIZE(_r))

/* Ill-behaved backend determination: Can there be this many responses? */
#define RING_RESPONSE_PROD_OVERFLOW(_r, _prod)                          \
    (((_prod) - (_r)->rsp_cons) > RING_SIZE(_r))

#define RING_PUSH_REQUESTS(_r) do {                                     \
    xen_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = (_r)->req_prod_pvt;                         \
} while (0)

#define RING_PUSH_RESPONSES(_r) do {                                    \
    xen_wmb(); /* front sees resps /before/ updated producer index */   \
    (_r)->sring->rsp_prod = (_r)->rsp_prod_pvt;                         \
} while (0)

/*
 * Notification hold-off (req_event and rsp_event):
 *
 * When queueing requests or responses on a shared ring, it may not always be
 * necessary to notify the remote end. For example, if requests are in flight
 * in a backend, the front may be able to queue further requests without
 * notifying the back (if the back checks for new requests when it queues
 * responses).
 *
 * When enqueuing requests or responses:
 *
 * Use RING_PUSH_{REQUESTS,RESPONSES}_AND_CHECK_NOTIFY(). The second argument
 * is a boolean return value. True indicates that the receiver requires an
 * asynchronous notification.
 *
 * After dequeuing requests or responses (before sleeping the connection):
 *
 * Use RING_FINAL_CHECK_FOR_REQUESTS() or RING_FINAL_CHECK_FOR_RESPONSES().
 * The second argument is a boolean return value. True indicates that there
 * are pending messages on the ring (i.e., the connection should not be put
 * to sleep).
 *
 * These macros will set the req_event/rsp_event field to trigger a
 * notification on the very next message that is enqueued. If you want to
 * create batches of work (i.e., only receive a notification after several
 * messages have been enqueued) then you will need to create a customised
 * version of the FINAL_CHECK macro in your own code, which sets the event
 * field appropriately.
 */
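/*
 * A rough sketch of the hold-off pattern described above, seen from the
 * back end (back_ring, consume_one_request() and block_until_notified()
 * are assumptions of the example, not part of this interface); the macros
 * used are defined below:
 *
 *     int work_to_do;
 *
 *     for (;;) {
 *         while (RING_HAS_UNCONSUMED_REQUESTS(&back_ring))
 *             consume_one_request(&back_ring);
 *         RING_FINAL_CHECK_FOR_REQUESTS(&back_ring, work_to_do);
 *         if (!work_to_do)
 *             block_until_notified();
 *     }
 */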
#define RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(_r, _notify) do {           \
    RING_IDX __old = (_r)->sring->req_prod;                             \
    RING_IDX __new = (_r)->req_prod_pvt;                                \
    xen_wmb(); /* back sees requests /before/ updated producer index */ \
    (_r)->sring->req_prod = __new;                                      \
    xen_mb(); /* back sees new requests /before/ we check req_event */  \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->req_event) <           \
                 (RING_IDX)(__new - __old));                            \
} while (0)

#define RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(_r, _notify) do {          \
    RING_IDX __old = (_r)->sring->rsp_prod;                             \
    RING_IDX __new = (_r)->rsp_prod_pvt;                                \
    xen_wmb(); /* front sees resps /before/ updated producer index */   \
    (_r)->sring->rsp_prod = __new;                                      \
    xen_mb(); /* front sees new resps /before/ we check rsp_event */    \
    (_notify) = ((RING_IDX)(__new - (_r)->sring->rsp_event) <           \
                 (RING_IDX)(__new - __old));                            \
} while (0)

#define RING_FINAL_CHECK_FOR_REQUESTS(_r, _work_to_do) do {             \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
    if (_work_to_do) break;                                             \
    (_r)->sring->req_event = (_r)->req_cons + 1;                        \
    xen_mb();                                                           \
    (_work_to_do) = RING_HAS_UNCONSUMED_REQUESTS(_r);                   \
} while (0)

#define RING_FINAL_CHECK_FOR_RESPONSES(_r, _work_to_do) do {            \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
    if (_work_to_do) break;                                             \
    (_r)->sring->rsp_event = (_r)->rsp_cons + 1;                        \
    xen_mb();                                                           \
    (_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r);                  \
} while (0)


/*
 * DEFINE_XEN_FLEX_RING_AND_INTF defines two unidirectional rings and
 * functions to check if there is data on the ring, and to read and
 * write to them.
 *
 * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
 * does not define the indexes page. As different protocols can have
 * extensions to the basic format, this macro allows them to define their
 * own struct.
 *
 * XEN_FLEX_RING_SIZE
 *   Convenience macro to calculate the size of one of the two rings
 *   from the overall order.
 *
 * $NAME_mask
 *   Function to apply the size mask to an index, to reduce the index
 *   within the range [0, size - 1].
 *
 * $NAME_read_packet
 *   Function to read data from the ring. The amount of data to read is
 *   specified by the "size" argument.
 *
 * $NAME_write_packet
 *   Function to write data to the ring. The amount of data to write is
 *   specified by the "size" argument.
 *
 * $NAME_get_ring_ptr
 *   Convenience function that returns a pointer into the ring at the
 *   right location for reading or writing.
 *
 * $NAME_data_intf
 *   Indexes page, shared between frontend and backend. It also
 *   contains the array of grant refs.
 *
 * $NAME_queued
 *   Function to calculate how many bytes are currently on the ring,
 *   ready to be read. It can also be used to calculate how much free
 *   space is currently on the ring (XEN_FLEX_RING_SIZE() -
 *   $NAME_queued()).
 */
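/*
 * A hypothetical protocol "myproto" (the name, and the intf, data, buf and
 * len variables, are assumptions of the example, not anything defined here)
 * could instantiate the helpers and indexes page described above, using the
 * macros defined below, with:
 *
 *     DEFINE_XEN_FLEX_RING_AND_INTF(myproto);
 *
 * and might then append len bytes from buf to the "out" ring roughly as:
 *
 *     RING_IDX prod = intf->out_prod, cons = intf->out_cons;
 *     RING_IDX size = XEN_FLEX_RING_SIZE(intf->ring_order);
 *     RING_IDX masked_prod = myproto_mask(prod, size);
 *     RING_IDX masked_cons = myproto_mask(cons, size);
 *
 *     if (size - myproto_queued(prod, cons, size) >= len) {
 *         myproto_write_packet(data->out, buf, len,
 *                              &masked_prod, masked_cons, size);
 *         xen_wmb(); /* data visible before the producer index moves */
 *         intf->out_prod = prod + len;
 *     }
 */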
388 */ 389 #define XEN_PAGE_SHIFT 12 390 #endif 391 #define XEN_FLEX_RING_SIZE(order) \ 392 (1UL << ((order) + XEN_PAGE_SHIFT - 1)) 393 394 #define DEFINE_XEN_FLEX_RING(name) \ 395 static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \ 396 { \ 397 return idx & (ring_size - 1); \ 398 } \ 399 \ 400 static inline unsigned char *name##_get_ring_ptr(unsigned char *buf, \ 401 RING_IDX idx, \ 402 RING_IDX ring_size) \ 403 { \ 404 return buf + name##_mask(idx, ring_size); \ 405 } \ 406 \ 407 static inline void name##_read_packet(void *opaque, \ 408 const unsigned char *buf, \ 409 size_t size, \ 410 RING_IDX masked_prod, \ 411 RING_IDX *masked_cons, \ 412 RING_IDX ring_size) \ 413 { \ 414 if (*masked_cons < masked_prod || \ 415 size <= ring_size - *masked_cons) { \ 416 memcpy(opaque, buf + *masked_cons, size); \ 417 } else { \ 418 memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \ 419 memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \ 420 size - (ring_size - *masked_cons)); \ 421 } \ 422 *masked_cons = name##_mask(*masked_cons + size, ring_size); \ 423 } \ 424 \ 425 static inline void name##_write_packet(unsigned char *buf, \ 426 const void *opaque, \ 427 size_t size, \ 428 RING_IDX *masked_prod, \ 429 RING_IDX masked_cons, \ 430 RING_IDX ring_size) \ 431 { \ 432 if (*masked_prod < masked_cons || \ 433 size <= ring_size - *masked_prod) { \ 434 memcpy(buf + *masked_prod, opaque, size); \ 435 } else { \ 436 memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \ 437 memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \ 438 size - (ring_size - *masked_prod)); \ 439 } \ 440 *masked_prod = name##_mask(*masked_prod + size, ring_size); \ 441 } \ 442 \ 443 static inline RING_IDX name##_queued(RING_IDX prod, \ 444 RING_IDX cons, \ 445 RING_IDX ring_size) \ 446 { \ 447 RING_IDX size; \ 448 \ 449 if (prod == cons) \ 450 return 0; \ 451 \ 452 prod = name##_mask(prod, ring_size); \ 453 cons = name##_mask(cons, ring_size); \ 454 \ 455 if (prod == cons) \ 456 return ring_size; \ 457 \ 458 if (prod > cons) \ 459 size = prod - cons; \ 460 else \ 461 size = ring_size - (cons - prod); \ 462 return size; \ 463 } \ 464 \ 465 struct name##_data { \ 466 unsigned char *in; /* half of the allocation */ \ 467 unsigned char *out; /* half of the allocation */ \ 468 } 469 470 #define DEFINE_XEN_FLEX_RING_AND_INTF(name) \ 471 struct name##_data_intf { \ 472 RING_IDX in_cons, in_prod; \ 473 \ 474 uint8_t pad1[56]; \ 475 \ 476 RING_IDX out_cons, out_prod; \ 477 \ 478 uint8_t pad2[56]; \ 479 \ 480 RING_IDX ring_order; \ 481 grant_ref_t ref[]; \ 482 }; \ 483 DEFINE_XEN_FLEX_RING(name) 484 485 #endif /* __XEN_PUBLIC_IO_RING_H__ */ 486 487 /* 488 * Local variables: 489 * mode: C 490 * c-file-style: "BSD" 491 * c-basic-offset: 4 492 * tab-width: 4 493 * indent-tabs-mode: nil 494 * End: 495 */ 496