// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_MAX_ADMINQ_RELEASE_CHECK 500
#define GVE_ADMINQ_SLEEP_LEN 20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
"Expected: length=%d, feature_mask=%x.\n" \
"Actual: length=%d, feature_mask=%x.\n"

#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"

static
struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
					      struct gve_device_option *option)
{
	void *option_end, *descriptor_end;

	option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
	descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);

	return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}

#define GVE_DEVICE_OPTION_NO_MIN_RING_SIZE	8

static
void gve_parse_device_option(struct gve_priv *priv,
			     struct gve_device_descriptor *device_descriptor,
			     struct gve_device_option *option,
			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
			     struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
			     struct gve_device_option_modify_ring **dev_op_modify_ring)
{
	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
	u16 option_length = be16_to_cpu(option->option_length);
	u16 option_id = be16_to_cpu(option->option_id);

	/* If the length or feature mask doesn't match, continue without
	 * enabling the feature.
	 */
	switch (option_id) {
	case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
		if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Raw Addressing",
				 GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
				 option_length, req_feat_mask);
			break;
		}

		dev_info(&priv->pdev->dev,
			 "Gqi raw addressing device option enabled.\n");
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		break;
	case GVE_DEV_OPT_ID_GQI_RDA:
		if (option_length < sizeof(**dev_op_gqi_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
		}
		*dev_op_gqi_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_GQI_QPL:
		if (option_length < sizeof(**dev_op_gqi_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
		}
		*dev_op_gqi_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_RDA:
		if (option_length < sizeof(**dev_op_dqo_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
		}
		*dev_op_dqo_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_QPL:
		if (option_length < sizeof(**dev_op_dqo_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO QPL", (int)sizeof(**dev_op_dqo_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO QPL");
		}
		*dev_op_dqo_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
		if (option_length < sizeof(**dev_op_jumbo_frames) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Jumbo Frames",
				 (int)sizeof(**dev_op_jumbo_frames),
				 GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_jumbo_frames)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
				 "Jumbo Frames");
		}
		*dev_op_jumbo_frames = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_BUFFER_SIZES:
		if (option_length < sizeof(**dev_op_buffer_sizes) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Buffer Sizes",
				 (int)sizeof(**dev_op_buffer_sizes),
				 GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_buffer_sizes))
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
				 "Buffer Sizes");
		*dev_op_buffer_sizes = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_MODIFY_RING:
		if (option_length < GVE_DEVICE_OPTION_NO_MIN_RING_SIZE ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Modify Ring", (int)sizeof(**dev_op_modify_ring),
				 GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_modify_ring)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "Modify Ring");
		}

		*dev_op_modify_ring = (void *)(option + 1);

		/* device has not provided min ring size */
		if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
			priv->default_min_ring_size = true;
		break;
	default:
		/* If we don't recognize the option just continue
		 * without doing anything.
		 */
		dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
			option_id);
	}
}

/* Process all device options for a given describe device call. */
static int
gve_process_device_options(struct gve_priv *priv,
			   struct gve_device_descriptor *descriptor,
			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			   struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
			   struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
			   struct gve_device_option_modify_ring **dev_op_modify_ring)
{
	const int num_options = be16_to_cpu(descriptor->num_device_options);
	struct gve_device_option *dev_opt;
	int i;

	/* The options struct directly follows the device descriptor. */
	dev_opt = (void *)(descriptor + 1);
	for (i = 0; i < num_options; i++) {
		struct gve_device_option *next_opt;

		next_opt = gve_get_next_option(descriptor, dev_opt);
		if (!next_opt) {
			dev_err(&priv->dev->dev,
				"options exceed device_descriptor's total length.\n");
			return -EINVAL;
		}

		gve_parse_device_option(priv, descriptor, dev_opt,
					dev_op_gqi_rda, dev_op_gqi_qpl,
					dev_op_dqo_rda, dev_op_jumbo_frames,
					dev_op_dqo_qpl, dev_op_buffer_sizes,
					dev_op_modify_ring);
		dev_opt = next_opt;
	}

	return 0;
}

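/* Allocate the admin queue out of a DMA pool and hand its base address to the
 * device: revision 0 devices take a page frame number, newer revisions take
 * the queue length and a 64-bit base address, then the RUN driver status.
 */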
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
	priv->adminq_pool = dma_pool_create("adminq_pool", dev,
					    GVE_ADMINQ_BUFFER_SIZE, 0, 0);
	if (unlikely(!priv->adminq_pool))
		return -ENOMEM;
	priv->adminq = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
				      &priv->adminq_bus_addr);
	if (unlikely(!priv->adminq)) {
		dma_pool_destroy(priv->adminq_pool);
		return -ENOMEM;
	}

	priv->adminq_mask =
		(GVE_ADMINQ_BUFFER_SIZE / sizeof(union gve_adminq_command)) - 1;
	priv->adminq_prod_cnt = 0;
	priv->adminq_cmd_fail = 0;
	priv->adminq_timeouts = 0;
	priv->adminq_describe_device_cnt = 0;
	priv->adminq_cfg_device_resources_cnt = 0;
	priv->adminq_register_page_list_cnt = 0;
	priv->adminq_unregister_page_list_cnt = 0;
	priv->adminq_create_tx_queue_cnt = 0;
	priv->adminq_create_rx_queue_cnt = 0;
	priv->adminq_destroy_tx_queue_cnt = 0;
	priv->adminq_destroy_rx_queue_cnt = 0;
	priv->adminq_dcfg_device_resources_cnt = 0;
	priv->adminq_set_driver_parameter_cnt = 0;
	priv->adminq_report_stats_cnt = 0;
	priv->adminq_report_link_speed_cnt = 0;
	priv->adminq_get_ptype_map_cnt = 0;

	/* Setup Admin queue with the device */
	if (priv->pdev->revision < 0x1) {
		iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
			    &priv->reg_bar0->adminq_pfn);
	} else {
		iowrite16be(GVE_ADMINQ_BUFFER_SIZE,
			    &priv->reg_bar0->adminq_length);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		iowrite32be(priv->adminq_bus_addr >> 32,
			    &priv->reg_bar0->adminq_base_address_hi);
#endif
		iowrite32be(priv->adminq_bus_addr,
			    &priv->reg_bar0->adminq_base_address_lo);
		iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
	}
	gve_set_admin_queue_ok(priv);
	return 0;
}

/* Tell the device the admin queue is being released and poll until the device
 * acknowledges, so it stops using the queue memory before it is freed.
 */
void gve_adminq_release(struct gve_priv *priv)
{
	int i = 0;

	/* Tell the device the adminq is leaving */
	if (priv->pdev->revision < 0x1) {
		iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
		while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
			/* If this is reached the device is unrecoverable and still
			 * holding memory. Continue looping to avoid memory corruption,
			 * but WARN so it is visible what is going on.
			 */
			if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
				WARN(1, "Unrecoverable platform error!");
			i++;
			msleep(GVE_ADMINQ_SLEEP_LEN);
		}
	} else {
		iowrite32be(GVE_DRIVER_STATUS_RESET_MASK, &priv->reg_bar0->driver_status);
		while (!(ioread32be(&priv->reg_bar0->device_status)
				& GVE_DEVICE_STATUS_DEVICE_IS_RESET)) {
			if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
				WARN(1, "Unrecoverable platform error!");
			i++;
			msleep(GVE_ADMINQ_SLEEP_LEN);
		}
	}
	gve_clear_device_rings_ok(priv);
	gve_clear_device_resources_ok(priv);
	gve_clear_admin_queue_ok(priv);
}

void gve_adminq_free(struct device *dev, struct gve_priv *priv)
{
	if (!gve_get_admin_queue_ok(priv))
		return;
	gve_adminq_release(priv);
	dma_pool_free(priv->adminq_pool, priv->adminq, priv->adminq_bus_addr);
	dma_pool_destroy(priv->adminq_pool);
	gve_clear_admin_queue_ok(priv);
}

static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
}

static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (ioread32be(&priv->reg_bar0->adminq_event_counter)
		    == prod_cnt)
			return true;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}

	return false;
}

/* Translate an admin queue command status into a Linux errno, counting
 * failures in adminq_cmd_fail.
 */
static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET) {
		dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
		priv->adminq_cmd_fail++;
	}
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return 0;
	case GVE_ADMINQ_COMMAND_UNSET:
		dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return -EAGAIN;
	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return -ETIME;
	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return -EACCES;
	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return -ENOMEM;
	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return -EOPNOTSUPP;
	default:
		dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
		return -EINVAL;
	}
}

/* Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 */
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
	int tail, head;
	int i;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head)) {
		dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
		priv->adminq_timeouts++;
		return -ENOTRECOVERABLE;
	}

	for (i = tail; i < head; i++) {
		union gve_adminq_command *cmd;
		u32 status, err;

		cmd = &priv->adminq[i & priv->adminq_mask];
		status = be32_to_cpu(READ_ONCE(cmd->status));
		err = gve_adminq_parse_err(priv, status);
		if (err)
			// Return the first error if we failed.
			return err;
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
				union gve_adminq_command *cmd_orig)
{
	union gve_adminq_command *cmd;
	u32 opcode;
	u32 tail;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);

	// Check if next command will overflow the buffer.
	if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
	    (tail & priv->adminq_mask)) {
		int err;

		// Flush existing commands to make room.
		err = gve_adminq_kick_and_wait(priv);
		if (err)
			return err;

		// Retry.
		tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
		if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
		    (tail & priv->adminq_mask)) {
			// This should never happen. We just flushed the
			// command queue so there should be enough space.
			return -ENOMEM;
		}
	}

	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;

	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
	opcode = be32_to_cpu(READ_ONCE(cmd->opcode));

	switch (opcode) {
	case GVE_ADMINQ_DESCRIBE_DEVICE:
		priv->adminq_describe_device_cnt++;
		break;
	case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
		priv->adminq_cfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_REGISTER_PAGE_LIST:
		priv->adminq_register_page_list_cnt++;
		break;
	case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
		priv->adminq_unregister_page_list_cnt++;
		break;
	case GVE_ADMINQ_CREATE_TX_QUEUE:
		priv->adminq_create_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_CREATE_RX_QUEUE:
		priv->adminq_create_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_TX_QUEUE:
		priv->adminq_destroy_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_RX_QUEUE:
		priv->adminq_destroy_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
		priv->adminq_dcfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_SET_DRIVER_PARAMETER:
		priv->adminq_set_driver_parameter_cnt++;
		break;
	case GVE_ADMINQ_REPORT_STATS:
		priv->adminq_report_stats_cnt++;
		break;
	case GVE_ADMINQ_REPORT_LINK_SPEED:
		priv->adminq_report_link_speed_cnt++;
		break;
	case GVE_ADMINQ_GET_PTYPE_MAP:
		priv->adminq_get_ptype_map_cnt++;
		break;
	case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
		priv->adminq_verify_driver_compatibility_cnt++;
		break;
	default:
		dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 * The caller is also responsible for making sure there are no commands
 * waiting to be executed.
 */
static int gve_adminq_execute_cmd(struct gve_priv *priv,
				  union gve_adminq_command *cmd_orig)
{
	u32 tail, head;
	int err;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;
	if (tail != head)
		// This is not a valid path
		return -EINVAL;

	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err)
		return err;

	return gve_adminq_kick_and_wait(priv);
}

/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then
 * the management vector is first.
 *
 * gve arranges the msix vectors so that the management vector is last.
 */
#define GVE_NTFY_BLK_BASE_MSIX_IDX	0
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	cmd.configure_device_resources =
		(struct gve_adminq_configure_device_resources) {
		.counter_array = cpu_to_be64(counter_array_bus_addr),
		.num_counters = cpu_to_be32(num_counters),
		.irq_db_addr = cpu_to_be64(db_array_bus_addr),
		.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
		.irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)),
		.ntfy_blk_msix_base_idx =
					cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
		.queue_format = priv->queue_format,
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);

	return gve_adminq_execute_cmd(priv, &cmd);
}

/* Queue a create-tx-queue command for a single ring. The command is only
 * issued here; gve_adminq_create_tx_queues() kicks the doorbell and waits
 * for the whole batch.
 */
static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_tx_ring *tx = &priv->tx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
	cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.queue_resources_addr =
			cpu_to_be64(tx->q_resources_bus),
		.tx_ring_addr = cpu_to_be64(tx->bus),
		.ntfy_id = cpu_to_be32(tx->ntfy_id),
		.tx_ring_size = cpu_to_be16(priv->tx_desc_cnt),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;

		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
	} else {
		u32 qpl_id = 0;

		if (priv->queue_format == GVE_DQO_RDA_FORMAT)
			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
		else
			qpl_id = tx->dqo.qpl->id;
		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_tx_queue.tx_comp_ring_addr =
			cpu_to_be64(tx->complq_bus_dqo);
		cmd.create_tx_queue.tx_comp_ring_size =
			cpu_to_be16(priv->tx_desc_cnt);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
	int err;
	int i;

	for (i = start_id; i < start_id + num_queues; i++) {
		err = gve_adminq_create_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_rx_ring *rx = &priv->rx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
	cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.ntfy_id = cpu_to_be32(rx->ntfy_id),
		.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
		.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;

		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->desc.bus),
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->data.data_bus),
		cmd.create_rx_queue.index = cpu_to_be32(queue_index);
		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
	} else {
		u32 qpl_id = 0;

		if (priv->queue_format == GVE_DQO_RDA_FORMAT)
			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
		else
			qpl_id = rx->dqo.qpl->id;
		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->dqo.complq.bus);
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->dqo.bufq.bus);
		cmd.create_rx_queue.packet_buffer_size =
			cpu_to_be16(priv->data_buffer_size_dqo);
		cmd.create_rx_queue.rx_buff_ring_size =
			cpu_to_be16(priv->rx_desc_cnt);
		cmd.create_rx_queue.enable_rsc =
			!!(priv->dev->features & NETIF_F_LRO);
		if (priv->header_split_enabled)
			cmd.create_rx_queue.header_buffer_size =
				cpu_to_be16(priv->header_buf_size);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
	cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

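/* Queue destroy commands for tx queues [start_id, start_id + num_queues) and
 * flush them as one batch.
 */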
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
	int err;
	int i;

	for (i = start_id; i < start_id + num_queues; i++) {
		err = gve_adminq_destroy_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
	cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static void gve_set_default_desc_cnt(struct gve_priv *priv,
				     const struct gve_device_descriptor *descriptor)
{
	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);

	/* set default ranges */
	priv->max_tx_desc_cnt = priv->tx_desc_cnt;
	priv->max_rx_desc_cnt = priv->rx_desc_cnt;
	priv->min_tx_desc_cnt = priv->tx_desc_cnt;
	priv->min_rx_desc_cnt = priv->rx_desc_cnt;
}

/* Apply the optional features advertised through device options (jumbo
 * frames, DQO QPL page counts, buffer sizes, modify-ring limits), gated on
 * supported_features_mask.
 */
static void gve_enable_supported_features(struct gve_priv *priv,
					  u32 supported_features_mask,
					  const struct gve_device_option_jumbo_frames
					  *dev_op_jumbo_frames,
					  const struct gve_device_option_dqo_qpl
					  *dev_op_dqo_qpl,
					  const struct gve_device_option_buffer_sizes
					  *dev_op_buffer_sizes,
					  const struct gve_device_option_modify_ring
					  *dev_op_modify_ring)
{
	/* Before control reaches this point, the page-size-capped max MTU from
	 * the gve_device_descriptor field has already been stored in
	 * priv->dev->max_mtu. We overwrite it with the true max MTU below.
	 */
	if (dev_op_jumbo_frames &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
		dev_info(&priv->pdev->dev,
			 "JUMBO FRAMES device option enabled.\n");
		priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
	}

	/* Override pages for qpl for DQO-QPL */
	if (dev_op_dqo_qpl) {
		priv->tx_pages_per_qpl =
			be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
		if (priv->tx_pages_per_qpl == 0)
			priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
	}

	if (dev_op_buffer_sizes &&
	    (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
		priv->max_rx_buffer_size =
			be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
		priv->header_buf_size =
			be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
		dev_info(&priv->pdev->dev,
			 "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
			 priv->max_rx_buffer_size, priv->header_buf_size);
	}

	/* Read and store ring size ranges given by device */
	if (dev_op_modify_ring &&
	    (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
		priv->modify_ring_size_enabled = true;

		/* max ring size for DQO QPL should not be overwritten because of device limit */
		if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
			priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
			priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
		}
		if (priv->default_min_ring_size) {
			/* If device hasn't provided minimums, use default minimums */
			priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
			priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
		} else {
			priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
			priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
		}
	}
}

int gve_adminq_describe_device(struct gve_priv *priv)
{
	struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
	struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
	struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL;
	struct gve_device_descriptor *descriptor;
	u32 supported_features_mask = 0;
	union gve_adminq_command cmd;
	dma_addr_t descriptor_bus;
	int err = 0;
	u8 *mac;
	u16 mtu;

	memset(&cmd, 0, sizeof(cmd));
	descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
				    &descriptor_bus);
	if (!descriptor)
		return -ENOMEM;
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
	cmd.describe_device.device_descriptor_addr =
					cpu_to_be64(descriptor_bus);
	cmd.describe_device.device_descriptor_version =
			cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	cmd.describe_device.available_length =
				cpu_to_be32(GVE_ADMINQ_BUFFER_SIZE);

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto free_device_descriptor;

	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
					 &dev_op_jumbo_frames, &dev_op_dqo_qpl,
					 &dev_op_buffer_sizes,
					 &dev_op_modify_ring);
	if (err)
		goto free_device_descriptor;

	/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
	 * is not set to GqiRda, choose the queue format in a priority order:
	 * DqoRda, DqoQpl, GqiRda, GqiQpl. Use GqiQpl as default.
	 */
	if (dev_op_dqo_rda) {
		priv->queue_format = GVE_DQO_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with DQO RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
	} else if (dev_op_dqo_qpl) {
		priv->queue_format = GVE_DQO_QPL_FORMAT;
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_qpl->supported_features_mask);
	} else if (dev_op_gqi_rda) {
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
	} else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
	} else {
		priv->queue_format = GVE_GQI_QPL_FORMAT;
		if (dev_op_gqi_qpl)
			supported_features_mask =
				be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI QPL queue format.\n");
	}

	/* set default descriptor counts */
	gve_set_default_desc_cnt(priv, descriptor);

	/* DQO supports LRO. */
	if (!gve_is_gqi(priv))
		priv->dev->hw_features |= NETIF_F_LRO;

	priv->max_registered_pages =
				be64_to_cpu(descriptor->max_registered_pages);
	mtu = be16_to_cpu(descriptor->mtu);
	if (mtu < ETH_MIN_MTU) {
		dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
		err = -EINVAL;
		goto free_device_descriptor;
	}
	priv->dev->max_mtu = mtu;
	priv->num_event_counters = be16_to_cpu(descriptor->counters);
	eth_hw_addr_set(priv->dev, descriptor->mac);
	mac = descriptor->mac;
	dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
	priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);

	gve_enable_supported_features(priv, supported_features_mask,
				      dev_op_jumbo_frames, dev_op_dqo_qpl,
				      dev_op_buffer_sizes, dev_op_modify_ring);

free_device_descriptor:
	dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
	return err;
}

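/* Register a queue page list with the device by passing it an array of the
 * DMA addresses of the QPL's pages.
 */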
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl)
{
	struct device *hdev = &priv->pdev->dev;
	u32 num_entries = qpl->num_entries;
	u32 size = num_entries * sizeof(qpl->page_buses[0]);
	union gve_adminq_command cmd;
	dma_addr_t page_list_bus;
	__be64 *page_list;
	int err;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		page_list[i] = cpu_to_be64(qpl->page_buses[i]);

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
	cmd.reg_page_list = (struct gve_adminq_register_page_list) {
		.page_list_id = cpu_to_be32(qpl->id),
		.num_pages = cpu_to_be32(num_entries),
		.page_address_list_addr = cpu_to_be64(page_list_bus),
		.page_size = cpu_to_be64(PAGE_SIZE),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	dma_free_coherent(hdev, size, page_list, page_list_bus);
	return err;
}

int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
	cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
		.page_list_id = cpu_to_be32(page_list_id),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
	cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
		.parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
		.parameter_value = cpu_to_be64(mtu),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
	cmd.report_stats = (struct gve_adminq_report_stats) {
		.stats_report_len = cpu_to_be64(stats_report_len),
		.stats_report_addr = cpu_to_be64(stats_report_addr),
		.interval = cpu_to_be64(interval),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
	cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
		.driver_info_len = cpu_to_be64(driver_info_len),
		.driver_info_addr = cpu_to_be64(driver_info_addr),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_report_link_speed(struct gve_priv *priv)
{
	union gve_adminq_command gvnic_cmd;
	dma_addr_t link_speed_region_bus;
	__be64 *link_speed_region;
	int err;

	link_speed_region =
		dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
				   &link_speed_region_bus, GFP_KERNEL);

	if (!link_speed_region)
		return -ENOMEM;

	memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
	gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
	gvnic_cmd.report_link_speed.link_speed_address =
		cpu_to_be64(link_speed_region_bus);

	err = gve_adminq_execute_cmd(priv, &gvnic_cmd);

	priv->link_speed = be64_to_cpu(*link_speed_region);
	dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
			  link_speed_region_bus);
	return err;
}

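/* Fetch the device's packet type map into a DMA buffer and copy the L3/L4
 * types into the caller's ptype_lut.
 */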
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut)
{
	struct gve_ptype_map *ptype_map;
	union gve_adminq_command cmd;
	dma_addr_t ptype_map_bus;
	int err = 0;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map),
				       &ptype_map_bus, GFP_KERNEL);
	if (!ptype_map)
		return -ENOMEM;

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);
	cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
		.ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),
		.ptype_map_addr = cpu_to_be64(ptype_map_bus),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto err;

	/* Populate ptype_lut. */
	for (i = 0; i < GVE_NUM_PTYPES; i++) {
		ptype_lut->ptypes[i].l3_type =
			ptype_map->ptypes[i].l3_type;
		ptype_lut->ptypes[i].l4_type =
			ptype_map->ptypes[i].l4_type;
	}
err:
	dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map,
			  ptype_map_bus);
	return err;
}