// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_MAX_ADMINQ_RELEASE_CHECK	500
#define GVE_ADMINQ_SLEEP_LEN		20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK	100

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
"Expected: length=%d, feature_mask=%x.\n" \
"Actual: length=%d, feature_mask=%x.\n"

#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"

static
struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
					      struct gve_device_option *option)
{
	void *option_end, *descriptor_end;

	option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
	descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);

	return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}

#define GVE_DEVICE_OPTION_NO_MIN_RING_SIZE	8

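/* Validate a single device option against its expected length and required
 * feature mask and, on success, save a pointer to the option payload in the
 * matching dev_op_* out-parameter for the caller to act on.
 */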
static
void gve_parse_device_option(struct gve_priv *priv,
			     struct gve_device_descriptor *device_descriptor,
			     struct gve_device_option *option,
			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
			     struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
			     struct gve_device_option_modify_ring **dev_op_modify_ring)
{
	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
	u16 option_length = be16_to_cpu(option->option_length);
	u16 option_id = be16_to_cpu(option->option_id);

	/* If the length or feature mask doesn't match, continue without
	 * enabling the feature.
	 */
	switch (option_id) {
	case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
		if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Raw Addressing",
				 GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
				 option_length, req_feat_mask);
			break;
		}

		dev_info(&priv->pdev->dev,
			 "Gqi raw addressing device option enabled.\n");
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		break;
	case GVE_DEV_OPT_ID_GQI_RDA:
		if (option_length < sizeof(**dev_op_gqi_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
		}
		*dev_op_gqi_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_GQI_QPL:
		if (option_length < sizeof(**dev_op_gqi_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
		}
		*dev_op_gqi_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_RDA:
		if (option_length < sizeof(**dev_op_dqo_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
		}
		*dev_op_dqo_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_QPL:
		if (option_length < sizeof(**dev_op_dqo_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO QPL", (int)sizeof(**dev_op_dqo_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO QPL");
		}
		*dev_op_dqo_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
		if (option_length < sizeof(**dev_op_jumbo_frames) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Jumbo Frames",
				 (int)sizeof(**dev_op_jumbo_frames),
				 GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_jumbo_frames)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
				 "Jumbo Frames");
		}
		*dev_op_jumbo_frames = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_BUFFER_SIZES:
		if (option_length < sizeof(**dev_op_buffer_sizes) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Buffer Sizes",
				 (int)sizeof(**dev_op_buffer_sizes),
				 GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_buffer_sizes))
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
				 "Buffer Sizes");
		*dev_op_buffer_sizes = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_MODIFY_RING:
		if (option_length < GVE_DEVICE_OPTION_NO_MIN_RING_SIZE ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Modify Ring", (int)sizeof(**dev_op_modify_ring),
				 GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_modify_ring)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "Modify Ring");
		}

		*dev_op_modify_ring = (void *)(option + 1);

		/* device has not provided min ring size */
		if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
			priv->default_min_ring_size = true;
		break;
	default:
		/* If we don't recognize the option just continue
		 * without doing anything.
		 */
		dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
			option_id);
	}
}

/* Process all device options for a given describe device call. */
static int
gve_process_device_options(struct gve_priv *priv,
			   struct gve_device_descriptor *descriptor,
			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			   struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
			   struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
			   struct gve_device_option_modify_ring **dev_op_modify_ring)
{
	const int num_options = be16_to_cpu(descriptor->num_device_options);
	struct gve_device_option *dev_opt;
	int i;

	/* The options struct directly follows the device descriptor. */
	dev_opt = (void *)(descriptor + 1);
	for (i = 0; i < num_options; i++) {
		struct gve_device_option *next_opt;

		next_opt = gve_get_next_option(descriptor, dev_opt);
		if (!next_opt) {
			dev_err(&priv->dev->dev,
				"options exceed device_descriptor's total length.\n");
			return -EINVAL;
		}

		gve_parse_device_option(priv, descriptor, dev_opt,
					dev_op_gqi_rda, dev_op_gqi_qpl,
					dev_op_dqo_rda, dev_op_jumbo_frames,
					dev_op_dqo_qpl, dev_op_buffer_sizes,
					dev_op_modify_ring);
		dev_opt = next_opt;
	}

	return 0;
}

int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
	priv->adminq_pool = dma_pool_create("adminq_pool", dev,
					    GVE_ADMINQ_BUFFER_SIZE, 0, 0);
	if (unlikely(!priv->adminq_pool))
		return -ENOMEM;
	priv->adminq = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
				      &priv->adminq_bus_addr);
	if (unlikely(!priv->adminq)) {
		dma_pool_destroy(priv->adminq_pool);
		return -ENOMEM;
	}

	priv->adminq_mask =
		(GVE_ADMINQ_BUFFER_SIZE / sizeof(union gve_adminq_command)) - 1;
	priv->adminq_prod_cnt = 0;
	priv->adminq_cmd_fail = 0;
	priv->adminq_timeouts = 0;
	priv->adminq_describe_device_cnt = 0;
	priv->adminq_cfg_device_resources_cnt = 0;
	priv->adminq_register_page_list_cnt = 0;
	priv->adminq_unregister_page_list_cnt = 0;
	priv->adminq_create_tx_queue_cnt = 0;
	priv->adminq_create_rx_queue_cnt = 0;
	priv->adminq_destroy_tx_queue_cnt = 0;
	priv->adminq_destroy_rx_queue_cnt = 0;
	priv->adminq_dcfg_device_resources_cnt = 0;
	priv->adminq_set_driver_parameter_cnt = 0;
	priv->adminq_report_stats_cnt = 0;
	priv->adminq_report_link_speed_cnt = 0;
	priv->adminq_get_ptype_map_cnt = 0;

	/* Setup Admin queue with the device */
	if (priv->pdev->revision < 0x1) {
		iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
			    &priv->reg_bar0->adminq_pfn);
	} else {
		iowrite16be(GVE_ADMINQ_BUFFER_SIZE,
			    &priv->reg_bar0->adminq_length);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		iowrite32be(priv->adminq_bus_addr >> 32,
			    &priv->reg_bar0->adminq_base_address_hi);
#endif
		iowrite32be(priv->adminq_bus_addr,
			    &priv->reg_bar0->adminq_base_address_lo);
		iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
	}
	gve_set_admin_queue_ok(priv);
	return 0;
}

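/* Unregister the admin queue with the device and poll until the device
 * acknowledges the release.
 */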
void gve_adminq_release(struct gve_priv *priv)
{
	int i = 0;

	/* Tell the device the adminq is leaving */
	if (priv->pdev->revision < 0x1) {
		iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
		while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
			/* If this is reached the device is unrecoverable and still
			 * holding memory. Continue looping to avoid memory corruption,
			 * but WARN so it is visible what is going on.
			 */
			if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
				WARN(1, "Unrecoverable platform error!");
			i++;
			msleep(GVE_ADMINQ_SLEEP_LEN);
		}
	} else {
		iowrite32be(GVE_DRIVER_STATUS_RESET_MASK, &priv->reg_bar0->driver_status);
		while (!(ioread32be(&priv->reg_bar0->device_status)
				& GVE_DEVICE_STATUS_DEVICE_IS_RESET)) {
			if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
				WARN(1, "Unrecoverable platform error!");
			i++;
			msleep(GVE_ADMINQ_SLEEP_LEN);
		}
	}
	gve_clear_device_rings_ok(priv);
	gve_clear_device_resources_ok(priv);
	gve_clear_admin_queue_ok(priv);
}

void gve_adminq_free(struct device *dev, struct gve_priv *priv)
{
	if (!gve_get_admin_queue_ok(priv))
		return;
	gve_adminq_release(priv);
	dma_pool_free(priv->adminq_pool, priv->adminq, priv->adminq_bus_addr);
	dma_pool_destroy(priv->adminq_pool);
	gve_clear_admin_queue_ok(priv);
}

static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
}

static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (ioread32be(&priv->reg_bar0->adminq_event_counter)
		    == prod_cnt)
			return true;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}

	return false;
}

static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET) {
		dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
		priv->adminq_cmd_fail++;
	}
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return 0;
	case GVE_ADMINQ_COMMAND_UNSET:
		dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return -EAGAIN;
	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return -ETIME;
	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return -EACCES;
	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return -ENOMEM;
	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return -EOPNOTSUPP;
	default:
		dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
		return -EINVAL;
	}
}

/* Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 */
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
	int tail, head;
	int i;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head)) {
		dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
		priv->adminq_timeouts++;
		return -ENOTRECOVERABLE;
	}

	for (i = tail; i < head; i++) {
		union gve_adminq_command *cmd;
		u32 status, err;

		cmd = &priv->adminq[i & priv->adminq_mask];
		status = be32_to_cpu(READ_ONCE(cmd->status));
		err = gve_adminq_parse_err(priv, status);
		if (err)
			// Return the first error if we failed.
			return err;
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
				union gve_adminq_command *cmd_orig)
{
	union gve_adminq_command *cmd;
	u32 opcode;
	u32 tail;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);

	// Check if next command will overflow the buffer.
	if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
	    (tail & priv->adminq_mask)) {
		int err;

		// Flush existing commands to make room.
		err = gve_adminq_kick_and_wait(priv);
		if (err)
			return err;

		// Retry.
		tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
		if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
		    (tail & priv->adminq_mask)) {
			// This should never happen. We just flushed the
			// command queue so there should be enough space.
			return -ENOMEM;
		}
	}

	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;

	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
	opcode = be32_to_cpu(READ_ONCE(cmd->opcode));

	switch (opcode) {
	case GVE_ADMINQ_DESCRIBE_DEVICE:
		priv->adminq_describe_device_cnt++;
		break;
	case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
		priv->adminq_cfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_REGISTER_PAGE_LIST:
		priv->adminq_register_page_list_cnt++;
		break;
	case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
		priv->adminq_unregister_page_list_cnt++;
		break;
	case GVE_ADMINQ_CREATE_TX_QUEUE:
		priv->adminq_create_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_CREATE_RX_QUEUE:
		priv->adminq_create_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_TX_QUEUE:
		priv->adminq_destroy_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_RX_QUEUE:
		priv->adminq_destroy_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
		priv->adminq_dcfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_SET_DRIVER_PARAMETER:
		priv->adminq_set_driver_parameter_cnt++;
		break;
	case GVE_ADMINQ_REPORT_STATS:
		priv->adminq_report_stats_cnt++;
		break;
	case GVE_ADMINQ_REPORT_LINK_SPEED:
		priv->adminq_report_link_speed_cnt++;
		break;
	case GVE_ADMINQ_GET_PTYPE_MAP:
		priv->adminq_get_ptype_map_cnt++;
		break;
	case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
		priv->adminq_verify_driver_compatibility_cnt++;
		break;
	default:
		dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 * The caller is also responsible for making sure there are no commands
 * waiting to be executed.
 */
static int gve_adminq_execute_cmd(struct gve_priv *priv,
				  union gve_adminq_command *cmd_orig)
{
	u32 tail, head;
	int err;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;
	if (tail != head)
		// This is not a valid path
		return -EINVAL;

	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err)
		return err;

	return gve_adminq_kick_and_wait(priv);
}

/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0 then the management vector is last, if it is 1 then
 * the management vector is first.
 *
 * gve arranges the msix vectors so that the management vector is last.
 */
#define GVE_NTFY_BLK_BASE_MSIX_IDX	0
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	cmd.configure_device_resources =
		(struct gve_adminq_configure_device_resources) {
		.counter_array = cpu_to_be64(counter_array_bus_addr),
		.num_counters = cpu_to_be32(num_counters),
		.irq_db_addr = cpu_to_be64(db_array_bus_addr),
		.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
		.irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)),
		.ntfy_blk_msix_base_idx =
					cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
		.queue_format = priv->queue_format,
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);

	return gve_adminq_execute_cmd(priv, &cmd);
}

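/* Queue a CREATE_TX_QUEUE command for one tx ring. The command is only
 * issued here; the caller is still responsible for kicking the admin queue
 * doorbell and waiting for completion.
 */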
static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_tx_ring *tx = &priv->tx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
	cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.queue_resources_addr =
			cpu_to_be64(tx->q_resources_bus),
		.tx_ring_addr = cpu_to_be64(tx->bus),
		.ntfy_id = cpu_to_be32(tx->ntfy_id),
		.tx_ring_size = cpu_to_be16(priv->tx_desc_cnt),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;

		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
	} else {
		u32 qpl_id = 0;

		if (priv->queue_format == GVE_DQO_RDA_FORMAT)
			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
		else
			qpl_id = tx->dqo.qpl->id;
		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_tx_queue.tx_comp_ring_addr =
			cpu_to_be64(tx->complq_bus_dqo);
		cmd.create_tx_queue.tx_comp_ring_size =
			cpu_to_be16(priv->tx_desc_cnt);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
	int err;
	int i;

	for (i = start_id; i < start_id + num_queues; i++) {
		err = gve_adminq_create_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
					       union gve_adminq_command *cmd,
					       u32 queue_index)
{
	struct gve_rx_ring *rx = &priv->rx[queue_index];

	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
	cmd->create_rx_queue = (struct gve_adminq_create_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.ntfy_id = cpu_to_be32(rx->ntfy_id),
		.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
		.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;

		cmd->create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->desc.bus);
		cmd->create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->data.data_bus);
		cmd->create_rx_queue.index = cpu_to_be32(queue_index);
		cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
	} else {
		u32 qpl_id = 0;

		if (priv->queue_format == GVE_DQO_RDA_FORMAT)
			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
		else
			qpl_id = rx->dqo.qpl->id;
		cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd->create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->dqo.complq.bus);
		cmd->create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->dqo.bufq.bus);
		cmd->create_rx_queue.packet_buffer_size =
			cpu_to_be16(priv->data_buffer_size_dqo);
		cmd->create_rx_queue.rx_buff_ring_size =
			cpu_to_be16(priv->rx_desc_cnt);
		cmd->create_rx_queue.enable_rsc =
			!!(priv->dev->features & NETIF_F_LRO);
		if (priv->header_split_enabled)
			cmd->create_rx_queue.header_buffer_size =
				cpu_to_be16(priv->header_buf_size);
	}
}

static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;

	gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
	return gve_adminq_issue_cmd(priv, &cmd);
}

/* Unlike gve_adminq_create_rx_queue, this actually rings the doorbell */
int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;

	gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
	cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
	int err;
	int i;

	for (i = start_id; i < start_id + num_queues; i++) {
		err = gve_adminq_destroy_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
						 u32 queue_index)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
	cmd->destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};
}

static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;

	gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
	return gve_adminq_issue_cmd(priv, &cmd);
}

/* Unlike gve_adminq_destroy_rx_queue, this actually rings the doorbell */
int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;

	gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

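/* Record the ring sizes advertised in the device descriptor and use them as
 * the initial min/max bounds until a Modify Ring option overrides them.
 */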
static void gve_set_default_desc_cnt(struct gve_priv *priv,
			const struct gve_device_descriptor *descriptor)
{
	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);

	/* set default ranges */
	priv->max_tx_desc_cnt = priv->tx_desc_cnt;
	priv->max_rx_desc_cnt = priv->rx_desc_cnt;
	priv->min_tx_desc_cnt = priv->tx_desc_cnt;
	priv->min_rx_desc_cnt = priv->rx_desc_cnt;
}

static void gve_enable_supported_features(struct gve_priv *priv,
					  u32 supported_features_mask,
					  const struct gve_device_option_jumbo_frames
					  *dev_op_jumbo_frames,
					  const struct gve_device_option_dqo_qpl
					  *dev_op_dqo_qpl,
					  const struct gve_device_option_buffer_sizes
					  *dev_op_buffer_sizes,
					  const struct gve_device_option_modify_ring
					  *dev_op_modify_ring)
{
	/* Before control reaches this point, the page-size-capped max MTU from
	 * the gve_device_descriptor field has already been stored in
	 * priv->dev->max_mtu. We overwrite it with the true max MTU below.
	 */
	if (dev_op_jumbo_frames &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
		dev_info(&priv->pdev->dev,
			 "JUMBO FRAMES device option enabled.\n");
		priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
	}

	/* Override pages for qpl for DQO-QPL */
	if (dev_op_dqo_qpl) {
		priv->tx_pages_per_qpl =
			be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
		if (priv->tx_pages_per_qpl == 0)
			priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
	}

	if (dev_op_buffer_sizes &&
	    (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
		priv->max_rx_buffer_size =
			be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
		priv->header_buf_size =
			be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
		dev_info(&priv->pdev->dev,
			 "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
			 priv->max_rx_buffer_size, priv->header_buf_size);
	}

	/* Read and store ring size ranges given by device */
	if (dev_op_modify_ring &&
	    (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
		priv->modify_ring_size_enabled = true;

		/* max ring size for DQO QPL should not be overwritten because of device limit */
		if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
			priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
			priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
		}
		if (priv->default_min_ring_size) {
			/* If device hasn't provided minimums, use default minimums */
			priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
			priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
		} else {
			priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
			priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
		}
	}
}

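/* Fetch the device descriptor, choose a queue format from the advertised
 * device options, and cache device limits (MTU, MAC, counters, ring sizes)
 * in priv.
 */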
int gve_adminq_describe_device(struct gve_priv *priv)
{
	struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
	struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
	struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL;
	struct gve_device_descriptor *descriptor;
	u32 supported_features_mask = 0;
	union gve_adminq_command cmd;
	dma_addr_t descriptor_bus;
	int err = 0;
	u8 *mac;
	u16 mtu;

	memset(&cmd, 0, sizeof(cmd));
	descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
				    &descriptor_bus);
	if (!descriptor)
		return -ENOMEM;
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
	cmd.describe_device.device_descriptor_addr =
						cpu_to_be64(descriptor_bus);
	cmd.describe_device.device_descriptor_version =
			cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	cmd.describe_device.available_length =
				cpu_to_be32(GVE_ADMINQ_BUFFER_SIZE);

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto free_device_descriptor;

	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
					 &dev_op_jumbo_frames, &dev_op_dqo_qpl,
					 &dev_op_buffer_sizes,
					 &dev_op_modify_ring);
	if (err)
		goto free_device_descriptor;

	/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
	 * is not set to GqiRda, choose the queue format in a priority order:
	 * DqoRda, DqoQpl, GqiRda, GqiQpl. Use GqiQpl as default.
	 */
	if (dev_op_dqo_rda) {
		priv->queue_format = GVE_DQO_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with DQO RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
	} else if (dev_op_dqo_qpl) {
		priv->queue_format = GVE_DQO_QPL_FORMAT;
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_qpl->supported_features_mask);
	} else if (dev_op_gqi_rda) {
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
	} else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
	} else {
		priv->queue_format = GVE_GQI_QPL_FORMAT;
		if (dev_op_gqi_qpl)
			supported_features_mask =
				be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI QPL queue format.\n");
	}

	/* set default descriptor counts */
	gve_set_default_desc_cnt(priv, descriptor);

	/* DQO supports LRO. */
	if (!gve_is_gqi(priv))
		priv->dev->hw_features |= NETIF_F_LRO;

	priv->max_registered_pages =
				be64_to_cpu(descriptor->max_registered_pages);
	mtu = be16_to_cpu(descriptor->mtu);
	if (mtu < ETH_MIN_MTU) {
		dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
		err = -EINVAL;
		goto free_device_descriptor;
	}
	priv->dev->max_mtu = mtu;
	priv->num_event_counters = be16_to_cpu(descriptor->counters);
	eth_hw_addr_set(priv->dev, descriptor->mac);
	mac = descriptor->mac;
	dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
	priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);

	gve_enable_supported_features(priv, supported_features_mask,
				      dev_op_jumbo_frames, dev_op_dqo_qpl,
				      dev_op_buffer_sizes, dev_op_modify_ring);

free_device_descriptor:
	dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
	return err;
}

int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl)
{
	struct device *hdev = &priv->pdev->dev;
	u32 num_entries = qpl->num_entries;
	u32 size = num_entries * sizeof(qpl->page_buses[0]);
	union gve_adminq_command cmd;
	dma_addr_t page_list_bus;
	__be64 *page_list;
	int err;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		page_list[i] = cpu_to_be64(qpl->page_buses[i]);

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
	cmd.reg_page_list = (struct gve_adminq_register_page_list) {
		.page_list_id = cpu_to_be32(qpl->id),
		.num_pages = cpu_to_be32(num_entries),
		.page_address_list_addr = cpu_to_be64(page_list_bus),
		.page_size = cpu_to_be64(PAGE_SIZE),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	dma_free_coherent(hdev, size, page_list, page_list_bus);
	return err;
}

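/* Release a previously registered queue page list on the device. */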
int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
	cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
		.page_list_id = cpu_to_be32(page_list_id),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
	cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
		.parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
		.parameter_value = cpu_to_be64(mtu),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
	cmd.report_stats = (struct gve_adminq_report_stats) {
		.stats_report_len = cpu_to_be64(stats_report_len),
		.stats_report_addr = cpu_to_be64(stats_report_addr),
		.interval = cpu_to_be64(interval),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
	cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
		.driver_info_len = cpu_to_be64(driver_info_len),
		.driver_info_addr = cpu_to_be64(driver_info_addr),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_report_link_speed(struct gve_priv *priv)
{
	union gve_adminq_command gvnic_cmd;
	dma_addr_t link_speed_region_bus;
	__be64 *link_speed_region;
	int err;

	link_speed_region =
		dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
				   &link_speed_region_bus, GFP_KERNEL);

	if (!link_speed_region)
		return -ENOMEM;

	memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
	gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
	gvnic_cmd.report_link_speed.link_speed_address =
		cpu_to_be64(link_speed_region_bus);

	err = gve_adminq_execute_cmd(priv, &gvnic_cmd);

	priv->link_speed = be64_to_cpu(*link_speed_region);
	dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
			  link_speed_region_bus);
	return err;
}

int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut)
{
	struct gve_ptype_map *ptype_map;
	union gve_adminq_command cmd;
	dma_addr_t ptype_map_bus;
	int err = 0;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map),
				       &ptype_map_bus, GFP_KERNEL);
	if (!ptype_map)
		return -ENOMEM;

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);
	cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
		.ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),
		.ptype_map_addr = cpu_to_be64(ptype_map_bus),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto err;

	/* Populate ptype_lut. */
	for (i = 0; i < GVE_NUM_PTYPES; i++) {
		ptype_lut->ptypes[i].l3_type =
			ptype_map->ptypes[i].l3_type;
		ptype_lut->ptypes[i].l4_type =
			ptype_map->ptypes[i].l4_type;
	}
err:
	dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map,
			  ptype_map_bus);
	return err;
}