// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_MAX_ADMINQ_RELEASE_CHECK 500
#define GVE_ADMINQ_SLEEP_LEN 20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
	"Expected: length=%d, feature_mask=%x.\n" \
	"Actual: length=%d, feature_mask=%x.\n"

#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"

static
struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
					      struct gve_device_option *option)
{
	void *option_end, *descriptor_end;

	option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
	descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);

	return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}

#define GVE_DEVICE_OPTION_NO_MIN_RING_SIZE 8

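/* Each device option carries an option_id, an option_length and a
 * required_features_mask.  gve_parse_device_option() validates the length
 * and feature mask of every option it recognizes and, on success, stores a
 * pointer to the option's payload in the matching dev_op_* out parameter so
 * that gve_adminq_describe_device() can later decide which features to
 * enable.
 */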
static
void gve_parse_device_option(struct gve_priv *priv,
			     struct gve_device_descriptor *device_descriptor,
			     struct gve_device_option *option,
			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			     struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
			     struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
			     struct gve_device_option_flow_steering **dev_op_flow_steering,
			     struct gve_device_option_modify_ring **dev_op_modify_ring)
{
	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
	u16 option_length = be16_to_cpu(option->option_length);
	u16 option_id = be16_to_cpu(option->option_id);

	/* If the length or feature mask doesn't match, continue without
	 * enabling the feature.
	 */
	switch (option_id) {
	case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
		if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Raw Addressing",
				 GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
				 option_length, req_feat_mask);
			break;
		}

		dev_info(&priv->pdev->dev,
			 "Gqi raw addressing device option enabled.\n");
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		break;
	case GVE_DEV_OPT_ID_GQI_RDA:
		if (option_length < sizeof(**dev_op_gqi_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
		}
		*dev_op_gqi_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_GQI_QPL:
		if (option_length < sizeof(**dev_op_gqi_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
		}
		*dev_op_gqi_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_RDA:
		if (option_length < sizeof(**dev_op_dqo_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
		}
		*dev_op_dqo_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_QPL:
		if (option_length < sizeof(**dev_op_dqo_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO QPL", (int)sizeof(**dev_op_dqo_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO QPL");
		}
		*dev_op_dqo_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
		if (option_length < sizeof(**dev_op_jumbo_frames) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Jumbo Frames",
				 (int)sizeof(**dev_op_jumbo_frames),
				 GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_jumbo_frames)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
				 "Jumbo Frames");
		}
		*dev_op_jumbo_frames = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_BUFFER_SIZES:
		if (option_length < sizeof(**dev_op_buffer_sizes) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Buffer Sizes",
				 (int)sizeof(**dev_op_buffer_sizes),
				 GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_buffer_sizes))
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
				 "Buffer Sizes");
		*dev_op_buffer_sizes = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_MODIFY_RING:
		if (option_length < GVE_DEVICE_OPTION_NO_MIN_RING_SIZE ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Modify Ring", (int)sizeof(**dev_op_modify_ring),
				 GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_modify_ring)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "Modify Ring");
		}

		*dev_op_modify_ring = (void *)(option + 1);

		/* device has not provided min ring size */
		if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
			priv->default_min_ring_size = true;
		break;
	case GVE_DEV_OPT_ID_FLOW_STEERING:
		if (option_length < sizeof(**dev_op_flow_steering) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Flow Steering",
				 (int)sizeof(**dev_op_flow_steering),
				 GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_flow_steering))
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
				 "Flow Steering");
		*dev_op_flow_steering = (void *)(option + 1);
		break;
	default:
		/* If we don't recognize the option just continue
		 * without doing anything.
		 */
		dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
			option_id);
	}
}

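/* The device returns its capabilities as a gve_device_descriptor followed
 * by a packed list of variable-length options:
 *
 *   struct gve_device_descriptor
 *   struct gve_device_option #0, then option_length bytes of payload
 *   struct gve_device_option #1, then option_length bytes of payload
 *   ...
 *
 * gve_get_next_option() returns NULL once the next option would run past
 * descriptor->total_length, which gve_process_device_options() treats as a
 * malformed descriptor.
 */
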
/* Process all device options for a given describe device call. */
static int
gve_process_device_options(struct gve_priv *priv,
			   struct gve_device_descriptor *descriptor,
			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
			   struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
			   struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
			   struct gve_device_option_flow_steering **dev_op_flow_steering,
			   struct gve_device_option_modify_ring **dev_op_modify_ring)
{
	const int num_options = be16_to_cpu(descriptor->num_device_options);
	struct gve_device_option *dev_opt;
	int i;

	/* The options struct directly follows the device descriptor. */
	dev_opt = (void *)(descriptor + 1);
	for (i = 0; i < num_options; i++) {
		struct gve_device_option *next_opt;

		next_opt = gve_get_next_option(descriptor, dev_opt);
		if (!next_opt) {
			dev_err(&priv->dev->dev,
				"options exceed device_descriptor's total length.\n");
			return -EINVAL;
		}

		gve_parse_device_option(priv, descriptor, dev_opt,
					dev_op_gqi_rda, dev_op_gqi_qpl,
					dev_op_dqo_rda, dev_op_jumbo_frames,
					dev_op_dqo_qpl, dev_op_buffer_sizes,
					dev_op_flow_steering, dev_op_modify_ring);
		dev_opt = next_opt;
	}

	return 0;
}

int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
	priv->adminq_pool = dma_pool_create("adminq_pool", dev,
					    GVE_ADMINQ_BUFFER_SIZE, 0, 0);
	if (unlikely(!priv->adminq_pool))
		return -ENOMEM;
	priv->adminq = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
				      &priv->adminq_bus_addr);
	if (unlikely(!priv->adminq)) {
		dma_pool_destroy(priv->adminq_pool);
		return -ENOMEM;
	}

	priv->adminq_mask =
		(GVE_ADMINQ_BUFFER_SIZE / sizeof(union gve_adminq_command)) - 1;
	priv->adminq_prod_cnt = 0;
	priv->adminq_cmd_fail = 0;
	priv->adminq_timeouts = 0;
	priv->adminq_describe_device_cnt = 0;
	priv->adminq_cfg_device_resources_cnt = 0;
	priv->adminq_register_page_list_cnt = 0;
	priv->adminq_unregister_page_list_cnt = 0;
	priv->adminq_create_tx_queue_cnt = 0;
	priv->adminq_create_rx_queue_cnt = 0;
	priv->adminq_destroy_tx_queue_cnt = 0;
	priv->adminq_destroy_rx_queue_cnt = 0;
	priv->adminq_dcfg_device_resources_cnt = 0;
	priv->adminq_set_driver_parameter_cnt = 0;
	priv->adminq_report_stats_cnt = 0;
	priv->adminq_report_link_speed_cnt = 0;
	priv->adminq_get_ptype_map_cnt = 0;
	priv->adminq_query_flow_rules_cnt = 0;
	priv->adminq_cfg_flow_rule_cnt = 0;

	/* Setup Admin queue with the device */
	if (priv->pdev->revision < 0x1) {
		iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
			    &priv->reg_bar0->adminq_pfn);
	} else {
		iowrite16be(GVE_ADMINQ_BUFFER_SIZE,
			    &priv->reg_bar0->adminq_length);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		iowrite32be(priv->adminq_bus_addr >> 32,
			    &priv->reg_bar0->adminq_base_address_hi);
#endif
		iowrite32be(priv->adminq_bus_addr,
			    &priv->reg_bar0->adminq_base_address_lo);
		iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
	}
	mutex_init(&priv->adminq_lock);
	gve_set_admin_queue_ok(priv);
	return 0;
}

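/* Tear-down mirrors gve_adminq_alloc(): revision < 0x1 devices are told to
 * release the queue by zeroing adminq_pfn, newer devices by setting the
 * reset bit in driver_status.  In both cases the driver polls until the
 * device acknowledges, and only warns, rather than freeing early, if it
 * never does, so that the device cannot write into freed memory.
 */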
void gve_adminq_release(struct gve_priv *priv)
{
	int i = 0;

	/* Tell the device the adminq is leaving */
	if (priv->pdev->revision < 0x1) {
		iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
		while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
			/* If this is reached the device is unrecoverable and still
			 * holding memory. Continue looping to avoid memory corruption,
			 * but WARN so it is visible what is going on.
			 */
			if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
				WARN(1, "Unrecoverable platform error!");
			i++;
			msleep(GVE_ADMINQ_SLEEP_LEN);
		}
	} else {
		iowrite32be(GVE_DRIVER_STATUS_RESET_MASK, &priv->reg_bar0->driver_status);
		while (!(ioread32be(&priv->reg_bar0->device_status)
				& GVE_DEVICE_STATUS_DEVICE_IS_RESET)) {
			if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
				WARN(1, "Unrecoverable platform error!");
			i++;
			msleep(GVE_ADMINQ_SLEEP_LEN);
		}
	}
	gve_clear_device_rings_ok(priv);
	gve_clear_device_resources_ok(priv);
	gve_clear_admin_queue_ok(priv);
}

void gve_adminq_free(struct device *dev, struct gve_priv *priv)
{
	if (!gve_get_admin_queue_ok(priv))
		return;
	gve_adminq_release(priv);
	dma_pool_free(priv->adminq_pool, priv->adminq, priv->adminq_bus_addr);
	dma_pool_destroy(priv->adminq_pool);
	gve_clear_admin_queue_ok(priv);
}

static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
}

static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (ioread32be(&priv->reg_bar0->adminq_event_counter)
		    == prod_cnt)
			return true;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}

	return false;
}

static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET) {
		dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
		priv->adminq_cmd_fail++;
	}
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return 0;
	case GVE_ADMINQ_COMMAND_UNSET:
		dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return -EAGAIN;
	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return -ETIME;
	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return -EACCES;
	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return -ENOMEM;
	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return -EOPNOTSUPP;
	default:
		dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
		return -EINVAL;
	}
}

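/* The admin queue is a circular buffer of GVE_ADMINQ_BUFFER_SIZE bytes.
 * Commands live at adminq[i & adminq_mask], where adminq_mask is
 * (GVE_ADMINQ_BUFFER_SIZE / sizeof(union gve_adminq_command)) - 1.
 * adminq_prod_cnt counts commands the driver has written, while the device
 * reports how many it has completed through the adminq_event_counter
 * register; gve_adminq_kick_and_wait() rings the doorbell with the producer
 * count and polls the event counter until the two match, then parses the
 * status of every command completed in between.
 */
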
/* Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 */
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
	int tail, head;
	int i;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head)) {
		dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
		priv->adminq_timeouts++;
		return -ENOTRECOVERABLE;
	}

	for (i = tail; i < head; i++) {
		union gve_adminq_command *cmd;
		u32 status, err;

		cmd = &priv->adminq[i & priv->adminq_mask];
		status = be32_to_cpu(READ_ONCE(cmd->status));
		err = gve_adminq_parse_err(priv, status);
		if (err)
			// Return the first error if we failed.
			return err;
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
				union gve_adminq_command *cmd_orig)
{
	union gve_adminq_command *cmd;
	u32 opcode;
	u32 tail;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);

	// Check if next command will overflow the buffer.
	if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
	    (tail & priv->adminq_mask)) {
		int err;

		// Flush existing commands to make room.
		err = gve_adminq_kick_and_wait(priv);
		if (err)
			return err;

		// Retry.
		tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
		if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
		    (tail & priv->adminq_mask)) {
			// This should never happen. We just flushed the
			// command queue so there should be enough space.
			return -ENOMEM;
		}
	}

	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;

	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
	opcode = be32_to_cpu(READ_ONCE(cmd->opcode));
	if (opcode == GVE_ADMINQ_EXTENDED_COMMAND)
		opcode = be32_to_cpu(cmd->extended_command.inner_opcode);

	switch (opcode) {
	case GVE_ADMINQ_DESCRIBE_DEVICE:
		priv->adminq_describe_device_cnt++;
		break;
	case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
		priv->adminq_cfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_REGISTER_PAGE_LIST:
		priv->adminq_register_page_list_cnt++;
		break;
	case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
		priv->adminq_unregister_page_list_cnt++;
		break;
	case GVE_ADMINQ_CREATE_TX_QUEUE:
		priv->adminq_create_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_CREATE_RX_QUEUE:
		priv->adminq_create_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_TX_QUEUE:
		priv->adminq_destroy_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_RX_QUEUE:
		priv->adminq_destroy_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
		priv->adminq_dcfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_SET_DRIVER_PARAMETER:
		priv->adminq_set_driver_parameter_cnt++;
		break;
	case GVE_ADMINQ_REPORT_STATS:
		priv->adminq_report_stats_cnt++;
		break;
	case GVE_ADMINQ_REPORT_LINK_SPEED:
		priv->adminq_report_link_speed_cnt++;
		break;
	case GVE_ADMINQ_GET_PTYPE_MAP:
		priv->adminq_get_ptype_map_cnt++;
		break;
	case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
		priv->adminq_verify_driver_compatibility_cnt++;
		break;
	case GVE_ADMINQ_QUERY_FLOW_RULES:
		priv->adminq_query_flow_rules_cnt++;
		break;
	case GVE_ADMINQ_CONFIGURE_FLOW_RULE:
		priv->adminq_cfg_flow_rule_cnt++;
		break;
	default:
		dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
	}

	return 0;
}

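/* Issues a single command and rings the doorbell for it, serialized by
 * adminq_lock.  The queue must be idle on entry (event counter equal to the
 * producer count); a mismatch means earlier commands are still outstanding
 * and the call fails with -EINVAL.
 */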
static int gve_adminq_execute_cmd(struct gve_priv *priv,
				  union gve_adminq_command *cmd_orig)
{
	u32 tail, head;
	int err;

	mutex_lock(&priv->adminq_lock);
	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;
	if (tail != head) {
		err = -EINVAL;
		goto out;
	}

	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err)
		goto out;

	err = gve_adminq_kick_and_wait(priv);

out:
	mutex_unlock(&priv->adminq_lock);
	return err;
}

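/* Extended commands carry a payload that does not fit in a regular admin
 * queue entry: the inner command is copied into its own coherent DMA
 * buffer and the queue entry only holds its opcode, length and bus address.
 * gve_adminq_configure_flow_rule() is the only caller in this file.
 */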
static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode,
					   size_t cmd_size, void *cmd_orig)
{
	union gve_adminq_command cmd;
	dma_addr_t inner_cmd_bus;
	void *inner_cmd;
	int err;

	inner_cmd = dma_alloc_coherent(&priv->pdev->dev, cmd_size,
				       &inner_cmd_bus, GFP_KERNEL);
	if (!inner_cmd)
		return -ENOMEM;

	memcpy(inner_cmd, cmd_orig, cmd_size);

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_EXTENDED_COMMAND);
	cmd.extended_command = (struct gve_adminq_extended_command) {
		.inner_opcode = cpu_to_be32(opcode),
		.inner_length = cpu_to_be32(cmd_size),
		.inner_command_addr = cpu_to_be64(inner_cmd_bus),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);

	dma_free_coherent(&priv->pdev->dev, cmd_size, inner_cmd, inner_cmd_bus);
	return err;
}

/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0 then the management vector is last; if it is 1 then
 * the management vector is first.
 *
 * gve arranges the msix vectors so that the management vector is last.
 */
#define GVE_NTFY_BLK_BASE_MSIX_IDX 0
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	cmd.configure_device_resources =
		(struct gve_adminq_configure_device_resources) {
		.counter_array = cpu_to_be64(counter_array_bus_addr),
		.num_counters = cpu_to_be32(num_counters),
		.irq_db_addr = cpu_to_be64(db_array_bus_addr),
		.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
		.irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)),
		.ntfy_blk_msix_base_idx =
			cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
		.queue_format = priv->queue_format,
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);

	return gve_adminq_execute_cmd(priv, &cmd);
}

static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_tx_ring *tx = &priv->tx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
	cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.queue_resources_addr =
			cpu_to_be64(tx->q_resources_bus),
		.tx_ring_addr = cpu_to_be64(tx->bus),
		.ntfy_id = cpu_to_be32(tx->ntfy_id),
		.tx_ring_size = cpu_to_be16(priv->tx_desc_cnt),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;

		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
	} else {
		u32 qpl_id = 0;

		if (priv->queue_format == GVE_DQO_RDA_FORMAT)
			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
		else
			qpl_id = tx->dqo.qpl->id;
		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_tx_queue.tx_comp_ring_addr =
			cpu_to_be64(tx->complq_bus_dqo);
		cmd.create_tx_queue.tx_comp_ring_size =
			cpu_to_be16(priv->tx_desc_cnt);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

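/* Queue creation and destruction are batched: the per-queue helpers above
 * and below only queue their command with gve_adminq_issue_cmd(), and the
 * plural wrappers ring the doorbell once for the whole batch via
 * gve_adminq_kick_and_wait().
 */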
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
	int err;
	int i;

	for (i = start_id; i < start_id + num_queues; i++) {
		err = gve_adminq_create_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
					       union gve_adminq_command *cmd,
					       u32 queue_index)
{
	struct gve_rx_ring *rx = &priv->rx[queue_index];

	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
	cmd->create_rx_queue = (struct gve_adminq_create_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.ntfy_id = cpu_to_be32(rx->ntfy_id),
		.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
		.rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;

		cmd->create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->desc.bus);
		cmd->create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->data.data_bus);
		cmd->create_rx_queue.index = cpu_to_be32(queue_index);
		cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
	} else {
		u32 qpl_id = 0;

		if (priv->queue_format == GVE_DQO_RDA_FORMAT)
			qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
		else
			qpl_id = rx->dqo.qpl->id;
		cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd->create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->dqo.complq.bus);
		cmd->create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->dqo.bufq.bus);
		cmd->create_rx_queue.packet_buffer_size =
			cpu_to_be16(priv->data_buffer_size_dqo);
		cmd->create_rx_queue.rx_buff_ring_size =
			cpu_to_be16(priv->rx_desc_cnt);
		cmd->create_rx_queue.enable_rsc =
			!!(priv->dev->features & NETIF_F_LRO);
		if (priv->header_split_enabled)
			cmd->create_rx_queue.header_buffer_size =
				cpu_to_be16(priv->header_buf_size);
	}
}

static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;

	gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
	return gve_adminq_issue_cmd(priv, &cmd);
}

/* Unlike gve_adminq_create_rx_queue, this actually rings the doorbell */
int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;

	gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
	cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
{
	int err;
	int i;

	for (i = start_id; i < start_id + num_queues; i++) {
		err = gve_adminq_destroy_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
						 u32 queue_index)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
	cmd->destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};
}

static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;

	gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
	return gve_adminq_issue_cmd(priv, &cmd);
}

/* Unlike gve_adminq_destroy_rx_queue, this actually rings the doorbell */
int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;

	gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

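/* Until a MODIFY_RING device option says otherwise, the descriptor's ring
 * sizes are treated as fixed: min, max and default are all set to the same
 * value for both TX and RX.  gve_enable_supported_features() widens the
 * range later if the device advertises ring-size modification.
 */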
static void gve_set_default_desc_cnt(struct gve_priv *priv,
				     const struct gve_device_descriptor *descriptor)
{
	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);

	/* set default ranges */
	priv->max_tx_desc_cnt = priv->tx_desc_cnt;
	priv->max_rx_desc_cnt = priv->rx_desc_cnt;
	priv->min_tx_desc_cnt = priv->tx_desc_cnt;
	priv->min_rx_desc_cnt = priv->rx_desc_cnt;
}

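/* Applies the optional features discovered by gve_process_device_options().
 * Apart from the DQO QPL page-count override, each feature is enabled only
 * if its device option was present and the matching bit is set in
 * supported_features_mask, which comes from the queue-format option chosen
 * in gve_adminq_describe_device().
 */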
static void gve_enable_supported_features(struct gve_priv *priv,
					  u32 supported_features_mask,
					  const struct gve_device_option_jumbo_frames
					  *dev_op_jumbo_frames,
					  const struct gve_device_option_dqo_qpl
					  *dev_op_dqo_qpl,
					  const struct gve_device_option_buffer_sizes
					  *dev_op_buffer_sizes,
					  const struct gve_device_option_flow_steering
					  *dev_op_flow_steering,
					  const struct gve_device_option_modify_ring
					  *dev_op_modify_ring)
{
	/* Before control reaches this point, the page-size-capped max MTU from
	 * the gve_device_descriptor field has already been stored in
	 * priv->dev->max_mtu. We overwrite it with the true max MTU below.
	 */
	if (dev_op_jumbo_frames &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
		dev_info(&priv->pdev->dev,
			 "JUMBO FRAMES device option enabled.\n");
		priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
	}

	/* Override pages for qpl for DQO-QPL */
	if (dev_op_dqo_qpl) {
		priv->tx_pages_per_qpl =
			be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
		if (priv->tx_pages_per_qpl == 0)
			priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
	}

	if (dev_op_buffer_sizes &&
	    (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
		priv->max_rx_buffer_size =
			be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
		priv->header_buf_size =
			be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
		dev_info(&priv->pdev->dev,
			 "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
			 priv->max_rx_buffer_size, priv->header_buf_size);
	}

	/* Read and store ring size ranges given by device */
	if (dev_op_modify_ring &&
	    (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
		priv->modify_ring_size_enabled = true;

		/* max ring size for DQO QPL should not be overwritten because of device limit */
		if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
			priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
			priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
		}
		if (priv->default_min_ring_size) {
			/* If device hasn't provided minimums, use default minimums */
			priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
			priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
		} else {
			priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
			priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
		}
	}

	if (dev_op_flow_steering &&
	    (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) {
		if (dev_op_flow_steering->max_flow_rules) {
			priv->max_flow_rules =
				be32_to_cpu(dev_op_flow_steering->max_flow_rules);
			priv->dev->hw_features |= NETIF_F_NTUPLE;
			dev_info(&priv->pdev->dev,
				 "FLOW STEERING device option enabled with max rule limit of %u.\n",
				 priv->max_flow_rules);
		}
	}
}

int gve_adminq_describe_device(struct gve_priv *priv)
{
	struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
	struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
	struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
	struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL;
	struct gve_device_descriptor *descriptor;
	u32 supported_features_mask = 0;
	union gve_adminq_command cmd;
	dma_addr_t descriptor_bus;
	int err = 0;
	u8 *mac;
	u16 mtu;

	memset(&cmd, 0, sizeof(cmd));
	descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
				    &descriptor_bus);
	if (!descriptor)
		return -ENOMEM;
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
	cmd.describe_device.device_descriptor_addr =
		cpu_to_be64(descriptor_bus);
	cmd.describe_device.device_descriptor_version =
		cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	cmd.describe_device.available_length =
		cpu_to_be32(GVE_ADMINQ_BUFFER_SIZE);

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto free_device_descriptor;

	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
					 &dev_op_jumbo_frames, &dev_op_dqo_qpl,
					 &dev_op_buffer_sizes,
					 &dev_op_flow_steering,
					 &dev_op_modify_ring);
	if (err)
		goto free_device_descriptor;

	/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
	 * is not set to GqiRda, choose the queue format in a priority order:
	 * DqoRda, DqoQpl, GqiRda, GqiQpl. Use GqiQpl as default.
	 */
	if (dev_op_dqo_rda) {
		priv->queue_format = GVE_DQO_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with DQO RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
	} else if (dev_op_dqo_qpl) {
		priv->queue_format = GVE_DQO_QPL_FORMAT;
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_qpl->supported_features_mask);
	} else if (dev_op_gqi_rda) {
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
	} else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
	} else {
		priv->queue_format = GVE_GQI_QPL_FORMAT;
		if (dev_op_gqi_qpl)
			supported_features_mask =
				be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI QPL queue format.\n");
	}

	/* set default descriptor counts */
	gve_set_default_desc_cnt(priv, descriptor);

	/* DQO supports LRO. */
	if (!gve_is_gqi(priv))
		priv->dev->hw_features |= NETIF_F_LRO;

	priv->max_registered_pages =
		be64_to_cpu(descriptor->max_registered_pages);
	mtu = be16_to_cpu(descriptor->mtu);
	if (mtu < ETH_MIN_MTU) {
		dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
		err = -EINVAL;
		goto free_device_descriptor;
	}
	priv->dev->max_mtu = mtu;
	priv->num_event_counters = be16_to_cpu(descriptor->counters);
	eth_hw_addr_set(priv->dev, descriptor->mac);
	mac = descriptor->mac;
	dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
	priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);

	gve_enable_supported_features(priv, supported_features_mask,
				      dev_op_jumbo_frames, dev_op_dqo_qpl,
				      dev_op_buffer_sizes, dev_op_flow_steering,
				      dev_op_modify_ring);

free_device_descriptor:
	dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
	return err;
}

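/* Registers a queue page list with the device.  The page bus addresses are
 * staged in a temporary coherent buffer as big-endian 64-bit values and
 * handed over in a single REGISTER_PAGE_LIST command; the buffer is freed
 * again once the command completes.
 */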
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl)
{
	struct device *hdev = &priv->pdev->dev;
	u32 num_entries = qpl->num_entries;
	u32 size = num_entries * sizeof(qpl->page_buses[0]);
	union gve_adminq_command cmd;
	dma_addr_t page_list_bus;
	__be64 *page_list;
	int err;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		page_list[i] = cpu_to_be64(qpl->page_buses[i]);

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
	cmd.reg_page_list = (struct gve_adminq_register_page_list) {
		.page_list_id = cpu_to_be32(qpl->id),
		.num_pages = cpu_to_be32(num_entries),
		.page_address_list_addr = cpu_to_be64(page_list_bus),
		.page_size = cpu_to_be64(PAGE_SIZE),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	dma_free_coherent(hdev, size, page_list, page_list_bus);
	return err;
}

int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
	cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
		.page_list_id = cpu_to_be32(page_list_id),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
	cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
		.parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
		.parameter_value = cpu_to_be64(mtu),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
	cmd.report_stats = (struct gve_adminq_report_stats) {
		.stats_report_len = cpu_to_be64(stats_report_len),
		.stats_report_addr = cpu_to_be64(stats_report_addr),
		.interval = cpu_to_be64(interval),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
					   u64 driver_info_len,
					   dma_addr_t driver_info_addr)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
	cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
		.driver_info_len = cpu_to_be64(driver_info_len),
		.driver_info_addr = cpu_to_be64(driver_info_addr),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_report_link_speed(struct gve_priv *priv)
{
	union gve_adminq_command gvnic_cmd;
	dma_addr_t link_speed_region_bus;
	__be64 *link_speed_region;
	int err;

	link_speed_region =
		dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
				   &link_speed_region_bus, GFP_KERNEL);

	if (!link_speed_region)
		return -ENOMEM;

	memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
	gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
	gvnic_cmd.report_link_speed.link_speed_address =
		cpu_to_be64(link_speed_region_bus);

	err = gve_adminq_execute_cmd(priv, &gvnic_cmd);

	priv->link_speed = be64_to_cpu(*link_speed_region);
	dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
			  link_speed_region_bus);
	return err;
}

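/* DQO completion descriptors identify packet types by an index into a
 * device-provided table.  This fetches that table into a coherent buffer
 * and copies the l3/l4 types into the driver's ptype_lut so the RX path
 * can translate the indices locally.
 */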
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut)
{
	struct gve_ptype_map *ptype_map;
	union gve_adminq_command cmd;
	dma_addr_t ptype_map_bus;
	int err = 0;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map),
				       &ptype_map_bus, GFP_KERNEL);
	if (!ptype_map)
		return -ENOMEM;

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);
	cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
		.ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),
		.ptype_map_addr = cpu_to_be64(ptype_map_bus),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto err;

	/* Populate ptype_lut. */
	for (i = 0; i < GVE_NUM_PTYPES; i++) {
		ptype_lut->ptypes[i].l3_type =
			ptype_map->ptypes[i].l3_type;
		ptype_lut->ptypes[i].l4_type =
			ptype_map->ptypes[i].l4_type;
	}
err:
	dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map,
			  ptype_map_bus);
	return err;
}

static int
gve_adminq_configure_flow_rule(struct gve_priv *priv,
			       struct gve_adminq_configure_flow_rule *flow_rule_cmd)
{
	int err = gve_adminq_execute_extended_cmd(priv,
			GVE_ADMINQ_CONFIGURE_FLOW_RULE,
			sizeof(struct gve_adminq_configure_flow_rule),
			flow_rule_cmd);

	if (err) {
		dev_err(&priv->pdev->dev, "Timed out configuring the flow rule, triggering reset");
		gve_reset(priv, true);
	} else {
		priv->flow_rules_cache.rules_cache_synced = false;
	}

	return err;
}

int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc)
{
	struct gve_adminq_configure_flow_rule flow_rule_cmd = {
		.opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_ADD),
		.location = cpu_to_be32(loc),
		.rule = *rule,
	};

	return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
}

int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc)
{
	struct gve_adminq_configure_flow_rule flow_rule_cmd = {
		.opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_DEL),
		.location = cpu_to_be32(loc),
	};

	return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
}

int gve_adminq_reset_flow_rules(struct gve_priv *priv)
{
	struct gve_adminq_configure_flow_rule flow_rule_cmd = {
		.opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_RESET),
	};

	return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
}

/* In the DMA memory that the driver allocated for the flow rules query, the device
 * will first write a struct gve_query_flow_rules_descriptor. Next to it, the device
 * will write an array of rules or rule ids, with the count specified in the descriptor.
 * For GVE_FLOW_RULE_QUERY_STATS, the device will only write the descriptor.
 */
static int gve_adminq_process_flow_rules_query(struct gve_priv *priv, u16 query_opcode,
					       struct gve_query_flow_rules_descriptor *descriptor)
{
	struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
	u32 num_queried_rules, total_memory_len, rule_info_len;
	void *rule_info;

	total_memory_len = be32_to_cpu(descriptor->total_length);
	num_queried_rules = be32_to_cpu(descriptor->num_queried_rules);
	rule_info = (void *)(descriptor + 1);

	switch (query_opcode) {
	case GVE_FLOW_RULE_QUERY_RULES:
		rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rules_cache);
		if (sizeof(*descriptor) + rule_info_len != total_memory_len) {
			dev_err(&priv->dev->dev, "flow rules query is out of memory.\n");
			return -ENOMEM;
		}

		memcpy(flow_rules_cache->rules_cache, rule_info, rule_info_len);
		flow_rules_cache->rules_cache_num = num_queried_rules;
		break;
	case GVE_FLOW_RULE_QUERY_IDS:
		rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rule_ids_cache);
		if (sizeof(*descriptor) + rule_info_len != total_memory_len) {
			dev_err(&priv->dev->dev, "flow rule ids query is out of memory.\n");
			return -ENOMEM;
		}

		memcpy(flow_rules_cache->rule_ids_cache, rule_info, rule_info_len);
		flow_rules_cache->rule_ids_cache_num = num_queried_rules;
		break;
	case GVE_FLOW_RULE_QUERY_STATS:
		priv->num_flow_rules = be32_to_cpu(descriptor->num_flow_rules);
		priv->max_flow_rules = be32_to_cpu(descriptor->max_flow_rules);
		return 0;
	default:
		return -EINVAL;
	}

	return 0;
}

int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc)
{
	struct gve_query_flow_rules_descriptor *descriptor;
	union gve_adminq_command cmd;
	dma_addr_t descriptor_bus;
	int err = 0;

	memset(&cmd, 0, sizeof(cmd));
	descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL, &descriptor_bus);
	if (!descriptor)
		return -ENOMEM;

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_FLOW_RULES);
	cmd.query_flow_rules = (struct gve_adminq_query_flow_rules) {
		.opcode = cpu_to_be16(query_opcode),
		.starting_rule_id = cpu_to_be32(starting_loc),
		.available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE),
		.rule_descriptor_addr = cpu_to_be64(descriptor_bus),
	};
	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto out;

	err = gve_adminq_process_flow_rules_query(priv, query_opcode, descriptor);

out:
	dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
	return err;
}