// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/devcoredump.h>
#include <linux/firmware.h>
#include <linux/limits.h>
#include <linux/mhi.h>
#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include "sahara.h"

#define SAHARA_HELLO_CMD		0x1  /* Min protocol version 1.0 */
#define SAHARA_HELLO_RESP_CMD		0x2  /* Min protocol version 1.0 */
#define SAHARA_READ_DATA_CMD		0x3  /* Min protocol version 1.0 */
#define SAHARA_END_OF_IMAGE_CMD		0x4  /* Min protocol version 1.0 */
#define SAHARA_DONE_CMD			0x5  /* Min protocol version 1.0 */
#define SAHARA_DONE_RESP_CMD		0x6  /* Min protocol version 1.0 */
#define SAHARA_RESET_CMD		0x7  /* Min protocol version 1.0 */
#define SAHARA_RESET_RESP_CMD		0x8  /* Min protocol version 1.0 */
#define SAHARA_MEM_DEBUG_CMD		0x9  /* Min protocol version 2.0 */
#define SAHARA_MEM_READ_CMD		0xa  /* Min protocol version 2.0 */
#define SAHARA_CMD_READY_CMD		0xb  /* Min protocol version 2.1 */
#define SAHARA_SWITCH_MODE_CMD		0xc  /* Min protocol version 2.1 */
#define SAHARA_EXECUTE_CMD		0xd  /* Min protocol version 2.1 */
#define SAHARA_EXECUTE_RESP_CMD		0xe  /* Min protocol version 2.1 */
#define SAHARA_EXECUTE_DATA_CMD		0xf  /* Min protocol version 2.1 */
#define SAHARA_MEM_DEBUG64_CMD		0x10 /* Min protocol version 2.5 */
#define SAHARA_MEM_READ64_CMD		0x11 /* Min protocol version 2.5 */
#define SAHARA_READ_DATA64_CMD		0x12 /* Min protocol version 2.8 */
#define SAHARA_RESET_STATE_CMD		0x13 /* Min protocol version 2.9 */
#define SAHARA_WRITE_DATA_CMD		0x14 /* Min protocol version 3.0 */

#define SAHARA_PACKET_MAX_SIZE		0xffffU /* MHI_MAX_MTU */
#define SAHARA_TRANSFER_MAX_SIZE	0x80000
#define SAHARA_READ_MAX_SIZE		0xfff0U /* Avoid unaligned requests */
#define SAHARA_NUM_TX_BUF		DIV_ROUND_UP(SAHARA_TRANSFER_MAX_SIZE,\
						     SAHARA_PACKET_MAX_SIZE)
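/*
 * Worked example of the above: SAHARA_TRANSFER_MAX_SIZE is 0x80000 (512 KiB)
 * and SAHARA_PACKET_MAX_SIZE is 0xffff, so DIV_ROUND_UP() yields 9 TX
 * buffers. This matches the buffer sizing discussion in sahara_mhi_probe().
 */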
#define SAHARA_IMAGE_ID_NONE		U32_MAX

#define SAHARA_VERSION			2
#define SAHARA_SUCCESS			0
#define SAHARA_TABLE_ENTRY_STR_LEN	20

#define SAHARA_MODE_IMAGE_TX_PENDING	0x0
#define SAHARA_MODE_IMAGE_TX_COMPLETE	0x1
#define SAHARA_MODE_MEMORY_DEBUG	0x2
#define SAHARA_MODE_COMMAND		0x3

#define SAHARA_HELLO_LENGTH		0x30
#define SAHARA_READ_DATA_LENGTH		0x14
#define SAHARA_END_OF_IMAGE_LENGTH	0x10
#define SAHARA_DONE_LENGTH		0x8
#define SAHARA_RESET_LENGTH		0x8
#define SAHARA_MEM_DEBUG64_LENGTH	0x18
#define SAHARA_MEM_READ64_LENGTH	0x18

struct sahara_packet {
	__le32 cmd;
	__le32 length;

	union {
		struct {
			__le32 version;
			__le32 version_compat;
			__le32 max_length;
			__le32 mode;
		} hello;
		struct {
			__le32 version;
			__le32 version_compat;
			__le32 status;
			__le32 mode;
		} hello_resp;
		struct {
			__le32 image;
			__le32 offset;
			__le32 length;
		} read_data;
		struct {
			__le32 image;
			__le32 status;
		} end_of_image;
		struct {
			__le64 table_address;
			__le64 table_length;
		} memory_debug64;
		struct {
			__le64 memory_address;
			__le64 memory_length;
		} memory_read64;
	};
};

struct sahara_debug_table_entry64 {
	__le64 type;
	__le64 address;
	__le64 length;
	char description[SAHARA_TABLE_ENTRY_STR_LEN];
	char filename[SAHARA_TABLE_ENTRY_STR_LEN];
};

struct sahara_dump_table_entry {
	u64 type;
	u64 address;
	u64 length;
	char description[SAHARA_TABLE_ENTRY_STR_LEN];
	char filename[SAHARA_TABLE_ENTRY_STR_LEN];
};

#define SAHARA_DUMP_V1_MAGIC 0x1234567890abcdef
#define SAHARA_DUMP_V1_VER   1
struct sahara_memory_dump_meta_v1 {
	u64 magic;
	u64 version;
	u64 dump_size;
	u64 table_size;
};

/*
 * Layout of crashdump provided to user via devcoredump
 *	+------------------------------------------+
 *	|         Crashdump Meta structure         |
 *	| type: struct sahara_memory_dump_meta_v1  |
 *	+------------------------------------------+
 *	|             Crashdump Table              |
 *	| type: array of struct                    |
 *	|       sahara_dump_table_entry            |
 *	|                                          |
 *	|                                          |
 *	+------------------------------------------+
 *	|                Crashdump                 |
 *	|                                          |
 *	|                                          |
 *	|                                          |
 *	|                                          |
 *	|                                          |
 *	+------------------------------------------+
 *
 * First is the metadata header. Userspace can use the magic number to verify
 * the content type, and then check the version for the rest of the format.
 * New versions should keep the magic number location/value, and version
 * location, but increment the version value.
 *
 * For v1, the metadata lists the size of the entire dump (header + table +
 * dump) and the size of the table. Then comes the dump image table, which
 * describes the contents of the dump. Finally all the images are listed in
 * order, with no deadspace in between. Userspace can use the sizes listed in
 * the image table to reconstruct the individual images.
 */
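/*
 * Illustrative sketch (not part of this driver) of how userspace might walk
 * the v1 blob described above, assuming it has been read from the devcoredump
 * node into "dump"; save_file() is a hypothetical helper:
 *
 *	struct sahara_memory_dump_meta_v1 *meta = (void *)dump;
 *	struct sahara_dump_table_entry *table = (void *)(meta + 1);
 *	u64 nents = meta->table_size / sizeof(*table);
 *	char *data = (char *)table + nents * sizeof(*table);
 *	u64 i;
 *
 *	if (meta->magic != SAHARA_DUMP_V1_MAGIC || meta->version != SAHARA_DUMP_V1_VER)
 *		return -EINVAL;
 *
 *	for (i = 0; i < nents; i++) {
 *		save_file(table[i].filename, data, table[i].length);
 *		data += table[i].length;
 *	}
 *
 * This relies on both table entry formats being 64 bytes, so table_size (the
 * device's table size) also equals the size of the table written to the blob.
 */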
struct sahara_context {
	struct sahara_packet *tx[SAHARA_NUM_TX_BUF];
	struct sahara_packet *rx;
	struct work_struct fw_work;
	struct work_struct dump_work;
	struct mhi_device *mhi_dev;
	const char * const *image_table;
	u32 table_size;
	u32 active_image_id;
	const struct firmware *firmware;
	u64 dump_table_address;
	u64 dump_table_length;
	size_t rx_size;
	size_t rx_size_requested;
	void *mem_dump;
	size_t mem_dump_sz;
	struct sahara_dump_table_entry *dump_image;
	u64 dump_image_offset;
	void *mem_dump_freespace;
	u64 dump_images_left;
	bool is_mem_dump_mode;
};

static const char * const aic100_image_table[] = {
	[1] = "qcom/aic100/fw1.bin",
	[2] = "qcom/aic100/fw2.bin",
	[4] = "qcom/aic100/fw4.bin",
	[5] = "qcom/aic100/fw5.bin",
	[6] = "qcom/aic100/fw6.bin",
	[8] = "qcom/aic100/fw8.bin",
	[9] = "qcom/aic100/fw9.bin",
	[10] = "qcom/aic100/fw10.bin",
};

static const char * const aic200_image_table[] = {
	[5] = "qcom/aic200/uefi.elf",
	[12] = "qcom/aic200/aic200-nsp.bin",
	[23] = "qcom/aic200/aop.mbn",
	[32] = "qcom/aic200/tz.mbn",
	[33] = "qcom/aic200/hypvm.mbn",
	[39] = "qcom/aic200/aic200_abl.elf",
	[40] = "qcom/aic200/apdp.mbn",
	[41] = "qcom/aic200/devcfg.mbn",
	[42] = "qcom/aic200/sec.elf",
	[43] = "qcom/aic200/aic200-hlos.elf",
	[49] = "qcom/aic200/shrm.elf",
	[50] = "qcom/aic200/cpucp.elf",
	[51] = "qcom/aic200/aop_devcfg.mbn",
	[57] = "qcom/aic200/cpucp_dtbs.elf",
	[62] = "qcom/aic200/uefi_dtbs.elf",
	[63] = "qcom/aic200/xbl_ac_config.mbn",
	[64] = "qcom/aic200/tz_ac_config.mbn",
	[65] = "qcom/aic200/hyp_ac_config.mbn",
	[66] = "qcom/aic200/pdp.elf",
	[67] = "qcom/aic200/pdp_cdb.elf",
	[68] = "qcom/aic200/sdi.mbn",
	[69] = "qcom/aic200/dcd.mbn",
	[73] = "qcom/aic200/gearvm.mbn",
	[74] = "qcom/aic200/sti.bin",
	[75] = "qcom/aic200/pvs.bin",
};

static int sahara_find_image(struct sahara_context *context, u32 image_id)
{
	int ret;

	if (image_id == context->active_image_id)
		return 0;

	if (context->active_image_id != SAHARA_IMAGE_ID_NONE) {
		dev_err(&context->mhi_dev->dev, "image id %d is not valid as %d is active\n",
			image_id, context->active_image_id);
		return -EINVAL;
	}

	if (image_id >= context->table_size || !context->image_table[image_id]) {
		dev_err(&context->mhi_dev->dev, "request for unknown image: %d\n", image_id);
		return -EINVAL;
	}

	/*
	 * This image might be optional. The device may continue without it.
	 * Only the device knows. Suppress error messages that could suggest a
	 * problem when we were actually able to continue.
	 */
	ret = firmware_request_nowarn(&context->firmware,
				      context->image_table[image_id],
				      &context->mhi_dev->dev);
	if (ret) {
		dev_dbg(&context->mhi_dev->dev, "request for image id %d / file %s failed %d\n",
			image_id, context->image_table[image_id], ret);
		return ret;
	}

	context->active_image_id = image_id;

	return 0;
}

static void sahara_release_image(struct sahara_context *context)
{
	if (context->active_image_id != SAHARA_IMAGE_ID_NONE)
		release_firmware(context->firmware);
	context->active_image_id = SAHARA_IMAGE_ID_NONE;
}

static void sahara_send_reset(struct sahara_context *context)
{
	int ret;

	context->is_mem_dump_mode = false;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_RESET_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_RESET_LENGTH);

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_RESET_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to send reset response %d\n", ret);
}

static void sahara_hello(struct sahara_context *context)
{
	int ret;

	dev_dbg(&context->mhi_dev->dev,
		"HELLO cmd received. length:%d version:%d version_compat:%d max_length:%d mode:%d\n",
		le32_to_cpu(context->rx->length),
		le32_to_cpu(context->rx->hello.version),
		le32_to_cpu(context->rx->hello.version_compat),
		le32_to_cpu(context->rx->hello.max_length),
		le32_to_cpu(context->rx->hello.mode));

	if (le32_to_cpu(context->rx->length) != SAHARA_HELLO_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed hello packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}
	if (le32_to_cpu(context->rx->hello.version) != SAHARA_VERSION) {
		dev_err(&context->mhi_dev->dev, "Unsupported hello packet - version %d\n",
			le32_to_cpu(context->rx->hello.version));
		return;
	}

	if (le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_PENDING &&
	    le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_IMAGE_TX_COMPLETE &&
	    le32_to_cpu(context->rx->hello.mode) != SAHARA_MODE_MEMORY_DEBUG) {
		dev_err(&context->mhi_dev->dev, "Unsupported hello packet - mode %d\n",
			le32_to_cpu(context->rx->hello.mode));
		return;
	}

	context->tx[0]->cmd = cpu_to_le32(SAHARA_HELLO_RESP_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_HELLO_LENGTH);
	context->tx[0]->hello_resp.version = cpu_to_le32(SAHARA_VERSION);
	context->tx[0]->hello_resp.version_compat = cpu_to_le32(SAHARA_VERSION);
	context->tx[0]->hello_resp.status = cpu_to_le32(SAHARA_SUCCESS);
	context->tx[0]->hello_resp.mode = context->rx->hello_resp.mode;

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_HELLO_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to send hello response %d\n", ret);
}

static void sahara_read_data(struct sahara_context *context)
{
	u32 image_id, data_offset, data_len, pkt_data_len;
	int ret;
	int i;

	dev_dbg(&context->mhi_dev->dev,
		"READ_DATA cmd received. length:%d image:%d offset:%d data_length:%d\n",
		le32_to_cpu(context->rx->length),
		le32_to_cpu(context->rx->read_data.image),
		le32_to_cpu(context->rx->read_data.offset),
		le32_to_cpu(context->rx->read_data.length));

	if (le32_to_cpu(context->rx->length) != SAHARA_READ_DATA_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}

	image_id = le32_to_cpu(context->rx->read_data.image);
	data_offset = le32_to_cpu(context->rx->read_data.offset);
	data_len = le32_to_cpu(context->rx->read_data.length);

	ret = sahara_find_image(context, image_id);
	if (ret) {
		sahara_send_reset(context);
		return;
	}

	/*
	 * Image is released when the device is done with it via
	 * SAHARA_END_OF_IMAGE_CMD. sahara_send_reset() will either cause the
	 * device to retry the operation with a modification, or decide to be
	 * done with the image and trigger SAHARA_END_OF_IMAGE_CMD.
	 * sahara_release_image() is called from SAHARA_END_OF_IMAGE_CMD
	 * processing and is not needed here on error.
	 */

	if (data_len > SAHARA_TRANSFER_MAX_SIZE) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data len %d exceeds max xfer size %d\n",
			data_len, SAHARA_TRANSFER_MAX_SIZE);
		sahara_send_reset(context);
		return;
	}

	if (data_offset >= context->firmware->size) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d exceeds file size %zu\n",
			data_offset, context->firmware->size);
		sahara_send_reset(context);
		return;
	}

	if (size_add(data_offset, data_len) > context->firmware->size) {
		dev_err(&context->mhi_dev->dev, "Malformed read_data packet - data offset %d and length %d exceeds file size %zu\n",
			data_offset, data_len, context->firmware->size);
		sahara_send_reset(context);
		return;
	}

	for (i = 0; i < SAHARA_NUM_TX_BUF && data_len; ++i) {
		pkt_data_len = min(data_len, SAHARA_PACKET_MAX_SIZE);

		memcpy(context->tx[i], &context->firmware->data[data_offset], pkt_data_len);

		data_offset += pkt_data_len;
		data_len -= pkt_data_len;

		ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE,
				    context->tx[i], pkt_data_len,
				    !data_len ? MHI_EOT : MHI_CHAIN);
		if (ret) {
			dev_err(&context->mhi_dev->dev, "Unable to send read_data response %d\n",
				ret);
			return;
		}
	}
}

static void sahara_end_of_image(struct sahara_context *context)
{
	int ret;

	dev_dbg(&context->mhi_dev->dev,
		"END_OF_IMAGE cmd received. length:%d image:%d status:%d\n",
		le32_to_cpu(context->rx->length),
		le32_to_cpu(context->rx->end_of_image.image),
		le32_to_cpu(context->rx->end_of_image.status));

	if (le32_to_cpu(context->rx->length) != SAHARA_END_OF_IMAGE_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}

	if (context->active_image_id != SAHARA_IMAGE_ID_NONE &&
	    le32_to_cpu(context->rx->end_of_image.image) != context->active_image_id) {
		dev_err(&context->mhi_dev->dev, "Malformed end_of_image packet - image %d is not the active image\n",
			le32_to_cpu(context->rx->end_of_image.image));
		return;
	}

	sahara_release_image(context);

	if (le32_to_cpu(context->rx->end_of_image.status))
		return;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_DONE_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_DONE_LENGTH);

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_DONE_LENGTH, MHI_EOT);
	if (ret)
		dev_dbg(&context->mhi_dev->dev, "Unable to send done response %d\n", ret);
}
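/*
 * Crashdump collection, as implemented below: the device announces a dump
 * table via MEM_DEBUG64, the host fetches and parses that table with a
 * MEM_READ64 request, then pulls each listed image in chunks with further
 * MEM_READ64 requests. Once everything has been copied into a single buffer
 * it is handed to devcoredump and a reset is sent to the device.
 */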
static void sahara_memory_debug64(struct sahara_context *context)
{
	int ret;

	dev_dbg(&context->mhi_dev->dev,
		"MEMORY DEBUG64 cmd received. length:%d table_address:%#llx table_length:%#llx\n",
		le32_to_cpu(context->rx->length),
		le64_to_cpu(context->rx->memory_debug64.table_address),
		le64_to_cpu(context->rx->memory_debug64.table_length));

	if (le32_to_cpu(context->rx->length) != SAHARA_MEM_DEBUG64_LENGTH) {
		dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - length %d\n",
			le32_to_cpu(context->rx->length));
		return;
	}

	context->dump_table_address = le64_to_cpu(context->rx->memory_debug64.table_address);
	context->dump_table_length = le64_to_cpu(context->rx->memory_debug64.table_length);

	if (context->dump_table_length % sizeof(struct sahara_debug_table_entry64) != 0 ||
	    !context->dump_table_length) {
		dev_err(&context->mhi_dev->dev, "Malformed memory debug64 packet - table length %lld\n",
			context->dump_table_length);
		return;
	}

	/*
	 * From this point, the protocol flips. We make memory_read requests to
	 * the device, and the device responds with the raw data. If the device
	 * has an error, it will send an End of Image command. First we need to
	 * request the memory dump table so that we know where all the pieces
	 * of the dump are that we can consume.
	 */

	context->is_mem_dump_mode = true;

	/*
	 * Assume that the table is smaller than our MTU so that we can read it
	 * in one shot. The spec does not put an upper limit on the table, but
	 * no known device will exceed this.
	 */
	if (context->dump_table_length > SAHARA_PACKET_MAX_SIZE) {
		dev_err(&context->mhi_dev->dev, "Memory dump table length %lld exceeds supported size. Discarding dump\n",
			context->dump_table_length);
		sahara_send_reset(context);
		return;
	}

	context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
	context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_table_address);
	context->tx[0]->memory_read64.memory_length = cpu_to_le64(context->dump_table_length);

	context->rx_size_requested = context->dump_table_length;

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_MEM_READ64_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to send read for dump table %d\n", ret);
}
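/*
 * Typical image-transfer exchange handled by the dispatcher below: the device
 * opens with HELLO and the host answers with HELLO_RESP. The device then
 * issues READ_DATA requests, which the host satisfies from the requested
 * firmware file, and closes each file with END_OF_IMAGE, to which the host
 * replies with DONE on success. A MEM_DEBUG64 command instead switches the
 * driver into memory-dump mode (see above).
 */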
static void sahara_processing(struct work_struct *work)
{
	struct sahara_context *context = container_of(work, struct sahara_context, fw_work);
	int ret;

	switch (le32_to_cpu(context->rx->cmd)) {
	case SAHARA_HELLO_CMD:
		sahara_hello(context);
		break;
	case SAHARA_READ_DATA_CMD:
		sahara_read_data(context);
		break;
	case SAHARA_END_OF_IMAGE_CMD:
		sahara_end_of_image(context);
		break;
	case SAHARA_DONE_RESP_CMD:
		/* Intentionally do nothing as we don't need to exit an app */
		break;
	case SAHARA_RESET_RESP_CMD:
		/* Intentionally do nothing as we don't need to exit an app */
		break;
	case SAHARA_MEM_DEBUG64_CMD:
		sahara_memory_debug64(context);
		break;
	default:
		dev_err(&context->mhi_dev->dev, "Unknown command %d\n",
			le32_to_cpu(context->rx->cmd));
		break;
	}

	ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
			    SAHARA_PACKET_MAX_SIZE, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);
}

static void sahara_parse_dump_table(struct sahara_context *context)
{
	struct sahara_dump_table_entry *image_out_table;
	struct sahara_debug_table_entry64 *dev_table;
	struct sahara_memory_dump_meta_v1 *dump_meta;
	u64 table_nents;
	u64 dump_length;
	int ret;
	u64 i;

	table_nents = context->dump_table_length / sizeof(*dev_table);
	context->dump_images_left = table_nents;
	dump_length = 0;

	dev_table = (struct sahara_debug_table_entry64 *)(context->rx);
	for (i = 0; i < table_nents; ++i) {
		/* Do not trust the device, ensure the strings are terminated */
		dev_table[i].description[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;
		dev_table[i].filename[SAHARA_TABLE_ENTRY_STR_LEN - 1] = 0;

		dump_length = size_add(dump_length, le64_to_cpu(dev_table[i].length));
		if (dump_length == SIZE_MAX) {
			/* Discard the dump */
			sahara_send_reset(context);
			return;
		}

		dev_dbg(&context->mhi_dev->dev,
			"Memory dump table entry %lld type: %lld address: %#llx length: %#llx description: \"%s\" filename \"%s\"\n",
			i,
			le64_to_cpu(dev_table[i].type),
			le64_to_cpu(dev_table[i].address),
			le64_to_cpu(dev_table[i].length),
			dev_table[i].description,
			dev_table[i].filename);
	}

	dump_length = size_add(dump_length, sizeof(*dump_meta));
	if (dump_length == SIZE_MAX) {
		/* Discard the dump */
		sahara_send_reset(context);
		return;
	}
	dump_length = size_add(dump_length, size_mul(sizeof(*image_out_table), table_nents));
	if (dump_length == SIZE_MAX) {
		/* Discard the dump */
		sahara_send_reset(context);
		return;
	}

	context->mem_dump_sz = dump_length;
	context->mem_dump = vzalloc(dump_length);
	if (!context->mem_dump) {
		/* Discard the dump */
		sahara_send_reset(context);
		return;
	}

	/* Populate the dump metadata and table for userspace */
	dump_meta = context->mem_dump;
	dump_meta->magic = SAHARA_DUMP_V1_MAGIC;
	dump_meta->version = SAHARA_DUMP_V1_VER;
	dump_meta->dump_size = dump_length;
	dump_meta->table_size = context->dump_table_length;

	image_out_table = context->mem_dump + sizeof(*dump_meta);
	for (i = 0; i < table_nents; ++i) {
		image_out_table[i].type = le64_to_cpu(dev_table[i].type);
		image_out_table[i].address = le64_to_cpu(dev_table[i].address);
		image_out_table[i].length = le64_to_cpu(dev_table[i].length);
		strscpy(image_out_table[i].description, dev_table[i].description,
			SAHARA_TABLE_ENTRY_STR_LEN);
		strscpy(image_out_table[i].filename,
			dev_table[i].filename,
			SAHARA_TABLE_ENTRY_STR_LEN);
	}

	context->mem_dump_freespace = &image_out_table[i];

	/* Done parsing the table, switch to image dump mode */
	context->dump_table_length = 0;

	/* Request the first chunk of the first image */
	context->dump_image = &image_out_table[0];
	dump_length = min(context->dump_image->length, SAHARA_READ_MAX_SIZE);
	/* Avoid requesting EOI sized data so that we can identify errors */
	if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
		dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;

	context->dump_image_offset = dump_length;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
	context->tx[0]->memory_read64.memory_address = cpu_to_le64(context->dump_image->address);
	context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);

	context->rx_size_requested = dump_length;

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_MEM_READ64_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to send read for dump content %d\n", ret);
}
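/*
 * Dump images are pulled in chunks of at most SAHARA_READ_MAX_SIZE. A chunk
 * request is never exactly SAHARA_END_OF_IMAGE_LENGTH bytes, otherwise
 * sahara_dump_processing() could not tell a successful response of that size
 * apart from an END_OF_IMAGE error packet, which is also that length.
 */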
static void sahara_parse_dump_image(struct sahara_context *context)
{
	u64 dump_length;
	int ret;

	memcpy(context->mem_dump_freespace, context->rx, context->rx_size);
	context->mem_dump_freespace += context->rx_size;

	if (context->dump_image_offset >= context->dump_image->length) {
		/* Need to move to next image */
		context->dump_image++;
		context->dump_images_left--;
		context->dump_image_offset = 0;

		if (!context->dump_images_left) {
			/* Dump done */
			dev_coredumpv(context->mhi_dev->mhi_cntrl->cntrl_dev,
				      context->mem_dump,
				      context->mem_dump_sz,
				      GFP_KERNEL);
			context->mem_dump = NULL;
			sahara_send_reset(context);
			return;
		}
	}

	/* Get next image chunk */
	dump_length = context->dump_image->length - context->dump_image_offset;
	dump_length = min(dump_length, SAHARA_READ_MAX_SIZE);
	/* Avoid requesting EOI sized data so that we can identify errors */
	if (dump_length == SAHARA_END_OF_IMAGE_LENGTH)
		dump_length = SAHARA_END_OF_IMAGE_LENGTH / 2;

	context->tx[0]->cmd = cpu_to_le32(SAHARA_MEM_READ64_CMD);
	context->tx[0]->length = cpu_to_le32(SAHARA_MEM_READ64_LENGTH);
	context->tx[0]->memory_read64.memory_address =
		cpu_to_le64(context->dump_image->address + context->dump_image_offset);
	context->tx[0]->memory_read64.memory_length = cpu_to_le64(dump_length);

	context->dump_image_offset += dump_length;
	context->rx_size_requested = dump_length;

	ret = mhi_queue_buf(context->mhi_dev, DMA_TO_DEVICE, context->tx[0],
			    SAHARA_MEM_READ64_LENGTH, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev,
			"Unable to send read for dump content %d\n", ret);
}

static void sahara_dump_processing(struct work_struct *work)
{
	struct sahara_context *context = container_of(work, struct sahara_context, dump_work);
	int ret;

	/*
	 * We should get the expected raw data, but if the device has an error
	 * it is supposed to send EOI with an error code.
	 */
	if (context->rx_size != context->rx_size_requested &&
	    context->rx_size != SAHARA_END_OF_IMAGE_LENGTH) {
		dev_err(&context->mhi_dev->dev,
			"Unexpected response to read_data. Expected size: %#zx got: %#zx\n",
			context->rx_size_requested,
			context->rx_size);
		goto error;
	}

	if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
	    le32_to_cpu(context->rx->cmd) == SAHARA_END_OF_IMAGE_CMD) {
		dev_err(&context->mhi_dev->dev,
			"Unexpected EOI response to read_data. Status: %d\n",
			le32_to_cpu(context->rx->end_of_image.status));
		goto error;
	}

	if (context->rx_size == SAHARA_END_OF_IMAGE_LENGTH &&
	    le32_to_cpu(context->rx->cmd) != SAHARA_END_OF_IMAGE_CMD) {
		dev_err(&context->mhi_dev->dev,
			"Invalid EOI response to read_data. CMD: %d\n",
			le32_to_cpu(context->rx->cmd));
		goto error;
	}

	/*
	 * Need to know if we received the dump table, or part of a dump image.
	 * Since we get raw data, we cannot tell from the data itself. Instead,
	 * we use the stored dump_table_length, which we zero after we read and
	 * process the entire table.
	 */
	if (context->dump_table_length)
		sahara_parse_dump_table(context);
	else
		sahara_parse_dump_image(context);

	ret = mhi_queue_buf(context->mhi_dev, DMA_FROM_DEVICE, context->rx,
			    SAHARA_PACKET_MAX_SIZE, MHI_EOT);
	if (ret)
		dev_err(&context->mhi_dev->dev, "Unable to requeue rx buf %d\n", ret);

	return;

error:
	vfree(context->mem_dump);
	context->mem_dump = NULL;
	sahara_send_reset(context);
}

static int sahara_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	struct sahara_context *context;
	int ret;
	int i;

	context = devm_kzalloc(&mhi_dev->dev, sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	context->rx = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
	if (!context->rx)
		return -ENOMEM;

	/*
	 * AIC100 defines SAHARA_TRANSFER_MAX_SIZE as the largest value it
	 * will request for READ_DATA. This is larger than
	 * SAHARA_PACKET_MAX_SIZE, and we need 9x SAHARA_PACKET_MAX_SIZE to
	 * cover SAHARA_TRANSFER_MAX_SIZE. When the remote side issues a
	 * READ_DATA, it requires a transfer of the exact size requested. We
	 * can use MHI_CHAIN to link multiple buffers into a single transfer
	 * but the remote side will not consume the buffers until it sees an
	 * EOT, thus we need to allocate enough buffers to put in the tx fifo
	 * to cover an entire READ_DATA request of the max size.
	 */
	for (i = 0; i < SAHARA_NUM_TX_BUF; ++i) {
		context->tx[i] = devm_kzalloc(&mhi_dev->dev, SAHARA_PACKET_MAX_SIZE, GFP_KERNEL);
		if (!context->tx[i])
			return -ENOMEM;
	}

	context->mhi_dev = mhi_dev;
	INIT_WORK(&context->fw_work, sahara_processing);
	INIT_WORK(&context->dump_work, sahara_dump_processing);

	if (!strcmp(mhi_dev->mhi_cntrl->name, "AIC200")) {
		context->image_table = aic200_image_table;
		context->table_size = ARRAY_SIZE(aic200_image_table);
	} else {
		context->image_table = aic100_image_table;
		context->table_size = ARRAY_SIZE(aic100_image_table);
	}

	context->active_image_id = SAHARA_IMAGE_ID_NONE;
	dev_set_drvdata(&mhi_dev->dev, context);

	ret = mhi_prepare_for_transfer(mhi_dev);
	if (ret)
		return ret;

	ret = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, context->rx, SAHARA_PACKET_MAX_SIZE, MHI_EOT);
	if (ret) {
		mhi_unprepare_from_transfer(mhi_dev);
		return ret;
	}

	return 0;
}

static void sahara_mhi_remove(struct mhi_device *mhi_dev)
{
	struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);

	cancel_work_sync(&context->fw_work);
	cancel_work_sync(&context->dump_work);
	vfree(context->mem_dump);
	sahara_release_image(context);
	mhi_unprepare_from_transfer(mhi_dev);
}

static void sahara_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
}
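/*
 * Download (device-to-host) completions only record how much data arrived and
 * defer all parsing to a workqueue; is_mem_dump_mode selects between the
 * firmware-transfer and crashdump handlers.
 */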
static void sahara_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result)
{
	struct sahara_context *context = dev_get_drvdata(&mhi_dev->dev);

	if (!mhi_result->transaction_status) {
		context->rx_size = mhi_result->bytes_xferd;
		if (context->is_mem_dump_mode)
			schedule_work(&context->dump_work);
		else
			schedule_work(&context->fw_work);
	}
}

static const struct mhi_device_id sahara_mhi_match_table[] = {
	{ .chan = "QAIC_SAHARA", },
	{},
};

static struct mhi_driver sahara_mhi_driver = {
	.id_table = sahara_mhi_match_table,
	.remove = sahara_mhi_remove,
	.probe = sahara_mhi_probe,
	.ul_xfer_cb = sahara_mhi_ul_xfer_cb,
	.dl_xfer_cb = sahara_mhi_dl_xfer_cb,
	.driver = {
		.name = "sahara",
	},
};

int sahara_register(void)
{
	return mhi_driver_register(&sahara_mhi_driver);
}

void sahara_unregister(void)
{
	mhi_driver_unregister(&sahara_mhi_driver);
}