/*
 * Thunderbolt DMA configuration based mailbox support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/delay.h>
#include <linux/slab.h>

#include "dma_port.h"
#include "tb_regs.h"

#define DMA_PORT_CAP			0x3e

#define MAIL_DATA			1
#define MAIL_DATA_DWORDS		16

#define MAIL_IN				17
#define MAIL_IN_CMD_SHIFT		28
#define MAIL_IN_CMD_MASK		GENMASK(31, 28)
#define MAIL_IN_CMD_FLASH_WRITE		0x0
#define MAIL_IN_CMD_FLASH_UPDATE_AUTH	0x1
#define MAIL_IN_CMD_FLASH_READ		0x2
#define MAIL_IN_CMD_POWER_CYCLE		0x4
#define MAIL_IN_DWORDS_SHIFT		24
#define MAIL_IN_DWORDS_MASK		GENMASK(27, 24)
#define MAIL_IN_ADDRESS_SHIFT		2
#define MAIL_IN_ADDRESS_MASK		GENMASK(23, 2)
#define MAIL_IN_CSS			BIT(1)
#define MAIL_IN_OP_REQUEST		BIT(0)

#define MAIL_OUT			18
#define MAIL_OUT_STATUS_RESPONSE	BIT(29)
#define MAIL_OUT_STATUS_CMD_SHIFT	4
#define MAIL_OUT_STATUS_CMD_MASK	GENMASK(7, 4)
#define MAIL_OUT_STATUS_MASK		GENMASK(3, 0)
#define MAIL_OUT_STATUS_COMPLETED	0
#define MAIL_OUT_STATUS_ERR_AUTH	1
#define MAIL_OUT_STATUS_ERR_ACCESS	2

#define DMA_PORT_TIMEOUT		5000 /* ms */
#define DMA_PORT_RETRIES		3

/**
 * struct tb_dma_port - DMA control port
 * @sw: Switch the DMA port belongs to
 * @port: Switch port number where DMA capability is found
 * @base: Start offset of the mailbox registers
 * @buf: Temporary buffer to store a single block
 */
struct tb_dma_port {
	struct tb_switch *sw;
	u8 port;
	u32 base;
	u8 *buf;
};
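
/*
 * Worked example of the MAIL_IN layout above (illustrative only): a
 * request to read 8 dwords of flash starting at dword address 0x100
 * would be composed as
 *
 *	(MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT)	0x20000000
 *	| (8 << MAIL_IN_DWORDS_SHIFT)			0x08000000
 *	| (0x100 << MAIL_IN_ADDRESS_SHIFT)		0x00000400
 *	| MAIL_IN_OP_REQUEST				0x00000001
 *							==> 0x28000401
 *
 * which is exactly what dma_port_flash_read_block() below builds.
 */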
/*
 * When the switch is in safe mode it supports very little functionality
 * so we don't validate that much here.
 */
static bool dma_port_match(const struct tb_cfg_request *req,
			   const struct ctl_pkg *pkg)
{
	u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return true;
	if (pkg->frame.eof != req->response_type)
		return false;
	if (route != tb_cfg_get_route(req->request))
		return false;
	if (pkg->frame.size != req->response_size)
		return false;

	return true;
}

static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
{
	memcpy(req->response, pkg->buffer, req->response_size);
	return true;
}

static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
			 u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_read_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	/* The read response carries the data so it looks like a write packet */
	struct cfg_write_pkg reply;
	struct tb_cfg_result res;

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = sizeof(request);
	req->request_type = TB_CFG_PKG_READ;
	req->response = &reply;
	req->response_size = 12 + 4 * length;
	req->response_type = TB_CFG_PKG_READ;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	if (res.err)
		return res.err;

	memcpy(buffer, &reply.data, 4 * length);
	return 0;
}

static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
			  u32 port, u32 offset, u32 length, int timeout_msec)
{
	struct cfg_write_pkg request = {
		.header = tb_cfg_make_header(route),
		.addr = {
			.seq = 1,
			.port = port,
			.space = TB_CFG_PORT,
			.offset = offset,
			.length = length,
		},
	};
	struct tb_cfg_request *req;
	struct cfg_read_pkg reply;
	struct tb_cfg_result res;

	memcpy(&request.data, buffer, length * 4);

	req = tb_cfg_request_alloc();
	if (!req)
		return -ENOMEM;

	req->match = dma_port_match;
	req->copy = dma_port_copy;
	req->request = &request;
	req->request_size = 12 + 4 * length;
	req->request_type = TB_CFG_PKG_WRITE;
	req->response = &reply;
	req->response_size = sizeof(reply);
	req->response_type = TB_CFG_PKG_WRITE;

	res = tb_cfg_request_sync(ctl, req, timeout_msec);

	tb_cfg_request_put(req);

	return res.err;
}

static int dma_find_port(struct tb_switch *sw)
{
	static const int ports[] = { 3, 5, 7 };
	int i;

	/*
	 * The DMA (NHI) port is either 3, 5 or 7 depending on the
	 * controller. Try all of them. The port type is in the low 24
	 * bits of dword 2 of the port config space.
	 */
	for (i = 0; i < ARRAY_SIZE(ports); i++) {
		u32 type;
		int ret;

		ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), ports[i],
				    2, 1, DMA_PORT_TIMEOUT);
		if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
			return ports[i];
	}

	return -ENODEV;
}
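
/*
 * Sizing note (illustrative, assuming the packet layouts in ctl.h): a
 * config packet is a 12-byte header (the 8-byte route followed by the
 * 4-byte address dword) plus the data dwords, which is where the
 * "12 + 4 * length" packet sizes above come from. A single dword read
 * thus expects a 16-byte response.
 */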
/**
 * dma_port_alloc() - Finds DMA control port of a switch
 * @sw: Switch from which to find the DMA port
 *
 * The function checks whether the switch NHI port supports the DMA
 * configuration based mailbox capability and if it does, allocates and
 * initializes the DMA port structure. Returns %NULL if the capability
 * was not found.
 *
 * The DMA control port is functional also when the switch is in safe
 * mode.
 */
struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
{
	struct tb_dma_port *dma;
	int port;

	port = dma_find_port(sw);
	if (port < 0)
		return NULL;

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma)
		return NULL;

	dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
	if (!dma->buf) {
		kfree(dma);
		return NULL;
	}

	dma->sw = sw;
	dma->port = port;
	dma->base = DMA_PORT_CAP;

	return dma;
}

/**
 * dma_port_free() - Release DMA control port structure
 * @dma: DMA control port
 */
void dma_port_free(struct tb_dma_port *dma)
{
	if (dma) {
		kfree(dma->buf);
		kfree(dma);
	}
}

static int dma_port_wait_for_completion(struct tb_dma_port *dma,
					unsigned int timeout)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout);
	struct tb_switch *sw = dma->sw;

	do {
		int ret;
		u32 in;

		ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
				    dma->base + MAIL_IN, 1, 50);
		if (ret) {
			if (ret != -ETIMEDOUT)
				return ret;
		} else if (!(in & MAIL_IN_OP_REQUEST)) {
			/* The switch clears the request bit when it is done */
			return 0;
		}

		usleep_range(50, 100);
	} while (time_before(jiffies, end));

	return -ETIMEDOUT;
}

static int status_to_errno(u32 status)
{
	switch (status & MAIL_OUT_STATUS_MASK) {
	case MAIL_OUT_STATUS_COMPLETED:
		return 0;
	case MAIL_OUT_STATUS_ERR_AUTH:
		return -EINVAL;
	case MAIL_OUT_STATUS_ERR_ACCESS:
		return -EACCES;
	}

	return -EIO;
}

static int dma_port_request(struct tb_dma_port *dma, u32 in,
			    unsigned int timeout)
{
	struct tb_switch *sw = dma->sw;
	u32 out;
	int ret;

	ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
			     dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	ret = dma_port_wait_for_completion(dma, timeout);
	if (ret)
		return ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return status_to_errno(out);
}

static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
				     void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwaddress = address / 4;
	dwords = size / 4;

	in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
	if (dwords < MAIL_DATA_DWORDS)
		in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}
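
/*
 * Note on the DWORDS field (inferred from the code in this file): the
 * field is only four bits wide, so a full 16-dword transfer cannot be
 * encoded directly. Reads leave the field zero to mean "all 16 dwords"
 * (hence the dwords < MAIL_DATA_DWORDS check above), whereas writes
 * below program the field with dwords - 1.
 */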
static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
				      const void *buf, u32 size)
{
	struct tb_switch *sw = dma->sw;
	u32 in, dwaddress, dwords;
	int ret;

	dwords = size / 4;

	/* Write the block to MAIL_DATA registers */
	ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
			     dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;

	/* CSS header write is always done to the same magic address */
	if (address >= DMA_PORT_CSS_ADDRESS) {
		dwaddress = DMA_PORT_CSS_ADDRESS;
		in |= MAIL_IN_CSS;
	} else {
		dwaddress = address / 4;
	}

	in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
	in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
}

/**
 * dma_port_flash_read() - Read from active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of active region
 * @buf: Buffer where the data is read to
 * @size: Size of the buffer
 */
int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
			void *buf, size_t size)
{
	unsigned int retries = DMA_PORT_RETRIES;

	do {
		unsigned int offset;
		size_t nbytes;
		int ret;

		/* Read a whole aligned block and copy out the wanted bytes */
		offset = address & 3;
		nbytes = min_t(size_t, size + offset, MAIL_DATA_DWORDS * 4);

		ret = dma_port_flash_read_block(dma, address, dma->buf,
						ALIGN(nbytes, 4));
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		nbytes -= offset;
		memcpy(buf, dma->buf + offset, nbytes);

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}

/**
 * dma_port_flash_write() - Write to non-active flash region
 * @dma: DMA control port
 * @address: Address relative to the start of non-active region
 * @buf: Data to write
 * @size: Size of the buffer
 *
 * Writes a block of data to the non-active flash region of the switch.
 * If the address is given as %DMA_PORT_CSS_ADDRESS the block is written
 * using the CSS command.
 */
int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
			 const void *buf, size_t size)
{
	unsigned int retries = DMA_PORT_RETRIES;
	unsigned int offset;

	if (address >= DMA_PORT_CSS_ADDRESS) {
		offset = 0;
		if (size > DMA_PORT_CSS_MAX_SIZE)
			return -E2BIG;
	} else {
		offset = address & 3;
		address = address & ~3;
	}

	do {
		u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
		int ret;

		/* Stage the data through the bounce buffer */
		memcpy(dma->buf + offset, buf, nbytes);

		ret = dma_port_flash_write_block(dma, address, dma->buf,
						 nbytes);
		if (ret) {
			if (ret == -ETIMEDOUT) {
				if (retries--)
					continue;
				ret = -EIO;
			}
			return ret;
		}

		size -= nbytes;
		address += nbytes;
		buf += nbytes;
	} while (size > 0);

	return 0;
}
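
/*
 * Typical upgrade flow using the routines in this file (illustrative
 * sketch only; the caller-side variables are hypothetical and error
 * handling is abbreviated):
 *
 *	ret = dma_port_flash_write(dma, 0, image, image_size);
 *	if (!ret)
 *		ret = dma_port_flash_update_auth(dma);
 *
 *	// The switch (and the host controller, if this is the root
 *	// switch) resets. Once it is back:
 *
 *	if (dma_port_flash_update_auth_status(dma, &status) > 0 && status)
 *		dma_port_power_cycle(dma);
 */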
/**
 * dma_port_flash_update_auth() - Starts flash authentication cycle
 * @dma: DMA control port
 *
 * Starts the flash update authentication cycle. If the image in the
 * non-active area is valid, the switch starts the upgrade process in
 * which the active and non-active areas get swapped at the end. The
 * caller should use dma_port_flash_update_auth_status() to get the
 * status of this command, because if the switch in question is the root
 * switch, the Thunderbolt host controller gets reset as well.
 */
int dma_port_flash_update_auth(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}

/**
 * dma_port_flash_update_auth_status() - Reads status of update auth command
 * @dma: DMA control port
 * @status: Status code of the operation
 *
 * The function checks if there is status available from the last update
 * auth command. Returns %0 if there is no status and no further action
 * is required. If there is status, %1 is returned instead and @status
 * holds the failure code.
 *
 * A negative return means there was an error reading status from the
 * switch.
 */
int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
{
	struct tb_switch *sw = dma->sw;
	u32 out, cmd;
	int ret;

	ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
			    dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
	if (ret)
		return ret;

	/* Check if the status relates to flash update auth */
	cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
	if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
		if (status)
			*status = out & MAIL_OUT_STATUS_MASK;

		/* Reset is needed in any case */
		return 1;
	}

	return 0;
}

/**
 * dma_port_power_cycle() - Power cycles the switch
 * @dma: DMA control port
 *
 * Triggers a power cycle of the switch.
 */
int dma_port_power_cycle(struct tb_dma_port *dma)
{
	u32 in;

	in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
	in |= MAIL_IN_OP_REQUEST;

	return dma_port_request(dma, in, 150);
}
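
/*
 * Worked example of MAIL_OUT decoding (illustrative only): if
 * dma_port_flash_update_auth_status() reads MAIL_OUT as 0x11, the
 * command field (bits 7:4) decodes to 0x1 (FLASH_UPDATE_AUTH) and the
 * status field (bits 3:0) to 1 (ERR_AUTH), so the function returns 1
 * with *status == 1 and the caller knows the image was rejected.
 */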