/*
 * drivers/s390/cio/qdio_setup.c
 *
 * qdio queue initialization
 *
 * Copyright (C) IBM Corp. 2008
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio.h"
#include "qdio_debug.h"

static struct kmem_cache *qdio_q_cache;
static struct kmem_cache *qdio_aob_cache;

struct qaob *qdio_allocate_aob(void)
{
        return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(qdio_allocate_aob);

void qdio_release_aob(struct qaob *aob)
{
        kmem_cache_free(qdio_aob_cache, aob);
}
EXPORT_SYMBOL_GPL(qdio_release_aob);
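/*
 * Hypothetical caller sketch (illustration only, not part of this file):
 * the AOB cache is allocated with GFP_ATOMIC and may fail, so a caller
 * must check for NULL before using the zero-initialized AOB:
 *
 *	struct qaob *aob = qdio_allocate_aob();
 *
 *	if (!aob)
 *		return -ENOMEM;
 *	... use aob as an asynchronous-completion buffer ...
 *	qdio_release_aob(aob);
 */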
/*
 * qebsm is only available under 64bit but the adapter sets the feature
 * flag anyway, so we manually override it.
 */
static inline int qebsm_possible(void)
{
#ifdef CONFIG_64BIT
        return css_general_characteristics.qebsm;
#endif
        return 0;
}

/*
 * qib_param_field: pointer to 128 bytes or NULL, if no param field.
 * input_slib_elements / output_slib_elements: pointers to
 * nr_queues * 128 words of data or NULL.
 */
static void set_impl_params(struct qdio_irq *irq_ptr,
                            unsigned int qib_param_field_format,
                            unsigned char *qib_param_field,
                            unsigned long *input_slib_elements,
                            unsigned long *output_slib_elements)
{
        struct qdio_q *q;
        int i, j;

        if (!irq_ptr)
                return;

        irq_ptr->qib.pfmt = qib_param_field_format;
        if (qib_param_field)
                memcpy(irq_ptr->qib.parm, qib_param_field,
                       QDIO_MAX_BUFFERS_PER_Q);

        if (!input_slib_elements)
                goto output;

        for_each_input_queue(irq_ptr, q, i) {
                for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                        q->slib->slibe[j].parms =
                                input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
        }
output:
        if (!output_slib_elements)
                return;

        for_each_output_queue(irq_ptr, q, i) {
                for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                        q->slib->slibe[j].parms =
                                output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
        }
}

static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
        struct qdio_q *q;
        int i;

        for (i = 0; i < nr_queues; i++) {
                q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
                if (!q)
                        return -ENOMEM;

                q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
                if (!q->slib) {
                        kmem_cache_free(qdio_q_cache, q);
                        return -ENOMEM;
                }
                irq_ptr_qs[i] = q;
        }
        return 0;
}

int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
{
        int rc;

        rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
        if (rc)
                return rc;
        rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
        return rc;
}

static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
                              qdio_handler_t *handler, int i)
{
        struct slib *slib = q->slib;

        /* queue must be cleared for qdio_establish */
        memset(q, 0, sizeof(*q));
        memset(slib, 0, PAGE_SIZE);
        q->slib = slib;
        q->irq_ptr = irq_ptr;
        q->mask = 1 << (31 - i);	/* one bit per queue, queue 0 in the MSB */
        q->nr = i;
        q->handler = handler;
}

static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
                                void **sbals_array, int i)
{
        struct qdio_q *prev;
        int j;

        DBF_HEX(&q, sizeof(void *));
        /* the storage list shares the slib's page, starting at its middle */
        q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);

        /* fill in sbal */
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) {
                q->sbal[j] = *sbals_array++;
                /* SBALs must be 256-byte aligned */
                BUG_ON((unsigned long)q->sbal[j] & 0xff);
        }

        /* fill in slib */
        if (i > 0) {
                prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
                        : irq_ptr->output_qs[i - 1];
                prev->slib->nsliba = (unsigned long)q->slib;
        }

        q->slib->sla = (unsigned long)q->sl;
        q->slib->slsba = (unsigned long)&q->slsb.val[0];

        /* fill in sl */
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                q->sl->element[j].sbal = (unsigned long)q->sbal[j];
}
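/*
 * For orientation (derived from the code above): the slib page obtained
 * in __qdio_allocate_qs() is split in half, with the storage list placed
 * in the upper half, and every storage-list element pointing at one of
 * the 128 caller-provided, 256-byte-aligned SBALs:
 *
 *	page start ----> +---------------------------+
 *	                 | struct slib (slibe[128])  |
 *	PAGE_SIZE / 2 -> +---------------------------+
 *	                 | struct sl (element[128])  |
 *	                 | (rest of page unused)     |
 *	page end ------> +---------------------------+
 */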
static void setup_queues(struct qdio_irq *irq_ptr,
                         struct qdio_initialize *qdio_init)
{
        struct qdio_q *q;
        void **input_sbal_array = qdio_init->input_sbal_addr_array;
        void **output_sbal_array = qdio_init->output_sbal_addr_array;
        struct qdio_outbuf_state *output_sbal_state_array =
                                  qdio_init->output_sbal_state_array;
        int i;

        for_each_input_queue(irq_ptr, q, i) {
                DBF_EVENT("inq:%1d", i);
                setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);

                q->is_input_q = 1;
                q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
                                qdio_init->queue_start_poll_array[i] : NULL;

                setup_storage_lists(q, irq_ptr, input_sbal_array, i);
                input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

                if (is_thinint_irq(irq_ptr)) {
                        tasklet_init(&q->tasklet, tiqdio_inbound_processing,
                                     (unsigned long) q);
                } else {
                        tasklet_init(&q->tasklet, qdio_inbound_processing,
                                     (unsigned long) q);
                }
        }

        for_each_output_queue(irq_ptr, q, i) {
                DBF_EVENT("outq:%1d", i);
                setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);

                q->u.out.sbal_state = output_sbal_state_array;
                output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;

                q->is_input_q = 0;
                q->u.out.scan_threshold = qdio_init->scan_threshold;
                setup_storage_lists(q, irq_ptr, output_sbal_array, i);
                output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

                tasklet_init(&q->tasklet, qdio_outbound_processing,
                             (unsigned long) q);
                setup_timer(&q->u.out.timer, (void(*)(unsigned long))
                            &qdio_outbound_timer, (unsigned long)q);
        }
}

static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
{
        if (qdioac & AC1_SIGA_INPUT_NEEDED)
                irq_ptr->siga_flag.input = 1;
        if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
                irq_ptr->siga_flag.output = 1;
        if (qdioac & AC1_SIGA_SYNC_NEEDED)
                irq_ptr->siga_flag.sync = 1;
        if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
                irq_ptr->siga_flag.sync_after_ai = 1;
        if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
                irq_ptr->siga_flag.sync_out_after_pci = 1;
}

static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
                                  unsigned char qdioac, unsigned long token)
{
        if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
                goto no_qebsm;
        if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
            !(qdioac & AC1_SC_QEBSM_ENABLED))
                goto no_qebsm;

        irq_ptr->sch_token = token;

        DBF_EVENT("V=V:1");
        DBF_EVENT("%8lx", irq_ptr->sch_token);
        return;

no_qebsm:
        irq_ptr->sch_token = 0;
        irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
        DBF_EVENT("noV=V");
}

/*
 * If there is a qdio_irq we use the chsc_page and store the information
 * in the qdio_irq, otherwise we allocate a page and copy the result to
 * the specified structure.
 */
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
                        struct subchannel_id *schid,
                        struct qdio_ssqd_desc *data)
{
        struct chsc_ssqd_area *ssqd;
        int rc;

        DBF_EVENT("getssqd:%4x", schid->sch_no);
        if (irq_ptr != NULL)
                ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
        else
                ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
        if (!ssqd)
                return -ENOMEM;
        memset(ssqd, 0, PAGE_SIZE);

        ssqd->request = (struct chsc_header) {
                .length = 0x0010,
                .code = 0x0024,
        };
        ssqd->first_sch = schid->sch_no;
        ssqd->last_sch = schid->sch_no;
        ssqd->ssid = schid->ssid;

        if (chsc(ssqd)) {
                rc = -EIO;
                goto out;
        }
        rc = chsc_error_from_response(ssqd->response.code);
        if (rc)
                goto out;

        if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
            !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
            (ssqd->qdio_ssqd.sch != schid->sch_no)) {
                rc = -EINVAL;
                goto out;
        }

        if (irq_ptr != NULL)
                memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd,
                       sizeof(struct qdio_ssqd_desc));
        else
                memcpy(data, &ssqd->qdio_ssqd,
                       sizeof(struct qdio_ssqd_desc));
out:
        /* the standalone case owns the page and must not leak it */
        if (irq_ptr == NULL)
                free_page((unsigned long)ssqd);
        return rc;
}
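/*
 * Sketch of the standalone path (irq_ptr == NULL), as used for example
 * by the qdio_get_ssqd_desc() wrapper in qdio_main.c; names below are
 * for illustration only:
 *
 *	struct qdio_ssqd_desc desc;
 *	struct subchannel_id schid = ccw_device_get_subchannel_id(cdev);
 *
 *	if (qdio_setup_get_ssqd(NULL, &schid, &desc))
 *		... no usable QDIO description for this subchannel ...
 */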
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
{
        unsigned char qdioac;
        int rc;

        rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL);
        if (rc) {
                DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%x", rc);
                /* all flags set, worst case */
                qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
                         AC1_SIGA_SYNC_NEEDED;
        } else
                qdioac = irq_ptr->ssqd_desc.qdioac1;

        check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
        process_ac_flags(irq_ptr, qdioac);
        DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
        DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}

void qdio_release_memory(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q;
        int i;

        /*
         * Must check queue array manually since irq_ptr->nr_input_qs /
         * irq_ptr->nr_output_qs may not yet be set.
         */
        for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
                q = irq_ptr->input_qs[i];
                if (q) {
                        free_page((unsigned long) q->slib);
                        kmem_cache_free(qdio_q_cache, q);
                }
        }
        for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
                q = irq_ptr->output_qs[i];
                if (q) {
                        if (q->u.out.use_cq) {
                                int n;

                                for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
                                        struct qaob *aob = q->u.out.aobs[n];

                                        if (aob) {
                                                qdio_release_aob(aob);
                                                q->u.out.aobs[n] = NULL;
                                        }
                                }

                                qdio_disable_async_operation(&q->u.out);
                        }
                        free_page((unsigned long) q->slib);
                        kmem_cache_free(qdio_q_cache, q);
                }
        }
        free_page((unsigned long) irq_ptr->qdr);
        free_page(irq_ptr->chsc_page);
        free_page((unsigned long) irq_ptr);
}

static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
                                     struct qdio_q **irq_ptr_qs,
                                     int i, int nr)
{
        irq_ptr->qdr->qdf0[i + nr].sliba =
                (unsigned long)irq_ptr_qs[i]->slib;

        irq_ptr->qdr->qdf0[i + nr].sla =
                (unsigned long)irq_ptr_qs[i]->sl;

        irq_ptr->qdr->qdf0[i + nr].slsba =
                (unsigned long)&irq_ptr_qs[i]->slsb.val[0];

        irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
}

static void setup_qdr(struct qdio_irq *irq_ptr,
                      struct qdio_initialize *qdio_init)
{
        int i;

        irq_ptr->qdr->qfmt = qdio_init->q_format;
        irq_ptr->qdr->ac = qdio_init->qdr_ac;
        irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
        irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
        irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
        irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
        irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
        irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;

        for (i = 0; i < qdio_init->no_input_qs; i++)
                __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);

        for (i = 0; i < qdio_init->no_output_qs; i++)
                __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
                                         qdio_init->no_input_qs);
}

static void setup_qib(struct qdio_irq *irq_ptr,
                      struct qdio_initialize *init_data)
{
        if (qebsm_possible())
                irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;

        irq_ptr->qib.rflags |= init_data->qib_rflags;

        irq_ptr->qib.qfmt = init_data->q_format;
        if (init_data->no_input_qs)
                irq_ptr->qib.isliba =
                        (unsigned long)(irq_ptr->input_qs[0]->slib);
        if (init_data->no_output_qs)
                irq_ptr->qib.osliba =
                        (unsigned long)(irq_ptr->output_qs[0]->slib);
        memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
}

int qdio_setup_irq(struct qdio_initialize *init_data)
{
        struct ciw *ciw;
        struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
        int rc;

        memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
        memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
        memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
        memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
        memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));

        irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
        irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;

        /* wipes qib.ac, required by ar7063 */
        memset(irq_ptr->qdr, 0, sizeof(struct qdr));

        irq_ptr->int_parm = init_data->int_parm;
        irq_ptr->nr_input_qs = init_data->no_input_qs;
        irq_ptr->nr_output_qs = init_data->no_output_qs;

        irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
        irq_ptr->cdev = init_data->cdev;
        setup_queues(irq_ptr, init_data);

        setup_qib(irq_ptr, init_data);
        qdio_setup_thinint(irq_ptr);
        set_impl_params(irq_ptr, init_data->qib_param_field_format,
                        init_data->qib_param_field,
                        init_data->input_slib_elements,
                        init_data->output_slib_elements);

        /* fill input and output descriptors */
        setup_qdr(irq_ptr, init_data);

        /* qdr, qib, sls, slsbs, slibs, sbales are filled now */

        /* get qdio commands */
        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
                rc = -EINVAL;
                goto out_err;
        }
        irq_ptr->equeue = *ciw;

        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
                rc = -EINVAL;
                goto out_err;
        }
        irq_ptr->aqueue = *ciw;

        /* set new interrupt handler */
        irq_ptr->orig_handler = init_data->cdev->handler;
        init_data->cdev->handler = qdio_int_handler;
        return 0;
out_err:
        qdio_release_memory(irq_ptr);
        return rc;
}
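/*
 * Rough placement of qdio_setup_irq() in the establish sequence driven
 * from qdio_main.c (simplified, for orientation only):
 *
 *	qdio_allocate(init_data)   - allocate qdr, qib and queue memory
 *	qdio_establish(init_data)  - calls qdio_setup_irq() and issues the
 *	                             ESTABLISH ccw to the device
 *	qdio_activate(cdev)        - start data transfer
 */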
void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
                                struct ccw_device *cdev)
{
        char s[80];

        snprintf(s, sizeof(s), "qdio: %s %s on SC %x using "
                 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
                 dev_name(&cdev->dev),
                 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
                        ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
                 irq_ptr->schid.sch_no,
                 is_thinint_irq(irq_ptr),
                 (irq_ptr->sch_token) ? 1 : 0,
                 (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
                 css_general_characteristics.aif_tdd,
                 (irq_ptr->siga_flag.input) ? "R" : " ",
                 (irq_ptr->siga_flag.output) ? "W" : " ",
                 (irq_ptr->siga_flag.sync) ? "S" : " ",
                 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
                 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
        printk(KERN_INFO "%s", s);
}

int qdio_enable_async_operation(struct qdio_output_q *outq)
{
        outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
                             GFP_ATOMIC);
        if (!outq->aobs) {
                outq->use_cq = 0;
                return -ENOMEM;
        }
        outq->use_cq = 1;
        return 0;
}

void qdio_disable_async_operation(struct qdio_output_q *q)
{
        kfree(q->aobs);
        q->aobs = NULL;
        q->use_cq = 0;
}

int __init qdio_setup_init(void)
{
        qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
                                         256, 0, NULL);
        if (!qdio_q_cache)
                return -ENOMEM;

        qdio_aob_cache = kmem_cache_create("qdio_aob",
                                           sizeof(struct qaob),
                                           sizeof(struct qaob),
                                           0,
                                           NULL);
        if (!qdio_aob_cache) {
                kmem_cache_destroy(qdio_q_cache);
                return -ENOMEM;
        }

        /* Check for OSA/FCP thin interrupts (bit 67). */
        DBF_EVENT("thinint:%1d",
                  (css_general_characteristics.aif_osa) ? 1 : 0);

        /* Check for QEBSM support in general (bit 58). */
        DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
        return 0;
}

void qdio_setup_exit(void)
{
        kmem_cache_destroy(qdio_aob_cache);
        kmem_cache_destroy(qdio_q_cache);
}