// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications AB.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/mailbox_client.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/regmap.h>
#include <linux/seq_file.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>

/*
 * The Shared Memory Point to Point (SMP2P) protocol facilitates communication
 * of a single 32-bit value between two processors. Each value has a single
 * writer (the local side) and a single reader (the remote side). Values are
 * uniquely identified in the system by the directed edge (local processor ID
 * to remote processor ID) and a string identifier.
 *
 * Each processor is responsible for creating the outgoing SMEM items and each
 * item is writable by the local processor and readable by the remote
 * processor. By using two separate SMEM items that are single-reader and
 * single-writer, SMP2P does not require any remote locking mechanisms.
 *
 * The driver uses the Linux interrupt framework to expose a virtual interrupt
 * controller for each inbound entry and the qcom_smem_state framework to
 * expose each outbound entry.
 */
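
/*
 * Illustrative consumer-side sketch (not part of this driver; the "stop" and
 * "fatal" names, fatal_handler() and drvdata are hypothetical). An outbound
 * entry is driven through the qcom_smem_state API, while an inbound entry is
 * consumed as an ordinary edge-triggered interrupt:
 *
 *	int irq, ret;
 *	unsigned int bit;
 *	struct qcom_smem_state *state;
 *
 *	state = qcom_smem_state_get(&pdev->dev, "stop", &bit);
 *	if (IS_ERR(state))
 *		return PTR_ERR(state);
 *	qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
 *
 *	irq = platform_get_irq_byname(pdev, "fatal");
 *	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL, fatal_handler,
 *					IRQF_ONESHOT, "smp2p-fatal", drvdata);
 */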

#define SMP2P_MAX_ENTRY 16
#define SMP2P_MAX_ENTRY_NAME 16

#define SMP2P_FEATURE_SSR_ACK 0x1
#define SMP2P_FLAGS_RESTART_DONE_BIT 0
#define SMP2P_FLAGS_RESTART_ACK_BIT 1

#define SMP2P_MAGIC 0x504d5324
#define SMP2P_ALL_FEATURES	SMP2P_FEATURE_SSR_ACK

/**
 * struct smp2p_smem_item - in memory communication structure
 * @magic:		magic number
 * @version:		version - must be 1
 * @features:		negotiated feature flags (SMP2P_FEATURE_*)
 * @local_pid:		processor id of sending end
 * @remote_pid:		processor id of receiving end
 * @total_entries:	number of entries - always SMP2P_MAX_ENTRY
 * @valid_entries:	number of allocated entries
 * @flags:		restart done/ack state flags (SMP2P_FLAGS_*)
 * @entries:		individual communication entries
 * @entries.name:	name of the entry
 * @entries.value:	content of the entry
 */
struct smp2p_smem_item {
	u32 magic;
	u8 version;
	unsigned features:24;
	u16 local_pid;
	u16 remote_pid;
	u16 total_entries;
	u16 valid_entries;
	u32 flags;

	struct {
		u8 name[SMP2P_MAX_ENTRY_NAME];
		u32 value;
	} entries[SMP2P_MAX_ENTRY];
} __packed;

/**
 * struct smp2p_entry - driver context matching one entry
 * @node:	list entry to keep track of allocated entries
 * @smp2p:	reference to the device driver context
 * @name:	name of the entry, to match against smp2p_smem_item
 * @value:	pointer to smp2p_smem_item entry value
 * @last_value:	last handled value
 * @domain:	irq_domain for inbound entries
 * @irq_enabled: bitmap to track enabled irq bits
 * @irq_rising:	bitmap to mark irq bits for rising detection
 * @irq_falling: bitmap to mark irq bits for falling detection
 * @state:	smem state handle
 * @lock:	spinlock to protect read-modify-write of the value
 */
struct smp2p_entry {
	struct list_head node;
	struct qcom_smp2p *smp2p;

	const char *name;
	u32 *value;
	u32 last_value;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);

	struct qcom_smem_state *state;

	spinlock_t lock;
};

#define SMP2P_INBOUND	0
#define SMP2P_OUTBOUND	1

/**
 * struct qcom_smp2p - device driver context
 * @dev:	device driver handle
 * @in:		pointer to the inbound smem item
 * @out:	pointer to the outbound smem item
 * @smem_items:	ids of the two smem items
 * @valid_entries: already scanned inbound entries
 * @ssr_ack_enabled: SMP2P_FEATURE_SSR_ACK feature is supported and was enabled
 * @ssr_ack:	current cached state of the local ack bit
 * @negotiation_done: whether negotiating finished
 * @local_pid:	processor id of the inbound edge
 * @remote_pid:	processor id of the outbound edge
 * @ipc_regmap:	regmap for the outbound ipc
 * @ipc_offset:	offset within the regmap
 * @ipc_bit:	bit in regmap@offset to kick to signal remote processor
 * @mbox_client: mailbox client handle
 * @mbox_chan:	apcs ipc mailbox channel handle
 * @inbound:	list of inbound entries
 * @outbound:	list of outbound entries
 */
struct qcom_smp2p {
	struct device *dev;

	struct smp2p_smem_item *in;
	struct smp2p_smem_item *out;

	unsigned smem_items[SMP2P_OUTBOUND + 1];

	unsigned valid_entries;

	bool ssr_ack_enabled;
	bool ssr_ack;
	bool negotiation_done;

	unsigned local_pid;
	unsigned remote_pid;

	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;

	struct mbox_client mbox_client;
	struct mbox_chan *mbox_chan;

	struct list_head inbound;
	struct list_head outbound;
};

#define CREATE_TRACE_POINTS
#include "trace-smp2p.h"

static void qcom_smp2p_kick(struct qcom_smp2p *smp2p)
{
	/* Make sure any updated data is written before the kick */
	wmb();

	if (smp2p->mbox_chan) {
		mbox_send_message(smp2p->mbox_chan, NULL);
		mbox_client_txdone(smp2p->mbox_chan, 0);
	} else {
		regmap_write(smp2p->ipc_regmap, smp2p->ipc_offset, BIT(smp2p->ipc_bit));
	}
}
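
/*
 * Feature negotiation and SSR (subsystem restart) acknowledgement, handled by
 * the three helpers below: the local item advertises version 1 and
 * SMP2P_ALL_FEATURES; once the remote item reports the same version, the
 * feature masks are ANDed and negotiation is done. If SMP2P_FEATURE_SSR_ACK
 * was negotiated, every toggle of the remote RESTART_DONE flag is mirrored
 * into the local RESTART_ACK flag after the entries have been processed, so
 * the remote side knows its restart notification has been consumed.
 */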

static bool qcom_smp2p_check_ssr(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *in = smp2p->in;
	bool restart;

	if (!smp2p->ssr_ack_enabled)
		return false;

	restart = in->flags & BIT(SMP2P_FLAGS_RESTART_DONE_BIT);

	return restart != smp2p->ssr_ack;
}

static void qcom_smp2p_do_ssr_ack(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out = smp2p->out;
	u32 val;

	trace_smp2p_ssr_ack(smp2p->dev);
	smp2p->ssr_ack = !smp2p->ssr_ack;

	val = out->flags & ~BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
	if (smp2p->ssr_ack)
		val |= BIT(SMP2P_FLAGS_RESTART_ACK_BIT);
	out->flags = val;

	qcom_smp2p_kick(smp2p);
}

static void qcom_smp2p_negotiate(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out = smp2p->out;
	struct smp2p_smem_item *in = smp2p->in;

	if (in->version == out->version) {
		out->features &= in->features;

		if (out->features & SMP2P_FEATURE_SSR_ACK)
			smp2p->ssr_ack_enabled = true;

		smp2p->negotiation_done = true;
		trace_smp2p_negotiate(smp2p->dev, out->features);
	}
}

static void qcom_smp2p_notify_in(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *in;
	struct smp2p_entry *entry;
	int irq_pin;
	u32 status;
	char buf[SMP2P_MAX_ENTRY_NAME];
	u32 val;
	int i;

	in = smp2p->in;

	/* Match newly created entries */
	for (i = smp2p->valid_entries; i < in->valid_entries; i++) {
		list_for_each_entry(entry, &smp2p->inbound, node) {
			memcpy(buf, in->entries[i].name, sizeof(buf));
			if (!strcmp(buf, entry->name)) {
				entry->value = &in->entries[i].value;
				break;
			}
		}
	}
	smp2p->valid_entries = i;

	/* Fire interrupts based on any value changes */
	list_for_each_entry(entry, &smp2p->inbound, node) {
		/* Ignore entries not yet allocated by the remote side */
		if (!entry->value)
			continue;

		val = readl(entry->value);

		status = val ^ entry->last_value;
		entry->last_value = val;

		trace_smp2p_notify_in(entry, status, val);

		/* No changes of this entry? */
		if (!status)
			continue;

		for_each_set_bit(i, entry->irq_enabled, 32) {
			if (!(status & BIT(i)))
				continue;

			if ((val & BIT(i) && test_bit(i, entry->irq_rising)) ||
			    (!(val & BIT(i)) && test_bit(i, entry->irq_falling))) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}
}

/**
 * qcom_smp2p_intr() - interrupt handler for incoming notifications
 * @irq:	unused
 * @data:	smp2p driver context
 *
 * Handle notifications from the remote side, picking up newly allocated
 * entries and any changes to the state bits of existing entries.
 *
 * Return: %IRQ_HANDLED
 */
static irqreturn_t qcom_smp2p_intr(int irq, void *data)
{
	struct smp2p_smem_item *in;
	struct qcom_smp2p *smp2p = data;
	unsigned int smem_id = smp2p->smem_items[SMP2P_INBOUND];
	unsigned int pid = smp2p->remote_pid;
	bool ack_restart;
	size_t size;

	in = smp2p->in;

	/* Acquire smem item, if not already found */
	if (!in) {
		in = qcom_smem_get(pid, smem_id, &size);
		if (IS_ERR(in)) {
			dev_err(smp2p->dev,
				"Unable to acquire remote smp2p item\n");
			goto out;
		}

		smp2p->in = in;
	}

	if (!smp2p->negotiation_done)
		qcom_smp2p_negotiate(smp2p);

	if (smp2p->negotiation_done) {
		ack_restart = qcom_smp2p_check_ssr(smp2p);
		qcom_smp2p_notify_in(smp2p);

		if (ack_restart)
			qcom_smp2p_do_ssr_ack(smp2p);
	}

out:
	return IRQ_HANDLED;
}
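
/*
 * Each inbound entry is exposed as a 32-line virtual interrupt controller.
 * Edges are detected in software by XORing the current value against
 * entry->last_value in qcom_smp2p_notify_in(), and matching handlers are run
 * via handle_nested_irq() from the driver's threaded interrupt handler.
 * Consumers reference a line with a two-cell specifier (bit number and
 * IRQ_TYPE_EDGE_* flags), translated by irq_domain_xlate_twocell() below.
 */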

static void smp2p_mask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	clear_bit(irq, entry->irq_enabled);
}

static void smp2p_unmask_irq(struct irq_data *irqd)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	set_bit(irq, entry->irq_enabled);
}

static int smp2p_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static void smp2p_irq_print_chip(struct irq_data *irqd, struct seq_file *p)
{
	struct smp2p_entry *entry = irq_data_get_irq_chip_data(irqd);

	seq_printf(p, " %8s", dev_name(entry->smp2p->dev));
}

static struct irq_chip smp2p_irq_chip = {
	.name           = "smp2p",
	.irq_mask       = smp2p_mask_irq,
	.irq_unmask     = smp2p_unmask_irq,
	.irq_set_type	= smp2p_set_irq_type,
	.irq_print_chip = smp2p_irq_print_chip,
};

static int smp2p_irq_map(struct irq_domain *d,
			 unsigned int irq,
			 irq_hw_number_t hw)
{
	struct smp2p_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smp2p_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);
	irq_set_noprobe(irq);

	return 0;
}

static const struct irq_domain_ops smp2p_irq_ops = {
	.map = smp2p_irq_map,
	.xlate = irq_domain_xlate_twocell,
};

static int qcom_smp2p_inbound_entry(struct qcom_smp2p *smp2p,
				    struct smp2p_entry *entry,
				    struct device_node *node)
{
	entry->domain = irq_domain_add_linear(node, 32, &smp2p_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smp2p->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}

static int smp2p_update_bits(void *data, u32 mask, u32 value)
{
	struct smp2p_entry *entry = data;
	unsigned long flags;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&entry->lock, flags);
	val = orig = readl(entry->value);
	val &= ~mask;
	val |= value;
	writel(val, entry->value);
	spin_unlock_irqrestore(&entry->lock, flags);

	trace_smp2p_update_bits(entry, orig, val);

	if (val != orig)
		qcom_smp2p_kick(entry->smp2p);

	return 0;
}

static const struct qcom_smem_state_ops smp2p_state_ops = {
	.update_bits = smp2p_update_bits,
};
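
/*
 * smp2p_update_bits() above is the backend for qcom_smem_state_update_bits():
 * the bits in @mask are cleared and then overwritten with @value under the
 * entry spinlock, so e.g. (BIT(n), BIT(n)) asserts bit n and (BIT(n), 0)
 * deasserts it. The remote processor is only kicked when the stored value
 * actually changed.
 */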

static int qcom_smp2p_outbound_entry(struct qcom_smp2p *smp2p,
				     struct smp2p_entry *entry,
				     struct device_node *node)
{
	struct smp2p_smem_item *out = smp2p->out;
	char buf[SMP2P_MAX_ENTRY_NAME] = {};

	/* Allocate an entry from the smem item */
	strscpy(buf, entry->name, SMP2P_MAX_ENTRY_NAME);
	memcpy(out->entries[out->valid_entries].name, buf, SMP2P_MAX_ENTRY_NAME);

	/* Make the logical entry reference the physical value */
	entry->value = &out->entries[out->valid_entries].value;

	out->valid_entries++;

	entry->state = qcom_smem_state_register(node, &smp2p_state_ops, entry);
	if (IS_ERR(entry->state)) {
		dev_err(smp2p->dev, "failed to register qcom_smem_state\n");
		return PTR_ERR(entry->state);
	}

	return 0;
}

static int qcom_smp2p_alloc_outbound_item(struct qcom_smp2p *smp2p)
{
	struct smp2p_smem_item *out;
	unsigned smem_id = smp2p->smem_items[SMP2P_OUTBOUND];
	unsigned pid = smp2p->remote_pid;
	int ret;

	ret = qcom_smem_alloc(pid, smem_id, sizeof(*out));
	if (ret < 0 && ret != -EEXIST)
		return dev_err_probe(smp2p->dev, ret,
				     "unable to allocate local smp2p item\n");

	out = qcom_smem_get(pid, smem_id, NULL);
	if (IS_ERR(out)) {
		dev_err(smp2p->dev, "Unable to acquire local smp2p item\n");
		return PTR_ERR(out);
	}

	memset(out, 0, sizeof(*out));
	out->magic = SMP2P_MAGIC;
	out->local_pid = smp2p->local_pid;
	out->remote_pid = smp2p->remote_pid;
	out->total_entries = SMP2P_MAX_ENTRY;
	out->valid_entries = 0;
	out->features = SMP2P_ALL_FEATURES;

	/*
	 * Make sure the rest of the header is written before we validate the
	 * item by writing a valid version number.
	 */
	wmb();
	out->version = 1;

	qcom_smp2p_kick(smp2p);

	smp2p->out = out;

	return 0;
}

static int smp2p_parse_ipc(struct qcom_smp2p *smp2p)
{
	struct device_node *syscon;
	struct device *dev = smp2p->dev;
	const char *key;
	int ret;

	syscon = of_parse_phandle(dev->of_node, "qcom,ipc", 0);
	if (!syscon) {
		dev_err(dev, "no qcom,ipc node\n");
		return -ENODEV;
	}

	smp2p->ipc_regmap = syscon_node_to_regmap(syscon);
	of_node_put(syscon);
	if (IS_ERR(smp2p->ipc_regmap))
		return PTR_ERR(smp2p->ipc_regmap);

	key = "qcom,ipc";
	ret = of_property_read_u32_index(dev->of_node, key, 1, &smp2p->ipc_offset);
	if (ret < 0) {
		dev_err(dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(dev->of_node, key, 2, &smp2p->ipc_bit);
	if (ret < 0) {
		dev_err(dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}
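
/*
 * The remote processor is kicked either through a mailbox channel (preferred)
 * or, on older platforms, through the legacy "qcom,ipc" property parsed above:
 * a three-cell <syscon-phandle register-offset bit-number> tuple describing
 * which bit to write into the syscon regmap. Probing below tries the mailbox
 * first and only falls back to smp2p_parse_ipc() when no channel is described.
 */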

static int qcom_smp2p_probe(struct platform_device *pdev)
{
	struct smp2p_entry *entry;
	struct qcom_smp2p *smp2p;
	const char *key;
	int irq;
	int ret;

	smp2p = devm_kzalloc(&pdev->dev, sizeof(*smp2p), GFP_KERNEL);
	if (!smp2p)
		return -ENOMEM;

	smp2p->dev = &pdev->dev;
	INIT_LIST_HEAD(&smp2p->inbound);
	INIT_LIST_HEAD(&smp2p->outbound);

	platform_set_drvdata(pdev, smp2p);

	key = "qcom,smem";
	ret = of_property_read_u32_array(pdev->dev.of_node, key,
					 smp2p->smem_items, 2);
	if (ret)
		return ret;

	key = "qcom,local-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->local_pid);
	if (ret)
		goto report_read_failure;

	key = "qcom,remote-pid";
	ret = of_property_read_u32(pdev->dev.of_node, key, &smp2p->remote_pid);
	if (ret)
		goto report_read_failure;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	smp2p->mbox_client.dev = &pdev->dev;
	smp2p->mbox_client.knows_txdone = true;
	smp2p->mbox_chan = mbox_request_channel(&smp2p->mbox_client, 0);
	if (IS_ERR(smp2p->mbox_chan)) {
		if (PTR_ERR(smp2p->mbox_chan) != -ENODEV)
			return PTR_ERR(smp2p->mbox_chan);

		smp2p->mbox_chan = NULL;

		ret = smp2p_parse_ipc(smp2p);
		if (ret)
			return ret;
	}

	ret = qcom_smp2p_alloc_outbound_item(smp2p);
	if (ret < 0)
		goto release_mbox;

	for_each_available_child_of_node_scoped(pdev->dev.of_node, node) {
		entry = devm_kzalloc(&pdev->dev, sizeof(*entry), GFP_KERNEL);
		if (!entry) {
			ret = -ENOMEM;
			goto unwind_interfaces;
		}

		entry->smp2p = smp2p;
		spin_lock_init(&entry->lock);

		ret = of_property_read_string(node, "qcom,entry-name", &entry->name);
		if (ret < 0)
			goto unwind_interfaces;

		if (of_property_read_bool(node, "interrupt-controller")) {
			ret = qcom_smp2p_inbound_entry(smp2p, entry, node);
			if (ret < 0)
				goto unwind_interfaces;

			list_add(&entry->node, &smp2p->inbound);
		} else {
			ret = qcom_smp2p_outbound_entry(smp2p, entry, node);
			if (ret < 0)
				goto unwind_interfaces;

			list_add(&entry->node, &smp2p->outbound);
		}
	}

	/* Kick the outgoing edge after allocating entries */
	qcom_smp2p_kick(smp2p);

	ret = devm_request_threaded_irq(&pdev->dev, irq,
					NULL, qcom_smp2p_intr,
					IRQF_ONESHOT,
					NULL, (void *)smp2p);
	if (ret) {
		dev_err(&pdev->dev, "failed to request interrupt\n");
		goto unwind_interfaces;
	}

	/*
	 * Treat the smp2p interrupt as a wakeup source, but keep it disabled
	 * by default. User space can decide whether to enable it depending on
	 * its use cases. For example, if a remoteproc crashes and the device
	 * wants to handle it immediately (e.g. to not miss phone calls), it
	 * can enable the wakeup source from user space, while other devices
	 * which do not have a proper autosleep feature may want to handle it
	 * together with other wakeup events (e.g. the power button) instead
	 * of waking up immediately.
	 */
	device_set_wakeup_capable(&pdev->dev, true);

	ret = dev_pm_set_wake_irq(&pdev->dev, irq);
	if (ret)
		goto set_wake_irq_fail;

	return 0;

set_wake_irq_fail:
	dev_pm_clear_wake_irq(&pdev->dev);

unwind_interfaces:
	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	smp2p->out->valid_entries = 0;

release_mbox:
	mbox_free_channel(smp2p->mbox_chan);

	return ret;

report_read_failure:
	dev_err(&pdev->dev, "failed to read %s\n", key);
	return -EINVAL;
}

static void qcom_smp2p_remove(struct platform_device *pdev)
{
	struct qcom_smp2p *smp2p = platform_get_drvdata(pdev);
	struct smp2p_entry *entry;

	dev_pm_clear_wake_irq(&pdev->dev);

	list_for_each_entry(entry, &smp2p->inbound, node)
		irq_domain_remove(entry->domain);

	list_for_each_entry(entry, &smp2p->outbound, node)
		qcom_smem_state_unregister(entry->state);

	mbox_free_channel(smp2p->mbox_chan);

	smp2p->out->valid_entries = 0;
}

static const struct of_device_id qcom_smp2p_of_match[] = {
	{ .compatible = "qcom,smp2p" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smp2p_of_match);

static struct platform_driver qcom_smp2p_driver = {
	.probe = qcom_smp2p_probe,
	.remove = qcom_smp2p_remove,
	.driver  = {
		.name  = "qcom_smp2p",
		.of_match_table = qcom_smp2p_of_match,
	},
};
module_platform_driver(qcom_smp2p_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory Point to Point driver");
MODULE_LICENSE("GPL v2");