1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * System Control and Management Interface (SCMI) Notification support 4 * 5 * Copyright (C) 2020-2021 ARM Ltd. 6 */ 7 /** 8 * DOC: Theory of operation 9 * 10 * SCMI Protocol specification allows the platform to signal events to 11 * interested agents via notification messages: this is an implementation 12 * of the dispatch and delivery of such notifications to the interested users 13 * inside the Linux kernel. 14 * 15 * An SCMI Notification core instance is initialized for each active platform 16 * instance identified by the means of the usual &struct scmi_handle. 17 * 18 * Each SCMI Protocol implementation, during its initialization, registers with 19 * this core its set of supported events using scmi_register_protocol_events(): 20 * all the needed descriptors are stored in the &struct registered_protocols and 21 * &struct registered_events arrays. 22 * 23 * Kernel users interested in some specific event can register their callbacks 24 * providing the usual notifier_block descriptor, since this core implements 25 * events' delivery using the standard Kernel notification chains machinery. 26 * 27 * Given the number of possible events defined by SCMI and the extensibility 28 * of the SCMI Protocol itself, the underlying notification chains are created 29 * and destroyed dynamically on demand depending on the number of users 30 * effectively registered for an event, so that no support structures or chains 31 * are allocated until at least one user has registered a notifier_block for 32 * such event. Similarly, events' generation itself is enabled at the platform 33 * level only after at least one user has registered, and it is shutdown after 34 * the last user for that event has gone. 35 * 36 * All users provided callbacks and allocated notification-chains are stored in 37 * the @registered_events_handlers hashtable. 
Callbacks' registration requests
 * for still to be registered events are instead kept in the dedicated common
 * hashtable @pending_events_handlers.
 *
 * An event is identified uniquely by the tuple (proto_id, evt_id, src_id)
 * and is served by its own dedicated notification chain; information contained
 * in such tuples is used, in a few different ways, to generate the needed
 * hash-keys.
 *
 * Here proto_id and evt_id are simply the protocol_id and message_id numbers
 * as described in the SCMI Protocol specification, while src_id represents an
 * optional, protocol dependent, source identifier (like domain_id, perf_id
 * or sensor_id and so forth).
 *
 * Upon reception of a notification message from the platform the SCMI RX ISR
 * passes the received message payload and some ancillary information (including
 * an arrival timestamp in nanoseconds) to the core via @scmi_notify() which
 * pushes the event-data itself on a protocol-dedicated kfifo queue for further
 * deferred processing as specified in @scmi_events_dispatcher().
 *
 * Each protocol has its own dedicated work_struct and worker which, once kicked
 * by the ISR, takes care to empty its own dedicated queue, delivering the
 * queued items into the proper notification-chain: notifications processing can
 * proceed concurrently on distinct workers only between events belonging to
 * different protocols while delivery of events within the same protocol is
 * still strictly sequentially ordered by time of arrival.
63 * 64 * Events' information is then extracted from the SCMI Notification messages and 65 * conveyed, converted into a custom per-event report struct, as the void *data 66 * param to the user callback provided by the registered notifier_block, so that 67 * from the user perspective his callback will look invoked like: 68 * 69 * int user_cb(struct notifier_block *nb, unsigned long event_id, void *report) 70 * 71 */ 72 73 #define dev_fmt(fmt) "SCMI Notifications - " fmt 74 #define pr_fmt(fmt) "SCMI Notifications - " fmt 75 76 #include <linux/bitfield.h> 77 #include <linux/bug.h> 78 #include <linux/compiler.h> 79 #include <linux/device.h> 80 #include <linux/err.h> 81 #include <linux/hashtable.h> 82 #include <linux/kernel.h> 83 #include <linux/ktime.h> 84 #include <linux/kfifo.h> 85 #include <linux/list.h> 86 #include <linux/mutex.h> 87 #include <linux/notifier.h> 88 #include <linux/refcount.h> 89 #include <linux/scmi_protocol.h> 90 #include <linux/slab.h> 91 #include <linux/types.h> 92 #include <linux/workqueue.h> 93 94 #include "common.h" 95 #include "notify.h" 96 97 #define SCMI_MAX_PROTO 256 98 99 #define PROTO_ID_MASK GENMASK(31, 24) 100 #define EVT_ID_MASK GENMASK(23, 16) 101 #define SRC_ID_MASK GENMASK(15, 0) 102 #define NOTIF_UNSUPP -1 103 104 /* 105 * Builds an unsigned 32bit key from the given input tuple to be used 106 * as a key in hashtables. 107 */ 108 #define MAKE_HASH_KEY(p, e, s) \ 109 (FIELD_PREP(PROTO_ID_MASK, (p)) | \ 110 FIELD_PREP(EVT_ID_MASK, (e)) | \ 111 FIELD_PREP(SRC_ID_MASK, (s))) 112 113 #define MAKE_ALL_SRCS_KEY(p, e) MAKE_HASH_KEY((p), (e), SRC_ID_MASK) 114 115 /* 116 * Assumes that the stored obj includes its own hash-key in a field named 'key': 117 * with this simplification this macro can be equally used for all the objects' 118 * types hashed by this implementation. 
119 * 120 * @__ht: The hashtable name 121 * @__obj: A pointer to the object type to be retrieved from the hashtable; 122 * it will be used as a cursor while scanning the hastable and it will 123 * be possibly left as NULL when @__k is not found 124 * @__k: The key to search for 125 */ 126 #define KEY_FIND(__ht, __obj, __k) \ 127 ({ \ 128 typeof(__k) k_ = __k; \ 129 typeof(__obj) obj_; \ 130 \ 131 hash_for_each_possible((__ht), obj_, hash, k_) \ 132 if (obj_->key == k_) \ 133 break; \ 134 __obj = obj_; \ 135 }) 136 137 #define KEY_XTRACT_PROTO_ID(key) FIELD_GET(PROTO_ID_MASK, (key)) 138 #define KEY_XTRACT_EVT_ID(key) FIELD_GET(EVT_ID_MASK, (key)) 139 #define KEY_XTRACT_SRC_ID(key) FIELD_GET(SRC_ID_MASK, (key)) 140 141 /* 142 * A set of macros used to access safely @registered_protocols and 143 * @registered_events arrays; these are fixed in size and each entry is possibly 144 * populated at protocols' registration time and then only read but NEVER 145 * modified or removed. 146 */ 147 #define SCMI_GET_PROTO(__ni, __pid) \ 148 ({ \ 149 typeof(__ni) ni_ = __ni; \ 150 struct scmi_registered_events_desc *__pd = NULL; \ 151 \ 152 if (ni_) \ 153 __pd = READ_ONCE(ni_->registered_protocols[(__pid)]); \ 154 __pd; \ 155 }) 156 157 #define SCMI_GET_REVT_FROM_PD(__pd, __eid) \ 158 ({ \ 159 typeof(__pd) pd_ = __pd; \ 160 typeof(__eid) eid_ = __eid; \ 161 struct scmi_registered_event *__revt = NULL; \ 162 \ 163 if (pd_ && eid_ < pd_->num_events) \ 164 __revt = READ_ONCE(pd_->registered_events[eid_]); \ 165 __revt; \ 166 }) 167 168 #define SCMI_GET_REVT(__ni, __pid, __eid) \ 169 ({ \ 170 struct scmi_registered_event *__revt; \ 171 struct scmi_registered_events_desc *__pd; \ 172 \ 173 __pd = SCMI_GET_PROTO((__ni), (__pid)); \ 174 __revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid)); \ 175 __revt; \ 176 }) 177 178 /* A couple of utility macros to limit cruft when calling protocols' helpers */ 179 #define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state) \ 180 ({ \ 181 typeof(revt) r = revt; \ 
182 r->proto->ops->set_notify_enabled(r->proto->ph, \ 183 (eid), (sid), (state)); \ 184 }) 185 186 #define REVT_NOTIFY_ENABLE(revt, eid, sid) \ 187 REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true) 188 189 #define REVT_NOTIFY_DISABLE(revt, eid, sid) \ 190 REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false) 191 192 #define REVT_FILL_REPORT(revt, ...) \ 193 ({ \ 194 typeof(revt) r = revt; \ 195 r->proto->ops->fill_custom_report(r->proto->ph, \ 196 __VA_ARGS__); \ 197 }) 198 199 #define SCMI_PENDING_HASH_SZ 4 200 #define SCMI_REGISTERED_HASH_SZ 6 201 202 struct scmi_registered_events_desc; 203 204 /** 205 * struct scmi_notify_instance - Represents an instance of the notification 206 * core 207 * @gid: GroupID used for devres 208 * @handle: A reference to the platform instance 209 * @init_work: A work item to perform final initializations of pending handlers 210 * @notify_wq: A reference to the allocated Kernel cmwq 211 * @pending_mtx: A mutex to protect @pending_events_handlers 212 * @registered_protocols: A statically allocated array containing pointers to 213 * all the registered protocol-level specific information 214 * related to events' handling 215 * @pending_events_handlers: An hashtable containing all pending events' 216 * handlers descriptors 217 * 218 * Each platform instance, represented by a handle, has its own instance of 219 * the notification subsystem represented by this structure. 
220 */ 221 struct scmi_notify_instance { 222 void *gid; 223 struct scmi_handle *handle; 224 struct work_struct init_work; 225 struct workqueue_struct *notify_wq; 226 /* lock to protect pending_events_handlers */ 227 struct mutex pending_mtx; 228 struct scmi_registered_events_desc **registered_protocols; 229 DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ); 230 }; 231 232 /** 233 * struct events_queue - Describes a queue and its associated worker 234 * @sz: Size in bytes of the related kfifo 235 * @kfifo: A dedicated Kernel kfifo descriptor 236 * @notify_work: A custom work item bound to this queue 237 * @wq: A reference to the associated workqueue 238 * 239 * Each protocol has its own dedicated events_queue descriptor. 240 */ 241 struct events_queue { 242 size_t sz; 243 struct kfifo kfifo; 244 struct work_struct notify_work; 245 struct workqueue_struct *wq; 246 }; 247 248 /** 249 * struct scmi_event_header - A utility header 250 * @timestamp: The timestamp, in nanoseconds (boottime), which was associated 251 * to this event as soon as it entered the SCMI RX ISR 252 * @payld_sz: Effective size of the embedded message payload which follows 253 * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol) 254 * @payld: A reference to the embedded event payload 255 * 256 * This header is prepended to each received event message payload before 257 * queueing it on the related &struct events_queue. 
258 */ 259 struct scmi_event_header { 260 ktime_t timestamp; 261 size_t payld_sz; 262 unsigned char evt_id; 263 unsigned char payld[]; 264 }; 265 266 struct scmi_registered_event; 267 268 /** 269 * struct scmi_registered_events_desc - Protocol Specific information 270 * @id: Protocol ID 271 * @ops: Protocol specific and event-related operations 272 * @equeue: The embedded per-protocol events_queue 273 * @ni: A reference to the initialized instance descriptor 274 * @eh: A reference to pre-allocated buffer to be used as a scratch area by the 275 * deferred worker when fetching data from the kfifo 276 * @eh_sz: Size of the pre-allocated buffer @eh 277 * @in_flight: A reference to an in flight &struct scmi_registered_event 278 * @num_events: Number of events in @registered_events 279 * @registered_events: A dynamically allocated array holding all the registered 280 * events' descriptors, whose fixed-size is determined at 281 * compile time. 282 * @registered_mtx: A mutex to protect @registered_events_handlers 283 * @ph: SCMI protocol handle reference 284 * @registered_events_handlers: An hashtable containing all events' handlers 285 * descriptors registered for this protocol 286 * 287 * All protocols that register at least one event have their protocol-specific 288 * information stored here, together with the embedded allocated events_queue. 289 * These descriptors are stored in the @registered_protocols array at protocol 290 * registration time. 291 * 292 * Once these descriptors are successfully registered, they are NEVER again 293 * removed or modified since protocols do not unregister ever, so that, once 294 * we safely grab a NON-NULL reference from the array we can keep it and use it. 
295 */ 296 struct scmi_registered_events_desc { 297 u8 id; 298 const struct scmi_event_ops *ops; 299 struct events_queue equeue; 300 struct scmi_notify_instance *ni; 301 struct scmi_event_header *eh; 302 size_t eh_sz; 303 void *in_flight; 304 int num_events; 305 struct scmi_registered_event **registered_events; 306 /* mutex to protect registered_events_handlers */ 307 struct mutex registered_mtx; 308 const struct scmi_protocol_handle *ph; 309 DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ); 310 }; 311 312 /** 313 * struct scmi_registered_event - Event Specific Information 314 * @proto: A reference to the associated protocol descriptor 315 * @evt: A reference to the associated event descriptor (as provided at 316 * registration time) 317 * @report: A pre-allocated buffer used by the deferred worker to fill a 318 * customized event report 319 * @num_sources: The number of possible sources for this event as stated at 320 * events' registration time 321 * @not_supported_by_platform: A flag to indicate that not even one source was 322 * found to be supported by the platform for this 323 * event 324 * @sources: A reference to a dynamically allocated array used to refcount the 325 * events' enable requests for all the existing sources 326 * @sources_mtx: A mutex to serialize the access to @sources 327 * 328 * All registered events are represented by one of these structures that are 329 * stored in the @registered_events array at protocol registration time. 330 * 331 * Once these descriptors are successfully registered, they are NEVER again 332 * removed or modified since protocols do not unregister ever, so that once we 333 * safely grab a NON-NULL reference from the table we can keep it and use it. 
334 */ 335 struct scmi_registered_event { 336 struct scmi_registered_events_desc *proto; 337 const struct scmi_event *evt; 338 void *report; 339 u32 num_sources; 340 bool not_supported_by_platform; 341 refcount_t *sources; 342 /* locking to serialize the access to sources */ 343 struct mutex sources_mtx; 344 }; 345 346 /** 347 * struct scmi_event_handler - Event handler information 348 * @key: The used hashkey 349 * @users: A reference count for number of active users for this handler 350 * @r_evt: A reference to the associated registered event; when this is NULL 351 * this handler is pending, which means that identifies a set of 352 * callbacks intended to be attached to an event which is still not 353 * known nor registered by any protocol at that point in time 354 * @chain: The notification chain dedicated to this specific event tuple 355 * @hash: The hlist_node used for collision handling 356 * @enabled: A boolean which records if event's generation has been already 357 * enabled for this handler as a whole 358 * 359 * This structure collects all the information needed to process a received 360 * event identified by the tuple (proto_id, evt_id, src_id). 361 * These descriptors are stored in a per-protocol @registered_events_handlers 362 * table using as a key a value derived from that tuple. 
363 */ 364 struct scmi_event_handler { 365 u32 key; 366 refcount_t users; 367 struct scmi_registered_event *r_evt; 368 struct blocking_notifier_head chain; 369 struct hlist_node hash; 370 bool enabled; 371 }; 372 373 #define IS_HNDL_PENDING(hndl) (!(hndl)->r_evt) 374 375 static struct scmi_event_handler * 376 scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key); 377 static void scmi_put_active_handler(struct scmi_notify_instance *ni, 378 struct scmi_event_handler *hndl); 379 static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni, 380 struct scmi_event_handler *hndl); 381 382 /** 383 * scmi_lookup_and_call_event_chain() - Lookup the proper chain and call it 384 * @ni: A reference to the notification instance to use 385 * @evt_key: The key to use to lookup the related notification chain 386 * @report: The customized event-specific report to pass down to the callbacks 387 * as their *data parameter. 388 */ 389 static inline void 390 scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni, 391 u32 evt_key, void *report) 392 { 393 int ret; 394 struct scmi_event_handler *hndl; 395 396 /* 397 * Here ensure the event handler cannot vanish while using it. 398 * It is legitimate, though, for an handler not to be found at all here, 399 * e.g. when it has been unregistered by the user after some events had 400 * already been queued. 401 */ 402 hndl = scmi_get_active_handler(ni, evt_key); 403 if (!hndl) 404 return; 405 406 ret = blocking_notifier_call_chain(&hndl->chain, 407 KEY_XTRACT_EVT_ID(evt_key), 408 report); 409 /* Notifiers are NOT supposed to cut the chain ... 
*/ 410 WARN_ON_ONCE(ret & NOTIFY_STOP_MASK); 411 412 scmi_put_active_handler(ni, hndl); 413 } 414 415 /** 416 * scmi_process_event_header() - Dequeue and process an event header 417 * @eq: The queue to use 418 * @pd: The protocol descriptor to use 419 * 420 * Read an event header from the protocol queue into the dedicated scratch 421 * buffer and looks for a matching registered event; in case an anomalously 422 * sized read is detected just flush the queue. 423 * 424 * Return: 425 * * a reference to the matching registered event when found 426 * * ERR_PTR(-EINVAL) when NO registered event could be found 427 * * NULL when the queue is empty 428 */ 429 static inline struct scmi_registered_event * 430 scmi_process_event_header(struct events_queue *eq, 431 struct scmi_registered_events_desc *pd) 432 { 433 unsigned int outs; 434 struct scmi_registered_event *r_evt; 435 436 outs = kfifo_out(&eq->kfifo, pd->eh, 437 sizeof(struct scmi_event_header)); 438 if (!outs) 439 return NULL; 440 if (outs != sizeof(struct scmi_event_header)) { 441 dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n"); 442 kfifo_reset_out(&eq->kfifo); 443 return NULL; 444 } 445 446 r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id); 447 if (!r_evt) 448 r_evt = ERR_PTR(-EINVAL); 449 450 return r_evt; 451 } 452 453 /** 454 * scmi_process_event_payload() - Dequeue and process an event payload 455 * @eq: The queue to use 456 * @pd: The protocol descriptor to use 457 * @r_evt: The registered event descriptor to use 458 * 459 * Read an event payload from the protocol queue into the dedicated scratch 460 * buffer, fills a custom report and then look for matching event handlers and 461 * call them; skip any unknown event (as marked by scmi_process_event_header()) 462 * and in case an anomalously sized read is detected just flush the queue. 
463 * 464 * Return: False when the queue is empty 465 */ 466 static inline bool 467 scmi_process_event_payload(struct events_queue *eq, 468 struct scmi_registered_events_desc *pd, 469 struct scmi_registered_event *r_evt) 470 { 471 u32 src_id, key; 472 unsigned int outs; 473 void *report = NULL; 474 475 outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz); 476 if (!outs) 477 return false; 478 479 /* Any in-flight event has now been officially processed */ 480 pd->in_flight = NULL; 481 482 if (outs != pd->eh->payld_sz) { 483 dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n"); 484 kfifo_reset_out(&eq->kfifo); 485 return false; 486 } 487 488 if (IS_ERR(r_evt)) { 489 dev_warn(pd->ni->handle->dev, 490 "SKIP UNKNOWN EVT - proto:%X evt:%d\n", 491 pd->id, pd->eh->evt_id); 492 return true; 493 } 494 495 report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp, 496 pd->eh->payld, pd->eh->payld_sz, 497 r_evt->report, &src_id); 498 if (!report) { 499 dev_err(pd->ni->handle->dev, 500 "report not available - proto:%X evt:%d\n", 501 pd->id, pd->eh->evt_id); 502 return true; 503 } 504 505 /* At first search for a generic ALL src_ids handler... */ 506 key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id); 507 scmi_lookup_and_call_event_chain(pd->ni, key, report); 508 509 /* ...then search for any specific src_id */ 510 key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id); 511 scmi_lookup_and_call_event_chain(pd->ni, key, report); 512 513 return true; 514 } 515 516 /** 517 * scmi_events_dispatcher() - Common worker logic for all work items. 518 * @work: The work item to use, which is associated to a dedicated events_queue 519 * 520 * Logic: 521 * 1. dequeue one pending RX notification (queued in SCMI RX ISR context) 522 * 2. generate a custom event report from the received event message 523 * 3. lookup for any registered ALL_SRC_IDs handler: 524 * - > call the related notification chain passing in the report 525 * 4. 
lookup for any registered specific SRC_ID handler: 526 * - > call the related notification chain passing in the report 527 * 528 * Note that: 529 * * a dedicated per-protocol kfifo queue is used: in this way an anomalous 530 * flood of events cannot saturate other protocols' queues. 531 * * each per-protocol queue is associated to a distinct work_item, which 532 * means, in turn, that: 533 * + all protocols can process their dedicated queues concurrently 534 * (since notify_wq:max_active != 1) 535 * + anyway at most one worker instance is allowed to run on the same queue 536 * concurrently: this ensures that we can have only one concurrent 537 * reader/writer on the associated kfifo, so that we can use it lock-less 538 * 539 * Context: Process context. 540 */ 541 static void scmi_events_dispatcher(struct work_struct *work) 542 { 543 struct events_queue *eq; 544 struct scmi_registered_events_desc *pd; 545 struct scmi_registered_event *r_evt; 546 547 eq = container_of(work, struct events_queue, notify_work); 548 pd = container_of(eq, struct scmi_registered_events_desc, equeue); 549 /* 550 * In order to keep the queue lock-less and the number of memcopies 551 * to the bare minimum needed, the dispatcher accounts for the 552 * possibility of per-protocol in-flight events: i.e. an event whose 553 * reception could end up being split across two subsequent runs of this 554 * worker, first the header, then the payload. 
555 */ 556 do { 557 if (!pd->in_flight) { 558 r_evt = scmi_process_event_header(eq, pd); 559 if (!r_evt) 560 break; 561 pd->in_flight = r_evt; 562 } else { 563 r_evt = pd->in_flight; 564 } 565 } while (scmi_process_event_payload(eq, pd, r_evt)); 566 } 567 568 /** 569 * scmi_notify() - Queues a notification for further deferred processing 570 * @handle: The handle identifying the platform instance from which the 571 * dispatched event is generated 572 * @proto_id: Protocol ID 573 * @evt_id: Event ID (msgID) 574 * @buf: Event Message Payload (without the header) 575 * @len: Event Message Payload size 576 * @ts: RX Timestamp in nanoseconds (boottime) 577 * 578 * Context: Called in interrupt context to queue a received event for 579 * deferred processing. 580 * 581 * Return: 0 on Success 582 */ 583 int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id, 584 const void *buf, size_t len, ktime_t ts) 585 { 586 struct scmi_registered_event *r_evt; 587 struct scmi_event_header eh; 588 struct scmi_notify_instance *ni; 589 590 ni = scmi_notification_instance_data_get(handle); 591 if (!ni) 592 return 0; 593 594 r_evt = SCMI_GET_REVT(ni, proto_id, evt_id); 595 if (!r_evt) 596 return -EINVAL; 597 598 if (len > r_evt->evt->max_payld_sz) { 599 dev_err(handle->dev, "discard badly sized message\n"); 600 return -EINVAL; 601 } 602 if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) { 603 dev_warn(handle->dev, 604 "queue full, dropping proto_id:%d evt_id:%d ts:%lld\n", 605 proto_id, evt_id, ktime_to_ns(ts)); 606 return -ENOMEM; 607 } 608 609 eh.timestamp = ts; 610 eh.evt_id = evt_id; 611 eh.payld_sz = len; 612 /* 613 * Header and payload are enqueued with two distinct kfifo_in() (so non 614 * atomic), but this situation is handled properly on the consumer side 615 * with in-flight events tracking. 
616 */ 617 kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh)); 618 kfifo_in(&r_evt->proto->equeue.kfifo, buf, len); 619 /* 620 * Don't care about return value here since we just want to ensure that 621 * a work is queued all the times whenever some items have been pushed 622 * on the kfifo: 623 * - if work was already queued it will simply fail to queue a new one 624 * since it is not needed 625 * - if work was not queued already it will be now, even in case work 626 * was in fact already running: this behavior avoids any possible race 627 * when this function pushes new items onto the kfifos after the 628 * related executing worker had already determined the kfifo to be 629 * empty and it was terminating. 630 */ 631 queue_work(r_evt->proto->equeue.wq, 632 &r_evt->proto->equeue.notify_work); 633 634 return 0; 635 } 636 637 /** 638 * scmi_kfifo_free() - Devres action helper to free the kfifo 639 * @kfifo: The kfifo to free 640 */ 641 static void scmi_kfifo_free(void *kfifo) 642 { 643 kfifo_free((struct kfifo *)kfifo); 644 } 645 646 /** 647 * scmi_initialize_events_queue() - Allocate/Initialize a kfifo buffer 648 * @ni: A reference to the notification instance to use 649 * @equeue: The events_queue to initialize 650 * @sz: Size of the kfifo buffer to allocate 651 * 652 * Allocate a buffer for the kfifo and initialize it. 
653 * 654 * Return: 0 on Success 655 */ 656 static int scmi_initialize_events_queue(struct scmi_notify_instance *ni, 657 struct events_queue *equeue, size_t sz) 658 { 659 int ret; 660 661 if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL)) 662 return -ENOMEM; 663 /* Size could have been roundup to power-of-two */ 664 equeue->sz = kfifo_size(&equeue->kfifo); 665 666 ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free, 667 &equeue->kfifo); 668 if (ret) 669 return ret; 670 671 INIT_WORK(&equeue->notify_work, scmi_events_dispatcher); 672 equeue->wq = ni->notify_wq; 673 674 return ret; 675 } 676 677 /** 678 * scmi_allocate_registered_events_desc() - Allocate a registered events' 679 * descriptor 680 * @ni: A reference to the &struct scmi_notify_instance notification instance 681 * to use 682 * @proto_id: Protocol ID 683 * @queue_sz: Size of the associated queue to allocate 684 * @eh_sz: Size of the event header scratch area to pre-allocate 685 * @num_events: Number of events to support (size of @registered_events) 686 * @ops: Pointer to a struct holding references to protocol specific helpers 687 * needed during events handling 688 * 689 * It is supposed to be called only once for each protocol at protocol 690 * initialization time, so it warns if the requested protocol is found already 691 * registered. 
692 * 693 * Return: The allocated and registered descriptor on Success 694 */ 695 static struct scmi_registered_events_desc * 696 scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni, 697 u8 proto_id, size_t queue_sz, size_t eh_sz, 698 int num_events, 699 const struct scmi_event_ops *ops) 700 { 701 int ret; 702 struct scmi_registered_events_desc *pd; 703 704 /* Ensure protocols are up to date */ 705 smp_rmb(); 706 if (WARN_ON(ni->registered_protocols[proto_id])) 707 return ERR_PTR(-EINVAL); 708 709 pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL); 710 if (!pd) 711 return ERR_PTR(-ENOMEM); 712 pd->id = proto_id; 713 pd->ops = ops; 714 pd->ni = ni; 715 716 ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz); 717 if (ret) 718 return ERR_PTR(ret); 719 720 pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL); 721 if (!pd->eh) 722 return ERR_PTR(-ENOMEM); 723 pd->eh_sz = eh_sz; 724 725 pd->registered_events = devm_kcalloc(ni->handle->dev, num_events, 726 sizeof(char *), GFP_KERNEL); 727 if (!pd->registered_events) 728 return ERR_PTR(-ENOMEM); 729 pd->num_events = num_events; 730 731 /* Initialize per protocol handlers table */ 732 mutex_init(&pd->registered_mtx); 733 hash_init(pd->registered_events_handlers); 734 735 return pd; 736 } 737 738 /** 739 * scmi_register_protocol_events() - Register Protocol Events with the core 740 * @handle: The handle identifying the platform instance against which the 741 * protocol's events are registered 742 * @proto_id: Protocol ID 743 * @ph: SCMI protocol handle. 744 * @ee: A structure describing the events supported by this protocol. 745 * 746 * Used by SCMI Protocols initialization code to register with the notification 747 * core the list of supported events and their descriptors: takes care to 748 * pre-allocate and store all needed descriptors, scratch buffers and event 749 * queues. 
750 * 751 * Return: 0 on Success 752 */ 753 int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id, 754 const struct scmi_protocol_handle *ph, 755 const struct scmi_protocol_events *ee) 756 { 757 int i; 758 unsigned int num_sources; 759 size_t payld_sz = 0; 760 struct scmi_registered_events_desc *pd; 761 struct scmi_notify_instance *ni; 762 const struct scmi_event *evt; 763 764 if (!ee || !ee->ops || !ee->evts || !ph || 765 (!ee->num_sources && !ee->ops->get_num_sources)) 766 return -EINVAL; 767 768 ni = scmi_notification_instance_data_get(handle); 769 if (!ni) 770 return -ENOMEM; 771 772 /* num_sources cannot be <= 0 */ 773 if (ee->num_sources) { 774 num_sources = ee->num_sources; 775 } else { 776 int nsrc = ee->ops->get_num_sources(ph); 777 778 if (nsrc <= 0) 779 return -EINVAL; 780 num_sources = nsrc; 781 } 782 783 evt = ee->evts; 784 for (i = 0; i < ee->num_events; i++) 785 payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz); 786 payld_sz += sizeof(struct scmi_event_header); 787 788 pd = scmi_allocate_registered_events_desc(ni, proto_id, ee->queue_sz, 789 payld_sz, ee->num_events, 790 ee->ops); 791 if (IS_ERR(pd)) 792 return PTR_ERR(pd); 793 794 pd->ph = ph; 795 for (i = 0; i < ee->num_events; i++, evt++) { 796 int id; 797 struct scmi_registered_event *r_evt; 798 799 r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt), 800 GFP_KERNEL); 801 if (!r_evt) 802 return -ENOMEM; 803 r_evt->proto = pd; 804 r_evt->evt = evt; 805 806 r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources, 807 sizeof(refcount_t), GFP_KERNEL); 808 if (!r_evt->sources) 809 return -ENOMEM; 810 r_evt->num_sources = num_sources; 811 mutex_init(&r_evt->sources_mtx); 812 813 r_evt->report = devm_kzalloc(ni->handle->dev, 814 evt->max_report_sz, GFP_KERNEL); 815 if (!r_evt->report) 816 return -ENOMEM; 817 818 if (ee->ops->is_notify_supported) { 819 int supported = 0; 820 821 for (id = 0; id < r_evt->num_sources; id++) { 822 if (!ee->ops->is_notify_supported(ph, 
r_evt->evt->id, id)) 823 refcount_set(&r_evt->sources[id], NOTIF_UNSUPP); 824 else 825 supported++; 826 } 827 828 /* Not even one source has been found to be supported */ 829 r_evt->not_supported_by_platform = !supported; 830 } 831 832 pd->registered_events[i] = r_evt; 833 /* Ensure events are updated */ 834 smp_wmb(); 835 dev_dbg(handle->dev, "registered event - %lX\n", 836 MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id)); 837 } 838 839 /* Register protocol and events...it will never be removed */ 840 ni->registered_protocols[proto_id] = pd; 841 /* Ensure protocols are updated */ 842 smp_wmb(); 843 844 /* 845 * Finalize any pending events' handler which could have been waiting 846 * for this protocol's events registration. 847 */ 848 schedule_work(&ni->init_work); 849 850 return 0; 851 } 852 853 /** 854 * scmi_deregister_protocol_events - Deregister protocol events with the core 855 * @handle: The handle identifying the platform instance against which the 856 * protocol's events are registered 857 * @proto_id: Protocol ID 858 */ 859 void scmi_deregister_protocol_events(const struct scmi_handle *handle, 860 u8 proto_id) 861 { 862 struct scmi_notify_instance *ni; 863 struct scmi_registered_events_desc *pd; 864 865 ni = scmi_notification_instance_data_get(handle); 866 if (!ni) 867 return; 868 869 pd = ni->registered_protocols[proto_id]; 870 if (!pd) 871 return; 872 873 ni->registered_protocols[proto_id] = NULL; 874 /* Ensure protocols are updated */ 875 smp_wmb(); 876 877 cancel_work_sync(&pd->equeue.notify_work); 878 } 879 880 /** 881 * scmi_allocate_event_handler() - Allocate Event handler 882 * @ni: A reference to the notification instance to use 883 * @evt_key: 32bit key uniquely bind to the event identified by the tuple 884 * (proto_id, evt_id, src_id) 885 * 886 * Allocate an event handler and related notification chain associated with 887 * the provided event handler key. 
 * Note that, at this point, a related registered_event is still to be
 * associated to this handler descriptor (hndl->r_evt == NULL), so the handler
 * is initialized as pending.
 *
 * Context: Assumes to be called with @pending_mtx already acquired.
 * Return: the freshly allocated structure on Success, NULL on allocation
 *	   failure
 */
static struct scmi_event_handler *
scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	struct scmi_event_handler *hndl;

	hndl = kzalloc(sizeof(*hndl), GFP_KERNEL);
	if (!hndl)
		return NULL;
	hndl->key = evt_key;
	BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain);
	/* Caller's reference: dropped via scmi_put_handler*() */
	refcount_set(&hndl->users, 1);
	/* New handlers are created pending */
	hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key);

	return hndl;
}

/**
 * scmi_free_event_handler() - Free the provided Event handler
 * @hndl: The event handler structure to free
 *
 * Removes @hndl from whichever hashtable it currently lives in, then frees it.
 *
 * Context: Assumes to be called with proper locking acquired depending
 *	    on the situation.
 */
static void scmi_free_event_handler(struct scmi_event_handler *hndl)
{
	hash_del(&hndl->hash);
	kfree(hndl);
}

/**
 * scmi_bind_event_handler() - Helper to attempt binding an handler to an event
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to bind
 *
 * If an associated registered event is found, move the handler from the pending
 * into the registered table.
 *
 * Context: Assumes to be called with @pending_mtx already acquired.
 *
 * Return: 0 on Success
 */
static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
					  struct scmi_event_handler *hndl)
{
	struct scmi_registered_event *r_evt;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key),
			      KEY_XTRACT_EVT_ID(hndl->key));
	if (!r_evt)
		return -EINVAL;

	/*
	 * Remove from pending and insert into registered while getting hold
	 * of protocol instance.
	 */
	hash_del(&hndl->hash);

	/*
	 * Bailout if event is not supported at all.
	 * NOTE(review): on this path the handler has already been removed
	 * from the pending table above and is left in neither table; the
	 * caller is expected to dispose of it.
	 */
	if (r_evt->not_supported_by_platform)
		return -EOPNOTSUPP;

	/*
	 * Acquire protocols only for NON pending handlers, so as NOT to trigger
	 * protocol initialization when a notifier is registered against a still
	 * not registered protocol, since it would make little sense to force init
	 * protocols for which still no SCMI driver user exists: they wouldn't
	 * emit any event anyway till some SCMI driver starts using it.
	 */
	scmi_protocol_acquire(ni->handle, KEY_XTRACT_PROTO_ID(hndl->key));
	hndl->r_evt = r_evt;

	/* Publish into the per-protocol registered table under its own lock */
	mutex_lock(&r_evt->proto->registered_mtx);
	hash_add(r_evt->proto->registered_events_handlers,
		 &hndl->hash, hndl->key);
	mutex_unlock(&r_evt->proto->registered_mtx);

	return 0;
}

/**
 * scmi_valid_pending_handler() - Helper to check pending status of handlers
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to check
 *
 * An handler is considered pending when its r_evt == NULL, because the related
 * event was still unknown at handler's registration time; anyway, since all
 * protocols register their supported events once for all at protocols'
 * initialization time, a pending handler cannot be considered valid anymore if
 * the underlying event (which it is waiting for), belongs to an already
 * initialized and registered protocol.
986 * 987 * Return: 0 on Success 988 */ 989 static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni, 990 struct scmi_event_handler *hndl) 991 { 992 struct scmi_registered_events_desc *pd; 993 994 if (!IS_HNDL_PENDING(hndl)) 995 return -EINVAL; 996 997 pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key)); 998 if (pd) 999 return -EINVAL; 1000 1001 return 0; 1002 } 1003 1004 /** 1005 * scmi_register_event_handler() - Register whenever possible an Event handler 1006 * @ni: A reference to the notification instance to use 1007 * @hndl: The event handler to register 1008 * 1009 * At first try to bind an event handler to its associated event, then check if 1010 * it was at least a valid pending handler: if it was not bound nor valid return 1011 * false. 1012 * 1013 * Valid pending incomplete bindings will be periodically retried by a dedicated 1014 * worker which is kicked each time a new protocol completes its own 1015 * registration phase. 1016 * 1017 * Context: Assumes to be called with @pending_mtx acquired. 
1018 * 1019 * Return: 0 on Success 1020 */ 1021 static int scmi_register_event_handler(struct scmi_notify_instance *ni, 1022 struct scmi_event_handler *hndl) 1023 { 1024 int ret; 1025 1026 ret = scmi_bind_event_handler(ni, hndl); 1027 if (!ret) { 1028 dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n", 1029 hndl->key); 1030 } else { 1031 ret = scmi_valid_pending_handler(ni, hndl); 1032 if (!ret) 1033 dev_dbg(ni->handle->dev, 1034 "registered PENDING handler - key:%X\n", 1035 hndl->key); 1036 } 1037 1038 return ret; 1039 } 1040 1041 /** 1042 * __scmi_event_handler_get_ops() - Utility to get or create an event handler 1043 * @ni: A reference to the notification instance to use 1044 * @evt_key: The event key to use 1045 * @create: A boolean flag to specify if a handler must be created when 1046 * not already existent 1047 * 1048 * Search for the desired handler matching the key in both the per-protocol 1049 * registered table and the common pending table: 1050 * * if found adjust users refcount 1051 * * if not found and @create is true, create and register the new handler: 1052 * handler could end up being registered as pending if no matching event 1053 * could be found. 1054 * 1055 * An handler is guaranteed to reside in one and only one of the tables at 1056 * any one time; to ensure this the whole search and create is performed 1057 * holding the @pending_mtx lock, with @registered_mtx additionally acquired 1058 * if needed. 1059 * 1060 * Note that when a nested acquisition of these mutexes is needed the locking 1061 * order is always (same as in @init_work): 1062 * 1. pending_mtx 1063 * 2. registered_mtx 1064 * 1065 * Events generation is NOT enabled right after creation within this routine 1066 * since at creation time we usually want to have all setup and ready before 1067 * events really start flowing. 
 *
 * Return: A properly refcounted handler on Success; on Failure either NULL
 *	   (not found / allocation failed) or an ERR_PTR() value — callers
 *	   must be prepared for both
 */
static inline struct scmi_event_handler *
__scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
			     u32 evt_key, bool create)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_handler *hndl = NULL;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
			      KEY_XTRACT_EVT_ID(evt_key));

	/* Refuse outright if the platform supports no source for this event */
	if (r_evt && r_evt->not_supported_by_platform)
		return ERR_PTR(-EOPNOTSUPP);

	mutex_lock(&ni->pending_mtx);
	/* Search registered events at first ... if possible at all */
	if (r_evt) {
		mutex_lock(&r_evt->proto->registered_mtx);
		hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
				hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
		mutex_unlock(&r_evt->proto->registered_mtx);
	}

	/* ...then amongst pending. */
	if (!hndl) {
		hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
	}

	/*
	 * Create if still not found and required.
	 * Note: scmi_allocate_event_handler() can return NULL on allocation
	 * failure, which propagates to the caller as NULL (not an ERR_PTR).
	 */
	if (!hndl && create) {
		hndl = scmi_allocate_event_handler(ni, evt_key);
		if (hndl && scmi_register_event_handler(ni, hndl)) {
			dev_dbg(ni->handle->dev,
				"purging UNKNOWN handler - key:%X\n",
				hndl->key);
			/* this hndl can be only a pending one */
			scmi_put_handler_unlocked(ni, hndl);
			hndl = ERR_PTR(-EINVAL);
		}
	}
	mutex_unlock(&ni->pending_mtx);

	return hndl;
}

/* Lookup-only variant: never creates a handler */
static struct scmi_event_handler *
scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	return __scmi_event_handler_get_ops(ni, evt_key, false);
}

/* Lookup variant that creates (possibly pending) handlers on demand */
static struct scmi_event_handler *
scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	return __scmi_event_handler_get_ops(ni, evt_key, true);
}

/**
 * scmi_get_active_handler() - Helper to get
active handlers only 1133 * @ni: A reference to the notification instance to use 1134 * @evt_key: The event key to use 1135 * 1136 * Search for the desired handler matching the key only in the per-protocol 1137 * table of registered handlers: this is called only from the dispatching path 1138 * so want to be as quick as possible and do not care about pending. 1139 * 1140 * Return: A properly refcounted active handler 1141 */ 1142 static struct scmi_event_handler * 1143 scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key) 1144 { 1145 struct scmi_registered_event *r_evt; 1146 struct scmi_event_handler *hndl = NULL; 1147 1148 r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key), 1149 KEY_XTRACT_EVT_ID(evt_key)); 1150 if (r_evt) { 1151 mutex_lock(&r_evt->proto->registered_mtx); 1152 hndl = KEY_FIND(r_evt->proto->registered_events_handlers, 1153 hndl, evt_key); 1154 if (hndl) 1155 refcount_inc(&hndl->users); 1156 mutex_unlock(&r_evt->proto->registered_mtx); 1157 } 1158 1159 return hndl; 1160 } 1161 1162 /** 1163 * __scmi_enable_evt() - Enable/disable events generation 1164 * @r_evt: The registered event to act upon 1165 * @src_id: The src_id to act upon 1166 * @enable: The action to perform: true->Enable, false->Disable 1167 * 1168 * Takes care of proper refcounting while performing enable/disable: handles 1169 * the special case of ALL sources requests by itself. 1170 * Returns successfully if at least one of the required src_id has been 1171 * successfully enabled/disabled. 
 *
 * Return: 0 on Success
 */
static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
				    u32 src_id, bool enable)
{
	int retvals = 0;
	u32 num_sources;
	refcount_t *sid;

	/* SRC_ID_MASK means "all sources": walk the whole sources array */
	if (src_id == SRC_ID_MASK) {
		src_id = 0;
		num_sources = r_evt->num_sources;
	} else if (src_id < r_evt->num_sources) {
		num_sources = 1;
	} else {
		return -EINVAL;
	}

	mutex_lock(&r_evt->sources_mtx);
	if (enable) {
		for (; num_sources; src_id++, num_sources--) {
			int ret = 0;

			sid = &r_evt->sources[src_id];
			if (refcount_read(sid) == NOTIF_UNSUPP) {
				/* Source marked unsupported at registration */
				dev_dbg(r_evt->proto->ph->dev,
					"Notification NOT supported - proto_id:%d evt_id:%d src_id:%d",
					r_evt->proto->id, r_evt->evt->id,
					src_id);
				ret = -EOPNOTSUPP;
			} else if (refcount_read(sid) == 0) {
				/* First user: ask the platform to enable */
				ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
							 src_id);
				if (!ret)
					refcount_set(sid, 1);
			} else {
				/* Already enabled: just add a reference */
				refcount_inc(sid);
			}
			/* Count how many sources were effectively enabled */
			retvals += !ret;
		}
	} else {
		for (; num_sources; src_id++, num_sources--) {
			sid = &r_evt->sources[src_id];
			if (refcount_read(sid) == NOTIF_UNSUPP)
				continue;
			/* Last user gone: ask the platform to disable */
			if (refcount_dec_and_test(sid))
				REVT_NOTIFY_DISABLE(r_evt,
						    r_evt->evt->id, src_id);
		}
		/* Disable requests are always considered successful */
		retvals = 1;
	}
	mutex_unlock(&r_evt->sources_mtx);

	return retvals ? 0 : -EINVAL;
}

/* Enable events generation for @hndl's sources, once per handler */
static int scmi_enable_events(struct scmi_event_handler *hndl)
{
	int ret = 0;

	if (!hndl->enabled) {
		ret = __scmi_enable_evt(hndl->r_evt,
					KEY_XTRACT_SRC_ID(hndl->key), true);
		if (!ret)
			hndl->enabled = true;
	}

	return ret;
}

/* Disable events generation for @hndl's sources, if currently enabled */
static int scmi_disable_events(struct scmi_event_handler *hndl)
{
	int ret = 0;

	if (hndl->enabled) {
		ret = __scmi_enable_evt(hndl->r_evt,
					KEY_XTRACT_SRC_ID(hndl->key), false);
		if (!ret)
			hndl->enabled = false;
	}

	return ret;
}

/**
 * scmi_put_handler_unlocked() - Put an event handler
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to act upon
 *
 * After having got exclusive access to the registered handlers hashtable,
 * update the refcount and if @hndl is no more in use by anyone:
 * * ask for events' generation disabling
 * * unregister and free the handler itself
 *
 * Context: Assumes all the proper locking has been managed by the caller.
 *
 * Return: True if handler was freed (users dropped to zero)
 */
static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
				      struct scmi_event_handler *hndl)
{
	bool freed = false;

	if (refcount_dec_and_test(&hndl->users)) {
		/* Pending handlers never enabled events: nothing to disable */
		if (!IS_HNDL_PENDING(hndl))
			scmi_disable_events(hndl);
		scmi_free_event_handler(hndl);
		freed = true;
	}

	return freed;
}

/*
 * Drop a reference on @hndl taking all the needed locks: pending_mtx always,
 * plus the owning protocol's registered_mtx when the handler is bound.
 */
static void scmi_put_handler(struct scmi_notify_instance *ni,
			     struct scmi_event_handler *hndl)
{
	bool freed;
	u8 protocol_id;
	struct scmi_registered_event *r_evt = hndl->r_evt;

	mutex_lock(&ni->pending_mtx);
	if (r_evt) {
		/* Snapshot the id: r_evt may be gone once the handler is freed */
		protocol_id = r_evt->proto->id;
		mutex_lock(&r_evt->proto->registered_mtx);
	}

	freed = scmi_put_handler_unlocked(ni, hndl);

	if (r_evt) {
		mutex_unlock(&r_evt->proto->registered_mtx);
		/*
		 * Only registered handler acquired protocol; must be here
		 * released only AFTER unlocking registered_mtx, since
		 * releasing a protocol can trigger its de-initialization
		 * (ie. including r_evt and registered_mtx)
		 */
		if (freed)
			scmi_protocol_release(ni->handle, protocol_id);
	}
	mutex_unlock(&ni->pending_mtx);
}

/*
 * Drop a reference on a handler known to be bound (r_evt != NULL), taking
 * only the protocol's registered_mtx; used on the dispatching path.
 */
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
				    struct scmi_event_handler *hndl)
{
	bool freed;
	struct scmi_registered_event *r_evt = hndl->r_evt;
	u8 protocol_id = r_evt->proto->id;

	mutex_lock(&r_evt->proto->registered_mtx);
	freed = scmi_put_handler_unlocked(ni, hndl);
	mutex_unlock(&r_evt->proto->registered_mtx);
	if (freed)
		scmi_protocol_release(ni->handle, protocol_id);
}

/**
 * scmi_event_handler_enable_events() - Enable events associated to an handler
 * @hndl: The Event handler to act upon
 *
 * Return: 0 on Success
 */
static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
{
	if (scmi_enable_events(hndl)) {
		pr_err("Failed to ENABLE events for key:%X !\n", hndl->key);
		return -EINVAL;
	}

	return 0;
}

/**
 * scmi_notifier_register() - Register a notifier_block for an event
 * @handle: The handle identifying the platform instance against which the
 *	    callback is registered
 * @proto_id: Protocol ID
 * @evt_id: Event ID
 * @src_id: Source ID, when NULL register for events coming form ALL possible
 *	    sources
 * @nb: A standard notifier block to register for the specified event
 *
 * Generic helper to register a notifier_block against a protocol event.
 *
 * A notifier_block @nb will be registered for each distinct event identified
 * by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain
 * so that:
 *
 *	(proto_X, evt_Y, src_Z) --> chain_X_Y_Z
 *
 * @src_id meaning is protocol specific and identifies the origin of the event
 * (like domain_id, sensor_id and so forth).
1365 * 1366 * @src_id can be NULL to signify that the caller is interested in receiving 1367 * notifications from ALL the available sources for that protocol OR simply that 1368 * the protocol does not support distinct sources. 1369 * 1370 * As soon as one user for the specified tuple appears, an handler is created, 1371 * and that specific event's generation is enabled at the platform level, unless 1372 * an associated registered event is found missing, meaning that the needed 1373 * protocol is still to be initialized and the handler has just been registered 1374 * as still pending. 1375 * 1376 * Return: 0 on Success 1377 */ 1378 static int scmi_notifier_register(const struct scmi_handle *handle, 1379 u8 proto_id, u8 evt_id, const u32 *src_id, 1380 struct notifier_block *nb) 1381 { 1382 int ret = 0; 1383 u32 evt_key; 1384 struct scmi_event_handler *hndl; 1385 struct scmi_notify_instance *ni; 1386 1387 ni = scmi_notification_instance_data_get(handle); 1388 if (!ni) 1389 return -ENODEV; 1390 1391 evt_key = MAKE_HASH_KEY(proto_id, evt_id, 1392 src_id ? 
*src_id : SRC_ID_MASK); 1393 hndl = scmi_get_or_create_handler(ni, evt_key); 1394 if (IS_ERR(hndl)) 1395 return PTR_ERR(hndl); 1396 1397 blocking_notifier_chain_register(&hndl->chain, nb); 1398 1399 /* Enable events for not pending handlers */ 1400 if (!IS_HNDL_PENDING(hndl)) { 1401 ret = scmi_event_handler_enable_events(hndl); 1402 if (ret) 1403 scmi_put_handler(ni, hndl); 1404 } 1405 1406 return ret; 1407 } 1408 1409 /** 1410 * scmi_notifier_unregister() - Unregister a notifier_block for an event 1411 * @handle: The handle identifying the platform instance against which the 1412 * callback is unregistered 1413 * @proto_id: Protocol ID 1414 * @evt_id: Event ID 1415 * @src_id: Source ID 1416 * @nb: The notifier_block to unregister 1417 * 1418 * Takes care to unregister the provided @nb from the notification chain 1419 * associated to the specified event and, if there are no more users for the 1420 * event handler, frees also the associated event handler structures. 1421 * (this could possibly cause disabling of event's generation at platform level) 1422 * 1423 * Return: 0 on Success 1424 */ 1425 static int scmi_notifier_unregister(const struct scmi_handle *handle, 1426 u8 proto_id, u8 evt_id, const u32 *src_id, 1427 struct notifier_block *nb) 1428 { 1429 u32 evt_key; 1430 struct scmi_event_handler *hndl; 1431 struct scmi_notify_instance *ni; 1432 1433 ni = scmi_notification_instance_data_get(handle); 1434 if (!ni) 1435 return -ENODEV; 1436 1437 evt_key = MAKE_HASH_KEY(proto_id, evt_id, 1438 src_id ? *src_id : SRC_ID_MASK); 1439 hndl = scmi_get_handler(ni, evt_key); 1440 if (IS_ERR(hndl)) 1441 return PTR_ERR(hndl); 1442 1443 /* 1444 * Note that this chain unregistration call is safe on its own 1445 * being internally protected by an rwsem. 1446 */ 1447 blocking_notifier_chain_unregister(&hndl->chain, nb); 1448 scmi_put_handler(ni, hndl); 1449 1450 /* 1451 * This balances the initial get issued in @scmi_notifier_register. 
	 * If this notifier_block happened to be the last known user callback
	 * for this event, the handler is here freed and the event's generation
	 * stopped.
	 *
	 * Note that, an ongoing concurrent lookup on the delivery workqueue
	 * path could still hold the refcount to 1 even after this routine
	 * completes: in such a case it will be the final put on the delivery
	 * path which will finally free this unused handler.
	 */
	scmi_put_handler(ni, hndl);

	return 0;
}

/* Devres payload tracking one managed notifier registration */
struct scmi_notifier_devres {
	const struct scmi_handle *handle;
	u8 proto_id;
	u8 evt_id;
	u32 __src_id;		/* storage for a by-value copy of *src_id */
	u32 *src_id;		/* &__src_id, or NULL for "all sources" */
	struct notifier_block *nb;
};

/* Devres release callback: undoes the matching scmi_notifier_register() */
static void scmi_devm_release_notifier(struct device *dev, void *res)
{
	struct scmi_notifier_devres *dres = res;

	scmi_notifier_unregister(dres->handle, dres->proto_id, dres->evt_id,
				 dres->src_id, dres->nb);
}

/**
 * scmi_devm_notifier_register() - Managed registration of a notifier_block
 * for an event
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @proto_id: Protocol ID
 * @evt_id: Event ID
 * @src_id: Source ID, when NULL register for events coming form ALL possible
 *	    sources
 * @nb: A standard notifier block to register for the specified event
 *
 * Generic devres managed helper to register a notifier_block against a
 * protocol event.
 *
 * Return: 0 on Success
 */
static int scmi_devm_notifier_register(struct scmi_device *sdev,
				       u8 proto_id, u8 evt_id,
				       const u32 *src_id,
				       struct notifier_block *nb)
{
	int ret;
	struct scmi_notifier_devres *dres;

	dres = devres_alloc(scmi_devm_release_notifier,
			    sizeof(*dres), GFP_KERNEL);
	if (!dres)
		return -ENOMEM;

	ret = scmi_notifier_register(sdev->handle, proto_id,
				     evt_id, src_id, nb);
	if (ret) {
		devres_free(dres);
		return ret;
	}

	dres->handle = sdev->handle;
	dres->proto_id = proto_id;
	dres->evt_id = evt_id;
	dres->nb = nb;
	/*
	 * Copy *src_id by value: the caller's pointer may not outlive this
	 * call, while the devres release callback needs it at teardown.
	 */
	if (src_id) {
		dres->__src_id = *src_id;
		dres->src_id = &dres->__src_id;
	} else {
		dres->src_id = NULL;
	}
	devres_add(&sdev->dev, dres);

	return ret;
}

/* devres_release() match function: compare by notifier_block pointer */
static int scmi_devm_notifier_match(struct device *dev, void *res, void *data)
{
	struct scmi_notifier_devres *dres = res;
	struct notifier_block *nb = data;

	if (WARN_ON(!dres || !nb))
		return 0;

	return dres->nb == nb;
}

/**
 * scmi_devm_notifier_unregister() - Managed un-registration of a
 * notifier_block for an event
 * @sdev: A reference to an scmi_device whose embedded struct device is to
 *	  be used for devres accounting.
 * @nb: A standard notifier block to register for the specified event
 *
 * Generic devres managed helper to explicitly un-register a notifier_block
 * against a protocol event, which was previously registered using the above
 * @scmi_devm_notifier_register.
 *
 * Return: 0 on Success
 */
static int scmi_devm_notifier_unregister(struct scmi_device *sdev,
					 struct notifier_block *nb)
{
	int ret;

	/* Triggers scmi_devm_release_notifier() on the matched devres entry */
	ret = devres_release(&sdev->dev, scmi_devm_release_notifier,
			     scmi_devm_notifier_match, nb);

	WARN_ON(ret);

	return ret;
}

/**
 * scmi_protocols_late_init() - Worker for late initialization
 * @work: The work item to use associated to the proper SCMI instance
 *
 * This kicks in whenever a new protocol has completed its own registration via
 * scmi_register_protocol_events(): it is in charge of scanning the table of
 * pending handlers (registered by users while the related protocol was still
 * not initialized) and finalizing their initialization whenever possible;
 * invalid pending handlers are purged at this point in time.
 */
static void scmi_protocols_late_init(struct work_struct *work)
{
	int bkt;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;
	struct hlist_node *tmp;

	ni = container_of(work, struct scmi_notify_instance, init_work);

	/* Ensure protocols and events are up to date */
	smp_rmb();

	mutex_lock(&ni->pending_mtx);
	/* _safe variant: binding/purging removes entries while iterating */
	hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) {
		int ret;

		ret = scmi_bind_event_handler(ni, hndl);
		if (!ret) {
			dev_dbg(ni->handle->dev,
				"finalized PENDING handler - key:%X\n",
				hndl->key);
			ret = scmi_event_handler_enable_events(hndl);
			if (ret) {
				dev_dbg(ni->handle->dev,
					"purging INVALID handler - key:%X\n",
					hndl->key);
				scmi_put_active_handler(ni, hndl);
			}
		} else {
			ret = scmi_valid_pending_handler(ni, hndl);
			if (ret) {
				dev_dbg(ni->handle->dev,
					"purging PENDING handler - key:%X\n",
					hndl->key);
				/* this hndl can be only a pending one */
				scmi_put_handler_unlocked(ni, hndl);
			}
		}
	}
	mutex_unlock(&ni->pending_mtx);
}

/*
 * notify_ops are attached to the handle so that can be accessed
 * directly from an scmi_driver to register its own notifiers.
 */
static const struct scmi_notify_ops notify_ops = {
	.devm_event_notifier_register = scmi_devm_notifier_register,
	.devm_event_notifier_unregister = scmi_devm_notifier_unregister,
	.event_notifier_register = scmi_notifier_register,
	.event_notifier_unregister = scmi_notifier_unregister,
};

/**
 * scmi_notification_init() - Initializes Notification Core Support
 * @handle: The handle identifying the platform instance to initialize
 *
 * This function lays out all the basic resources needed by the notification
 * core instance identified by the provided handle: once done, all of the
 * SCMI Protocols can register their events with the core during their own
 * initializations.
 *
 * Note that failing to initialize the core notifications support does not
 * cause the whole SCMI Protocols stack to fail its initialization.
 *
 * SCMI Notification Initialization happens in 2 steps:
 * * initialization: basic common allocations (this function)
 * * registration: protocols asynchronously come into life and registers their
 *		   own supported list of events with the core; this causes
 *		   further per-protocol allocations
 *
 * Any user's callback registration attempt, referring a still not registered
 * event, will be registered as pending and finalized later (if possible)
 * by scmi_protocols_late_init() work.
 * This allows for lazy initialization of SCMI Protocols due to late (or
 * missing) SCMI drivers' modules loading.
 *
 * Return: 0 on Success
 */
int scmi_notification_init(struct scmi_handle *handle)
{
	void *gid;
	struct scmi_notify_instance *ni;

	/* Group all devm allocations so they can be released as one on error */
	gid = devres_open_group(handle->dev, NULL, GFP_KERNEL);
	if (!gid)
		return -ENOMEM;

	ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL);
	if (!ni)
		goto err;

	ni->gid = gid;
	ni->handle = handle;

	ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO,
						sizeof(char *), GFP_KERNEL);
	if (!ni->registered_protocols)
		goto err;

	ni->notify_wq = alloc_workqueue(dev_name(handle->dev),
					WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS,
					0);
	if (!ni->notify_wq)
		goto err;

	mutex_init(&ni->pending_mtx);
	hash_init(ni->pending_events_handlers);

	INIT_WORK(&ni->init_work, scmi_protocols_late_init);

	scmi_notification_instance_data_set(handle, ni);
	handle->notify_ops = &notify_ops;
	/* Ensure handle is up to date */
	smp_wmb();

	dev_info(handle->dev, "Core Enabled.\n");

	devres_close_group(handle->dev, ni->gid);

	return 0;

err:
	dev_warn(handle->dev, "Initialization Failed.\n");
	devres_release_group(handle->dev, gid);
	return -ENOMEM;
}

/**
 * scmi_notification_exit() - Shutdown and clean Notification core
 * @handle: The handle identifying the platform instance to shutdown
 */
void scmi_notification_exit(struct scmi_handle *handle)
{
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return;
	scmi_notification_instance_data_set(handle, NULL);

	/* Destroy while letting pending work complete */
	destroy_workqueue(ni->notify_wq);

	devres_release_group(ni->handle->dev, ni->gid);
}