// SPDX-License-Identifier: GPL-2.0
/*
 * System Control and Management Interface (SCMI) Notification support
 *
 * Copyright (C) 2020-2021 ARM Ltd.
 */
/**
 * DOC: Theory of operation
 *
 * SCMI Protocol specification allows the platform to signal events to
 * interested agents via notification messages: this is an implementation
 * of the dispatch and delivery of such notifications to the interested users
 * inside the Linux kernel.
 *
 * An SCMI Notification core instance is initialized for each active platform
 * instance identified by means of the usual &struct scmi_handle.
 *
 * Each SCMI Protocol implementation, during its initialization, registers with
 * this core its set of supported events using scmi_register_protocol_events():
 * all the needed descriptors are stored in the &struct registered_protocols and
 * &struct registered_events arrays.
 *
 * Kernel users interested in some specific event can register their callbacks
 * providing the usual notifier_block descriptor, since this core implements
 * events' delivery using the standard Kernel notification chains machinery.
 *
 * Given the number of possible events defined by SCMI and the extensibility
 * of the SCMI Protocol itself, the underlying notification chains are created
 * and destroyed dynamically on demand depending on the number of users
 * effectively registered for an event, so that no support structures or chains
 * are allocated until at least one user has registered a notifier_block for
 * such event. Similarly, events' generation itself is enabled at the platform
 * level only after at least one user has registered, and it is shutdown after
 * the last user for that event has gone.
 *
 * All user-provided callbacks and allocated notification chains are stored in
 * the @registered_events_handlers hashtable. Callbacks' registration requests
 * for events still to be registered are instead kept in the dedicated common
 * hashtable @pending_events_handlers.
 *
 * An event is identified uniquely by the tuple (proto_id, evt_id, src_id)
 * and is served by its own dedicated notification chain; information contained
 * in such tuples is used, in a few different ways, to generate the needed
 * hash-keys.
 *
 * Here proto_id and evt_id are simply the protocol_id and message_id numbers
 * as described in the SCMI Protocol specification, while src_id represents an
 * optional, protocol dependent, source identifier (like domain_id, perf_id
 * or sensor_id and so forth).
 *
 * Upon reception of a notification message from the platform the SCMI RX ISR
 * passes the received message payload and some ancillary information (including
 * an arrival timestamp in nanoseconds) to the core via @scmi_notify() which
 * pushes the event-data itself on a protocol-dedicated kfifo queue for further
 * deferred processing as specified in @scmi_events_dispatcher().
 *
 * Each protocol has its own dedicated work_struct and worker which, once kicked
 * by the ISR, takes care to empty its own dedicated queue, delivering the
 * queued items into the proper notification chain: notifications processing can
 * proceed concurrently on distinct workers only between events belonging to
 * different protocols while delivery of events within the same protocol is
 * still strictly sequentially ordered by time of arrival.
 *
 * Events' information is then extracted from the SCMI Notification messages and
 * conveyed, converted into a custom per-event report struct, as the void *data
 * param to the user callback provided by the registered notifier_block, so that
 * from the user's perspective the callback will be invoked as:
 *
 * int user_cb(struct notifier_block *nb, unsigned long event_id, void *report)
 *
 */
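/*
 * A minimal registration sketch (illustrative only, not part of this core's
 * logic): a kernel user owning an SCMI handle could hook a callback to one
 * event roughly as follows; my_cb/my_nb and the specific proto_id/evt_id/
 * src_id values are hypothetical placeholders.
 *
 *	static int my_cb(struct notifier_block *nb, unsigned long event_id,
 *			 void *report)
 *	{
 *		// Cast @report to the protocol-specific report struct here.
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_cb };
 *
 *	u32 src_id = 0;
 *	handle->notify_ops->event_notifier_register(handle, proto_id, evt_id,
 *						    &src_id, &my_nb);
 */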
#define dev_fmt(fmt) "SCMI Notifications - " fmt
#define pr_fmt(fmt) "SCMI Notifications - " fmt

#include <linux/bitfield.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/hashtable.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/kfifo.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/scmi_protocol.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "common.h"
#include "notify.h"

#define SCMI_MAX_PROTO		256

#define PROTO_ID_MASK		GENMASK(31, 24)
#define EVT_ID_MASK		GENMASK(23, 16)
#define SRC_ID_MASK		GENMASK(15, 0)
#define NOTIF_UNSUPP		-1

/*
 * Builds an unsigned 32bit key from the given input tuple to be used
 * as a key in hashtables.
 */
#define MAKE_HASH_KEY(p, e, s)			\
	(FIELD_PREP(PROTO_ID_MASK, (p)) |	\
	 FIELD_PREP(EVT_ID_MASK, (e)) |		\
	 FIELD_PREP(SRC_ID_MASK, (s)))

#define MAKE_ALL_SRCS_KEY(p, e)		MAKE_HASH_KEY((p), (e), SRC_ID_MASK)
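/*
 * A worked example of the key layout (illustrative values only): the tuple
 * (proto_id=0x13, evt_id=0x1, src_id=0x2) yields the key 0x13010002, while
 * the corresponding catch-all key MAKE_ALL_SRCS_KEY(0x13, 0x1) is 0x1301FFFF,
 * since SRC_ID_MASK fills the whole 16bit src_id field.
 */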
/*
 * Assumes that the stored obj includes its own hash-key in a field named 'key':
 * with this simplification this macro can be equally used for all the objects'
 * types hashed by this implementation.
 *
 * @__ht: The hashtable name
 * @__obj: A pointer to the object type to be retrieved from the hashtable;
 *	   it will be used as a cursor while scanning the hashtable and it will
 *	   be possibly left as NULL when @__k is not found
 * @__k: The key to search for
 */
#define KEY_FIND(__ht, __obj, __k)				\
({								\
	typeof(__k) k_ = __k;					\
	typeof(__obj) obj_;					\
								\
	hash_for_each_possible((__ht), obj_, hash, k_)		\
		if (obj_->key == k_)				\
			break;					\
	__obj = obj_;						\
})

#define KEY_XTRACT_PROTO_ID(key)	FIELD_GET(PROTO_ID_MASK, (key))
#define KEY_XTRACT_EVT_ID(key)		FIELD_GET(EVT_ID_MASK, (key))
#define KEY_XTRACT_SRC_ID(key)		FIELD_GET(SRC_ID_MASK, (key))

/*
 * A set of macros used to access safely @registered_protocols and
 * @registered_events arrays; these are fixed in size and each entry is
 * possibly populated at protocols' registration time and then only read
 * but NEVER modified or removed.
 */
#define SCMI_GET_PROTO(__ni, __pid)					\
({									\
	typeof(__ni) ni_ = __ni;					\
	struct scmi_registered_events_desc *__pd = NULL;		\
									\
	if (ni_)							\
		__pd = READ_ONCE(ni_->registered_protocols[(__pid)]);	\
	__pd;								\
})

#define SCMI_GET_REVT_FROM_PD(__pd, __eid)				\
({									\
	typeof(__pd) pd_ = __pd;					\
	typeof(__eid) eid_ = __eid;					\
	struct scmi_registered_event *__revt = NULL;			\
									\
	if (pd_ && eid_ < pd_->num_events)				\
		__revt = READ_ONCE(pd_->registered_events[eid_]);	\
	__revt;								\
})

#define SCMI_GET_REVT(__ni, __pid, __eid)				\
({									\
	struct scmi_registered_event *__revt;				\
	struct scmi_registered_events_desc *__pd;			\
									\
	__pd = SCMI_GET_PROTO((__ni), (__pid));				\
	__revt = SCMI_GET_REVT_FROM_PD(__pd, (__eid));			\
	__revt;								\
})

/* A couple of utility macros to limit cruft when calling protocols' helpers */
#define REVT_NOTIFY_SET_STATUS(revt, eid, sid, state)		\
({								\
	typeof(revt) r = revt;					\
	r->proto->ops->set_notify_enabled(r->proto->ph,		\
					  (eid), (sid), (state));\
})

#define REVT_NOTIFY_ENABLE(revt, eid, sid)			\
	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), true)

#define REVT_NOTIFY_DISABLE(revt, eid, sid)			\
	REVT_NOTIFY_SET_STATUS((revt), (eid), (sid), false)

#define REVT_FILL_REPORT(revt, ...)				\
({								\
	typeof(revt) r = revt;					\
	r->proto->ops->fill_custom_report(r->proto->ph,		\
					  __VA_ARGS__);		\
})

#define SCMI_PENDING_HASH_SZ		4
#define SCMI_REGISTERED_HASH_SZ		6

struct scmi_registered_events_desc;

/**
 * struct scmi_notify_instance - Represents an instance of the notification
 * core
 * @gid: GroupID used for devres
 * @handle: A reference to the platform instance
 * @init_work: A work item to perform final initializations of pending handlers
 * @notify_wq: A reference to the allocated Kernel cmwq
 * @pending_mtx: A mutex to protect @pending_events_handlers
 * @registered_protocols: A fixed-size array, allocated once at initialization
 *			  time, containing pointers to all the registered
 *			  protocol-level specific information related to
 *			  events' handling
 * @pending_events_handlers: A hashtable containing all pending events'
 *			     handlers descriptors
 *
 * Each platform instance, represented by a handle, has its own instance of
 * the notification subsystem represented by this structure.
 */
struct scmi_notify_instance {
	void			*gid;
	struct scmi_handle	*handle;
	struct work_struct	init_work;
	struct workqueue_struct	*notify_wq;
	/* lock to protect pending_events_handlers */
	struct mutex		pending_mtx;
	struct scmi_registered_events_desc	**registered_protocols;
	DECLARE_HASHTABLE(pending_events_handlers, SCMI_PENDING_HASH_SZ);
};

/**
 * struct events_queue - Describes a queue and its associated worker
 * @sz: Size in bytes of the related kfifo
 * @kfifo: A dedicated Kernel kfifo descriptor
 * @notify_work: A custom work item bound to this queue
 * @wq: A reference to the associated workqueue
 *
 * Each protocol has its own dedicated events_queue descriptor.
 */
struct events_queue {
	size_t			sz;
	struct kfifo		kfifo;
	struct work_struct	notify_work;
	struct workqueue_struct	*wq;
};

/**
 * struct scmi_event_header - A utility header
 * @timestamp: The timestamp, in nanoseconds (boottime), which was associated
 *	       to this event as soon as it entered the SCMI RX ISR
 * @payld_sz: Effective size of the embedded message payload which follows
 * @evt_id: Event ID (corresponds to the Event MsgID for this Protocol)
 * @payld: A reference to the embedded event payload
 *
 * This header is prepended to each received event message payload before
 * queueing it on the related &struct events_queue.
 */
struct scmi_event_header {
	ktime_t timestamp;
	size_t payld_sz;
	unsigned char evt_id;
	unsigned char payld[];
};
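/*
 * An illustrative layout sketch (not a structure definition): each event
 * pushed by scmi_notify() occupies a contiguous span of the protocol kfifo
 * made of one such header immediately followed by its payload:
 *
 *	| timestamp | payld_sz | evt_id | payld[0 .. payld_sz - 1] |
 *
 * The dispatcher worker pops the fixed-size header first and then uses
 * @payld_sz to pop the variable-size payload that follows it.
 */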
struct scmi_registered_event;

/**
 * struct scmi_registered_events_desc - Protocol Specific information
 * @id: Protocol ID
 * @ops: Protocol specific and event-related operations
 * @equeue: The embedded per-protocol events_queue
 * @ni: A reference to the initialized instance descriptor
 * @eh: A reference to pre-allocated buffer to be used as a scratch area by the
 *	deferred worker when fetching data from the kfifo
 * @eh_sz: Size of the pre-allocated buffer @eh
 * @in_flight: A reference to an in flight &struct scmi_registered_event
 * @num_events: Number of events in @registered_events
 * @registered_events: A dynamically allocated array holding all the registered
 *		       events' descriptors, whose fixed size is determined at
 *		       events' registration time
 * @registered_mtx: A mutex to protect @registered_events_handlers
 * @ph: SCMI protocol handle reference
 * @registered_events_handlers: A hashtable containing all events' handlers
 *				descriptors registered for this protocol
 *
 * All protocols that register at least one event have their protocol-specific
 * information stored here, together with the embedded allocated events_queue.
 * These descriptors are stored in the @registered_protocols array at protocol
 * registration time.
 *
 * Once these descriptors are successfully registered, they are NEVER again
 * removed or modified since protocols do not unregister ever, so that, once
 * we safely grab a NON-NULL reference from the array we can keep it and use it.
 */
struct scmi_registered_events_desc {
	u8 id;
	const struct scmi_event_ops *ops;
	struct events_queue equeue;
	struct scmi_notify_instance *ni;
	struct scmi_event_header *eh;
	size_t eh_sz;
	void *in_flight;
	int num_events;
	struct scmi_registered_event **registered_events;
	/* mutex to protect registered_events_handlers */
	struct mutex registered_mtx;
	const struct scmi_protocol_handle *ph;
	DECLARE_HASHTABLE(registered_events_handlers, SCMI_REGISTERED_HASH_SZ);
};

/**
 * struct scmi_registered_event - Event Specific Information
 * @proto: A reference to the associated protocol descriptor
 * @evt: A reference to the associated event descriptor (as provided at
 *	 registration time)
 * @report: A pre-allocated buffer used by the deferred worker to fill a
 *	    customized event report
 * @num_sources: The number of possible sources for this event as stated at
 *		 events' registration time
 * @sources: A reference to a dynamically allocated array used to refcount the
 *	     events' enable requests for all the existing sources
 * @sources_mtx: A mutex to serialize the access to @sources
 *
 * All registered events are represented by one of these structures that are
 * stored in the @registered_events array at protocol registration time.
 *
 * Once these descriptors are successfully registered, they are NEVER again
 * removed or modified since protocols do not unregister ever, so that once we
 * safely grab a NON-NULL reference from the table we can keep it and use it.
 */
struct scmi_registered_event {
	struct scmi_registered_events_desc *proto;
	const struct scmi_event	*evt;
	void		*report;
	u32		num_sources;
	refcount_t	*sources;
	/* locking to serialize the access to sources */
	struct mutex	sources_mtx;
};
/**
 * struct scmi_event_handler - Event handler information
 * @key: The used hashkey
 * @users: A reference count for number of active users for this handler
 * @r_evt: A reference to the associated registered event; when this is NULL
 *	   this handler is pending, which means that it identifies a set of
 *	   callbacks intended to be attached to an event which is still not
 *	   known nor registered by any protocol at that point in time
 * @chain: The notification chain dedicated to this specific event tuple
 * @hash: The hlist_node used for collision handling
 * @enabled: A boolean which records if event's generation has been already
 *	     enabled for this handler as a whole
 *
 * This structure collects all the information needed to process a received
 * event identified by the tuple (proto_id, evt_id, src_id).
 * These descriptors are stored in a per-protocol @registered_events_handlers
 * table using as a key a value derived from that tuple.
 */
struct scmi_event_handler {
	u32 key;
	refcount_t users;
	struct scmi_registered_event *r_evt;
	struct blocking_notifier_head chain;
	struct hlist_node hash;
	bool enabled;
};

#define IS_HNDL_PENDING(hndl)	(!(hndl)->r_evt)

static struct scmi_event_handler *
scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key);
static void scmi_put_active_handler(struct scmi_notify_instance *ni,
				    struct scmi_event_handler *hndl);
static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
				      struct scmi_event_handler *hndl);

/**
 * scmi_lookup_and_call_event_chain() - Lookup the proper chain and call it
 * @ni: A reference to the notification instance to use
 * @evt_key: The key to use to lookup the related notification chain
 * @report: The customized event-specific report to pass down to the callbacks
 *	    as their *data parameter.
 */
static inline void
scmi_lookup_and_call_event_chain(struct scmi_notify_instance *ni,
				 u32 evt_key, void *report)
{
	int ret;
	struct scmi_event_handler *hndl;

	/*
	 * Here ensure the event handler cannot vanish while using it.
	 * It is legitimate, though, for a handler not to be found at all here,
	 * e.g. when it has been unregistered by the user after some events had
	 * already been queued.
	 */
	hndl = scmi_get_active_handler(ni, evt_key);
	if (!hndl)
		return;

	ret = blocking_notifier_call_chain(&hndl->chain,
					   KEY_XTRACT_EVT_ID(evt_key),
					   report);
	/* Notifiers are NOT supposed to cut the chain ... */
	WARN_ON_ONCE(ret & NOTIFY_STOP_MASK);

	scmi_put_active_handler(ni, hndl);
}
/**
 * scmi_process_event_header() - Dequeue and process an event header
 * @eq: The queue to use
 * @pd: The protocol descriptor to use
 *
 * Read an event header from the protocol queue into the dedicated scratch
 * buffer and look for a matching registered event; in case an anomalously
 * sized read is detected, just flush the queue.
 *
 * Return:
 * * a reference to the matching registered event when found
 * * ERR_PTR(-EINVAL) when NO registered event could be found
 * * NULL when the queue is empty
 */
static inline struct scmi_registered_event *
scmi_process_event_header(struct events_queue *eq,
			  struct scmi_registered_events_desc *pd)
{
	unsigned int outs;
	struct scmi_registered_event *r_evt;

	outs = kfifo_out(&eq->kfifo, pd->eh,
			 sizeof(struct scmi_event_header));
	if (!outs)
		return NULL;
	if (outs != sizeof(struct scmi_event_header)) {
		dev_err(pd->ni->handle->dev, "corrupted EVT header. Flush.\n");
		kfifo_reset_out(&eq->kfifo);
		return NULL;
	}

	r_evt = SCMI_GET_REVT_FROM_PD(pd, pd->eh->evt_id);
	if (!r_evt)
		r_evt = ERR_PTR(-EINVAL);

	return r_evt;
}

/**
 * scmi_process_event_payload() - Dequeue and process an event payload
 * @eq: The queue to use
 * @pd: The protocol descriptor to use
 * @r_evt: The registered event descriptor to use
 *
 * Read an event payload from the protocol queue into the dedicated scratch
 * buffer, fill a custom report and then look for matching event handlers and
 * call them; skip any unknown event (as marked by scmi_process_event_header())
 * and in case an anomalously sized read is detected just flush the queue.
 *
 * Return: False when the queue is empty
 */
static inline bool
scmi_process_event_payload(struct events_queue *eq,
			   struct scmi_registered_events_desc *pd,
			   struct scmi_registered_event *r_evt)
{
	u32 src_id, key;
	unsigned int outs;
	void *report = NULL;

	outs = kfifo_out(&eq->kfifo, pd->eh->payld, pd->eh->payld_sz);
	if (!outs)
		return false;

	/* Any in-flight event has now been officially processed */
	pd->in_flight = NULL;

	if (outs != pd->eh->payld_sz) {
		dev_err(pd->ni->handle->dev, "corrupted EVT Payload. Flush.\n");
		kfifo_reset_out(&eq->kfifo);
		return false;
	}

	if (IS_ERR(r_evt)) {
		dev_warn(pd->ni->handle->dev,
			 "SKIP UNKNOWN EVT - proto:%X evt:%d\n",
			 pd->id, pd->eh->evt_id);
		return true;
	}

	report = REVT_FILL_REPORT(r_evt, pd->eh->evt_id, pd->eh->timestamp,
				  pd->eh->payld, pd->eh->payld_sz,
				  r_evt->report, &src_id);
	if (!report) {
		dev_err(pd->ni->handle->dev,
			"report not available - proto:%X evt:%d\n",
			pd->id, pd->eh->evt_id);
		return true;
	}

	/* At first search for a generic ALL src_ids handler... */
	key = MAKE_ALL_SRCS_KEY(pd->id, pd->eh->evt_id);
	scmi_lookup_and_call_event_chain(pd->ni, key, report);

	/* ...then search for any specific src_id */
	key = MAKE_HASH_KEY(pd->id, pd->eh->evt_id, src_id);
	scmi_lookup_and_call_event_chain(pd->ni, key, report);

	return true;
}

/**
 * scmi_events_dispatcher() - Common worker logic for all work items.
 * @work: The work item to use, which is associated to a dedicated events_queue
 *
 * Logic:
 *  1. dequeue one pending RX notification (queued in SCMI RX ISR context)
 *  2. generate a custom event report from the received event message
 *  3. lookup for any registered ALL_SRC_IDs handler:
 *     -> call the related notification chain passing in the report
 *  4. lookup for any registered specific SRC_ID handler:
 *     -> call the related notification chain passing in the report
 *
 * Note that:
 * * a dedicated per-protocol kfifo queue is used: in this way an anomalous
 *   flood of events cannot saturate other protocols' queues.
 * * each per-protocol queue is associated to a distinct work_item, which
 *   means, in turn, that:
 *   + all protocols can process their dedicated queues concurrently
 *     (since notify_wq:max_active != 1)
 *   + anyway at most one worker instance is allowed to run on the same queue
 *     concurrently: this ensures that we can have only one concurrent
 *     reader/writer on the associated kfifo, so that we can use it lock-less
 *
 * Context: Process context.
 */
static void scmi_events_dispatcher(struct work_struct *work)
{
	struct events_queue *eq;
	struct scmi_registered_events_desc *pd;
	struct scmi_registered_event *r_evt;

	eq = container_of(work, struct events_queue, notify_work);
	pd = container_of(eq, struct scmi_registered_events_desc, equeue);
	/*
	 * In order to keep the queue lock-less and the number of memcopies
	 * to the bare minimum needed, the dispatcher accounts for the
	 * possibility of per-protocol in-flight events: i.e. an event whose
	 * reception could end up being split across two subsequent runs of this
	 * worker, first the header, then the payload.
	 */
	do {
		if (!pd->in_flight) {
			r_evt = scmi_process_event_header(eq, pd);
			if (!r_evt)
				break;

			pd->in_flight = r_evt;
		} else {
			r_evt = pd->in_flight;
		}
	} while (scmi_process_event_payload(eq, pd, r_evt));
}
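/*
 * A worked example of the in-flight handling above (timing is illustrative):
 * if a worker run pops a complete header while the ISR has not yet pushed the
 * corresponding payload, scmi_process_event_payload() returns false and the
 * partially read event is parked in pd->in_flight; the next worker run, kicked
 * by the ISR right after the payload lands in the kfifo, skips the header
 * phase and completes the delivery of that same event.
 */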
/**
 * scmi_notify() - Queues a notification for further deferred processing
 * @handle: The handle identifying the platform instance from which the
 *	    dispatched event is generated
 * @proto_id: Protocol ID
 * @evt_id: Event ID (msgID)
 * @buf: Event Message Payload (without the header)
 * @len: Event Message Payload size
 * @ts: RX Timestamp in nanoseconds (boottime)
 *
 * Context: Called in interrupt context to queue a received event for
 * deferred processing.
 *
 * Return: 0 on Success
 */
int scmi_notify(const struct scmi_handle *handle, u8 proto_id, u8 evt_id,
		const void *buf, size_t len, ktime_t ts)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_header eh;
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return 0;

	r_evt = SCMI_GET_REVT(ni, proto_id, evt_id);
	if (!r_evt)
		return -EINVAL;

	if (len > r_evt->evt->max_payld_sz) {
		dev_err(handle->dev, "discard badly sized message\n");
		return -EINVAL;
	}
	if (kfifo_avail(&r_evt->proto->equeue.kfifo) < sizeof(eh) + len) {
		dev_warn(handle->dev,
			 "queue full, dropping proto_id:%d evt_id:%d ts:%lld\n",
			 proto_id, evt_id, ktime_to_ns(ts));
		return -ENOMEM;
	}

	eh.timestamp = ts;
	eh.evt_id = evt_id;
	eh.payld_sz = len;
	/*
	 * Header and payload are enqueued with two distinct kfifo_in() (so non
	 * atomic), but this situation is handled properly on the consumer side
	 * with in-flight events tracking.
	 */
	kfifo_in(&r_evt->proto->equeue.kfifo, &eh, sizeof(eh));
	kfifo_in(&r_evt->proto->equeue.kfifo, buf, len);
	/*
	 * Don't care about return value here since we just want to ensure that
	 * a work item is queued every time some items have been pushed on the
	 * kfifo:
	 * - if work was already queued it will simply fail to queue a new one
	 *   since it is not needed
	 * - if work was not queued already it will be now, even in case work
	 *   was in fact already running: this behavior avoids any possible race
	 *   when this function pushes new items onto the kfifos after the
	 *   related executing worker had already determined the kfifo to be
	 *   empty and it was terminating.
	 */
	queue_work(r_evt->proto->equeue.wq,
		   &r_evt->proto->equeue.notify_work);

	return 0;
}
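/*
 * A minimal caller-side sketch (illustrative, under assumed transport code):
 * a transport RX path that has just decoded a notification message header
 * into hypothetical proto_id/evt_id variables could feed this core with:
 *
 *	scmi_notify(handle, proto_id, evt_id, payld, payld_len,
 *		    ktime_get_boottime());
 *
 * where the boottime timestamp should be taken as early as possible on
 * message reception.
 */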
/**
 * scmi_kfifo_free() - Devres action helper to free the kfifo
 * @kfifo: The kfifo to free
 */
static void scmi_kfifo_free(void *kfifo)
{
	kfifo_free((struct kfifo *)kfifo);
}

/**
 * scmi_initialize_events_queue() - Allocate/Initialize a kfifo buffer
 * @ni: A reference to the notification instance to use
 * @equeue: The events_queue to initialize
 * @sz: Size of the kfifo buffer to allocate
 *
 * Allocate a buffer for the kfifo and initialize it.
 *
 * Return: 0 on Success
 */
static int scmi_initialize_events_queue(struct scmi_notify_instance *ni,
					struct events_queue *equeue, size_t sz)
{
	int ret;

	if (kfifo_alloc(&equeue->kfifo, sz, GFP_KERNEL))
		return -ENOMEM;
	/* Size could have been rounded up to a power of two */
	equeue->sz = kfifo_size(&equeue->kfifo);

	ret = devm_add_action_or_reset(ni->handle->dev, scmi_kfifo_free,
				       &equeue->kfifo);
	if (ret)
		return ret;

	INIT_WORK(&equeue->notify_work, scmi_events_dispatcher);
	equeue->wq = ni->notify_wq;

	return ret;
}

/**
 * scmi_allocate_registered_events_desc() - Allocate a registered events'
 * descriptor
 * @ni: A reference to the &struct scmi_notify_instance notification instance
 *	to use
 * @proto_id: Protocol ID
 * @queue_sz: Size of the associated queue to allocate
 * @eh_sz: Size of the event header scratch area to pre-allocate
 * @num_events: Number of events to support (size of @registered_events)
 * @ops: Pointer to a struct holding references to protocol specific helpers
 *	 needed during events handling
 *
 * It is supposed to be called only once for each protocol at protocol
 * initialization time, so it warns if the requested protocol is found already
 * registered.
 *
 * Return: The allocated and registered descriptor on Success
 */
static struct scmi_registered_events_desc *
scmi_allocate_registered_events_desc(struct scmi_notify_instance *ni,
				     u8 proto_id, size_t queue_sz, size_t eh_sz,
				     int num_events,
				     const struct scmi_event_ops *ops)
{
	int ret;
	struct scmi_registered_events_desc *pd;

	/* Ensure protocols are up to date */
	smp_rmb();
	if (WARN_ON(ni->registered_protocols[proto_id]))
		return ERR_PTR(-EINVAL);

	pd = devm_kzalloc(ni->handle->dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);
	pd->id = proto_id;
	pd->ops = ops;
	pd->ni = ni;

	ret = scmi_initialize_events_queue(ni, &pd->equeue, queue_sz);
	if (ret)
		return ERR_PTR(ret);

	pd->eh = devm_kzalloc(ni->handle->dev, eh_sz, GFP_KERNEL);
	if (!pd->eh)
		return ERR_PTR(-ENOMEM);
	pd->eh_sz = eh_sz;

	pd->registered_events = devm_kcalloc(ni->handle->dev, num_events,
					     sizeof(char *), GFP_KERNEL);
	if (!pd->registered_events)
		return ERR_PTR(-ENOMEM);
	pd->num_events = num_events;

	/* Initialize per protocol handlers table */
	mutex_init(&pd->registered_mtx);
	hash_init(pd->registered_events_handlers);

	return pd;
}
/**
 * scmi_register_protocol_events() - Register Protocol Events with the core
 * @handle: The handle identifying the platform instance against which the
 *	    protocol's events are registered
 * @proto_id: Protocol ID
 * @ph: SCMI protocol handle.
 * @ee: A structure describing the events supported by this protocol.
 *
 * Used by SCMI Protocols initialization code to register with the notification
 * core the list of supported events and their descriptors: takes care to
 * pre-allocate and store all needed descriptors, scratch buffers and event
 * queues.
 *
 * Return: 0 on Success
 */
int scmi_register_protocol_events(const struct scmi_handle *handle, u8 proto_id,
				  const struct scmi_protocol_handle *ph,
				  const struct scmi_protocol_events *ee)
{
	int i;
	unsigned int num_sources;
	size_t payld_sz = 0;
	struct scmi_registered_events_desc *pd;
	struct scmi_notify_instance *ni;
	const struct scmi_event *evt;

	if (!ee || !ee->ops || !ee->evts || !ph ||
	    (!ee->num_sources && !ee->ops->get_num_sources))
		return -EINVAL;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENOMEM;

	/* num_sources cannot be <= 0 */
	if (ee->num_sources) {
		num_sources = ee->num_sources;
	} else {
		int nsrc = ee->ops->get_num_sources(ph);

		if (nsrc <= 0)
			return -EINVAL;
		num_sources = nsrc;
	}

	evt = ee->evts;
	for (i = 0; i < ee->num_events; i++)
		payld_sz = max_t(size_t, payld_sz, evt[i].max_payld_sz);
	payld_sz += sizeof(struct scmi_event_header);

	pd = scmi_allocate_registered_events_desc(ni, proto_id, ee->queue_sz,
						  payld_sz, ee->num_events,
						  ee->ops);
	if (IS_ERR(pd))
		return PTR_ERR(pd);

	pd->ph = ph;
	for (i = 0; i < ee->num_events; i++, evt++) {
		int id;
		struct scmi_registered_event *r_evt;

		r_evt = devm_kzalloc(ni->handle->dev, sizeof(*r_evt),
				     GFP_KERNEL);
		if (!r_evt)
			return -ENOMEM;
		r_evt->proto = pd;
		r_evt->evt = evt;

		r_evt->sources = devm_kcalloc(ni->handle->dev, num_sources,
					      sizeof(refcount_t), GFP_KERNEL);
		if (!r_evt->sources)
			return -ENOMEM;
		r_evt->num_sources = num_sources;
		mutex_init(&r_evt->sources_mtx);

		r_evt->report = devm_kzalloc(ni->handle->dev,
					     evt->max_report_sz, GFP_KERNEL);
		if (!r_evt->report)
			return -ENOMEM;

		for (id = 0; id < r_evt->num_sources; id++)
			if (ee->ops->is_notify_supported &&
			    !ee->ops->is_notify_supported(ph, r_evt->evt->id, id))
				refcount_set(&r_evt->sources[id], NOTIF_UNSUPP);

		pd->registered_events[i] = r_evt;
		/* Ensure events are updated */
		smp_wmb();
		dev_dbg(handle->dev, "registered event - %lX\n",
			MAKE_ALL_SRCS_KEY(r_evt->proto->id, r_evt->evt->id));
	}

	/* Register protocol and events...it will never be removed */
	ni->registered_protocols[proto_id] = pd;
	/* Ensure protocols are updated */
	smp_wmb();

	/*
	 * Finalize any pending events' handler which could have been waiting
	 * for this protocol's events registration.
	 */
	schedule_work(&ni->init_work);

	return 0;
}
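/*
 * A protocol-side registration sketch (illustrative; the event IDs, sizes and
 * dummy_* names are hypothetical placeholders, while the scmi_event and
 * scmi_protocol_events layouts are the ones declared in notify.h):
 *
 *	static const struct scmi_event dummy_events[] = {
 *		{
 *			.id = 0x0,
 *			.max_payld_sz = sizeof(struct dummy_event_payld),
 *			.max_report_sz = sizeof(struct dummy_event_report),
 *		},
 *	};
 *
 *	static const struct scmi_protocol_events dummy_protocol_events = {
 *		.queue_sz = 16 * sizeof(struct dummy_event_payld),
 *		.ops = &dummy_event_ops,
 *		.evts = dummy_events,
 *		.num_events = ARRAY_SIZE(dummy_events),
 *		.num_sources = 4,
 *	};
 *
 * to be then passed to scmi_register_protocol_events() at protocol
 * initialization time.
 */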
/**
 * scmi_deregister_protocol_events - Deregister protocol events with the core
 * @handle: The handle identifying the platform instance against which the
 *	    protocol's events are registered
 * @proto_id: Protocol ID
 */
void scmi_deregister_protocol_events(const struct scmi_handle *handle,
				     u8 proto_id)
{
	struct scmi_notify_instance *ni;
	struct scmi_registered_events_desc *pd;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return;

	pd = ni->registered_protocols[proto_id];
	if (!pd)
		return;

	ni->registered_protocols[proto_id] = NULL;
	/* Ensure protocols are updated */
	smp_wmb();

	cancel_work_sync(&pd->equeue.notify_work);
}

/**
 * scmi_allocate_event_handler() - Allocate Event handler
 * @ni: A reference to the notification instance to use
 * @evt_key: 32bit key uniquely bound to the event identified by the tuple
 *	     (proto_id, evt_id, src_id)
 *
 * Allocate an event handler and related notification chain associated with
 * the provided event handler key.
 * Note that, at this point, a related registered_event is still to be
 * associated to this handler descriptor (hndl->r_evt == NULL), so the handler
 * is initialized as pending.
 *
 * Context: Assumes to be called with @pending_mtx already acquired.
 * Return: the freshly allocated structure on Success
 */
static struct scmi_event_handler *
scmi_allocate_event_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	struct scmi_event_handler *hndl;

	hndl = kzalloc(sizeof(*hndl), GFP_KERNEL);
	if (!hndl)
		return NULL;
	hndl->key = evt_key;
	BLOCKING_INIT_NOTIFIER_HEAD(&hndl->chain);
	refcount_set(&hndl->users, 1);
	/* New handlers are created pending */
	hash_add(ni->pending_events_handlers, &hndl->hash, hndl->key);

	return hndl;
}
/**
 * scmi_free_event_handler() - Free the provided Event handler
 * @hndl: The event handler structure to free
 *
 * Context: Assumes to be called with proper locking acquired depending
 *	    on the situation.
 */
static void scmi_free_event_handler(struct scmi_event_handler *hndl)
{
	hash_del(&hndl->hash);
	kfree(hndl);
}

/**
 * scmi_bind_event_handler() - Helper to attempt binding a handler to an event
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to bind
 *
 * If an associated registered event is found, move the handler from the
 * pending table into the registered one.
 *
 * Context: Assumes to be called with @pending_mtx already acquired.
 *
 * Return: 0 on Success
 */
static inline int scmi_bind_event_handler(struct scmi_notify_instance *ni,
					  struct scmi_event_handler *hndl)
{
	struct scmi_registered_event *r_evt;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(hndl->key),
			      KEY_XTRACT_EVT_ID(hndl->key));
	if (!r_evt)
		return -EINVAL;

	/*
	 * Remove from pending and insert into registered while getting hold
	 * of protocol instance.
	 */
	hash_del(&hndl->hash);
	/*
	 * Acquire protocols only for NON pending handlers, so as NOT to trigger
	 * protocol initialization when a notifier is registered against a still
	 * not registered protocol, since it would make little sense to force
	 * init protocols for which no SCMI driver user exists yet: they would
	 * not emit any event anyway until some SCMI driver starts using them.
	 */
	scmi_protocol_acquire(ni->handle, KEY_XTRACT_PROTO_ID(hndl->key));
	hndl->r_evt = r_evt;

	mutex_lock(&r_evt->proto->registered_mtx);
	hash_add(r_evt->proto->registered_events_handlers,
		 &hndl->hash, hndl->key);
	mutex_unlock(&r_evt->proto->registered_mtx);

	return 0;
}

/**
 * scmi_valid_pending_handler() - Helper to check pending status of handlers
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to check
 *
 * A handler is considered pending when its r_evt == NULL, because the related
 * event was still unknown at handler's registration time; anyway, since all
 * protocols register their supported events once and for all at protocols'
 * initialization time, a pending handler cannot be considered valid anymore if
 * the underlying event which it is waiting for belongs to an already
 * initialized and registered protocol.
 *
 * Return: 0 on Success
 */
static inline int scmi_valid_pending_handler(struct scmi_notify_instance *ni,
					     struct scmi_event_handler *hndl)
{
	struct scmi_registered_events_desc *pd;

	if (!IS_HNDL_PENDING(hndl))
		return -EINVAL;

	pd = SCMI_GET_PROTO(ni, KEY_XTRACT_PROTO_ID(hndl->key));
	if (pd)
		return -EINVAL;

	return 0;
}
/**
 * scmi_register_event_handler() - Register whenever possible an Event handler
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to register
 *
 * At first try to bind an event handler to its associated event, then check if
 * it was at least a valid pending handler: if it could be neither bound nor
 * considered a valid pending one, return an error.
 *
 * Valid pending incomplete bindings will be periodically retried by a dedicated
 * worker which is kicked each time a new protocol completes its own
 * registration phase.
 *
 * Context: Assumes to be called with @pending_mtx acquired.
 *
 * Return: 0 on Success
 */
static int scmi_register_event_handler(struct scmi_notify_instance *ni,
				       struct scmi_event_handler *hndl)
{
	int ret;

	ret = scmi_bind_event_handler(ni, hndl);
	if (!ret) {
		dev_dbg(ni->handle->dev, "registered NEW handler - key:%X\n",
			hndl->key);
	} else {
		ret = scmi_valid_pending_handler(ni, hndl);
		if (!ret)
			dev_dbg(ni->handle->dev,
				"registered PENDING handler - key:%X\n",
				hndl->key);
	}

	return ret;
}
/**
 * __scmi_event_handler_get_ops() - Utility to get or create an event handler
 * @ni: A reference to the notification instance to use
 * @evt_key: The event key to use
 * @create: A boolean flag to specify if a handler must be created when
 *	    not already existent
 *
 * Search for the desired handler matching the key in both the per-protocol
 * registered table and the common pending table:
 * * if found adjust users refcount
 * * if not found and @create is true, create and register the new handler:
 *   handler could end up being registered as pending if no matching event
 *   could be found.
 *
 * A handler is guaranteed to reside in one and only one of the tables at
 * any one time; to ensure this the whole search and create is performed
 * holding the @pending_mtx lock, with @registered_mtx additionally acquired
 * if needed.
 *
 * Note that when a nested acquisition of these mutexes is needed the locking
 * order is always (same as in @init_work):
 * 1. pending_mtx
 * 2. registered_mtx
 *
 * Events generation is NOT enabled right after creation within this routine
 * since at creation time we usually want to have all setup and ready before
 * events really start flowing.
 *
 * Return: A properly refcounted handler on Success, NULL on Failure
 */
static inline struct scmi_event_handler *
__scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
			     u32 evt_key, bool create)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_handler *hndl = NULL;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
			      KEY_XTRACT_EVT_ID(evt_key));

	mutex_lock(&ni->pending_mtx);
	/* Search registered events at first ... if possible at all */
	if (r_evt) {
		mutex_lock(&r_evt->proto->registered_mtx);
		hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
				hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
		mutex_unlock(&r_evt->proto->registered_mtx);
	}

	/* ...then amongst pending. */
	if (!hndl) {
		hndl = KEY_FIND(ni->pending_events_handlers, hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
	}

	/* Create if still not found and required */
	if (!hndl && create) {
		hndl = scmi_allocate_event_handler(ni, evt_key);
		if (hndl && scmi_register_event_handler(ni, hndl)) {
			dev_dbg(ni->handle->dev,
				"purging UNKNOWN handler - key:%X\n",
				hndl->key);
			/* this hndl can be only a pending one */
			scmi_put_handler_unlocked(ni, hndl);
			hndl = NULL;
		}
	}
	mutex_unlock(&ni->pending_mtx);

	return hndl;
}

static struct scmi_event_handler *
scmi_get_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	return __scmi_event_handler_get_ops(ni, evt_key, false);
}

static struct scmi_event_handler *
scmi_get_or_create_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	return __scmi_event_handler_get_ops(ni, evt_key, true);
}

/**
 * scmi_get_active_handler() - Helper to get active handlers only
 * @ni: A reference to the notification instance to use
 * @evt_key: The event key to use
 *
 * Search for the desired handler matching the key only in the per-protocol
 * table of registered handlers: this is called only from the dispatching path
 * so we want to be as quick as possible and do not care about pending handlers.
 *
 * Return: A properly refcounted active handler
 */
static struct scmi_event_handler *
scmi_get_active_handler(struct scmi_notify_instance *ni, u32 evt_key)
{
	struct scmi_registered_event *r_evt;
	struct scmi_event_handler *hndl = NULL;

	r_evt = SCMI_GET_REVT(ni, KEY_XTRACT_PROTO_ID(evt_key),
			      KEY_XTRACT_EVT_ID(evt_key));
	if (r_evt) {
		mutex_lock(&r_evt->proto->registered_mtx);
		hndl = KEY_FIND(r_evt->proto->registered_events_handlers,
				hndl, evt_key);
		if (hndl)
			refcount_inc(&hndl->users);
		mutex_unlock(&r_evt->proto->registered_mtx);
	}

	return hndl;
}
/**
 * __scmi_enable_evt() - Enable/disable events generation
 * @r_evt: The registered event to act upon
 * @src_id: The src_id to act upon
 * @enable: The action to perform: true->Enable, false->Disable
 *
 * Takes care of proper refcounting while performing enable/disable: handles
 * the special case of ALL sources requests by itself.
 * Returns successfully if at least one of the required src_ids has been
 * successfully enabled/disabled.
 *
 * Return: 0 on Success
 */
static inline int __scmi_enable_evt(struct scmi_registered_event *r_evt,
				    u32 src_id, bool enable)
{
	int retvals = 0;
	u32 num_sources;
	refcount_t *sid;

	if (src_id == SRC_ID_MASK) {
		src_id = 0;
		num_sources = r_evt->num_sources;
	} else if (src_id < r_evt->num_sources) {
		num_sources = 1;
	} else {
		return -EINVAL;
	}

	mutex_lock(&r_evt->sources_mtx);
	if (enable) {
		for (; num_sources; src_id++, num_sources--) {
			int ret = 0;

			sid = &r_evt->sources[src_id];
			if (refcount_read(sid) == NOTIF_UNSUPP) {
				dev_dbg(r_evt->proto->ph->dev,
					"Notification NOT supported - proto_id:%d evt_id:%d src_id:%d",
					r_evt->proto->id, r_evt->evt->id,
					src_id);
				ret = -EOPNOTSUPP;
			} else if (refcount_read(sid) == 0) {
				ret = REVT_NOTIFY_ENABLE(r_evt, r_evt->evt->id,
							 src_id);
				if (!ret)
					refcount_set(sid, 1);
			} else {
				refcount_inc(sid);
			}
			retvals += !ret;
		}
	} else {
		for (; num_sources; src_id++, num_sources--) {
			sid = &r_evt->sources[src_id];
			if (refcount_read(sid) == NOTIF_UNSUPP)
				continue;
			if (refcount_dec_and_test(sid))
				REVT_NOTIFY_DISABLE(r_evt,
						    r_evt->evt->id, src_id);
		}
		retvals = 1;
	}
	mutex_unlock(&r_evt->sources_mtx);

	return retvals ? 0 : -EINVAL;
}
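/*
 * A worked refcounting example for the logic above (values are illustrative):
 * with three users registered against the same (proto_id, evt_id, src_id),
 * sources[src_id] moves 0 -> 1 -> 2 -> 3, and only the first transition
 * triggers a platform-level REVT_NOTIFY_ENABLE(); symmetrically, on the
 * disable path only the final 1 -> 0 transition issues the corresponding
 * REVT_NOTIFY_DISABLE(), while a source marked NOTIF_UNSUPP is never touched.
 */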
static int scmi_enable_events(struct scmi_event_handler *hndl)
{
	int ret = 0;

	if (!hndl->enabled) {
		ret = __scmi_enable_evt(hndl->r_evt,
					KEY_XTRACT_SRC_ID(hndl->key), true);
		if (!ret)
			hndl->enabled = true;
	}

	return ret;
}

static int scmi_disable_events(struct scmi_event_handler *hndl)
{
	int ret = 0;

	if (hndl->enabled) {
		ret = __scmi_enable_evt(hndl->r_evt,
					KEY_XTRACT_SRC_ID(hndl->key), false);
		if (!ret)
			hndl->enabled = false;
	}

	return ret;
}

/**
 * scmi_put_handler_unlocked() - Put an event handler
 * @ni: A reference to the notification instance to use
 * @hndl: The event handler to act upon
 *
 * After having got exclusive access to the registered handlers hashtable,
 * update the refcount and if @hndl is no longer in use by anyone:
 * * ask for events' generation disabling
 * * unregister and free the handler itself
 *
 * Context: Assumes all the proper locking has been managed by the caller.
 *
 * Return: True if handler was freed (users dropped to zero)
 */
static bool scmi_put_handler_unlocked(struct scmi_notify_instance *ni,
				      struct scmi_event_handler *hndl)
{
	bool freed = false;

	if (refcount_dec_and_test(&hndl->users)) {
		if (!IS_HNDL_PENDING(hndl))
			scmi_disable_events(hndl);
		scmi_free_event_handler(hndl);
		freed = true;
	}

	return freed;
}

static void scmi_put_handler(struct scmi_notify_instance *ni,
			     struct scmi_event_handler *hndl)
{
	bool freed;
	u8 protocol_id;
	struct scmi_registered_event *r_evt = hndl->r_evt;

	mutex_lock(&ni->pending_mtx);
	if (r_evt) {
		protocol_id = r_evt->proto->id;
		mutex_lock(&r_evt->proto->registered_mtx);
	}

	freed = scmi_put_handler_unlocked(ni, hndl);

	if (r_evt) {
		mutex_unlock(&r_evt->proto->registered_mtx);
		/*
		 * Only a registered handler acquired the protocol; it must be
		 * released here only AFTER unlocking registered_mtx, since
		 * releasing a protocol can trigger its de-initialization
		 * (ie. including r_evt and registered_mtx)
		 */
		if (freed)
			scmi_protocol_release(ni->handle, protocol_id);
	}
	mutex_unlock(&ni->pending_mtx);
}

static void scmi_put_active_handler(struct scmi_notify_instance *ni,
				    struct scmi_event_handler *hndl)
{
	bool freed;
	struct scmi_registered_event *r_evt = hndl->r_evt;
	u8 protocol_id = r_evt->proto->id;

	mutex_lock(&r_evt->proto->registered_mtx);
	freed = scmi_put_handler_unlocked(ni, hndl);
	mutex_unlock(&r_evt->proto->registered_mtx);
	if (freed)
		scmi_protocol_release(ni->handle, protocol_id);
}
/**
 * scmi_event_handler_enable_events() - Enable events associated to a handler
 * @hndl: The Event handler to act upon
 *
 * Return: 0 on Success
 */
static int scmi_event_handler_enable_events(struct scmi_event_handler *hndl)
{
	if (scmi_enable_events(hndl)) {
		pr_err("Failed to ENABLE events for key:%X !\n", hndl->key);
		return -EINVAL;
	}

	return 0;
}

/**
 * scmi_notifier_register() - Register a notifier_block for an event
 * @handle: The handle identifying the platform instance against which the
 *	    callback is registered
 * @proto_id: Protocol ID
 * @evt_id: Event ID
 * @src_id: Source ID, when NULL register for events coming from ALL possible
 *	    sources
 * @nb: A standard notifier block to register for the specified event
 *
 * Generic helper to register a notifier_block against a protocol event.
 *
 * A notifier_block @nb will be registered for each distinct event identified
 * by the tuple (proto_id, evt_id, src_id) on a dedicated notification chain
 * so that:
 *
 *	(proto_X, evt_Y, src_Z) --> chain_X_Y_Z
 *
 * @src_id meaning is protocol specific and identifies the origin of the event
 * (like domain_id, sensor_id and so forth).
 *
 * @src_id can be NULL to signify that the caller is interested in receiving
 * notifications from ALL the available sources for that protocol OR simply
 * that the protocol does not support distinct sources.
 *
 * As soon as one user for the specified tuple appears, a handler is created,
 * and that specific event's generation is enabled at the platform level, unless
 * an associated registered event is found missing, meaning that the needed
 * protocol is still to be initialized and the handler has just been registered
 * as still pending.
 *
 * Return: 0 on Success
 */
static int scmi_notifier_register(const struct scmi_handle *handle,
				  u8 proto_id, u8 evt_id, const u32 *src_id,
				  struct notifier_block *nb)
{
	int ret = 0;
	u32 evt_key;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENODEV;

	evt_key = MAKE_HASH_KEY(proto_id, evt_id,
				src_id ? *src_id : SRC_ID_MASK);
	hndl = scmi_get_or_create_handler(ni, evt_key);
	if (!hndl)
		return -EINVAL;

	blocking_notifier_chain_register(&hndl->chain, nb);

	/* Enable events for not pending handlers */
	if (!IS_HNDL_PENDING(hndl)) {
		ret = scmi_event_handler_enable_events(hndl);
		if (ret)
			scmi_put_handler(ni, hndl);
	}

	return ret;
}

/**
 * scmi_notifier_unregister() - Unregister a notifier_block for an event
 * @handle: The handle identifying the platform instance against which the
 *	    callback is unregistered
 * @proto_id: Protocol ID
 * @evt_id: Event ID
 * @src_id: Source ID
 * @nb: The notifier_block to unregister
 *
 * Takes care to unregister the provided @nb from the notification chain
 * associated to the specified event and, if there are no more users for the
 * event handler, frees also the associated event handler structures.
 * (this could possibly cause disabling of event's generation at platform level)
 *
 * Return: 0 on Success
 */
static int scmi_notifier_unregister(const struct scmi_handle *handle,
				    u8 proto_id, u8 evt_id, const u32 *src_id,
				    struct notifier_block *nb)
{
	u32 evt_key;
	struct scmi_event_handler *hndl;
	struct scmi_notify_instance *ni;

	ni = scmi_notification_instance_data_get(handle);
	if (!ni)
		return -ENODEV;

	evt_key = MAKE_HASH_KEY(proto_id, evt_id,
				src_id ? *src_id : SRC_ID_MASK);
	hndl = scmi_get_handler(ni, evt_key);
	if (!hndl)
		return -EINVAL;

	/*
	 * Note that this chain unregistration call is safe on its own
	 * being internally protected by an rwsem.
	 */
	blocking_notifier_chain_unregister(&hndl->chain, nb);
	scmi_put_handler(ni, hndl);

	/*
	 * This balances the initial get issued in @scmi_notifier_register.
	 * If this notifier_block happened to be the last known user callback
	 * for this event, the handler is here freed and the event's generation
	 * stopped.
	 *
	 * Note that, an ongoing concurrent lookup on the delivery workqueue
	 * path could still hold the refcount at 1 even after this routine
	 * completes: in such a case it will be the final put on the delivery
	 * path which will finally free this unused handler.
	 */
	scmi_put_handler(ni, hndl);

	return 0;
}
1439 */ 1440 scmi_put_handler(ni, hndl); 1441 1442 return 0; 1443 } 1444 1445 struct scmi_notifier_devres { 1446 const struct scmi_handle *handle; 1447 u8 proto_id; 1448 u8 evt_id; 1449 u32 __src_id; 1450 u32 *src_id; 1451 struct notifier_block *nb; 1452 }; 1453 1454 static void scmi_devm_release_notifier(struct device *dev, void *res) 1455 { 1456 struct scmi_notifier_devres *dres = res; 1457 1458 scmi_notifier_unregister(dres->handle, dres->proto_id, dres->evt_id, 1459 dres->src_id, dres->nb); 1460 } 1461 1462 /** 1463 * scmi_devm_notifier_register() - Managed registration of a notifier_block 1464 * for an event 1465 * @sdev: A reference to an scmi_device whose embedded struct device is to 1466 * be used for devres accounting. 1467 * @proto_id: Protocol ID 1468 * @evt_id: Event ID 1469 * @src_id: Source ID, when NULL register for events coming form ALL possible 1470 * sources 1471 * @nb: A standard notifier block to register for the specified event 1472 * 1473 * Generic devres managed helper to register a notifier_block against a 1474 * protocol event. 1475 * 1476 * Return: 0 on Success 1477 */ 1478 static int scmi_devm_notifier_register(struct scmi_device *sdev, 1479 u8 proto_id, u8 evt_id, 1480 const u32 *src_id, 1481 struct notifier_block *nb) 1482 { 1483 int ret; 1484 struct scmi_notifier_devres *dres; 1485 1486 dres = devres_alloc(scmi_devm_release_notifier, 1487 sizeof(*dres), GFP_KERNEL); 1488 if (!dres) 1489 return -ENOMEM; 1490 1491 ret = scmi_notifier_register(sdev->handle, proto_id, 1492 evt_id, src_id, nb); 1493 if (ret) { 1494 devres_free(dres); 1495 return ret; 1496 } 1497 1498 dres->handle = sdev->handle; 1499 dres->proto_id = proto_id; 1500 dres->evt_id = evt_id; 1501 dres->nb = nb; 1502 if (src_id) { 1503 dres->__src_id = *src_id; 1504 dres->src_id = &dres->__src_id; 1505 } else { 1506 dres->src_id = NULL; 1507 } 1508 devres_add(&sdev->dev, dres); 1509 1510 return ret; 1511 } 1512 1513 static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) 1514 { 1515 struct scmi_notifier_devres *dres = res; 1516 struct notifier_block *nb = data; 1517 1518 if (WARN_ON(!dres || !nb)) 1519 return 0; 1520 1521 return dres->nb == nb; 1522 } 1523 1524 /** 1525 * scmi_devm_notifier_unregister() - Managed un-registration of a 1526 * notifier_block for an event 1527 * @sdev: A reference to an scmi_device whose embedded struct device is to 1528 * be used for devres accounting. 1529 * @nb: A standard notifier block to register for the specified event 1530 * 1531 * Generic devres managed helper to explicitly un-register a notifier_block 1532 * against a protocol event, which was previously registered using the above 1533 * @scmi_devm_notifier_register. 
1534 * 1535 * Return: 0 on Success 1536 */ 1537 static int scmi_devm_notifier_unregister(struct scmi_device *sdev, 1538 struct notifier_block *nb) 1539 { 1540 int ret; 1541 1542 ret = devres_release(&sdev->dev, scmi_devm_release_notifier, 1543 scmi_devm_notifier_match, nb); 1544 1545 WARN_ON(ret); 1546 1547 return ret; 1548 } 1549 1550 /** 1551 * scmi_protocols_late_init() - Worker for late initialization 1552 * @work: The work item to use associated to the proper SCMI instance 1553 * 1554 * This kicks in whenever a new protocol has completed its own registration via 1555 * scmi_register_protocol_events(): it is in charge of scanning the table of 1556 * pending handlers (registered by users while the related protocol was still 1557 * not initialized) and finalizing their initialization whenever possible; 1558 * invalid pending handlers are purged at this point in time. 1559 */ 1560 static void scmi_protocols_late_init(struct work_struct *work) 1561 { 1562 int bkt; 1563 struct scmi_event_handler *hndl; 1564 struct scmi_notify_instance *ni; 1565 struct hlist_node *tmp; 1566 1567 ni = container_of(work, struct scmi_notify_instance, init_work); 1568 1569 /* Ensure protocols and events are up to date */ 1570 smp_rmb(); 1571 1572 mutex_lock(&ni->pending_mtx); 1573 hash_for_each_safe(ni->pending_events_handlers, bkt, tmp, hndl, hash) { 1574 int ret; 1575 1576 ret = scmi_bind_event_handler(ni, hndl); 1577 if (!ret) { 1578 dev_dbg(ni->handle->dev, 1579 "finalized PENDING handler - key:%X\n", 1580 hndl->key); 1581 ret = scmi_event_handler_enable_events(hndl); 1582 if (ret) { 1583 dev_dbg(ni->handle->dev, 1584 "purging INVALID handler - key:%X\n", 1585 hndl->key); 1586 scmi_put_active_handler(ni, hndl); 1587 } 1588 } else { 1589 ret = scmi_valid_pending_handler(ni, hndl); 1590 if (ret) { 1591 dev_dbg(ni->handle->dev, 1592 "purging PENDING handler - key:%X\n", 1593 hndl->key); 1594 /* this hndl can be only a pending one */ 1595 scmi_put_handler_unlocked(ni, hndl); 1596 } 1597 } 1598 } 1599 mutex_unlock(&ni->pending_mtx); 1600 } 1601 1602 /* 1603 * notify_ops are attached to the handle so that can be accessed 1604 * directly from an scmi_driver to register its own notifiers. 1605 */ 1606 static const struct scmi_notify_ops notify_ops = { 1607 .devm_event_notifier_register = scmi_devm_notifier_register, 1608 .devm_event_notifier_unregister = scmi_devm_notifier_unregister, 1609 .event_notifier_register = scmi_notifier_register, 1610 .event_notifier_unregister = scmi_notifier_unregister, 1611 }; 1612 1613 /** 1614 * scmi_notification_init() - Initializes Notification Core Support 1615 * @handle: The handle identifying the platform instance to initialize 1616 * 1617 * This function lays out all the basic resources needed by the notification 1618 * core instance identified by the provided handle: once done, all of the 1619 * SCMI Protocols can register their events with the core during their own 1620 * initializations. 1621 * 1622 * Note that failing to initialize the core notifications support does not 1623 * cause the whole SCMI Protocols stack to fail its initialization. 
1624 * 1625 * SCMI Notification Initialization happens in 2 steps: 1626 * * initialization: basic common allocations (this function) 1627 * * registration: protocols asynchronously come into life and registers their 1628 * own supported list of events with the core; this causes 1629 * further per-protocol allocations 1630 * 1631 * Any user's callback registration attempt, referring a still not registered 1632 * event, will be registered as pending and finalized later (if possible) 1633 * by scmi_protocols_late_init() work. 1634 * This allows for lazy initialization of SCMI Protocols due to late (or 1635 * missing) SCMI drivers' modules loading. 1636 * 1637 * Return: 0 on Success 1638 */ 1639 int scmi_notification_init(struct scmi_handle *handle) 1640 { 1641 void *gid; 1642 struct scmi_notify_instance *ni; 1643 1644 gid = devres_open_group(handle->dev, NULL, GFP_KERNEL); 1645 if (!gid) 1646 return -ENOMEM; 1647 1648 ni = devm_kzalloc(handle->dev, sizeof(*ni), GFP_KERNEL); 1649 if (!ni) 1650 goto err; 1651 1652 ni->gid = gid; 1653 ni->handle = handle; 1654 1655 ni->registered_protocols = devm_kcalloc(handle->dev, SCMI_MAX_PROTO, 1656 sizeof(char *), GFP_KERNEL); 1657 if (!ni->registered_protocols) 1658 goto err; 1659 1660 ni->notify_wq = alloc_workqueue(dev_name(handle->dev), 1661 WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS, 1662 0); 1663 if (!ni->notify_wq) 1664 goto err; 1665 1666 mutex_init(&ni->pending_mtx); 1667 hash_init(ni->pending_events_handlers); 1668 1669 INIT_WORK(&ni->init_work, scmi_protocols_late_init); 1670 1671 scmi_notification_instance_data_set(handle, ni); 1672 handle->notify_ops = ¬ify_ops; 1673 /* Ensure handle is up to date */ 1674 smp_wmb(); 1675 1676 dev_info(handle->dev, "Core Enabled.\n"); 1677 1678 devres_close_group(handle->dev, ni->gid); 1679 1680 return 0; 1681 1682 err: 1683 dev_warn(handle->dev, "Initialization Failed.\n"); 1684 devres_release_group(handle->dev, gid); 1685 return -ENOMEM; 1686 } 1687 1688 /** 1689 * scmi_notification_exit() - Shutdown and clean Notification core 1690 * @handle: The handle identifying the platform instance to shutdown 1691 */ 1692 void scmi_notification_exit(struct scmi_handle *handle) 1693 { 1694 struct scmi_notify_instance *ni; 1695 1696 ni = scmi_notification_instance_data_get(handle); 1697 if (!ni) 1698 return; 1699 scmi_notification_instance_data_set(handle, NULL); 1700 1701 /* Destroy while letting pending work complete */ 1702 destroy_workqueue(ni->notify_wq); 1703 1704 devres_release_group(ni->handle->dev, ni->gid); 1705 } 1706