// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2000, 2009
 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
 *	      Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/atomic.h>
#include <linux/rculist.h>

#include <asm/debug.h>
#include <asm/qdio.h>
#include <asm/airq.h>
#include <asm/isc.h>

#include "cio.h"
#include "ioasm.h"
#include "qdio.h"
#include "qdio_debug.h"

/*
 * Restriction: only 63 iqdio subchannels can have their own indicator;
 * after that, subsequent subchannels share one indicator.
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
#define TIQDIO_SHARED_IND		63

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};

/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
static DEFINE_MUTEX(tiq_list_lock);

static struct indicator_t *q_indicators;

u64 last_ai_time;

/* returns addr for the device state change indicator */
static u32 *get_indicator(void)
{
	int i;

	for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++)
		if (!atomic_cmpxchg(&q_indicators[i].count, 0, 1))
			return &q_indicators[i].ind;

	/* use the shared indicator */
	atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count);
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}

static void put_indicator(u32 *addr)
{
	struct indicator_t *ind = container_of(addr, struct indicator_t, ind);

	if (!addr)
		return;
	atomic_dec(&ind->count);
}

/* add a qdio device to the list served by the thin interrupt handler */
void tiqdio_add_device(struct qdio_irq *irq_ptr)
{
	mutex_lock(&tiq_list_lock);
	list_add_rcu(&irq_ptr->entry, &tiq_list);
	mutex_unlock(&tiq_list_lock);
}

/* remove a qdio device and wait until concurrent handlers are done with it */
void tiqdio_remove_device(struct qdio_irq *irq_ptr)
{
	mutex_lock(&tiq_list_lock);
	list_del_rcu(&irq_ptr->entry);
	mutex_unlock(&tiq_list_lock);
	synchronize_rcu();
	INIT_LIST_HEAD(&irq_ptr->entry);
}

static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->nr_input_qs > 1;
}

static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

static inline int shared_ind(struct qdio_irq *irq_ptr)
{
	return references_shared_dsci(irq_ptr) ||
		has_multiple_inq_on_dsci(irq_ptr);
}

void clear_nonshared_ind(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	if (shared_ind(irq_ptr))
		return;
	xchg(irq_ptr->dsci, 0);
}

int test_nonshared_ind(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;
	if (shared_ind(irq_ptr))
		return 0;
	if (*irq_ptr->dsci)
		return 1;
	else
		return 0;
}

static inline u32 clear_shared_ind(void)
{
	if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count))
		return 0;
	return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
}

static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
{
	struct qdio_q *q;
	int i;

	if (!references_shared_dsci(irq) &&
	    has_multiple_inq_on_dsci(irq))
		xchg(irq->dsci, 0);

	if (irq->irq_poll) {
		if (!test_and_set_bit(QDIO_IRQ_DISABLED, &irq->poll_state))
			irq->irq_poll(irq->cdev, irq->int_parm);
		else
			QDIO_PERF_STAT_INC(irq, int_discarded);

		return;
	}

	for_each_input_queue(irq, q, i) {
		if (!shared_ind(irq))
			xchg(irq->dsci, 0);

		/*
		 * Call inbound processing but not directly
		 * since that could starve other thinint queues.
		 */
		tasklet_schedule(&q->tasklet);
	}
}

/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @airq: pointer to adapter interrupt descriptor
 * @floating: flag to recognize floating vs. directed interrupts (unused)
 */
static void tiqdio_thinint_handler(struct airq_struct *airq, bool floating)
{
	u32 si_used = clear_shared_ind();
	struct qdio_irq *irq;

	last_ai_time = S390_lowcore.int_clock;
	inc_irq_stat(IRQIO_QAI);

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	list_for_each_entry_rcu(irq, &tiq_list, entry) {
		/* only process queues from changed sets */
		if (unlikely(references_shared_dsci(irq))) {
			if (!si_used)
				continue;
		} else if (!*irq->dsci)
			continue;

		tiqdio_call_inq_handlers(irq);

		QDIO_PERF_STAT_INC(irq, adapter_int);
	}
	rcu_read_unlock();
}

static struct airq_struct tiqdio_airq = {
	.handler = tiqdio_thinint_handler,
	.isc = QDIO_AIRQ_ISC,
};

static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
	u64 summary_indicator_addr, subchannel_indicator_addr;
	int rc;

	if (reset) {
		summary_indicator_addr = 0;
		subchannel_indicator_addr = 0;
	} else {
		summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
		subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
	}

	rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
		       subchannel_indicator_addr);
	if (rc) {
		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
			  scssc->response.code);
		goto out;
	}

	DBF_EVENT("setscind");
	DBF_HEX(&summary_indicator_addr, sizeof(summary_indicator_addr));
	DBF_HEX(&subchannel_indicator_addr, sizeof(subchannel_indicator_addr));
out:
	return rc;
}

/* allocate non-shared indicators and shared indicator */
int __init tiqdio_allocate_memory(void)
{
	q_indicators = kcalloc(TIQDIO_NR_INDICATORS,
			       sizeof(struct indicator_t),
			       GFP_KERNEL);
	if (!q_indicators)
		return -ENOMEM;
	return 0;
}

void tiqdio_free_memory(void)
{
	kfree(q_indicators);
}

int __init tiqdio_register_thinints(void)
{
	int rc;

	rc = register_adapter_interrupt(&tiqdio_airq);
	if (rc) {
		DBF_EVENT("RTI:%x", rc);
		return rc;
	}
	return 0;
}

int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return 0;
	return set_subchannel_ind(irq_ptr, 0);
}

void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}

void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	set_subchannel_ind(irq_ptr, 1);
	put_indicator(irq_ptr->dsci);
}

void __exit tiqdio_unregister_thinints(void)
{
	WARN_ON(!list_empty(&tiq_list));
	unregister_adapter_interrupt(&tiqdio_airq);
}