Lines Matching full:ipa

16 #include "ipa.h"
25 * DOC: IPA Filter and Route Tables
27 * The IPA has tables defined in its local (IPA-resident) memory that define
35 * by all IPA hardware (IPA v4.2 doesn't support hashed tables).
38 * an object (such as a route or filter table) in IPA-resident memory must
52 * address of a filter rule in the memory following the bitmap. Until IPA
58 * removed starting at IPA v5.0. For IPA v5.0+, the endpoint bitmap
60 * bitmap, endpoint 1 has a filter rule. Older versions of IPA represent
72 * endpoints they "own" directly. Currently the AP does not use the IPA
76 * bitmap as defined prior to IPA v5.0.
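The bitmap described above is consumed one set bit at a time by the reset and config paths shown further down (their ep_mask locals). As a rough sketch only, assuming the IPA v5.0+ layout in which bit N set means endpoint N has a filter rule:

        /* Illustrative sketch, not part of the file: walk a filter bitmap
         * the same way the reset/config code below walks ep_mask.
         */
        u64 ep_mask = ipa->filtered;

        while (ep_mask) {
                u32 endpoint_id = __ffs(ep_mask);

                ep_mask ^= BIT(endpoint_id);
                /* ipa->endpoint[endpoint_id] has a filter rule */
        }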
78 * IPA Filter Table
97 * though the AP currently does not use the IPA routing functionality.
99 * IPA Route Table
145 ipa_table_mem(struct ipa *ipa, bool filter, bool hashed, bool ipv6) in ipa_table_mem() argument
158 return ipa_mem_find(ipa, mem_id); in ipa_table_mem()
162 bool ipa_table_hash_support(struct ipa *ipa) in ipa_table_hash_support() argument
164 return ipa->version != IPA_VERSION_4_2; in ipa_table_hash_support()
167 bool ipa_filtered_valid(struct ipa *ipa, u64 filtered) in ipa_filtered_valid() argument
169 struct device *dev = ipa->dev; in ipa_filtered_valid()
179 if (count > ipa->filter_count) { in ipa_filtered_valid()
181 count, ipa->filter_count); in ipa_filtered_valid()
190 static dma_addr_t ipa_table_addr(struct ipa *ipa, bool filter_mask, u16 count) in ipa_table_addr() argument
197 WARN_ON(count > max_t(u32, ipa->filter_count, ipa->route_count)); in ipa_table_addr()
202 return ipa->table_addr + skip * sizeof(*ipa->table_virt); in ipa_table_addr()
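Each table slot here is a 64-bit value (8 bytes; ipa->table_virt is filled with cpu_to_le64() values in ipa_table_init() below), so the return works out to ipa->table_addr + skip * 8. The skip value itself is not among the matched lines; as a hypothetical example, a skip of 1, stepping over one leading slot, would return table_addr + 8.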
208 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_table_reset_add() local
215 mem = ipa_table_mem(ipa, filter, hashed, ipv6); in ipa_table_reset_add()
224 addr = ipa_table_addr(ipa, false, count); in ipa_table_reset_add()
234 ipa_filter_reset_table(struct ipa *ipa, bool hashed, bool ipv6, bool modem) in ipa_filter_reset_table() argument
236 u64 ep_mask = ipa->filtered; in ipa_filter_reset_table()
240 trans = ipa_cmd_trans_alloc(ipa, hweight64(ep_mask)); in ipa_filter_reset_table()
242 dev_err(ipa->dev, "no transaction for %s filter reset\n", in ipa_filter_reset_table()
254 endpoint = &ipa->endpoint[endpoint_id]; in ipa_filter_reset_table()
270 static int ipa_filter_reset(struct ipa *ipa, bool modem) in ipa_filter_reset() argument
274 ret = ipa_filter_reset_table(ipa, false, false, modem); in ipa_filter_reset()
278 ret = ipa_filter_reset_table(ipa, false, true, modem); in ipa_filter_reset()
279 if (ret || !ipa_table_hash_support(ipa)) in ipa_filter_reset()
282 ret = ipa_filter_reset_table(ipa, true, false, modem); in ipa_filter_reset()
286 return ipa_filter_reset_table(ipa, true, true, modem); in ipa_filter_reset()
293 static int ipa_route_reset(struct ipa *ipa, bool modem) in ipa_route_reset() argument
295 bool hash_support = ipa_table_hash_support(ipa); in ipa_route_reset()
296 u32 modem_route_count = ipa->modem_route_count; in ipa_route_reset()
301 trans = ipa_cmd_trans_alloc(ipa, hash_support ? 4 : 2); in ipa_route_reset()
303 dev_err(ipa->dev, "no transaction for %s route reset\n", in ipa_route_reset()
313 count = ipa->route_count - modem_route_count; in ipa_route_reset()
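For illustration, with hypothetical values not taken from any platform data: if ipa->route_count is 15 and modem_route_count is 8, the modem owns route entries 0 through 7 (see ipa_route_id_modem() below), and an AP reset rewrites the remaining count = 15 - 8 = 7 entries.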
329 void ipa_table_reset(struct ipa *ipa, bool modem) in ipa_table_reset() argument
331 struct device *dev = ipa->dev; in ipa_table_reset()
338 ret = ipa_filter_reset(ipa, modem); in ipa_table_reset()
343 ret = ipa_route_reset(ipa, modem); in ipa_table_reset()
349 int ipa_table_hash_flush(struct ipa *ipa) in ipa_table_hash_flush() argument
355 if (!ipa_table_hash_support(ipa)) in ipa_table_hash_flush()
358 trans = ipa_cmd_trans_alloc(ipa, 1); in ipa_table_hash_flush()
360 dev_err(ipa->dev, "no transaction for hash flush\n"); in ipa_table_hash_flush()
364 if (ipa->version < IPA_VERSION_5_0) { in ipa_table_hash_flush()
365 reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH); in ipa_table_hash_flush()
372 reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH); in ipa_table_hash_flush()
374 /* IPA v5.0+ uses a unified cache (both IPv4 and IPv6) */ in ipa_table_hash_flush()
388 struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi); in ipa_table_init_add() local
408 mem = ipa_table_mem(ipa, filter, false, ipv6); in ipa_table_init_add()
409 hash_mem = ipa_table_mem(ipa, filter, true, ipv6); in ipa_table_init_add()
419 count = 1 + hweight64(ipa->filtered); in ipa_table_init_add()
431 addr = ipa_table_addr(ipa, filter, count); in ipa_table_init_add()
432 hash_addr = ipa_table_addr(ipa, filter, hash_count); in ipa_table_init_add()
443 ipa->zero_addr, true); in ipa_table_init_add()
451 ipa->zero_addr, true); in ipa_table_init_add()
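A worked example of the filter-table count above, using a hypothetical bitmap: if ipa->filtered is 0x32, endpoints 1, 4 and 5 have filter rules, hweight64() yields 3, and the table is initialized with 1 + 3 = 4 entries -- the endpoint bitmap slot followed by one rule slot per filtered endpoint.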
454 int ipa_table_setup(struct ipa *ipa) in ipa_table_setup() argument
471 trans = ipa_cmd_trans_alloc(ipa, 8); in ipa_table_setup()
473 dev_err(ipa->dev, "no transaction for table setup\n"); in ipa_table_setup()
497 struct ipa *ipa = endpoint->ipa; in ipa_filter_tuple_zero() local
502 if (ipa->version < IPA_VERSION_5_0) { in ipa_filter_tuple_zero()
503 reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG); in ipa_filter_tuple_zero()
506 val = ioread32(endpoint->ipa->reg_virt + offset); in ipa_filter_tuple_zero()
511 /* IPA v5.0 separates filter and router cache configuration */ in ipa_filter_tuple_zero()
512 reg = ipa_reg(ipa, ENDP_FILTER_CACHE_CFG); in ipa_filter_tuple_zero()
519 iowrite32(val, endpoint->ipa->reg_virt + offset); in ipa_filter_tuple_zero()
523 static void ipa_filter_config(struct ipa *ipa, bool modem) in ipa_filter_config() argument
526 u64 ep_mask = ipa->filtered; in ipa_filter_config()
528 if (!ipa_table_hash_support(ipa)) in ipa_filter_config()
537 endpoint = &ipa->endpoint[endpoint_id]; in ipa_filter_config()
543 static bool ipa_route_id_modem(struct ipa *ipa, u32 route_id) in ipa_route_id_modem() argument
545 return route_id < ipa->modem_route_count; in ipa_route_id_modem()
550 * @ipa: IPA pointer
555 static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id) in ipa_route_tuple_zero() argument
561 if (ipa->version < IPA_VERSION_5_0) { in ipa_route_tuple_zero()
562 reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG); in ipa_route_tuple_zero()
565 val = ioread32(ipa->reg_virt + offset); in ipa_route_tuple_zero()
570 /* IPA v5.0 separates filter and router cache configuration */ in ipa_route_tuple_zero()
571 reg = ipa_reg(ipa, ENDP_ROUTER_CACHE_CFG); in ipa_route_tuple_zero()
578 iowrite32(val, ipa->reg_virt + offset); in ipa_route_tuple_zero()
582 static void ipa_route_config(struct ipa *ipa, bool modem) in ipa_route_config() argument
586 if (!ipa_table_hash_support(ipa)) in ipa_route_config()
589 for (route_id = 0; route_id < ipa->route_count; route_id++) in ipa_route_config()
590 if (ipa_route_id_modem(ipa, route_id) == modem) in ipa_route_config()
591 ipa_route_tuple_zero(ipa, route_id); in ipa_route_config()
595 void ipa_table_config(struct ipa *ipa) in ipa_table_config() argument
597 ipa_filter_config(ipa, false); in ipa_table_config()
598 ipa_filter_config(ipa, true); in ipa_table_config()
599 ipa_route_config(ipa, false); in ipa_table_config()
600 ipa_route_config(ipa, true); in ipa_table_config()
603 /* Verify that the sizes of all IPA filter and route table memory regions
606 bool ipa_table_mem_valid(struct ipa *ipa, bool filter) in ipa_table_mem_valid() argument
608 bool hash_support = ipa_table_hash_support(ipa); in ipa_table_mem_valid()
618 mem_ipv4 = ipa_table_mem(ipa, filter, false, false); in ipa_table_mem_valid()
622 mem_ipv6 = ipa_table_mem(ipa, filter, false, true); in ipa_table_mem_valid()
634 ipa->filter_count = count - 1; /* Filter map in first entry */ in ipa_table_mem_valid()
636 ipa->route_count = count; in ipa_table_mem_valid()
639 if (!ipa_cmd_table_init_valid(ipa, mem_ipv4, !filter)) in ipa_table_mem_valid()
647 if (count < 1 + hweight64(ipa->filtered)) in ipa_table_mem_valid()
653 if (count < ipa->modem_route_count + 1) in ipa_table_mem_valid()
662 mem_hashed = ipa_table_mem(ipa, filter, true, false); in ipa_table_mem_valid()
672 mem_hashed = ipa_table_mem(ipa, filter, true, true); in ipa_table_mem_valid()
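As a worked example of the route-table check above (hypothetical numbers, and assuming each table entry is one 8-byte slot, as the rest of the file suggests): with modem_route_count = 8, the non-hashed route table region must hold at least 8 + 1 = 9 entries, i.e. at least 72 bytes, leaving room for at least one route beyond the modem-owned entries.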
685 * route table data. This is used when initializing or resetting the IPA
708 * (layout diagram, partially matched: a run of "zero rule address" entries, one per slot up to the larger of the IPA filter count and the IPA route count)
715 int ipa_table_init(struct ipa *ipa) in ipa_table_init() argument
717 struct device *dev = ipa->dev; in ipa_table_init()
726 count = max_t(u32, ipa->filter_count, ipa->route_count); in ipa_table_init()
728 /* The IPA hardware requires route and filter table rules to be in ipa_table_init()
739 ipa->table_virt = virt; in ipa_table_init()
740 ipa->table_addr = addr; in ipa_table_init()
747 * it left one position. Prior to IPA v5.0, bit 0 represents global in ipa_table_init()
748 * filtering, which is possible but not used. IPA v5.0+ eliminated in ipa_table_init()
751 if (ipa->version < IPA_VERSION_5_0) in ipa_table_init()
752 *virt++ = cpu_to_le64(ipa->filtered << 1); in ipa_table_init()
754 *virt++ = cpu_to_le64(ipa->filtered); in ipa_table_init()
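A concrete, hypothetical example of the conversion above: if endpoints 1 and 2 have filter rules, ipa->filtered is 0x6; prior to IPA v5.0 the value written is 0x6 << 1 = 0xc, keeping bit 0 free for the (unused) global filter, while IPA v5.0+ writes 0x6 unchanged.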
764 void ipa_table_exit(struct ipa *ipa) in ipa_table_exit() argument
766 u32 count = max_t(u32, 1 + ipa->filter_count, ipa->route_count); in ipa_table_exit()
767 struct device *dev = ipa->dev; in ipa_table_exit()
772 dma_free_coherent(dev, size, ipa->table_virt, ipa->table_addr); in ipa_table_exit()
773 ipa->table_addr = 0; in ipa_table_exit()
774 ipa->table_virt = NULL; in ipa_table_exit()