/linux/kernel/

  range.c
     12  int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)    in add_range() argument
     21  range[nr_range].start = start;    in add_range()
     22  range[nr_range].end = end;    in add_range()
     29  int add_range_with_merge(struct range *range, int az, int nr_range,    in add_range_with_merge() argument
     41  if (!range[i].end)    in add_range_with_merge()
     44  common_start = max(range[i].start, start);    in add_range_with_merge()
     45  common_end = min(range[i].end, end);    in add_range_with_merge()
     50  start = min(range[i].start, start);    in add_range_with_merge()
     51  end = max(range[i].end, end);    in add_range_with_merge()
     53  memmove(&range[i], &range[i + 1],    in add_range_with_merge()
    [all …]

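The add_range_with_merge() matches above show the core of the kernel's inclusive-interval merge: the overlap of two [start, end] ranges runs from the larger of the starts to the smaller of the ends, and a merged range takes the smaller start and the larger end before the array is compacted with memmove(). A minimal userspace sketch of that arithmetic follows; the types are simplified, the array compaction is omitted, and the exact overlap/adjacency test is not visible in the snippet, so only true overlap is treated as mergeable here.

    #include <stdbool.h>
    #include <stdint.h>

    /* Simplified stand-in for the kernel's struct range (inclusive bounds,
     * as implied by range_len() = end - start + 1 in range.h). */
    struct range {
        uint64_t start;
        uint64_t end;
    };

    /*
     * Merge 'b' into 'a' if the two inclusive ranges overlap; returns true
     * on merge.  Mirrors the common_start/common_end arithmetic shown above
     * (max of the starts vs. min of the ends).  Whether the kernel also
     * merges merely adjacent ranges is not visible in the snippet, so only
     * genuine overlap is handled here.
     */
    static bool merge_ranges(struct range *a, const struct range *b)
    {
        uint64_t common_start = a->start > b->start ? a->start : b->start;
        uint64_t common_end   = a->end   < b->end   ? a->end   : b->end;

        if (common_start > common_end)      /* disjoint: nothing to merge */
            return false;

        a->start = a->start < b->start ? a->start : b->start;
        a->end   = a->end   > b->end   ? a->end   : b->end;
        return true;
    }
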
/linux/drivers/soc/ti/

  knav_qmss_acc.c
     20  #define knav_range_offset_to_inst(kdev, range, q) \    argument
     21  (range->queue_base_inst + (q << kdev->inst_shift))
     23  static void __knav_acc_notify(struct knav_range_info *range,    in __knav_acc_notify() argument
     26  struct knav_device *kdev = range->kdev;    in __knav_acc_notify()
     30  range_base = kdev->base_id + range->queue_base;    in __knav_acc_notify()
     32  if (range->flags & RANGE_MULTI_QUEUE) {    in __knav_acc_notify()
     33  for (queue = 0; queue < range->num_queues; queue++) {    in __knav_acc_notify()
     34  inst = knav_range_offset_to_inst(kdev, range,    in __knav_acc_notify()
     44  queue = acc->channel - range->acc_info.start_channel;    in __knav_acc_notify()
     45  inst = knav_range_offset_to_inst(kdev, range, queue);    in __knav_acc_notify()
    [all …]

  knav_qmss_queue.c
    114  static int knav_queue_setup_irq(struct knav_range_info *range,    in knav_queue_setup_irq() argument
    117  unsigned queue = inst->id - range->queue_base;    in knav_queue_setup_irq()
    120  if (range->flags & RANGE_HAS_IRQ) {    in knav_queue_setup_irq()
    121  irq = range->irqs[queue].irq;    in knav_queue_setup_irq()
    126  if (range->irqs[queue].cpu_mask) {    in knav_queue_setup_irq()
    127  ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);    in knav_queue_setup_irq()
    129  dev_warn(range->kdev->dev,    in knav_queue_setup_irq()
    140  struct knav_range_info *range = inst->range;    in knav_queue_free_irq() local
    141  unsigned queue = inst->id - inst->range->queue_base;    in knav_queue_free_irq()
    144  if (range->flags & RANGE_HAS_IRQ) {    in knav_queue_free_irq()
    [all …]

/linux/security/selinux/ss/

  context.h
     33  struct mls_range range;    member
     39  memset(&c->range, 0, sizeof(c->range));    in mls_context_init()
     47  dst->range.level[0].sens = src->range.level[0].sens;    in mls_context_cpy()
     48  rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);    in mls_context_cpy()
     52  dst->range.level[1].sens = src->range.level[1].sens;    in mls_context_cpy()
     53  rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);    in mls_context_cpy()
     55  ebitmap_destroy(&dst->range.level[0].cat);    in mls_context_cpy()
     68  dst->range.level[0].sens = src->range.level[0].sens;    in mls_context_cpy_low()
     69  rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);    in mls_context_cpy_low()
     73  dst->range.level[1].sens = src->range.level[0].sens;    in mls_context_cpy_low()
    [all …]

  mls.c
     44  u32 index_sens = context->range.level[l].sens;    in mls_compute_context_len()
     50  e = &context->range.level[l].cat;    in mls_compute_context_len()
     70  if (mls_level_eq(&context->range.level[0],    in mls_compute_context_len()
     71  &context->range.level[1]))    in mls_compute_context_len()
    104  context->range.level[l].sens - 1));    in mls_sid_to_context()
    110  e = &context->range.level[l].cat;    in mls_sid_to_context()
    147  if (mls_level_eq(&context->range.level[0],    in mls_sid_to_context()
    148  &context->range.level[1]))    in mls_sid_to_context()
    196  if (!mls_range_isvalid(p, &c->range))    in mls_context_isvalid()
    208  if (!mls_range_contains(usrdatum->range, c->range))    in mls_context_isvalid()
    [all …]

/linux/mm/

  memremap.c
     66  static void pgmap_array_delete(struct range *range)    in pgmap_array_delete() argument
     68  xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),    in pgmap_array_delete()
     75  struct range *range = &pgmap->ranges[range_id];    in pfn_first() local
     76  unsigned long pfn = PHYS_PFN(range->start);    in pfn_first()
     88  struct range *range = &pgmap->ranges[i];    in pgmap_pfn_valid() local
     90  if (pfn >= PHYS_PFN(range …    in pgmap_pfn_valid()
    100  const struct range *range = &pgmap->ranges[range_id];    in pfn_end() local
    113  struct range *range = &pgmap->ranges[range_id];    in pageunmap_range() local
    174  struct range *range = &pgmap->ranges[range_id];    in pagemap_range() local
    [all …]

  hmm.c
     32  struct hmm_range *range;    member
     43  struct hmm_range *range, unsigned long cpu_flags)    in hmm_pfns_fill() argument
     45  unsigned long i = (addr - range->start) >> PAGE_SHIFT;    in hmm_pfns_fill()
     48  range->hmm_pfns[i] = cpu_flags;    in hmm_pfns_fill()
     90  struct hmm_range *range = hmm_vma_walk->range;    in hmm_pte_need_fault() local
    102  pfn_req_flags &= range->pfn_flags_mask;    in hmm_pte_need_fault()
    103  pfn_req_flags |= range->default_flags;    in hmm_pte_need_fault()
    125  struct hmm_range *range = hmm_vma_walk->range;    in hmm_range_need_fault() local
    134  if (!((range->default_flags | range->pfn_flags_mask) &    in hmm_range_need_fault()
    151  struct hmm_range *range = hmm_vma_walk->range;    in hmm_vma_walk_hole() local
    [all …]

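hmm_pfns_fill() above locates the output slot for an address by its page offset from range->start. A tiny sketch of that index arithmetic, using a cut-down structure in place of struct hmm_range and assuming 4 KiB pages (PAGE_SHIFT = 12) purely for illustration:

    #include <stdint.h>

    #define PAGE_SHIFT 12   /* assumed 4 KiB pages for the example */

    /* Simplified stand-in for the relevant fields of struct hmm_range. */
    struct hmm_range_sketch {
        uint64_t start;       /* first virtual address covered */
        uint64_t end;         /* one past the last covered address */
        uint64_t *hmm_pfns;   /* one slot per page in [start, end) */
    };

    /* Index of the hmm_pfns[] slot describing 'addr', as in hmm_pfns_fill(). */
    static uint64_t hmm_pfn_index(const struct hmm_range_sketch *range, uint64_t addr)
    {
        return (addr - range->start) >> PAGE_SHIFT;
    }
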
/linux/include/linux/

  range.h
      6  struct range {    struct
     11  static inline u64 range_len(const struct range *range)    in range_len() argument
     13  return range->end - range->start + 1;    in range_len()
     17  static inline bool range_contains(const struct range *r1,    in range_contains()
     18  const struct range *r2)    in range_contains()
     24  static inline bool range_overlaps(const struct range *r1,    in range_overlaps()
     25  const struct range *r2)    in range_overlaps()
     30  int add_range(struct range *range, int az, int nr_range,
     34  int add_range_with_merge(struct range *range, int az, int nr_range,
     37  void subtract_range(struct range *range, int az, u64 start, u64 end);
    [all …]

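range.h treats struct range as an inclusive [start, end] pair, which is why range_len() returns end - start + 1. The bodies of range_contains() and range_overlaps() are truncated in the listing, so the userspace re-statement below fills them in with the usual inclusive-bounds semantics; those two bodies are assumptions rather than copies of the header.

    #include <stdbool.h>
    #include <stdint.h>

    struct range {
        uint64_t start;
        uint64_t end;       /* inclusive */
    };

    static inline uint64_t range_len(const struct range *range)
    {
        return range->end - range->start + 1;   /* as in range.h */
    }

    /* Assumed semantics: r1 contains r2 when r2 lies entirely within r1. */
    static inline bool range_contains(const struct range *r1, const struct range *r2)
    {
        return r1->start <= r2->start && r1->end >= r2->end;
    }

    /* Assumed semantics: two inclusive ranges overlap when neither one ends
     * before the other begins. */
    static inline bool range_overlaps(const struct range *r1, const struct range *r2)
    {
        return r1->start <= r2->end && r2->start <= r1->end;
    }
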
/linux/arch/mips/loongson64/

  init.c
    156  struct logic_pio_hwaddr *range;    in add_legacy_isa_io() local
    159  range = kzalloc(sizeof(*range), GFP_ATOMIC);    in add_legacy_isa_io()
    160  if (!range)    in add_legacy_isa_io()
    163  range->fwnode = fwnode;    in add_legacy_isa_io()
    164  range->size = size = round_up(size, PAGE_SIZE);    in add_legacy_isa_io()
    165  range->hw_start = hw_start;    in add_legacy_isa_io()
    166  range->flags = LOGIC_PIO_CPU_MMIO;    in add_legacy_isa_io()
    168  ret = logic_pio_register_range(range);    in add_legacy_isa_io()
    170  kfree(range);    in add_legacy_isa_io()
    175  if (range->io_start != 0) {    in add_legacy_isa_io()
    [all …]

/linux/drivers/dax/

  kmem.c
     31  static int dax_kmem_range(struct dev_dax *dev_dax, int i, struct range *r)    in dax_kmem_range()
     34  struct range *range = &dax_range->range;    in dax_kmem_range() local
     37  r->start = ALIGN(range->start, memory_block_size_bytes());    in dax_kmem_range()
     38  r->end = ALIGN_DOWN(range->end + 1, memory_block_size_bytes()) - 1;    in dax_kmem_range()
     40  r->start = range->start;    in dax_kmem_range()
     41  r->end = range->end;    in dax_kmem_range()
     98  struct range range;    in dev_dax_kmem_probe() local
    100  rc = dax_kmem_range(dev_dax, i, &range);    in dev_dax_kmem_probe()
    103  i, range.start, range.end);    in dev_dax_kmem_probe()
    106  total_len += range_len(&range);    in dev_dax_kmem_probe()
    [all …]

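dax_kmem_range() above shrinks a device-DAX range inward so that both ends land on memory-block boundaries before the range is offered to the memory hotplug code. The sketch below spells out that ALIGN()/ALIGN_DOWN() arithmetic with standalone macros; a 128 MiB block size is assumed only for the worked example, since the real value comes from memory_block_size_bytes().

    #include <stdint.h>

    /* Round x up / down to a multiple of a (a must be a power of two),
     * matching the kernel's ALIGN()/ALIGN_DOWN() behaviour. */
    #define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
    #define ALIGN_DOWN(x, a)  ((x) & ~((uint64_t)(a) - 1))

    /* Shrink the inclusive range [*start, *end] to the largest sub-range whose
     * ends sit on 'block'-sized boundaries, as dax_kmem_range() does. */
    static void clamp_to_blocks(uint64_t *start, uint64_t *end, uint64_t block)
    {
        *start = ALIGN_UP(*start, block);
        *end   = ALIGN_DOWN(*end + 1, block) - 1;
    }

With a 128 MiB block, for instance, the inclusive range [0x7e00000, 0x17fffffff] shrinks to [0x8000000, 0x17fffffff].
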
/linux/drivers/pci/hotplug/

  ibmphp_res.c
    368  static int add_bus_range(int type, struct range_node *range, struct bus_node *bus_cur)    in add_bus_range() argument
    392  if (range->start < range_cur->start)    in add_bus_range()
    402  bus_cur->rangeMem = range;    in add_bus_range()
    405  bus_cur->rangePFMem = range;    in add_bus_range()
    408  bus_cur->rangeIO = range;    in add_bus_range()
    411  range->next = range_cur;    in add_bus_range()
    412  range->rangeno = 1;    in add_bus_range()
    416  range->next = NULL;    in add_bus_range()
    417  range_prev->next = range;    in add_bus_range()
    418  range->rangeno = range_prev->rangeno + 1;    in add_bus_range()
    [all …]

/linux/drivers/of/

  address.c
     46  u64 (*map)(__be32 *addr, const __be32 *range,
     66  static u64 of_bus_default_map(__be32 *addr, const __be32 *range,    in of_bus_default_map() argument
     71  cp = of_read_number(range + fna, na - fna);    in of_bus_default_map()
     72  s = of_read_number(range + na + pna, ns);    in of_bus_default_map()
    104  static u64 of_bus_default_flags_map(__be32 *addr, const __be32 *range, int na,    in of_bus_default_flags_map() argument
    108  if (*addr != *range)    in of_bus_default_flags_map()
    111  return of_bus_default_map(addr, range, na, ns, pna, fna);    in of_bus_default_flags_map()
    184  static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,    in of_bus_pci_map() argument
    190  rf = of_bus_pci_get_flags(range);    in of_bus_pci_map()
    196  return of_bus_default_map(addr, range, na, ns, pna, fna);    in of_bus_pci_map()
    [all …]

/linux/drivers/gpu/drm/sprd/

  megacores_pll.c
    221  u32 range[2], constant;    in dphy_timing_config() local
    236  range[L] = 50 * scale;    in dphy_timing_config()
    237  range[H] = INFINITY;    in dphy_timing_config()
    238  val[CLK] = DIV_ROUND_UP(range[L] * (factor << 1), t_byteck) - 2;    in dphy_timing_config()
    243  range[L] = 38 * scale;    in dphy_timing_config()
    244  range[H] = 95 * scale;    in dphy_timing_config()
    245  tmp = AVERAGE(range[L], range[H]);    in dphy_timing_config()
    246  val[CLK] = DIV_ROUND_UP(AVERAGE(range[L], range[H]), t_half_byteck) - 1;    in dphy_timing_config()
    247  range[L] = 40 * scale + 4 * t_ui;    in dphy_timing_config()
    248  range[H] = 85 * scale + 6 * t_ui;    in dphy_timing_config()
    [all …]

/linux/mm/damon/

  sysfs-common.c
     22  struct damon_sysfs_ul_range *range = kmalloc(sizeof(*range),    in damon_sysfs_ul_range_alloc() local
     25  if (!range)    in damon_sysfs_ul_range_alloc()
     27  range->kobj = (struct kobject){};    in damon_sysfs_ul_range_alloc()
     28  range->min = min;    in damon_sysfs_ul_range_alloc()
     29  range->max = max;    in damon_sysfs_ul_range_alloc()
     31  return range;    in damon_sysfs_ul_range_alloc()
     37  struct damon_sysfs_ul_range *range = container_of(kobj,    in min_show() local
     40  return sysfs_emit(buf, "%lu\n", range->min);    in min_show()
     46  struct damon_sysfs_ul_range *range = container_of(kobj,    in min_store() local
     55  range->min = min;    in min_store()
    [all …]

/linux/net/netfilter/

  nf_nat_core.c
    402  const struct nf_nat_range2 *range)    in nf_nat_inet_in_range() argument
    405  return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&    in nf_nat_inet_in_range()
    406  ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);    in nf_nat_inet_in_range()
    408  return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&    in nf_nat_inet_in_range()
    409  ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;    in nf_nat_inet_in_range()
    447  const struct nf_nat_range2 *range)    in nf_in_range() argument
    452  if (range->flags & NF_NAT_RANGE_MAP_IPS &&    in nf_in_range()
    453  !nf_nat_inet_in_range(tuple, range))    in nf_in_range()
    456  if (!(range->flags & NF_NAT_RANGE_PROTO_SPECIFIED))    in nf_in_range()
    460  &range->min_proto, &range->max_proto);    in nf_in_range()
    [all …]

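nf_nat_inet_in_range() above compares IPv4 addresses in host byte order, which turns a min/max address pair into a plain numeric interval (IPv6 goes through ipv6_addr_cmp() for the same effect). A userspace sketch of just the IPv4 branch, with plain integers standing in for the kernel's tuple and range types:

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>

    /*
     * Is the network-byte-order address 'addr' inside [min, max]?
     * Converting to host byte order first makes this a simple numeric
     * interval check, as in nf_nat_inet_in_range().
     */
    static bool ipv4_addr_in_range(uint32_t addr, uint32_t min, uint32_t max)
    {
        return ntohl(addr) >= ntohl(min) && ntohl(addr) <= ntohl(max);
    }
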
  xt_nat.c
     55  struct nf_nat_range2 range;    in xt_snat_target_v0() local
     64  xt_nat_convert_range(&range, &mr->range[0]);    in xt_snat_target_v0()
     65  return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);    in xt_snat_target_v0()
     72  struct nf_nat_range2 range;    in xt_dnat_target_v0() local
     80  xt_nat_convert_range(&range, &mr->range[0]);    in xt_dnat_target_v0()
     81  return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_DST);    in xt_dnat_target_v0()
     88  struct nf_nat_range2 range;    in xt_snat_target_v1() local
     97  memcpy(&range, range_v1, sizeof(*range_v1));    in xt_snat_target_v1()
     98  memset(&range.base_proto, 0, sizeof(range.base_proto));    in xt_snat_target_v1()
    100  return nf_nat_setup_info(ct, &range, NF_NAT_MANIP_SRC);    in xt_snat_target_v1()
    [all …]

  nf_nat_bpf.c
     37  struct nf_nat_range2 range;    in bpf_ct_set_nat_info() local
     42  memset(&range, 0, sizeof(struct nf_nat_range2));    in bpf_ct_set_nat_info()
     43  range.flags = NF_NAT_RANGE_MAP_IPS;    in bpf_ct_set_nat_info()
     44  range.min_addr = *addr;    in bpf_ct_set_nat_info()
     45  range.max_addr = range.min_addr;    in bpf_ct_set_nat_info()
     47  range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;    in bpf_ct_set_nat_info()
     48  range.min_proto.all = cpu_to_be16(port);    in bpf_ct_set_nat_info()
     49  range.max_proto.all = range.min_proto.all;    in bpf_ct_set_nat_info()
     52  return nf_nat_setup_info(ct, &range, manip) == NF_DROP ? -ENOMEM : 0;    in bpf_ct_set_nat_info()

  xt_NETMAP.c
     21  const struct nf_nat_range2 *range = par->targinfo;    in netmap_tg6() local
     29  for (i = 0; i < ARRAY_SIZE(range->min_addr.ip6); i++)    in netmap_tg6()
     30  netmask.ip6[i] = ~(range->min_addr.ip6[i] ^    in netmap_tg6()
     31  range->max_addr.ip6[i]);    in netmap_tg6()
     41  new_addr.ip6[i] |= range->min_addr.ip6[i] &    in netmap_tg6()
     45  newrange.flags = range->flags | NF_NAT_RANGE_MAP_IPS;    in netmap_tg6()
     48  newrange.min_proto = range->min_proto;    in netmap_tg6()
     49  newrange.max_proto = range->max_proto;    in netmap_tg6()
     56  const struct nf_nat_range2 *range = par->targinfo;    in netmap_tg6_checkentry() local
     58  if (!(range->flags & NF_NAT_RANGE_MAP_IPS))    in netmap_tg6_checkentry()
    [all …]

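netmap_tg6() above recovers the prefix mask from the range itself: every bit position where min_addr and max_addr agree is a network bit, so netmask = ~(min ^ max), and the rewritten address takes its network bits from min_addr. The sketch below shows the same idea for a single 32-bit word; the visible line that ORs in the network bits is truncated, so keeping the host bits of the original address is an assumption about how that computation completes.

    #include <stdint.h>

    /*
     * NETMAP-style rewrite for one 32-bit word: bits where the range's min
     * and max agree form the "network" part and are taken from min; the
     * remaining "host" bits are preserved from the original address.
     */
    static uint32_t netmap_one_word(uint32_t addr, uint32_t min, uint32_t max)
    {
        uint32_t netmask = ~(min ^ max);    /* as computed in netmap_tg6() */

        return (addr & ~netmask)    /* host bits kept (assumed; line truncated above) */
             | (min  &  netmask);   /* network bits substituted */
    }
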
  nft_nat.c
     33  static void nft_nat_setup_addr(struct nf_nat_range2 *range,    in nft_nat_setup_addr() argument
     39  range->min_addr.ip = (__force __be32)    in nft_nat_setup_addr()
     41  range->max_addr.ip = (__force __be32)    in nft_nat_setup_addr()
     45  memcpy(range->min_addr.ip6, &regs->data[priv->sreg_addr_min],    in nft_nat_setup_addr()
     46  sizeof(range->min_addr.ip6));    in nft_nat_setup_addr()
     47  memcpy(range->max_addr.ip6, &regs->data[priv->sreg_addr_max],    in nft_nat_setup_addr()
     48  sizeof(range->max_addr.ip6));    in nft_nat_setup_addr()
     53  static void nft_nat_setup_proto(struct nf_nat_range2 *range,    in nft_nat_setup_proto() argument
     57  range->min_proto.all = (__force __be16)    in nft_nat_setup_proto()
     59  range->max_proto.all = (__force __be16)    in nft_nat_setup_proto()
    [all …]

/linux/tools/testing/selftests/net/

  ip_local_port_range.c
     28  static void unpack_port_range(__u32 range, __u16 *lo, __u16 *hi)    in unpack_port_range() argument
     30  *lo = range & 0xffff;    in unpack_port_range()
     31  *hi = range >> 16;    in unpack_port_range()
    104  static int get_ip_local_port_range(int fd, __u32 *range)    in get_ip_local_port_range() argument
    115  *range = val;    in get_ip_local_port_range()
    238  __u32 range;    in TEST_F() local
    243  range = pack_port_range(t->range_lo, t->range_hi);    in TEST_F()
    244  err = setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));    in TEST_F()
    281  __u32 range;    in TEST_F() local
    289  range = pack_port_range(t->range_lo, t->range_hi);    in TEST_F()
    [all …]

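The selftest above exercises the IP_LOCAL_PORT_RANGE socket option, which takes the low and high ports packed into one __u32 (low port in bits 0-15, high port in bits 16-31, per unpack_port_range()). pack_port_range() itself is not shown, so the packing below is the assumed inverse; the option also only exists on reasonably recent kernels and UAPI headers, hence the guarded fallback define.

    #include <netinet/in.h>
    #include <stdint.h>
    #include <sys/socket.h>

    #ifndef IP_LOCAL_PORT_RANGE
    #define IP_LOCAL_PORT_RANGE 51   /* assumed value if the headers are too old */
    #endif

    /* Assumed inverse of unpack_port_range(): low port in bits 0-15, high in 16-31. */
    static uint32_t pack_port_range(uint16_t lo, uint16_t hi)
    {
        return ((uint32_t)hi << 16) | lo;
    }

    /* Restrict the local ports the kernel may pick for 'fd' to [lo, hi];
     * returns the setsockopt() result (0 on success, -1 with errno set). */
    static int set_local_port_range(int fd, uint16_t lo, uint16_t hi)
    {
        uint32_t range = pack_port_range(lo, hi);

        return setsockopt(fd, SOL_IP, IP_LOCAL_PORT_RANGE, &range, sizeof(range));
    }
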
/linux/drivers/net/ethernet/mellanox/mlxsw/

  spectrum_port_range.c
     13  struct mlxsw_sp_port_range range;    member
     36  mlxsw_reg_pprr_src_set(pprr_pl, prr->range.source);    in mlxsw_sp_port_range_reg_configure()
     37  mlxsw_reg_pprr_dst_set(pprr_pl, !prr->range.source);    in mlxsw_sp_port_range_reg_configure()
     40  mlxsw_reg_pprr_port_range_min_set(pprr_pl, prr->range.min);    in mlxsw_sp_port_range_reg_configure()
     41  mlxsw_reg_pprr_port_range_max_set(pprr_pl, prr->range.max);    in mlxsw_sp_port_range_reg_configure()
     48  const struct mlxsw_sp_port_range *range,    in mlxsw_sp_port_range_reg_create() argument
     59  prr->range = *range;    in mlxsw_sp_port_range_reg_create()
     99  const struct mlxsw_sp_port_range *range)    in mlxsw_sp_port_range_reg_find() argument
    106  if (prr->range.min == range->min &&    in mlxsw_sp_port_range_reg_find()
    107  prr->range.max == range->max &&    in mlxsw_sp_port_range_reg_find()
    [all …]

/linux/arch/x86/kernel/cpu/mtrr/

  cleanup.c
     53  static struct range __initdata range[RANGE_NUM];    variable
     62  x86_get_mtrr_mem_range(struct range *range, int nr_range,    in x86_get_mtrr_mem_range() argument
     76  nr_range = add_range_with_merge(range, RANGE_NUM, nr_range,    in x86_get_mtrr_mem_range()
     83  range[i].start, range[i].end);    in x86_get_mtrr_mem_range()
    105  subtract_range(range, RANGE_NUM, base, base + size);    in x86_get_mtrr_mem_range()
    108  subtract_range(range, RANGE_NUM, extra_remove_base,    in x86_get_mtrr_mem_range()
    113  if (!range[i].end)    in x86_get_mtrr_mem_range()
    117  range[i].start, range[i].end);    in x86_get_mtrr_mem_range()
    121  nr_range = clean_sort_range(range, RANGE_NUM);    in x86_get_mtrr_mem_range()
    126  range[i].start, range[i].end);    in x86_get_mtrr_mem_range()
    [all …]

/linux/arch/loongarch/kernel/

  setup.c
    468  struct logic_pio_hwaddr *range;    in add_legacy_isa_io() local
    470  range = kzalloc(sizeof(*range), GFP_ATOMIC);    in add_legacy_isa_io()
    471  if (!range)    in add_legacy_isa_io()
    474  range->fwnode = fwnode;    in add_legacy_isa_io()
    475  range->size = size = round_up(size, PAGE_SIZE);    in add_legacy_isa_io()
    476  range->hw_start = hw_start;    in add_legacy_isa_io()
    477  range->flags = LOGIC_PIO_CPU_MMIO;    in add_legacy_isa_io()
    479  ret = logic_pio_register_range(range);    in add_legacy_isa_io()
    481  kfree(range);    in add_legacy_isa_io()
    486  if (range->io_start != 0) {    in add_legacy_isa_io()
    [all …]

/linux/drivers/pinctrl/

  core.c
    284  static inline int gpio_to_pin(struct pinctrl_gpio_range *range,    in gpio_to_pin() argument
    287  unsigned int pin = gc->base + offset - range->base;    in gpio_to_pin()
    288  if (range->pins)    in gpio_to_pin()
    289  return range->pins[pin];    in gpio_to_pin()
    291  return range->pin_base + pin;    in gpio_to_pin()
    307  struct pinctrl_gpio_range *range;    in pinctrl_match_gpio_range() local
    311  list_for_each_entry(range, &pctldev->gpio_ranges, node) {    in pinctrl_match_gpio_range()
    313  if ((gc->base + offset) >= range->base &&    in pinctrl_match_gpio_range()
    314  (gc->base + offset) < range->base + range->npins) {    in pinctrl_match_gpio_range()
    316  return range;    in pinctrl_match_gpio_range()
    [all …]

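gpio_to_pin() above turns a global GPIO number into a pin-controller pin: the offset inside the matched range either indexes an explicit pins[] table or is added to a linear pin_base, and pinctrl_match_gpio_range() picks the range by checking base <= gpio < base + npins. A standalone sketch of that translation with a trimmed-down range structure (the real struct pinctrl_gpio_range carries more fields):

    #include <stdint.h>

    /* Trimmed-down model of struct pinctrl_gpio_range. */
    struct gpio_range_sketch {
        unsigned int base;        /* first global GPIO number covered */
        unsigned int npins;       /* number of GPIOs in the range */
        unsigned int pin_base;    /* first pin number, for linear ranges */
        const unsigned int *pins; /* optional explicit GPIO-to-pin table */
    };

    /* Translate a global GPIO number to a pin number, as gpio_to_pin() does.
     * The caller is expected to have matched 'gpio' to this range already
     * (base <= gpio < base + npins, cf. pinctrl_match_gpio_range()). */
    static int gpio_to_pin_sketch(const struct gpio_range_sketch *range, unsigned int gpio)
    {
        unsigned int offset = gpio - range->base;

        return range->pins ? (int)range->pins[offset]
                           : (int)(range->pin_base + offset);
    }
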
/linux/drivers/regulator/

  qcom_spmi-regulator.c
    403  struct spmi_voltage_range *range;    member
    481  .range = name##_ranges, \
    669  const struct spmi_voltage_range *range;    in spmi_regulator_select_voltage() local
    675  lim_min_uV = vreg->set_points->range[0].set_point_min_uV;    in spmi_regulator_select_voltage()
    677  vreg->set_points->range[vreg->set_points->count - 1].set_point_max_uV;    in spmi_regulator_select_voltage()
    691  range_max_uV = vreg->set_points->range[i - 1].set_point_max_uV;    in spmi_regulator_select_voltage()
    697  range = &vreg->set_points->range[range_id];    in spmi_regulator_select_voltage()
    703  voltage_sel = DIV_ROUND_UP(uV - range->min_uV, range->step_uV);    in spmi_regulator_select_voltage()
    704  uV = voltage_sel * range->step_uV + range->min_uV;    in spmi_regulator_select_voltage()
    716  selector += vreg->set_points->range[i].n_voltages;    in spmi_regulator_select_voltage()
    [all …]

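spmi_regulator_select_voltage() above snaps a requested voltage up onto a range's step grid: the in-range selector is DIV_ROUND_UP(uV - min_uV, step_uV) and the programmed voltage is min_uV + selector * step_uV, after which the global selector adds the n_voltages of every lower range (line 716). A cut-down sketch of the per-range step, leaving out the range selection and limit clamping that precede it:

    #include <stdint.h>

    /* Round-up division, like the kernel's DIV_ROUND_UP(). */
    #define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

    struct vrange_sketch {      /* trimmed-down spmi_voltage_range */
        int min_uV;
        int step_uV;
        unsigned int n_voltages;
    };

    /*
     * Pick the selector within 'r' for a requested voltage (assumed to be at
     * or above r->min_uV) and report the voltage actually programmed, i.e.
     * the next step at or above the request.
     */
    static unsigned int select_in_range(const struct vrange_sketch *r,
                                        int req_uV, int *prog_uV)
    {
        unsigned int sel = DIV_ROUND_UP(req_uV - r->min_uV, r->step_uV);

        *prog_uV = r->min_uV + (int)sel * r->step_uV;
        return sel;
    }

For example, requesting 1,287,500 µV in a range starting at 1,275,000 µV with 12,500 µV steps gives selector 1 and a programmed 1,287,500 µV.
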