/linux/kernel/ |
range.c |
     * Range add and subtract
    #include <linux/range.h>

    int add_range(struct range *range, int az, int nr_range, u64 start, u64 end)
        ...
        range[nr_range].start = start;
        range[nr_range].end = end;
        ...
    int add_range_with_merge(struct range *range, int az, int nr_range,
        ...
        if (!range[i].end)
            ...
        common_start = max(range[i].start, start);
        common_end = min(range[i].end, end);
        ...
        start = min(range[i].start, start);
    [all …]
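A rough, self-contained illustration of the append step matched above, assuming the u64 start/end fields of struct range from include/linux/range.h (see the range.h entry below); the guard conditions and the merge handling of add_range_with_merge() are not in the excerpt, so the body here is a sketch rather than the kernel implementation:

    #include <stdio.h>

    /* Simplified stand-in for struct range from include/linux/range.h. */
    struct range {
        unsigned long long start;
        unsigned long long end;
    };

    /*
     * Minimal add_range()-style helper: append [start, end] as a new entry
     * unless the table is already full (az slots) or the range is empty,
     * and return the updated entry count.  Only the two assignments come
     * from the excerpt; the checks are assumptions.
     */
    static int add_range(struct range *range, int az, int nr_range,
                         unsigned long long start, unsigned long long end)
    {
        if (start >= end)       /* nothing to add */
            return nr_range;
        if (nr_range >= az)     /* out of slots */
            return nr_range;

        range[nr_range].start = start;
        range[nr_range].end = end;
        return nr_range + 1;
    }

    int main(void)
    {
        struct range table[4];
        int nr = 0;

        nr = add_range(table, 4, nr, 0x1000, 0x2000);
        nr = add_range(table, 4, nr, 0x3000, 0x4000);
        printf("%d ranges, first [%#llx-%#llx]\n", nr, table[0].start, table[0].end);
        return 0;
    }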
|
/linux/Documentation/gpu/ |
kms-properties.csv |
    ,,“left margin”,RANGE,"Min=0, Max=100",Connector,TBD
    ,,“right margin”,RANGE,"Min=0, Max=100",Connector,TBD
    ,,“top margin”,RANGE,"Min=0, Max=100",Connector,TBD
    ,,“bottom margin”,RANGE,"Min=0, Max=100",Connector,TBD
    ,,“brightness”,RANGE,"Min=0, Max=100",Connector,TBD
    ,,“contrast”,RANGE,"Min=0, Max=100",Connector,TBD
    ,,“flicker reduction”,RANGE,"Min=0, Max=100",Connector,TBD
    ,,“overscan”,RANGE,"Min=0, Max=100",Connector,TBD
    ,,“saturation”,RANGE,"Min=0, Max=100",Connector,TBD
    ,,“hue”,RANGE,"Min=0, Max=100",Connector,TBD
    [all …]
|
/linux/drivers/soc/ti/ |
knav_qmss_acc.c |
    #define knav_range_offset_to_inst(kdev, range, q) \
        (range->queue_base_inst + (q << kdev->inst_shift))

    static void __knav_acc_notify(struct knav_range_info *range,
        ...
        struct knav_device *kdev = range->kdev;
        ...
        range_base = kdev->base_id + range->queue_base;
        ...
        if (range->flags & RANGE_MULTI_QUEUE) {
            for (queue = 0; queue < range->num_queues; queue++) {
                inst = knav_range_offset_to_inst(kdev, range,
        ...
        queue = acc->channel - range->acc_info.start_channel;
        inst = knav_range_offset_to_inst(kdev, range, queue);
    [all …]
|
/linux/lib/ |
logic_pio.c |
     * logic_pio_register_range - register logical PIO range for a host
     * @new_range: pointer to the IO range to be registered.
     * ...
     * If the range already exists, -EEXIST will be returned, which should be
     * ...
     * Register a new IO range node in the IO range list.
        ...
        struct logic_pio_hwaddr *range;
        ...
        list_for_each_entry(range, &io_range_list, list) {
            if (range->fwnode == new_range->fwnode) {
                /* range already there */
        ...
            if (range->flags == LOGIC_PIO_CPU_MMIO &&
        ...
                if (start >= range->hw_start + range->size ||
    [all …]
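The last matched line is one half of the usual half-open interval overlap test: two [start, start + size) windows are disjoint exactly when one begins at or after the other ends. A standalone sketch of that check, with the function name and types chosen for the example rather than taken from logic_pio.c:

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Half-open overlap test mirroring the comparison visible in the
     * logic_pio_register_range() excerpt above.
     */
    static bool ranges_overlap(unsigned long long a_start, unsigned long long a_size,
                               unsigned long long b_start, unsigned long long b_size)
    {
        if (a_start >= b_start + b_size || b_start >= a_start + a_size)
            return false;
        return true;
    }

    int main(void)
    {
        /* [0x1000, 0x2000) vs [0x1800, 0x2800) -> overlap */
        printf("%d\n", ranges_overlap(0x1000, 0x1000, 0x1800, 0x1000));
        /* [0x1000, 0x2000) vs [0x2000, 0x3000) -> no overlap */
        printf("%d\n", ranges_overlap(0x1000, 0x1000, 0x2000, 0x1000));
        return 0;
    }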
|
linear_ranges.c |
     * helpers to map values in a linear range to range index
    ...
     * linear_range_values_in_range - return the amount of values in a range
     * @r: pointer to linear range where values are counted
     *
     * Compute the amount of values in range pointed by @r. Note, values can
     * be all equal - range with selectors 0,...,2 with step 0 still contains
     * ...
     * Return: the amount of values in range pointed by @r
    ...
     * be all equal - range with selectors 0,...,2 with step 0 still contains
    ...
     * linear_range_get_max_value - return the largest value in a range
     * @r: pointer to linear range where value is looked from
     *
     * Return: the largest value in the given range
    [all …]
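A linear range maps a contiguous selector window onto values spaced by a fixed step. Assuming the usual min / min_sel / max_sel / step fields of struct linear_range (the struct itself is not in the excerpt), the two helpers documented above reduce to the arithmetic below; this is a standalone sketch, not the library code:

    #include <stdio.h>

    /* Stand-in for struct linear_range; field names are assumed. */
    struct linear_range {
        unsigned int min;      /* value produced by the lowest selector */
        unsigned int min_sel;  /* lowest selector */
        unsigned int max_sel;  /* highest selector */
        unsigned int step;     /* value increment per selector step */
    };

    /* Number of selectors in the range; with step == 0 they all map to the same value. */
    static unsigned int values_in_range(const struct linear_range *r)
    {
        return r->max_sel - r->min_sel + 1;
    }

    /* Largest value the range can produce. */
    static unsigned int max_value(const struct linear_range *r)
    {
        return r->min + (r->max_sel - r->min_sel) * r->step;
    }

    int main(void)
    {
        /* e.g. a regulator: selectors 0..2, 900000 uV plus 50000 uV per step */
        struct linear_range r = { .min = 900000, .min_sel = 0, .max_sel = 2, .step = 50000 };

        printf("%u values, max %u\n", values_in_range(&r), max_value(&r));

        r.step = 0;   /* three selectors, all equal to min, as the comment notes */
        printf("%u values, max %u\n", values_in_range(&r), max_value(&r));
        return 0;
    }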
|
/linux/arch/s390/include/asm/ |
physmem_info.h |
     * for_each_physmem_usable_range - early online memory range iterator
     * @p_start: ptr to unsigned long for start address of the range
     * @p_end: ptr to unsigned long for end address of the range
    ...
    #define for_each_physmem_reserved_type_range(t, range, p_start, p_end) \
        for (range = &physmem_info.reserved[t], *p_start = range->start, *p_end = range->end; \
             range && range->end; range = range->chain ? __va(range->chain) : NULL, \
             *p_start = range ? range->start : 0, *p_end = range ? range->end : 0)
    ...
    ... __physmem_reserved_next(..., struct reserved_range *range)
        ...
        if (!range) {
            range = &physmem_info.reserved[*t];
    [all …]
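The reserved-range iterator above starts at a per-type head entry and follows a "chain" field, stopping at an entry whose end is zero. A simplified userspace mirror of that walking pattern (in the kernel the chain holds a physical address that is converted with __va(); here it is a plain pointer, and the struct is reduced to what the macro touches):

    #include <stdio.h>

    /* Reduced reserved_range: only the fields the iterator uses. */
    struct reserved_range {
        unsigned long start;
        unsigned long end;
        struct reserved_range *chain;   /* next range of the same type, or NULL */
    };

    int main(void)
    {
        struct reserved_range r2 = { 0x5000, 0x5fff, NULL };
        struct reserved_range r1 = { 0x1000, 0x1fff, &r2 };
        struct reserved_range *range;

        /* Same shape as the macro: start at the head, follow ->chain while end != 0. */
        for (range = &r1; range && range->end; range = range->chain)
            printf("reserved [%#lx-%#lx]\n", range->start, range->end);

        return 0;
    }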
|
/linux/security/selinux/ss/ |
context.h |
     * identity, a role, a type and a MLS range.
    ...
        struct mls_range range;
    ...
        memset(&c->range, 0, sizeof(c->range));                 /* mls_context_init() */
    ...
        /* mls_context_cpy() */
        dst->range.level[0].sens = src->range.level[0].sens;
        rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
        ...
        dst->range.level[1].sens = src->range.level[1].sens;
        rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
        ...
        ebitmap_destroy(&dst->range.level[0].cat);
    ...
     * Sets both levels in the MLS range of 'dst' to the low level of 'src'.
    ...
        dst->range.level[0].sens = src->range.level[0].sens;    /* mls_context_cpy_low() */
    [all …]
|
mls.c |
        /* mls_compute_context_len() */
        u32 index_sens = context->range.level[l].sens;
        ...
        e = &context->range.level[l].cat;
        ...
        if (mls_level_eq(&context->range.level[0],
                         &context->range.level[1]))
    ...
        /* mls_sid_to_context() */
                        context->range.level[l].sens - 1));
        ...
        e = &context->range.level[l].cat;
        ...
        if (mls_level_eq(&context->range.level[0],
                         &context->range.level[1]))
    ...
        /* mls_context_isvalid() */
        if (!mls_range_isvalid(p, &c->range))
        ...
         * User must be authorized for the MLS range.
    [all …]
|
/linux/tools/testing/selftests/net/ |
ip_local_port_range.c |
    static void unpack_port_range(__u32 range, __u16 *lo, __u16 *hi)
        *lo = range & 0xffff;
        *hi = range >> 16;
    ...
    static int get_ip_local_port_range(int fd, __u32 *range)
        ...
        *range = val;
    ...
        /* Empty range: low port > high port */
    ...
        { 30000, 39999 }, /* socket range below netns range */
        { 50000, 59999 }, /* socket range above netns range */
    ...
         * that the range wasn't clamped to a single port from
         * the netns range. That is [40000, 40000] or [49999,
    [all …]
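The selftest packs a per-socket port range into a single 32-bit value: low port in the bottom 16 bits, high port in the top 16 bits, which is what unpack_port_range() above undoes. A standalone sketch of both directions (pack_port_range() here is inferred from the unpack logic shown, not copied from the selftest):

    #include <stdint.h>
    #include <stdio.h>

    /* Low port in bits 0-15, high port in bits 16-31. */
    static uint32_t pack_port_range(uint16_t lo, uint16_t hi)
    {
        return (uint32_t)hi << 16 | lo;
    }

    /* Inverse of the above, mirroring unpack_port_range() in the excerpt. */
    static void unpack_port_range(uint32_t range, uint16_t *lo, uint16_t *hi)
    {
        *lo = range & 0xffff;
        *hi = range >> 16;
    }

    int main(void)
    {
        uint16_t lo, hi;
        uint32_t range = pack_port_range(40000, 49999);

        unpack_port_range(range, &lo, &hi);
        printf("range %#x -> [%u, %u]\n", range, lo, hi);
        return 0;
    }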
|
/linux/mm/ |
hmm.c |
        struct hmm_range *range;
    ...
    ... hmm_pfns_fill(..., struct hmm_range *range, unsigned long cpu_flags)
        unsigned long i = (addr - range->start) >> PAGE_SHIFT;
        ...
        range->hmm_pfns[i] = cpu_flags;
    ...
     * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
     * @addr: range virtual start address (inclusive)
     * @end: range virtual end address (exclusive)
     * ...
     * or whenever there is no page directory covering the virtual address range.
    ...
        struct hmm_range *range = hmm_vma_walk->range;          /* hmm_pte_need_fault() */
        ...
         * consider the default flags requested for the range. The API can
    [all …]
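hmm_pfns_fill() converts a virtual address inside the range into an index into the hmm_pfns[] array by dividing the byte offset from range->start by the page size (a shift by PAGE_SHIFT). A minimal standalone illustration of that arithmetic, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assume 4 KiB pages for the illustration */

    /*
     * Index of the page covering 'addr' within a range starting at
     * 'range_start': byte offset divided by the page size, the same
     * arithmetic as "(addr - range->start) >> PAGE_SHIFT" above.
     */
    static unsigned long pfn_index(unsigned long addr, unsigned long range_start)
    {
        return (addr - range_start) >> PAGE_SHIFT;
    }

    int main(void)
    {
        unsigned long start = 0x7f0000000000UL;

        /* an address in the third page of the range -> index 2 */
        printf("%lu\n", pfn_index(start + 2 * 4096 + 123, start));
        return 0;
    }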
|
memremap.c |
    static void pgmap_array_delete(struct range *range)
        xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
    ...
        struct range *range = &pgmap->ranges[range_id];         /* pfn_first() */
        unsigned long pfn = PHYS_PFN(range->start);
    ...
        struct range *range = &pgmap->ranges[i];                /* pgmap_pfn_valid() */
        if (pfn >= PHYS_PFN(range…
    ...
        const struct range *range = &pgmap->ranges[range_id];   /* pfn_end() */
    ...
        struct range *range = &pgmap->ranges[range_id];         /* pageunmap_range() */
    ...
        struct range *range = &pgmap->ranges[range_id];         /* pagemap_range() */
    [all …]
|
/linux/block/ |
badblocks.c |
     * When the caller of badblocks_set() wants to set a range of bad blocks, the
     * setting range can be acked or unacked. And the setting range may merge,
     * overwrite, skip the overlapped already set range, depends on who they are
     * ...
     * more complicated when the setting range covers multiple already set bad block
     * ranges, with restrictions of maximum length of each bad range and the bad
     * ...
     * for setting a large range of bad blocks, we can handle it by dividing the
     * large range into smaller ones when encounter overlap, max range length or
     * bad table full conditions. Every time only a smaller piece of the bad range
     * ...
     * When setting a range of bad blocks to the bad table, the simplified situations
     * ...
     * prefix E, and the setting bad blocks range is naming with prefix S)
    [all …]
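The comment describes handling an oversized badblocks_set() request by carving it into smaller pieces, bounded by the maximum length allowed per bad-block table entry (the real code also splits at overlaps and stops when the table fills, which this sketch ignores). A minimal standalone sketch of the length-based carving loop, with the per-entry cap as an assumed constant:

    #include <stdio.h>

    /* Assumed per-entry cap for the illustration; the real table has its own limit. */
    #define MAX_BAD_LEN 512ULL

    /*
     * Carve one large "set bad blocks" request [start, start + len) into
     * pieces no longer than MAX_BAD_LEN, handling one smaller piece of the
     * bad range at a time, as the comment above describes.
     */
    static void set_badblocks_split(unsigned long long start, unsigned long long len)
    {
        while (len) {
            unsigned long long chunk = len > MAX_BAD_LEN ? MAX_BAD_LEN : len;

            printf("set bad range [%llu, %llu)\n", start, start + chunk);
            start += chunk;
            len -= chunk;
        }
    }

    int main(void)
    {
        set_badblocks_split(1000, 1300);   /* two full 512-sector pieces plus a 276-sector tail */
        return 0;
    }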
|
/linux/include/linux/ |
range.h |
    struct range {
    ...
    static inline u64 range_len(const struct range *range)
        return range->end - range->start + 1;

    static inline bool range_contains(struct range *r1, struct range *r2)
    ...
    int add_range(struct range *range, int az, int nr_range,
    ...
    int add_range_with_merge(struct range *range, int az, int nr_range,
    ...
    void subtract_range(struct range *range, int az, u64 start, u64 end);

    int clean_sort_range(struct range *range, int az);

    void sort_range(struct range *range, int nr_range);
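range_len() shows that struct range uses an inclusive end. range_contains(r1, r2)'s body is not part of the excerpt; the version below assumes the natural "r1 fully contains r2" check with inclusive bounds, so treat it as a sketch of the expected semantics rather than a copy of the header:

    #include <stdbool.h>
    #include <stdio.h>

    /* Local copies so the example builds outside the kernel tree. */
    struct range {
        unsigned long long start;
        unsigned long long end;   /* inclusive, per range_len() above */
    };

    static unsigned long long range_len(const struct range *range)
    {
        return range->end - range->start + 1;
    }

    /* Assumed semantics: does r1 fully contain r2? */
    static bool range_contains(const struct range *r1, const struct range *r2)
    {
        return r1->start <= r2->start && r1->end >= r2->end;
    }

    int main(void)
    {
        struct range big = { 0x1000, 0x1fff };
        struct range small = { 0x1800, 0x18ff };

        printf("len=%llu contains=%d\n", range_len(&big), range_contains(&big, &small));
        return 0;
    }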
|
mmu_notifier.h |
     * @MMU_NOTIFY_UNMAP: either munmap() that unmap the range or a mremap() that
     * move the range
     * ...
     * @MMU_NOTIFY_PROTECTION_VMA: update is due to protection change for the range
     * ie using the vma access permission (vm_page_prot) to update the whole range
     * ...
     * pages in the range so to mirror those changes the user must inspect the CPU
     * ...
     * that the mm refcount is zero and the range is no longer accessible.
     * ...
     * exclusive range the owner will be initialised to the value provided by the
    ...
     * the pages in the range, it has to implement the
     * ...
     * establishment of sptes is forbidden in the range passed to
     * ...
     * range are still mapped and have at least a refcount of one.
    [all …]
|
/linux/drivers/of/ |
address.c |
    u64 (*map)(__be32 *addr, const __be32 *range,
    ...
    static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
        ...
        cp = of_read_number(range + fna, na - fna);
        s = of_read_number(range + na + pna, ns);
    ...
    static u64 of_bus_default_flags_map(__be32 *addr, const __be32 *range, int na,
        ...
        if (*addr != *range)
        ...
        return of_bus_default_map(addr, range, na, ns, pna, fna);
    ...
    static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
        ...
        rf = of_bus_pci_get_flags(range);
        ...
        return of_bus_default_map(addr, range, na, ns, pna, fna);
    [all …]
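A devicetree "ranges" entry is a (child-bus address, parent-bus address, size) triple; the excerpt shows the child base (cp) and size (s) being read out of the range cells with of_read_number(). Translating an address through one such entry is a window check plus an offset, illustrated below with plain 64-bit values and no cell parsing (this is a sketch of the translation step, not of of_bus_default_map() itself):

    #include <stdio.h>

    #define BAD_ADDR (~0ULL)    /* sentinel for "not covered by this entry" */

    /*
     * Translate a child-bus address through one (child_base, parent_base, size)
     * ranges entry: the address must lie inside the child window, and the same
     * offset is applied to the parent base.
     */
    static unsigned long long map_one_range(unsigned long long addr,
                                            unsigned long long child_base,
                                            unsigned long long parent_base,
                                            unsigned long long size)
    {
        if (addr < child_base || addr >= child_base + size)
            return BAD_ADDR;
        return parent_base + (addr - child_base);
    }

    int main(void)
    {
        /* e.g. ranges = <0x0 0xe0000000 0x10000000>; child 0x100 -> CPU 0xe0000100 */
        printf("%#llx\n", map_one_range(0x100, 0x0, 0xe0000000ULL, 0x10000000ULL));
        return 0;
    }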
|
/linux/arch/powerpc/kexec/ |
ranges.c |
            sizeof(struct range));                              /* get_max_nr_ranges() */
    ...
            (mem_rngs->max_nr_ranges * sizeof(struct range)));  /* get_mem_rngs_size() */
    ...
     * __add_mem_range - add a memory range to memory ranges list.
     * @mem_ranges: Range list to add the memory range to.
     * @base: Base address of the range to add.
     * @size: Size of the memory range to add.
    ...
        pr_debug("Added memory range [%#016llx - %#016llx] at index %d\n",
    ...
     * @mem_rngs: Range list to merge.
     *
     * Assumes a sorted range list.
    ...
        struct range *ranges;                                   /* __merge_memory_ranges() */
    [all …]
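__merge_memory_ranges() works on an already sorted list and collapses entries that overlap or touch. A standalone sketch of that kind of pass, assuming the inclusive start/end fields of struct range from include/linux/range.h (the powerpc bookkeeping structs around it are not in the excerpt):

    #include <stdio.h>

    struct range {
        unsigned long long start;
        unsigned long long end;   /* inclusive */
    };

    /*
     * Collapse a sorted array of inclusive ranges in place, merging entries
     * that overlap or are adjacent.  Returns the new count.
     */
    static int merge_sorted_ranges(struct range *r, int nr)
    {
        int i, out = 0;

        for (i = 1; i < nr; i++) {
            if (r[i].start <= r[out].end + 1) {
                /* Overlapping or adjacent: extend the current entry. */
                if (r[i].end > r[out].end)
                    r[out].end = r[i].end;
            } else {
                r[++out] = r[i];
            }
        }
        return nr ? out + 1 : 0;
    }

    int main(void)
    {
        struct range r[] = { {0, 9}, {10, 19}, {30, 39}, {35, 50} };
        int i, nr = merge_sorted_ranges(r, 4);

        for (i = 0; i < nr; i++)
            printf("[%llu-%llu]\n", r[i].start, r[i].end);   /* [0-19] and [30-50] */
        return 0;
    }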
|
/linux/tools/testing/selftests/bpf/prog_tests/ |
reg_bounds.c |
     * GENERIC RANGE STRUCT AND OPERATIONS
    ...
    struct range {
    ...
    static void snprintf_range(enum num_t t, struct strbuf *sb, struct range x)
    ...
    static void print_range(enum num_t t, struct range x, const char *sfx)
    ...
    static const struct range unkn[] = {
    ...
    static struct range unkn_subreg(enum num_t t)
    ...
    static struct range range(enum num_t t, u64 a, u64 b)
        ...
        case U64: return (struct range){ (u64)a, (u64)b };
        case U32: return (struct range){ (u32)a, (u32)b };
        case S64: return (struct range){ (s64)a, (s64)b };
    [all …]
|
/linux/arch/mips/loongson64/ |
init.c |
        /* add_legacy_isa_io() */
        struct logic_pio_hwaddr *range;
        ...
        range = kzalloc(sizeof(*range), GFP_ATOMIC);
        if (!range)
            ...
        range->fwnode = fwnode;
        range->size = size = round_up(size, PAGE_SIZE);
        range->hw_start = hw_start;
        range->flags = LOGIC_PIO_CPU_MMIO;
        ...
        ret = logic_pio_register_range(range);
        ...
            kfree(range);
        ...
        if (range->io_start != 0) {
    [all …]
|
/linux/drivers/net/wireless/ti/wlcore/ |
conf.h |
     * Range: 0 - 0xFFFFFFFF
     * Range: 0 - 200000
     * Range: 0 - 200000
     * Range: 0 - 4096
     * Range: ENABLE_ENERGY_D == 0x140A
     * Range: u16
     * Range: u16
     * Range: 1 - 100
     * Range: RX_QUEUE_TYPE_RX_LOW_PRIORITY, RX_QUEUE_TYPE_RX_HIGH_PRIORITY,
     * Range: CONF_HW_BIT_RATE_* bit mask
    [all …]
|
/linux/drivers/gpu/drm/xe/ |
xe_range_fence.c |
     * xe_range_fence_insert() - range fence insert
     * @tree: range fence tree to insert into
     * @rfence: range fence
     * @ops: range fence ops
     * @start: start address of range fence
     * @last: last address of range fence
     * @fence: dma fence which signals range fence can be removed + freed
    ...
     * xe_range_fence_tree_init() - Init range fence tree
     * @tree: range fence tree
    ...
     * xe_range_fence_tree_fini() - Fini range fence tree
    [all …]
|
/linux/drivers/dax/ |
kmem.c |
    static int dax_kmem_range(struct dev_dax *dev_dax, int i, struct range *r)
        ...
        struct range *range = &dax_range->range;
        ...
        /* memory-block align the hotplug range */
        r->start = ALIGN(range->start, memory_block_size_bytes());
        r->end = ALIGN_DOWN(range->end + 1, memory_block_size_bytes()) - 1;
        ...
        r->start = range->start;
        r->end = range->end;
    ...
        struct range range;                     /* dev_dax_kmem_probe() */
        ...
        rc = dax_kmem_range(dev_dax, i, &range);
        ...
            i, range.start, range.end);
    [all …]
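The alignment step shrinks the inclusive device range inward to memory-block boundaries: the start is rounded up, and the exclusive end (end + 1) is rounded down before converting back to an inclusive end. A standalone sketch of that arithmetic, using local power-of-two helpers in place of the kernel's ALIGN()/ALIGN_DOWN() and an assumed 128 MiB memory block size:

    #include <stdio.h>

    /* Power-of-two alignment helpers (local stand-ins for the kernel macros). */
    #define ALIGN_UP(x, a)    (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))
    #define ALIGN_DOWN(x, a)  ((x) & ~((unsigned long long)(a) - 1))

    int main(void)
    {
        unsigned long long bs = 128ULL << 20;   /* assumed memory block size */
        /* Inclusive device range, unaligned on both sides. */
        unsigned long long start = 0x140000000ULL + 0x100000;
        unsigned long long end   = 0x1c0200000ULL - 1;

        /* Shrink inward, as dax_kmem_range() does before hotplugging. */
        unsigned long long a_start = ALIGN_UP(start, bs);
        unsigned long long a_end   = ALIGN_DOWN(end + 1, bs) - 1;

        printf("[%#llx-%#llx] -> [%#llx-%#llx]\n", start, end, a_start, a_end);
        return 0;
    }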
|
/linux/drivers/net/wireless/ti/wl12xx/ |
conf.h |
     * Range: s8
     * Range: s8
     * Range: 0 - 255 (ms)
     * Range: 0 - 255 (ms)
     * Range: 0 - 255 (ms)
     * Range: 0 - 255 (ms)
     * Range: 0 - 255 (ms)
     * Range: 0 - 255 (ms)
     * Range: 0 - 255 (%)
     * Range: 0 - 255 (%)
    [all …]
|
/linux/drivers/pci/hotplug/ |
ibmphp_res.c |
        /* need to insert our range */
    ...
     * 2. If cannot allocate out of PFMem range, allocate from Mem ranges. PFmemFromMem
    ...
     * assign a -1 and then update once the range
     * ...
     * range actually appears...
    ...
     * This function adds a range into a sorted list of ranges per bus for a particular
     * range type, it then calls another routine to update the range numbers on the
     * ...
     * Input: type of the resource, range to add, current bus
     * Output: 0 or -1, bus and range ptrs
    ...
    static int add_bus_range(int type, struct range_node *range, struct bus_node *bus_cur)
        ...
        if (range->start < range_cur->start)
    [all …]
|
/linux/drivers/gpu/ipu-v3/ |
ipu-ic-csc.c |
     * RGB full-range to RGB limited-range
     * RGB limited-range to RGB full-range
     * YUV full-range to YUV limited-range
     * YUV limited-range to YUV full-range
     * BT.601 RGB full-range to YUV full-range
    /* BT.601 RGB full-range to YUV limited-range */
    /* BT.601 RGB limited-range to YUV full-range */
    /* BT.601 RGB limited-range to YUV limited-range */
     * BT.601 YUV full-range to RGB full-range
    /* BT.601 YUV full-range to RGB limited-range */
    [all …]
|
/linux/net/netfilter/ |
nf_nat_core.c |
    ... nf_nat_inet_in_range(..., const struct nf_nat_range2 *range)
        ...
        return ntohl(t->src.u3.ip) >= ntohl(range->min_addr.ip) &&
               ntohl(t->src.u3.ip) <= ntohl(range->max_addr.ip);
        ...
        return ipv6_addr_cmp(&t->src.u3.in6, &range->min_addr.in6) >= 0 &&
               ipv6_addr_cmp(&t->src.u3.in6, &range->max_addr.in6) <= 0;
    ...
     * that meet the constraints of range.
    ...
    ... nf_in_range(..., const struct nf_nat_range2 *range)
        ...
         * range specified, otherwise let this drag us onto a new src IP.
        ...
        if (range->flags & NF_NAT_RANGE_MAP_IPS &&
            !nf_nat_inet_in_range(tuple, range))
    [all …]
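The IPv4 branch converts the addresses to host byte order with ntohl() before comparing, because comparing the raw network-byte-order words on a little-endian CPU would not follow the numeric (dotted-quad) ordering of the range bounds. A minimal standalone illustration of the same check:

    #include <arpa/inet.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /*
     * Is the network-byte-order IPv4 address 'ip' within [min, max]?
     * The ntohl() conversions give a comparison that matches dotted-quad
     * ordering, mirroring nf_nat_inet_in_range() above.
     */
    static bool ipv4_in_range(uint32_t ip, uint32_t min, uint32_t max)
    {
        return ntohl(ip) >= ntohl(min) && ntohl(ip) <= ntohl(max);
    }

    int main(void)
    {
        uint32_t ip, lo, hi;

        inet_pton(AF_INET, "10.0.1.200", &ip);
        inet_pton(AF_INET, "10.0.0.0", &lo);
        inet_pton(AF_INET, "10.0.2.255", &hi);

        printf("%d\n", ipv4_in_range(ip, lo, hi));   /* 1: inside the range */
        return 0;
    }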
|