#include <xen/xen.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/hvm.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/params.h>
#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <xen/interface/sched.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/system_misc.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>

#include <linux/mm.h>

struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
static struct vcpu_info __percpu *xen_vcpu_info;

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* TODO: to be removed */
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);

static __read_mostly int xen_events_irq = -1;

/* map fgmfn of domid to lpfn in the current domain */
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
			    unsigned int domid)
{
	int rc;
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = domid,
		.size = 1,
		.space = XENMAPSPACE_gmfn_foreign,
	};
	xen_ulong_t idx = fgmfn;
	xen_pfn_t gpfn = lpfn;
	int err = 0;

	set_xen_guest_handle(xatp.idxs, &idx);
	set_xen_guest_handle(xatp.gpfns, &gpfn);
	set_xen_guest_handle(xatp.errs, &err);

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
	if (rc || err) {
		pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
			rc, err, lpfn, fgmfn);
		return 1;
	}
	return 0;
}

struct remap_data {
	xen_pfn_t fgmfn; /* foreign domain's gmfn */
	pgprot_t prot;
	domid_t domid;
	struct vm_area_struct *vma;
	int index;
	struct page **pages;
	struct xen_remap_mfn_info *info;
};

static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	unsigned long pfn = page_to_pfn(page);
	pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));

	if (map_foreign_page(pfn, info->fgmfn, info->domid))
		return -EFAULT;
	set_pte_at(info->vma->vm_mm, addr, ptep, pte);

	return 0;
}
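/*
 * xen_remap_domain_mfn_range - map a range of foreign frames into a vma
 *
 * Backs each page of @vma at @addr with the foreign frame @mfn from
 * @domid, letting remap_pte_fn() above issue one
 * XENMEM_add_to_physmap_range call per page.  Batching is not
 * implemented yet, so only nr == 1 is accepted for now.
 */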
int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t mfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	int err;
	struct remap_data data;

	/* TBD: Batching, current sole caller only does page at a time */
	if (nr > 1)
		return -EINVAL;

	data.fgmfn = mfn;
	data.prot = prot;
	data.domid = domid;
	data.vma = vma;
	data.index = 0;
	data.pages = pages;
	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
				  remap_pte_fn, &data);
	return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);

int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct xen_remove_from_physmap xrp;
		unsigned long rc, pfn;

		pfn = page_to_pfn(pages[i]);

		xrp.domid = DOMID_SELF;
		xrp.gpfn = pfn;
		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
		if (rc) {
			pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
				pfn, rc);
			return rc;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);

/*
 * Register this CPU's vcpu_info with the hypervisor and enable its
 * event-channel upcall IRQ.  Called on the boot CPU from xen_guest_init()
 * and on secondary CPUs via the CPU_STARTING notifier.
 */
static void xen_percpu_init(void)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpup;
	int err;
	int cpu = get_cpu();

	pr_info("Xen: initializing cpu%d\n", cpu);
	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);

	info.mfn = __pa(vcpup) >> PAGE_SHIFT;
	info.offset = offset_in_page(vcpup);

	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	BUG_ON(err);
	per_cpu(xen_vcpu, cpu) = vcpup;

	enable_percpu_irq(xen_events_irq, 0);
	put_cpu();
}

static void xen_restart(enum reboot_mode reboot_mode, const char *cmd)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
	int rc;

	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	BUG_ON(rc);
}

static void xen_power_off(void)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_poweroff };
	int rc;

	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	BUG_ON(rc);
}

static int xen_cpu_notification(struct notifier_block *self,
				unsigned long action,
				void *hcpu)
{
	switch (action) {
	case CPU_STARTING:
		xen_percpu_init();
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block xen_cpu_notifier = {
	.notifier_call = xen_cpu_notification,
};

static irqreturn_t xen_arm_callback(int irq, void *arg)
{
	xen_hvm_evtchn_do_upcall();
	return IRQ_HANDLED;
}

/*
 * See Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
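/*
 * For illustration, the hypervisor node typically looks like this
 * (the addresses, sizes and interrupt specifier below are placeholders;
 * the real values are filled in by Xen when it builds the guest's
 * device tree):
 *
 *	hypervisor {
 *		compatible = "xen,xen-4.3", "xen,xen";
 *		reg = <0xb0000000 0x20000>;
 *		interrupts = <1 15 0xf08>;
 *	};
 *
 * "reg" describes the grant table region and "interrupts" is the event
 * channel upcall interrupt, both parsed below in xen_guest_init().
 */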
#define GRANT_TABLE_PHYSADDR 0
static int __init xen_guest_init(void)
{
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page;
	struct device_node *node;
	int len;
	const char *s = NULL;
	const char *version = NULL;
	const char *xen_prefix = "xen,xen-";
	struct resource res;
	phys_addr_t grant_frames;

	node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!node) {
		pr_debug("No Xen support\n");
		return 0;
	}
	s = of_get_property(node, "compatible", &len);
	if (strlen(xen_prefix) + 3 < len &&
	    !strncmp(xen_prefix, s, strlen(xen_prefix)))
		version = s + strlen(xen_prefix);
	if (version == NULL) {
		pr_debug("Xen version not found\n");
		return 0;
	}
	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
		return 0;
	grant_frames = res.start;
	xen_events_irq = irq_of_parse_and_map(node, 0);
	pr_info("Xen %s support found, events_irq=%d gnttab_frame=%pa\n",
		version, xen_events_irq, &grant_frames);

	/* irq_of_parse_and_map() returns 0 on failure, never a negative value */
	if (xen_events_irq <= 0)
		return -ENODEV;

	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();

	if (!xen_feature(XENFEAT_grant_map_identity)) {
		pr_warn("Please upgrade your Xen.\n"
			"If your platform has any non-coherent DMA devices, they won't work properly.\n");
	}

	if (xen_feature(XENFEAT_dom0))
		xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
	else
		xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			get_zeroed_page(GFP_KERNEL);
	if (!shared_info_page) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page, we use it in the event channel upcall and in some pvclock
	 * related functions.
	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
				       sizeof(struct vcpu_info));
	if (xen_vcpu_info == NULL)
		return -ENOMEM;

	if (gnttab_setup_auto_xlat_frames(grant_frames)) {
		free_percpu(xen_vcpu_info);
		return -ENOMEM;
	}
	gnttab_init();
	if (!xen_initial_domain())
		xenbus_probe(NULL);

	/*
	 * Make sure board-specific code will not set up ops for
	 * cpuidle and cpufreq.
	 */
	disable_cpuidle();
	disable_cpufreq();

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			       "events", &xen_vcpu)) {
		pr_err("Error requesting IRQ %d\n", xen_events_irq);
		return -EINVAL;
	}

	xen_percpu_init();

	register_cpu_notifier(&xen_cpu_notifier);

	return 0;
}
early_initcall(xen_guest_init);
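/*
 * Runs as a late_initcall so the power-off and restart hooks are only
 * installed once xen_guest_init() has had a chance to detect Xen; on
 * bare metal xen_domain() is false and the native handlers are left
 * untouched.
 */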
static int __init xen_pm_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	pm_power_off = xen_power_off;
	arm_pm_restart = xen_restart;

	return 0;
}
late_initcall(xen_pm_init);


/* empty stubs */
void xen_arch_pre_suspend(void) { }
void xen_arch_post_suspend(int suspend_cancelled) { }
void xen_timer_resume(void) { }
void xen_arch_resume(void) { }


/* In the hypervisor.S file. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_tmem_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
EXPORT_SYMBOL_GPL(privcmd_call);