#include <xen/xen.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/hvm.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/params.h>
#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <xen/interface/sched.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/system_misc.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <linux/mm.h>

struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL_GPL(xen_domain_type);

struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
static struct vcpu_info __percpu *xen_vcpu_info;

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* TODO: to be removed */
__read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);

int xen_platform_pci_unplug = XEN_UNPLUG_ALL;
EXPORT_SYMBOL_GPL(xen_platform_pci_unplug);

static __read_mostly int xen_events_irq = -1;

/* map fgmfn of domid to lpfn in the current domain */
static int map_foreign_page(unsigned long lpfn, unsigned long fgmfn,
			    unsigned int domid)
{
	int rc;
	struct xen_add_to_physmap_range xatp = {
		.domid = DOMID_SELF,
		.foreign_domid = domid,
		.size = 1,
		.space = XENMAPSPACE_gmfn_foreign,
	};
	xen_ulong_t idx = fgmfn;
	xen_pfn_t gpfn = lpfn;
	int err = 0;

	set_xen_guest_handle(xatp.idxs, &idx);
	set_xen_guest_handle(xatp.gpfns, &gpfn);
	set_xen_guest_handle(xatp.errs, &err);

	rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap_range, &xatp);
	if (rc || err) {
		pr_warn("Failed to map pfn to mfn rc:%d:%d pfn:%lx mfn:%lx\n",
			rc, err, lpfn, fgmfn);
		return 1;
	}
	return 0;
}

struct remap_data {
	xen_pfn_t fgmfn; /* foreign domain's gmfn */
	pgprot_t prot;
	domid_t domid;
	struct vm_area_struct *vma;
	int index;
	struct page **pages;
	struct xen_remap_mfn_info *info;
};

static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct remap_data *info = data;
	struct page *page = info->pages[info->index++];
	unsigned long pfn = page_to_pfn(page);
	pte_t pte = pfn_pte(pfn, info->prot);

	if (map_foreign_page(pfn, info->fgmfn, info->domid))
		return -EFAULT;
	set_pte_at(info->vma->vm_mm, addr, ptep, pte);

	return 0;
}

int xen_remap_domain_mfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t mfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	int err;
	struct remap_data data;

	/* TBD: Batching, current sole caller only does page at a time */
	if (nr > 1)
		return -EINVAL;

	data.fgmfn = mfn;
	data.prot = prot;
	data.domid = domid;
	data.vma = vma;
	data.index = 0;
	data.pages = pages;
	err = apply_to_page_range(vma->vm_mm, addr, nr << PAGE_SHIFT,
				  remap_pte_fn, &data);
	return err;
}
EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range);

int xen_unmap_domain_mfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	int i;

	for (i = 0; i < nr; i++) {
		struct xen_remove_from_physmap xrp;
		unsigned long rc, pfn;

		pfn = page_to_pfn(pages[i]);

		xrp.domid = DOMID_SELF;
		xrp.gpfn = pfn;
		rc = HYPERVISOR_memory_op(XENMEM_remove_from_physmap, &xrp);
		if (rc) {
			pr_warn("Failed to unmap pfn:%lx rc:%ld\n",
				pfn, rc);
			return rc;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_mfn_range);
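
/*
 * Usage sketch for the two helpers above (illustrative only, not lifted
 * from an in-tree caller; "fgmfn", "domid" and "pages" are assumed to be
 * set up by the caller, with "pages" holding locally allocated backing
 * pages). A driver mapping a single foreign page into a userspace vma,
 * and later tearing the mapping down again, would pair them roughly
 * like this:
 *
 *	err = xen_remap_domain_mfn_range(vma, vma->vm_start, fgmfn, 1,
 *					 vma->vm_page_prot, domid, pages);
 *	if (err)
 *		return err;
 *	...
 *	err = xen_unmap_domain_mfn_range(vma, 1, pages);
 */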

static void __init xen_percpu_init(void *unused)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpup;
	int err;
	int cpu = get_cpu();

	pr_info("Xen: initializing cpu%d\n", cpu);
	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);

	info.mfn = __pa(vcpup) >> PAGE_SHIFT;
	info.offset = offset_in_page(vcpup);

	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	BUG_ON(err);
	per_cpu(xen_vcpu, cpu) = vcpup;

	enable_percpu_irq(xen_events_irq, 0);
}

static void xen_restart(char str, const char *cmd)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_reboot };
	int rc;
	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	if (rc)
		BUG();
}

static void xen_power_off(void)
{
	struct sched_shutdown r = { .reason = SHUTDOWN_poweroff };
	int rc;
	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	if (rc)
		BUG();
}

/*
 * see Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
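
/*
 * For reference, a hypervisor node following that binding looks roughly
 * like the sketch below (the version string, addresses and interrupt
 * specifier are made-up values for illustration). The reg entry at index
 * GRANT_TABLE_PHYSADDR is the grant table region and the interrupt is
 * the event channel upcall:
 *
 *	hypervisor {
 *		compatible = "xen,xen-4.2", "xen,xen";
 *		reg = <0xb0000000 0x20000>;
 *		interrupts = <1 15 0xf08>;
 *	};
 */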

#define GRANT_TABLE_PHYSADDR 0
static int __init xen_guest_init(void)
{
	struct xen_add_to_physmap xatp;
	static struct shared_info *shared_info_page = NULL;
	struct device_node *node;
	int len;
	const char *s = NULL;
	const char *version = NULL;
	const char *xen_prefix = "xen,xen-";
	struct resource res;

	node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!node) {
		pr_debug("No Xen support\n");
		return 0;
	}
	s = of_get_property(node, "compatible", &len);
	if (strlen(xen_prefix) + 3 < len &&
	    !strncmp(xen_prefix, s, strlen(xen_prefix)))
		version = s + strlen(xen_prefix);
	if (version == NULL) {
		pr_debug("Xen version not found\n");
		return 0;
	}
	if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
		return 0;
	xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
	xen_events_irq = irq_of_parse_and_map(node, 0);
	pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
		version, xen_events_irq, xen_hvm_resume_frames);
	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();
	if (xen_feature(XENFEAT_dom0))
		xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
	else
		xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);

	if (!shared_info_page)
		shared_info_page = (struct shared_info *)
			get_zeroed_page(GFP_KERNEL);
	if (!shared_info_page) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = __pa(shared_info_page) >> PAGE_SHIFT;
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page; we use it in the event channel upcall and in some pvclock
	 * related functions.
	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
				       sizeof(struct vcpu_info));
	if (xen_vcpu_info == NULL)
		return -ENOMEM;

	gnttab_init();
	if (!xen_initial_domain())
		xenbus_probe(NULL);

	return 0;
}
core_initcall(xen_guest_init);
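
/*
 * Initcall ordering note: xen_guest_init() runs at core_initcall time so
 * that xen_domain_type, xen_events_irq and xen_vcpu_info are valid before
 * xen_init_events() (postcore_initcall, below) requests the event channel
 * IRQ, which in turn runs before xen_pm_init() (subsys_initcall) installs
 * the power-off and restart hooks.
 */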

static int __init xen_pm_init(void)
{
	pm_power_off = xen_power_off;
	arm_pm_restart = xen_restart;

	return 0;
}
subsys_initcall(xen_pm_init);

static irqreturn_t xen_arm_callback(int irq, void *arg)
{
	xen_hvm_evtchn_do_upcall();
	return IRQ_HANDLED;
}

static int __init xen_init_events(void)
{
	if (!xen_domain() || xen_events_irq < 0)
		return -ENODEV;

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			       "events", &xen_vcpu)) {
		pr_err("Error requesting IRQ %d\n", xen_events_irq);
		return -EINVAL;
	}

	on_each_cpu(xen_percpu_init, NULL, 0);

	return 0;
}
postcore_initcall(xen_init_events);

/* The hypercall stubs below are implemented in hypercall.S. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
EXPORT_SYMBOL_GPL(privcmd_call);
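
/*
 * Example (hypothetical, for illustration only) of calling one of the
 * stubs exported above from modular code, writing a string to the Xen
 * console:
 *
 *	char msg[] = "hello from the guest\n";
 *	HYPERVISOR_console_io(CONSOLEIO_write, strlen(msg), msg);
 */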