17c478bd9Sstevel@tonic-gate /* 27c478bd9Sstevel@tonic-gate * CDDL HEADER START 37c478bd9Sstevel@tonic-gate * 47c478bd9Sstevel@tonic-gate * The contents of this file are subject to the terms of the 500d0963fSdilpreet * Common Development and Distribution License (the "License"). 600d0963fSdilpreet * You may not use this file except in compliance with the License. 77c478bd9Sstevel@tonic-gate * 87c478bd9Sstevel@tonic-gate * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 97c478bd9Sstevel@tonic-gate * or http://www.opensolaris.org/os/licensing. 107c478bd9Sstevel@tonic-gate * See the License for the specific language governing permissions 117c478bd9Sstevel@tonic-gate * and limitations under the License. 127c478bd9Sstevel@tonic-gate * 137c478bd9Sstevel@tonic-gate * When distributing Covered Code, include this CDDL HEADER in each 147c478bd9Sstevel@tonic-gate * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 157c478bd9Sstevel@tonic-gate * If applicable, add the following below this CDDL HEADER, with the 167c478bd9Sstevel@tonic-gate * fields enclosed by brackets "[]" replaced with your own identifying 177c478bd9Sstevel@tonic-gate * information: Portions Copyright [yyyy] [name of copyright owner] 187c478bd9Sstevel@tonic-gate * 197c478bd9Sstevel@tonic-gate * CDDL HEADER END 207c478bd9Sstevel@tonic-gate */ 217c478bd9Sstevel@tonic-gate /* 22b57cd2d3SMark Johnson * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved. 237c478bd9Sstevel@tonic-gate */ 247c478bd9Sstevel@tonic-gate 257c478bd9Sstevel@tonic-gate /* 2612f080e7Smrj * x86 root nexus driver 277c478bd9Sstevel@tonic-gate */ 287c478bd9Sstevel@tonic-gate 297c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h> 307c478bd9Sstevel@tonic-gate #include <sys/conf.h> 317c478bd9Sstevel@tonic-gate #include <sys/autoconf.h> 327c478bd9Sstevel@tonic-gate #include <sys/sysmacros.h> 337c478bd9Sstevel@tonic-gate #include <sys/debug.h> 347c478bd9Sstevel@tonic-gate #include <sys/psw.h> 357c478bd9Sstevel@tonic-gate #include <sys/ddidmareq.h> 367c478bd9Sstevel@tonic-gate #include <sys/promif.h> 377c478bd9Sstevel@tonic-gate #include <sys/devops.h> 387c478bd9Sstevel@tonic-gate #include <sys/kmem.h> 397c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h> 407c478bd9Sstevel@tonic-gate #include <vm/seg.h> 417c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h> 427c478bd9Sstevel@tonic-gate #include <vm/seg_dev.h> 437c478bd9Sstevel@tonic-gate #include <sys/vmem.h> 447c478bd9Sstevel@tonic-gate #include <sys/mman.h> 457c478bd9Sstevel@tonic-gate #include <vm/hat.h> 467c478bd9Sstevel@tonic-gate #include <vm/as.h> 477c478bd9Sstevel@tonic-gate #include <vm/page.h> 487c478bd9Sstevel@tonic-gate #include <sys/avintr.h> 497c478bd9Sstevel@tonic-gate #include <sys/errno.h> 507c478bd9Sstevel@tonic-gate #include <sys/modctl.h> 517c478bd9Sstevel@tonic-gate #include <sys/ddi_impldefs.h> 527c478bd9Sstevel@tonic-gate #include <sys/sunddi.h> 537c478bd9Sstevel@tonic-gate #include <sys/sunndi.h> 547a364d25Sschwartz #include <sys/mach_intr.h> 557c478bd9Sstevel@tonic-gate #include <sys/psm.h> 567c478bd9Sstevel@tonic-gate #include <sys/ontrap.h> 5712f080e7Smrj #include <sys/atomic.h> 5812f080e7Smrj #include <sys/sdt.h> 5912f080e7Smrj #include <sys/rootnex.h> 6012f080e7Smrj #include <vm/hat_i86.h> 6100d0963fSdilpreet #include <sys/ddifm.h> 6236945f79Smrj #include <sys/ddi_isa.h> 63*7ff178cdSJimmy Vetayases #include <sys/apic.h> 647c478bd9Sstevel@tonic-gate 65843e1988Sjohnlev #ifdef __xpv 66843e1988Sjohnlev #include <sys/bootinfo.h> 67843e1988Sjohnlev #include <sys/hypervisor.h> 68843e1988Sjohnlev #include <sys/bootconf.h> 69843e1988Sjohnlev #include <vm/kboot_mmu.h> 703a634bfcSVikram Hegde #endif 713a634bfcSVikram Hegde 723a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 733a634bfcSVikram Hegde #include <sys/immu.h> 74843e1988Sjohnlev #endif 75843e1988Sjohnlev 7686c1f4dcSVikram Hegde 7712f080e7Smrj /* 7812f080e7Smrj * enable/disable extra checking of function parameters. Useful for debugging 7912f080e7Smrj * drivers. 8012f080e7Smrj */ 8112f080e7Smrj #ifdef DEBUG 8212f080e7Smrj int rootnex_alloc_check_parms = 1; 8312f080e7Smrj int rootnex_bind_check_parms = 1; 8412f080e7Smrj int rootnex_bind_check_inuse = 1; 8512f080e7Smrj int rootnex_unbind_verify_buffer = 0; 8612f080e7Smrj int rootnex_sync_check_parms = 1; 8712f080e7Smrj #else 8812f080e7Smrj int rootnex_alloc_check_parms = 0; 8912f080e7Smrj int rootnex_bind_check_parms = 0; 9012f080e7Smrj int rootnex_bind_check_inuse = 0; 9112f080e7Smrj int rootnex_unbind_verify_buffer = 0; 9212f080e7Smrj int rootnex_sync_check_parms = 0; 9312f080e7Smrj #endif 947c478bd9Sstevel@tonic-gate 953a634bfcSVikram Hegde boolean_t rootnex_dmar_not_setup; 963a634bfcSVikram Hegde 977aec1d6eScindi /* Master Abort and Target Abort panic flag */ 987aec1d6eScindi int rootnex_fm_ma_ta_panic_flag = 0; 997aec1d6eScindi 10012f080e7Smrj /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */ 1017c478bd9Sstevel@tonic-gate int rootnex_bind_fail = 1; 1027c478bd9Sstevel@tonic-gate int rootnex_bind_warn = 1; 1037c478bd9Sstevel@tonic-gate uint8_t *rootnex_warn_list; 1047c478bd9Sstevel@tonic-gate /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */ 1057c478bd9Sstevel@tonic-gate #define ROOTNEX_BIND_WARNING (0x1 << 0) 1067c478bd9Sstevel@tonic-gate 1077c478bd9Sstevel@tonic-gate /* 10812f080e7Smrj * revert back to old broken behavior of always sync'ing entire copy buffer. 10912f080e7Smrj * This is useful if be have a buggy driver which doesn't correctly pass in 11012f080e7Smrj * the offset and size into ddi_dma_sync(). 1117c478bd9Sstevel@tonic-gate */ 11212f080e7Smrj int rootnex_sync_ignore_params = 0; 1137c478bd9Sstevel@tonic-gate 1147c478bd9Sstevel@tonic-gate /* 11512f080e7Smrj * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1 11612f080e7Smrj * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a 11712f080e7Smrj * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit 11812f080e7Smrj * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65 11912f080e7Smrj * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages 12012f080e7Smrj * (< 8K). We will still need to allocate the copy buffer during bind though 12112f080e7Smrj * (if we need one). These can only be modified in /etc/system before rootnex 12212f080e7Smrj * attach. 1237c478bd9Sstevel@tonic-gate */ 12412f080e7Smrj #if defined(__amd64) 12512f080e7Smrj int rootnex_prealloc_cookies = 65; 12612f080e7Smrj int rootnex_prealloc_windows = 4; 12712f080e7Smrj int rootnex_prealloc_copybuf = 2; 12812f080e7Smrj #else 12912f080e7Smrj int rootnex_prealloc_cookies = 33; 13012f080e7Smrj int rootnex_prealloc_windows = 4; 13112f080e7Smrj int rootnex_prealloc_copybuf = 2; 13212f080e7Smrj #endif 1337c478bd9Sstevel@tonic-gate 13412f080e7Smrj /* driver global state */ 13512f080e7Smrj static rootnex_state_t *rootnex_state; 13612f080e7Smrj 13712f080e7Smrj /* shortcut to rootnex counters */ 13812f080e7Smrj static uint64_t *rootnex_cnt; 1397c478bd9Sstevel@tonic-gate 1407c478bd9Sstevel@tonic-gate /* 14112f080e7Smrj * XXX - does x86 even need these or are they left over from the SPARC days? 1427c478bd9Sstevel@tonic-gate */ 14312f080e7Smrj /* statically defined integer/boolean properties for the root node */ 14412f080e7Smrj static rootnex_intprop_t rootnex_intprp[] = { 14512f080e7Smrj { "PAGESIZE", PAGESIZE }, 14612f080e7Smrj { "MMU_PAGESIZE", MMU_PAGESIZE }, 14712f080e7Smrj { "MMU_PAGEOFFSET", MMU_PAGEOFFSET }, 14812f080e7Smrj { DDI_RELATIVE_ADDRESSING, 1 }, 14912f080e7Smrj }; 15012f080e7Smrj #define NROOT_INTPROPS (sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t)) 1517c478bd9Sstevel@tonic-gate 152843e1988Sjohnlev #ifdef __xpv 153843e1988Sjohnlev typedef maddr_t rootnex_addr_t; 154843e1988Sjohnlev #define ROOTNEX_PADDR_TO_RBASE(xinfo, pa) \ 155843e1988Sjohnlev (DOMAIN_IS_INITDOMAIN(xinfo) ? pa_to_ma(pa) : (pa)) 156843e1988Sjohnlev #else 157843e1988Sjohnlev typedef paddr_t rootnex_addr_t; 158843e1988Sjohnlev #endif 159843e1988Sjohnlev 16020906b23SVikram Hegde #if !defined(__xpv) 161*7ff178cdSJimmy Vetayases char _depends_on[] = "misc/iommulib misc/acpica"; 16220906b23SVikram Hegde #endif 1637c478bd9Sstevel@tonic-gate 16412f080e7Smrj static struct cb_ops rootnex_cb_ops = { 16512f080e7Smrj nodev, /* open */ 16612f080e7Smrj nodev, /* close */ 16712f080e7Smrj nodev, /* strategy */ 16812f080e7Smrj nodev, /* print */ 16912f080e7Smrj nodev, /* dump */ 17012f080e7Smrj nodev, /* read */ 17112f080e7Smrj nodev, /* write */ 17212f080e7Smrj nodev, /* ioctl */ 17312f080e7Smrj nodev, /* devmap */ 17412f080e7Smrj nodev, /* mmap */ 17512f080e7Smrj nodev, /* segmap */ 17612f080e7Smrj nochpoll, /* chpoll */ 17712f080e7Smrj ddi_prop_op, /* cb_prop_op */ 17812f080e7Smrj NULL, /* struct streamtab */ 17912f080e7Smrj D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */ 18012f080e7Smrj CB_REV, /* Rev */ 18112f080e7Smrj nodev, /* cb_aread */ 18212f080e7Smrj nodev /* cb_awrite */ 18312f080e7Smrj }; 1847c478bd9Sstevel@tonic-gate 18512f080e7Smrj static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, 1867c478bd9Sstevel@tonic-gate off_t offset, off_t len, caddr_t *vaddrp); 18712f080e7Smrj static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, 1887c478bd9Sstevel@tonic-gate struct hat *hat, struct seg *seg, caddr_t addr, 1897c478bd9Sstevel@tonic-gate struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock); 19012f080e7Smrj static int rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, 1917c478bd9Sstevel@tonic-gate struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep); 19212f080e7Smrj static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, 19312f080e7Smrj ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg, 19412f080e7Smrj ddi_dma_handle_t *handlep); 19512f080e7Smrj static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, 19612f080e7Smrj ddi_dma_handle_t handle); 19712f080e7Smrj static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 19812f080e7Smrj ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 19912f080e7Smrj ddi_dma_cookie_t *cookiep, uint_t *ccountp); 20012f080e7Smrj static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 20112f080e7Smrj ddi_dma_handle_t handle); 20212f080e7Smrj static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, 20312f080e7Smrj ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags); 20412f080e7Smrj static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, 20512f080e7Smrj ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp, 20612f080e7Smrj ddi_dma_cookie_t *cookiep, uint_t *ccountp); 20712f080e7Smrj static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, 2087c478bd9Sstevel@tonic-gate ddi_dma_handle_t handle, enum ddi_dma_ctlops request, 2097c478bd9Sstevel@tonic-gate off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags); 21012f080e7Smrj static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, 21112f080e7Smrj ddi_ctl_enum_t ctlop, void *arg, void *result); 21200d0963fSdilpreet static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap, 21300d0963fSdilpreet ddi_iblock_cookie_t *ibc); 21412f080e7Smrj static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, 21512f080e7Smrj ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result); 216*7ff178cdSJimmy Vetayases static int rootnex_alloc_intr_fixed(dev_info_t *, ddi_intr_handle_impl_t *, 217*7ff178cdSJimmy Vetayases void *); 218*7ff178cdSJimmy Vetayases static int rootnex_free_intr_fixed(dev_info_t *, ddi_intr_handle_impl_t *); 2197c478bd9Sstevel@tonic-gate 22020906b23SVikram Hegde static int rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip, 22120906b23SVikram Hegde ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg, 22220906b23SVikram Hegde ddi_dma_handle_t *handlep); 22320906b23SVikram Hegde static int rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip, 22420906b23SVikram Hegde ddi_dma_handle_t handle); 22520906b23SVikram Hegde static int rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 22620906b23SVikram Hegde ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 22720906b23SVikram Hegde ddi_dma_cookie_t *cookiep, uint_t *ccountp); 22820906b23SVikram Hegde static int rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 22920906b23SVikram Hegde ddi_dma_handle_t handle); 2303a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 23120906b23SVikram Hegde static void rootnex_coredma_reset_cookies(dev_info_t *dip, 23220906b23SVikram Hegde ddi_dma_handle_t handle); 23320906b23SVikram Hegde static int rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 23494f1124eSVikram Hegde ddi_dma_cookie_t **cookiepp, uint_t *ccountp); 23594f1124eSVikram Hegde static int rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 23694f1124eSVikram Hegde ddi_dma_cookie_t *cookiep, uint_t ccount); 23794f1124eSVikram Hegde static int rootnex_coredma_clear_cookies(dev_info_t *dip, 23894f1124eSVikram Hegde ddi_dma_handle_t handle); 23994f1124eSVikram Hegde static int rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle); 2405dfdb46bSVikram Hegde #endif 24120906b23SVikram Hegde static int rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, 24220906b23SVikram Hegde ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags); 24320906b23SVikram Hegde static int rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, 24420906b23SVikram Hegde ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp, 24520906b23SVikram Hegde ddi_dma_cookie_t *cookiep, uint_t *ccountp); 2467c478bd9Sstevel@tonic-gate 2477c478bd9Sstevel@tonic-gate static struct bus_ops rootnex_bus_ops = { 2487c478bd9Sstevel@tonic-gate BUSO_REV, 2497c478bd9Sstevel@tonic-gate rootnex_map, 2507c478bd9Sstevel@tonic-gate NULL, 2517c478bd9Sstevel@tonic-gate NULL, 2527c478bd9Sstevel@tonic-gate NULL, 2537c478bd9Sstevel@tonic-gate rootnex_map_fault, 2547c478bd9Sstevel@tonic-gate rootnex_dma_map, 2557c478bd9Sstevel@tonic-gate rootnex_dma_allochdl, 2567c478bd9Sstevel@tonic-gate rootnex_dma_freehdl, 2577c478bd9Sstevel@tonic-gate rootnex_dma_bindhdl, 2587c478bd9Sstevel@tonic-gate rootnex_dma_unbindhdl, 25912f080e7Smrj rootnex_dma_sync, 2607c478bd9Sstevel@tonic-gate rootnex_dma_win, 2617c478bd9Sstevel@tonic-gate rootnex_dma_mctl, 2627c478bd9Sstevel@tonic-gate rootnex_ctlops, 2637c478bd9Sstevel@tonic-gate ddi_bus_prop_op, 2647c478bd9Sstevel@tonic-gate i_ddi_rootnex_get_eventcookie, 2657c478bd9Sstevel@tonic-gate i_ddi_rootnex_add_eventcall, 2667c478bd9Sstevel@tonic-gate i_ddi_rootnex_remove_eventcall, 2677c478bd9Sstevel@tonic-gate i_ddi_rootnex_post_event, 2687c478bd9Sstevel@tonic-gate 0, /* bus_intr_ctl */ 2697c478bd9Sstevel@tonic-gate 0, /* bus_config */ 2707c478bd9Sstevel@tonic-gate 0, /* bus_unconfig */ 27100d0963fSdilpreet rootnex_fm_init, /* bus_fm_init */ 2727c478bd9Sstevel@tonic-gate NULL, /* bus_fm_fini */ 2737c478bd9Sstevel@tonic-gate NULL, /* bus_fm_access_enter */ 2747c478bd9Sstevel@tonic-gate NULL, /* bus_fm_access_exit */ 2757c478bd9Sstevel@tonic-gate NULL, /* bus_powr */ 2767c478bd9Sstevel@tonic-gate rootnex_intr_ops /* bus_intr_op */ 2777c478bd9Sstevel@tonic-gate }; 2787c478bd9Sstevel@tonic-gate 27912f080e7Smrj static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 28012f080e7Smrj static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 2813a634bfcSVikram Hegde static int rootnex_quiesce(dev_info_t *dip); 2827c478bd9Sstevel@tonic-gate 2837c478bd9Sstevel@tonic-gate static struct dev_ops rootnex_ops = { 2847c478bd9Sstevel@tonic-gate DEVO_REV, 28512f080e7Smrj 0, 28612f080e7Smrj ddi_no_info, 2877c478bd9Sstevel@tonic-gate nulldev, 28812f080e7Smrj nulldev, 2897c478bd9Sstevel@tonic-gate rootnex_attach, 29012f080e7Smrj rootnex_detach, 29112f080e7Smrj nulldev, 29212f080e7Smrj &rootnex_cb_ops, 29319397407SSherry Moore &rootnex_bus_ops, 29419397407SSherry Moore NULL, 2953a634bfcSVikram Hegde rootnex_quiesce, /* quiesce */ 2967c478bd9Sstevel@tonic-gate }; 2977c478bd9Sstevel@tonic-gate 29812f080e7Smrj static struct modldrv rootnex_modldrv = { 29912f080e7Smrj &mod_driverops, 300613b2871SRichard Bean "i86pc root nexus", 30112f080e7Smrj &rootnex_ops 3027c478bd9Sstevel@tonic-gate }; 3037c478bd9Sstevel@tonic-gate 30412f080e7Smrj static struct modlinkage rootnex_modlinkage = { 30512f080e7Smrj MODREV_1, 30612f080e7Smrj (void *)&rootnex_modldrv, 30712f080e7Smrj NULL 3087c478bd9Sstevel@tonic-gate }; 3097c478bd9Sstevel@tonic-gate 3103a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 31120906b23SVikram Hegde static iommulib_nexops_t iommulib_nexops = { 31220906b23SVikram Hegde IOMMU_NEXOPS_VERSION, 31320906b23SVikram Hegde "Rootnex IOMMU ops Vers 1.1", 31420906b23SVikram Hegde NULL, 31520906b23SVikram Hegde rootnex_coredma_allochdl, 31620906b23SVikram Hegde rootnex_coredma_freehdl, 31720906b23SVikram Hegde rootnex_coredma_bindhdl, 31820906b23SVikram Hegde rootnex_coredma_unbindhdl, 31920906b23SVikram Hegde rootnex_coredma_reset_cookies, 32020906b23SVikram Hegde rootnex_coredma_get_cookies, 32194f1124eSVikram Hegde rootnex_coredma_set_cookies, 32294f1124eSVikram Hegde rootnex_coredma_clear_cookies, 32394f1124eSVikram Hegde rootnex_coredma_get_sleep_flags, 32420906b23SVikram Hegde rootnex_coredma_sync, 32520906b23SVikram Hegde rootnex_coredma_win, 326b51bbbf5SVikram Hegde rootnex_dma_map, 327b51bbbf5SVikram Hegde rootnex_dma_mctl 32820906b23SVikram Hegde }; 3295dfdb46bSVikram Hegde #endif 3307c478bd9Sstevel@tonic-gate 33112f080e7Smrj /* 33212f080e7Smrj * extern hacks 33312f080e7Smrj */ 33412f080e7Smrj extern struct seg_ops segdev_ops; 33512f080e7Smrj extern int ignore_hardware_nodes; /* force flag from ddi_impl.c */ 33612f080e7Smrj #ifdef DDI_MAP_DEBUG 33712f080e7Smrj extern int ddi_map_debug_flag; 33812f080e7Smrj #define ddi_map_debug if (ddi_map_debug_flag) prom_printf 33912f080e7Smrj #endif 34012f080e7Smrj extern void i86_pp_map(page_t *pp, caddr_t kaddr); 34112f080e7Smrj extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr); 34212f080e7Smrj extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *, 34312f080e7Smrj psm_intr_op_t, int *); 34412f080e7Smrj extern int impl_ddi_sunbus_initchild(dev_info_t *dip); 34512f080e7Smrj extern void impl_ddi_sunbus_removechild(dev_info_t *dip); 34636945f79Smrj 34712f080e7Smrj /* 34812f080e7Smrj * Use device arena to use for device control register mappings. 34912f080e7Smrj * Various kernel memory walkers (debugger, dtrace) need to know 35012f080e7Smrj * to avoid this address range to prevent undesired device activity. 35112f080e7Smrj */ 35212f080e7Smrj extern void *device_arena_alloc(size_t size, int vm_flag); 35312f080e7Smrj extern void device_arena_free(void * vaddr, size_t size); 35412f080e7Smrj 35512f080e7Smrj 35612f080e7Smrj /* 35712f080e7Smrj * Internal functions 35812f080e7Smrj */ 35912f080e7Smrj static int rootnex_dma_init(); 36012f080e7Smrj static void rootnex_add_props(dev_info_t *); 36112f080e7Smrj static int rootnex_ctl_reportdev(dev_info_t *dip); 36212f080e7Smrj static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum); 36312f080e7Smrj static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp); 36412f080e7Smrj static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp); 36512f080e7Smrj static int rootnex_map_handle(ddi_map_req_t *mp); 36612f080e7Smrj static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp); 36712f080e7Smrj static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize); 36812f080e7Smrj static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, 36912f080e7Smrj ddi_dma_attr_t *attr); 37012f080e7Smrj static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl, 37112f080e7Smrj rootnex_sglinfo_t *sglinfo); 37212f080e7Smrj static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 37312f080e7Smrj rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag); 37412f080e7Smrj static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 37512f080e7Smrj rootnex_dma_t *dma, ddi_dma_attr_t *attr); 37612f080e7Smrj static void rootnex_teardown_copybuf(rootnex_dma_t *dma); 37712f080e7Smrj static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 37812f080e7Smrj ddi_dma_attr_t *attr, int kmflag); 37912f080e7Smrj static void rootnex_teardown_windows(rootnex_dma_t *dma); 38012f080e7Smrj static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 38112f080e7Smrj rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset); 38212f080e7Smrj static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, 38312f080e7Smrj rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset, 38412f080e7Smrj size_t *copybuf_used, page_t **cur_pp); 38512f080e7Smrj static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, 38612f080e7Smrj rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, 38712f080e7Smrj ddi_dma_attr_t *attr, off_t cur_offset); 38812f080e7Smrj static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, 38912f080e7Smrj rootnex_dma_t *dma, rootnex_window_t **windowp, 39012f080e7Smrj ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used); 39112f080e7Smrj static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, 39212f080e7Smrj rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie); 39312f080e7Smrj static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win, 39412f080e7Smrj off_t offset, size_t size, uint_t cache_flags); 39512f080e7Smrj static int rootnex_verify_buffer(rootnex_dma_t *dma); 39600d0963fSdilpreet static int rootnex_dma_check(dev_info_t *dip, const void *handle, 39700d0963fSdilpreet const void *comp_addr, const void *not_used); 39807c6692fSMark Johnson static boolean_t rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, 39907c6692fSMark Johnson rootnex_sglinfo_t *sglinfo); 40012f080e7Smrj 40112f080e7Smrj /* 40212f080e7Smrj * _init() 40312f080e7Smrj * 40412f080e7Smrj */ 4057c478bd9Sstevel@tonic-gate int 4067c478bd9Sstevel@tonic-gate _init(void) 4077c478bd9Sstevel@tonic-gate { 40812f080e7Smrj 40912f080e7Smrj rootnex_state = NULL; 41012f080e7Smrj return (mod_install(&rootnex_modlinkage)); 4117c478bd9Sstevel@tonic-gate } 4127c478bd9Sstevel@tonic-gate 41312f080e7Smrj 41412f080e7Smrj /* 41512f080e7Smrj * _info() 41612f080e7Smrj * 41712f080e7Smrj */ 41812f080e7Smrj int 41912f080e7Smrj _info(struct modinfo *modinfop) 42012f080e7Smrj { 42112f080e7Smrj return (mod_info(&rootnex_modlinkage, modinfop)); 42212f080e7Smrj } 42312f080e7Smrj 42412f080e7Smrj 42512f080e7Smrj /* 42612f080e7Smrj * _fini() 42712f080e7Smrj * 42812f080e7Smrj */ 4297c478bd9Sstevel@tonic-gate int 4307c478bd9Sstevel@tonic-gate _fini(void) 4317c478bd9Sstevel@tonic-gate { 4327c478bd9Sstevel@tonic-gate return (EBUSY); 4337c478bd9Sstevel@tonic-gate } 4347c478bd9Sstevel@tonic-gate 43512f080e7Smrj 43612f080e7Smrj /* 43712f080e7Smrj * rootnex_attach() 43812f080e7Smrj * 43912f080e7Smrj */ 44012f080e7Smrj static int 44112f080e7Smrj rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 4427c478bd9Sstevel@tonic-gate { 4437aec1d6eScindi int fmcap; 44412f080e7Smrj int e; 44512f080e7Smrj 44612f080e7Smrj switch (cmd) { 44712f080e7Smrj case DDI_ATTACH: 44812f080e7Smrj break; 44912f080e7Smrj case DDI_RESUME: 4503a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 4513a634bfcSVikram Hegde return (immu_unquiesce()); 4523a634bfcSVikram Hegde #else 45312f080e7Smrj return (DDI_SUCCESS); 4543a634bfcSVikram Hegde #endif 45512f080e7Smrj default: 45612f080e7Smrj return (DDI_FAILURE); 4577c478bd9Sstevel@tonic-gate } 4587c478bd9Sstevel@tonic-gate 4597c478bd9Sstevel@tonic-gate /* 46012f080e7Smrj * We should only have one instance of rootnex. Save it away since we 46112f080e7Smrj * don't have an easy way to get it back later. 4627c478bd9Sstevel@tonic-gate */ 46312f080e7Smrj ASSERT(rootnex_state == NULL); 46412f080e7Smrj rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP); 4657c478bd9Sstevel@tonic-gate 46612f080e7Smrj rootnex_state->r_dip = dip; 4677aec1d6eScindi rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15); 46812f080e7Smrj rootnex_state->r_reserved_msg_printed = B_FALSE; 46912f080e7Smrj rootnex_cnt = &rootnex_state->r_counters[0]; 4707c478bd9Sstevel@tonic-gate 4717aec1d6eScindi /* 4727aec1d6eScindi * Set minimum fm capability level for i86pc platforms and then 4737aec1d6eScindi * initialize error handling. Since we're the rootnex, we don't 4747aec1d6eScindi * care what's returned in the fmcap field. 4757aec1d6eScindi */ 47600d0963fSdilpreet ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE | 47700d0963fSdilpreet DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE; 4787aec1d6eScindi fmcap = ddi_system_fmcap; 4797aec1d6eScindi ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc); 4807aec1d6eScindi 48112f080e7Smrj /* initialize DMA related state */ 48212f080e7Smrj e = rootnex_dma_init(); 48312f080e7Smrj if (e != DDI_SUCCESS) { 48412f080e7Smrj kmem_free(rootnex_state, sizeof (rootnex_state_t)); 48512f080e7Smrj return (DDI_FAILURE); 48612f080e7Smrj } 48712f080e7Smrj 48812f080e7Smrj /* Add static root node properties */ 48912f080e7Smrj rootnex_add_props(dip); 49012f080e7Smrj 49112f080e7Smrj /* since we can't call ddi_report_dev() */ 49212f080e7Smrj cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip)); 49312f080e7Smrj 49412f080e7Smrj /* Initialize rootnex event handle */ 49512f080e7Smrj i_ddi_rootnex_init_events(dip); 49612f080e7Smrj 4973a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 49820906b23SVikram Hegde e = iommulib_nexus_register(dip, &iommulib_nexops, 49920906b23SVikram Hegde &rootnex_state->r_iommulib_handle); 50020906b23SVikram Hegde 50120906b23SVikram Hegde ASSERT(e == DDI_SUCCESS); 50220906b23SVikram Hegde #endif 50320906b23SVikram Hegde 50412f080e7Smrj return (DDI_SUCCESS); 50512f080e7Smrj } 50612f080e7Smrj 50712f080e7Smrj 50812f080e7Smrj /* 50912f080e7Smrj * rootnex_detach() 51012f080e7Smrj * 51112f080e7Smrj */ 5127c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 5137c478bd9Sstevel@tonic-gate static int 51412f080e7Smrj rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 5157c478bd9Sstevel@tonic-gate { 51612f080e7Smrj switch (cmd) { 51712f080e7Smrj case DDI_SUSPEND: 5183a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 5193a634bfcSVikram Hegde return (immu_quiesce()); 5203a634bfcSVikram Hegde #else 5213a634bfcSVikram Hegde return (DDI_SUCCESS); 5223a634bfcSVikram Hegde #endif 52312f080e7Smrj default: 52412f080e7Smrj return (DDI_FAILURE); 52512f080e7Smrj } 5263a634bfcSVikram Hegde /*NOTREACHED*/ 5277c478bd9Sstevel@tonic-gate 52812f080e7Smrj } 5297c478bd9Sstevel@tonic-gate 5307c478bd9Sstevel@tonic-gate 53112f080e7Smrj /* 53212f080e7Smrj * rootnex_dma_init() 53312f080e7Smrj * 53412f080e7Smrj */ 53512f080e7Smrj /*ARGSUSED*/ 53612f080e7Smrj static int 53712f080e7Smrj rootnex_dma_init() 53812f080e7Smrj { 53912f080e7Smrj size_t bufsize; 54012f080e7Smrj 54112f080e7Smrj 54212f080e7Smrj /* 54312f080e7Smrj * size of our cookie/window/copybuf state needed in dma bind that we 54412f080e7Smrj * pre-alloc in dma_alloc_handle 54512f080e7Smrj */ 54612f080e7Smrj rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies; 54712f080e7Smrj rootnex_state->r_prealloc_size = 54812f080e7Smrj (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) + 54912f080e7Smrj (rootnex_prealloc_windows * sizeof (rootnex_window_t)) + 55012f080e7Smrj (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t)); 55112f080e7Smrj 55212f080e7Smrj /* 55312f080e7Smrj * setup DDI DMA handle kmem cache, align each handle on 64 bytes, 55412f080e7Smrj * allocate 16 extra bytes for struct pointer alignment 55512f080e7Smrj * (p->dmai_private & dma->dp_prealloc_buffer) 55612f080e7Smrj */ 55712f080e7Smrj bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) + 55812f080e7Smrj rootnex_state->r_prealloc_size + 0x10; 55912f080e7Smrj rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl", 56012f080e7Smrj bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0); 56112f080e7Smrj if (rootnex_state->r_dmahdl_cache == NULL) { 56212f080e7Smrj return (DDI_FAILURE); 56312f080e7Smrj } 5647c478bd9Sstevel@tonic-gate 5657c478bd9Sstevel@tonic-gate /* 5667c478bd9Sstevel@tonic-gate * allocate array to track which major numbers we have printed warnings 5677c478bd9Sstevel@tonic-gate * for. 5687c478bd9Sstevel@tonic-gate */ 5697c478bd9Sstevel@tonic-gate rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list), 5707c478bd9Sstevel@tonic-gate KM_SLEEP); 5717c478bd9Sstevel@tonic-gate 5727c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 5737c478bd9Sstevel@tonic-gate } 5747c478bd9Sstevel@tonic-gate 5757c478bd9Sstevel@tonic-gate 5767c478bd9Sstevel@tonic-gate /* 57712f080e7Smrj * rootnex_add_props() 57812f080e7Smrj * 5797c478bd9Sstevel@tonic-gate */ 5807c478bd9Sstevel@tonic-gate static void 58112f080e7Smrj rootnex_add_props(dev_info_t *dip) 5827c478bd9Sstevel@tonic-gate { 58312f080e7Smrj rootnex_intprop_t *rpp; 5847c478bd9Sstevel@tonic-gate int i; 5857c478bd9Sstevel@tonic-gate 58612f080e7Smrj /* Add static integer/boolean properties to the root node */ 58712f080e7Smrj rpp = rootnex_intprp; 58812f080e7Smrj for (i = 0; i < NROOT_INTPROPS; i++) { 58912f080e7Smrj (void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip, 59012f080e7Smrj rpp[i].prop_name, rpp[i].prop_value); 59112f080e7Smrj } 5927c478bd9Sstevel@tonic-gate } 5937c478bd9Sstevel@tonic-gate 59412f080e7Smrj 59512f080e7Smrj 5967c478bd9Sstevel@tonic-gate /* 59712f080e7Smrj * ************************* 59812f080e7Smrj * ctlops related routines 59912f080e7Smrj * ************************* 60012f080e7Smrj */ 60112f080e7Smrj 60212f080e7Smrj /* 60312f080e7Smrj * rootnex_ctlops() 6047c478bd9Sstevel@tonic-gate * 6057c478bd9Sstevel@tonic-gate */ 606a195726fSgovinda /*ARGSUSED*/ 6077c478bd9Sstevel@tonic-gate static int 60812f080e7Smrj rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop, 60912f080e7Smrj void *arg, void *result) 6107c478bd9Sstevel@tonic-gate { 61112f080e7Smrj int n, *ptr; 61212f080e7Smrj struct ddi_parent_private_data *pdp; 6137c478bd9Sstevel@tonic-gate 61412f080e7Smrj switch (ctlop) { 61512f080e7Smrj case DDI_CTLOPS_DMAPMAPC: 6167c478bd9Sstevel@tonic-gate /* 61712f080e7Smrj * Return 'partial' to indicate that dma mapping 61812f080e7Smrj * has to be done in the main MMU. 6197c478bd9Sstevel@tonic-gate */ 62012f080e7Smrj return (DDI_DMA_PARTIAL); 6217c478bd9Sstevel@tonic-gate 62212f080e7Smrj case DDI_CTLOPS_BTOP: 6237c478bd9Sstevel@tonic-gate /* 62412f080e7Smrj * Convert byte count input to physical page units. 62512f080e7Smrj * (byte counts that are not a page-size multiple 62612f080e7Smrj * are rounded down) 6277c478bd9Sstevel@tonic-gate */ 62812f080e7Smrj *(ulong_t *)result = btop(*(ulong_t *)arg); 6297c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 6307c478bd9Sstevel@tonic-gate 63112f080e7Smrj case DDI_CTLOPS_PTOB: 6327c478bd9Sstevel@tonic-gate /* 63312f080e7Smrj * Convert size in physical pages to bytes 6347c478bd9Sstevel@tonic-gate */ 63512f080e7Smrj *(ulong_t *)result = ptob(*(ulong_t *)arg); 6367c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 6377c478bd9Sstevel@tonic-gate 63812f080e7Smrj case DDI_CTLOPS_BTOPR: 6397c478bd9Sstevel@tonic-gate /* 64012f080e7Smrj * Convert byte count input to physical page units 64112f080e7Smrj * (byte counts that are not a page-size multiple 64212f080e7Smrj * are rounded up) 6437c478bd9Sstevel@tonic-gate */ 64412f080e7Smrj *(ulong_t *)result = btopr(*(ulong_t *)arg); 64512f080e7Smrj return (DDI_SUCCESS); 64612f080e7Smrj 64712f080e7Smrj case DDI_CTLOPS_INITCHILD: 64812f080e7Smrj return (impl_ddi_sunbus_initchild(arg)); 64912f080e7Smrj 65012f080e7Smrj case DDI_CTLOPS_UNINITCHILD: 65112f080e7Smrj impl_ddi_sunbus_removechild(arg); 65212f080e7Smrj return (DDI_SUCCESS); 65312f080e7Smrj 65412f080e7Smrj case DDI_CTLOPS_REPORTDEV: 65512f080e7Smrj return (rootnex_ctl_reportdev(rdip)); 65612f080e7Smrj 65712f080e7Smrj case DDI_CTLOPS_IOMIN: 6587c478bd9Sstevel@tonic-gate /* 65912f080e7Smrj * Nothing to do here but reflect back.. 6607c478bd9Sstevel@tonic-gate */ 6617c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 6627c478bd9Sstevel@tonic-gate 66312f080e7Smrj case DDI_CTLOPS_REGSIZE: 66412f080e7Smrj case DDI_CTLOPS_NREGS: 66512f080e7Smrj break; 6667c478bd9Sstevel@tonic-gate 66712f080e7Smrj case DDI_CTLOPS_SIDDEV: 66812f080e7Smrj if (ndi_dev_is_prom_node(rdip)) 6697c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 67012f080e7Smrj if (ndi_dev_is_persistent_node(rdip)) 67112f080e7Smrj return (DDI_SUCCESS); 6727c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 6737c478bd9Sstevel@tonic-gate 67412f080e7Smrj case DDI_CTLOPS_POWER: 67512f080e7Smrj return ((*pm_platform_power)((power_req_t *)arg)); 67612f080e7Smrj 677a195726fSgovinda case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */ 67812f080e7Smrj case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */ 67912f080e7Smrj case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */ 68012f080e7Smrj case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */ 681a195726fSgovinda case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */ 682a195726fSgovinda case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */ 68312f080e7Smrj if (!rootnex_state->r_reserved_msg_printed) { 68412f080e7Smrj rootnex_state->r_reserved_msg_printed = B_TRUE; 68512f080e7Smrj cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for " 68612f080e7Smrj "1 or more reserved/obsolete operations."); 6877c478bd9Sstevel@tonic-gate } 68812f080e7Smrj return (DDI_FAILURE); 6897c478bd9Sstevel@tonic-gate 6907c478bd9Sstevel@tonic-gate default: 6917c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 6927c478bd9Sstevel@tonic-gate } 69312f080e7Smrj /* 69412f080e7Smrj * The rest are for "hardware" properties 69512f080e7Smrj */ 69612f080e7Smrj if ((pdp = ddi_get_parent_data(rdip)) == NULL) 69712f080e7Smrj return (DDI_FAILURE); 6987c478bd9Sstevel@tonic-gate 69912f080e7Smrj if (ctlop == DDI_CTLOPS_NREGS) { 70012f080e7Smrj ptr = (int *)result; 70112f080e7Smrj *ptr = pdp->par_nreg; 70212f080e7Smrj } else { 70312f080e7Smrj off_t *size = (off_t *)result; 7047c478bd9Sstevel@tonic-gate 70512f080e7Smrj ptr = (int *)arg; 70612f080e7Smrj n = *ptr; 70712f080e7Smrj if (n >= pdp->par_nreg) { 70812f080e7Smrj return (DDI_FAILURE); 70912f080e7Smrj } 71012f080e7Smrj *size = (off_t)pdp->par_reg[n].regspec_size; 71112f080e7Smrj } 7127c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 7137c478bd9Sstevel@tonic-gate } 7147c478bd9Sstevel@tonic-gate 71512f080e7Smrj 71612f080e7Smrj /* 71712f080e7Smrj * rootnex_ctl_reportdev() 71812f080e7Smrj * 71912f080e7Smrj */ 7207c478bd9Sstevel@tonic-gate static int 72112f080e7Smrj rootnex_ctl_reportdev(dev_info_t *dev) 72212f080e7Smrj { 72312f080e7Smrj int i, n, len, f_len = 0; 72412f080e7Smrj char *buf; 72512f080e7Smrj 72612f080e7Smrj buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP); 72712f080e7Smrj f_len += snprintf(buf, REPORTDEV_BUFSIZE, 72812f080e7Smrj "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev)); 72912f080e7Smrj len = strlen(buf); 73012f080e7Smrj 73112f080e7Smrj for (i = 0; i < sparc_pd_getnreg(dev); i++) { 73212f080e7Smrj 73312f080e7Smrj struct regspec *rp = sparc_pd_getreg(dev, i); 73412f080e7Smrj 73512f080e7Smrj if (i == 0) 73612f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 73712f080e7Smrj ": "); 73812f080e7Smrj else 73912f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 74012f080e7Smrj " and "); 74112f080e7Smrj len = strlen(buf); 74212f080e7Smrj 74312f080e7Smrj switch (rp->regspec_bustype) { 74412f080e7Smrj 74512f080e7Smrj case BTEISA: 74612f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 74712f080e7Smrj "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr); 74812f080e7Smrj break; 74912f080e7Smrj 75012f080e7Smrj case BTISA: 75112f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 75212f080e7Smrj "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr); 75312f080e7Smrj break; 75412f080e7Smrj 75512f080e7Smrj default: 75612f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 75712f080e7Smrj "space %x offset %x", 75812f080e7Smrj rp->regspec_bustype, rp->regspec_addr); 75912f080e7Smrj break; 76012f080e7Smrj } 76112f080e7Smrj len = strlen(buf); 76212f080e7Smrj } 76312f080e7Smrj for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) { 76412f080e7Smrj int pri; 76512f080e7Smrj 76612f080e7Smrj if (i != 0) { 76712f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 76812f080e7Smrj ","); 76912f080e7Smrj len = strlen(buf); 77012f080e7Smrj } 77112f080e7Smrj pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri); 77212f080e7Smrj f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len, 77312f080e7Smrj " sparc ipl %d", pri); 77412f080e7Smrj len = strlen(buf); 77512f080e7Smrj } 77612f080e7Smrj #ifdef DEBUG 77712f080e7Smrj if (f_len + 1 >= REPORTDEV_BUFSIZE) { 77812f080e7Smrj cmn_err(CE_NOTE, "next message is truncated: " 77912f080e7Smrj "printed length 1024, real length %d", f_len); 78012f080e7Smrj } 78112f080e7Smrj #endif /* DEBUG */ 78212f080e7Smrj cmn_err(CE_CONT, "?%s\n", buf); 78312f080e7Smrj kmem_free(buf, REPORTDEV_BUFSIZE); 78412f080e7Smrj return (DDI_SUCCESS); 78512f080e7Smrj } 78612f080e7Smrj 78712f080e7Smrj 78812f080e7Smrj /* 78912f080e7Smrj * ****************** 79012f080e7Smrj * map related code 79112f080e7Smrj * ****************** 79212f080e7Smrj */ 79312f080e7Smrj 79412f080e7Smrj /* 79512f080e7Smrj * rootnex_map() 79612f080e7Smrj * 79712f080e7Smrj */ 79812f080e7Smrj static int 79912f080e7Smrj rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset, 80012f080e7Smrj off_t len, caddr_t *vaddrp) 8017c478bd9Sstevel@tonic-gate { 8027c478bd9Sstevel@tonic-gate struct regspec *rp, tmp_reg; 8037c478bd9Sstevel@tonic-gate ddi_map_req_t mr = *mp; /* Get private copy of request */ 8047c478bd9Sstevel@tonic-gate int error; 8057c478bd9Sstevel@tonic-gate 8067c478bd9Sstevel@tonic-gate mp = &mr; 8077c478bd9Sstevel@tonic-gate 8087c478bd9Sstevel@tonic-gate switch (mp->map_op) { 8097c478bd9Sstevel@tonic-gate case DDI_MO_MAP_LOCKED: 8107c478bd9Sstevel@tonic-gate case DDI_MO_UNMAP: 8117c478bd9Sstevel@tonic-gate case DDI_MO_MAP_HANDLE: 8127c478bd9Sstevel@tonic-gate break; 8137c478bd9Sstevel@tonic-gate default: 8147c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 8157c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.", 8167c478bd9Sstevel@tonic-gate mp->map_op); 8177c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 8187c478bd9Sstevel@tonic-gate return (DDI_ME_UNIMPLEMENTED); 8197c478bd9Sstevel@tonic-gate } 8207c478bd9Sstevel@tonic-gate 8217c478bd9Sstevel@tonic-gate if (mp->map_flags & DDI_MF_USER_MAPPING) { 8227c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 8237c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user."); 8247c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 8257c478bd9Sstevel@tonic-gate return (DDI_ME_UNIMPLEMENTED); 8267c478bd9Sstevel@tonic-gate } 8277c478bd9Sstevel@tonic-gate 8287c478bd9Sstevel@tonic-gate /* 8297c478bd9Sstevel@tonic-gate * First, if given an rnumber, convert it to a regspec... 8307c478bd9Sstevel@tonic-gate * (Presumably, this is on behalf of a child of the root node?) 8317c478bd9Sstevel@tonic-gate */ 8327c478bd9Sstevel@tonic-gate 8337c478bd9Sstevel@tonic-gate if (mp->map_type == DDI_MT_RNUMBER) { 8347c478bd9Sstevel@tonic-gate 8357c478bd9Sstevel@tonic-gate int rnumber = mp->map_obj.rnumber; 8367c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 8377c478bd9Sstevel@tonic-gate static char *out_of_range = 8387c478bd9Sstevel@tonic-gate "rootnex_map: Out of range rnumber <%d>, device <%s>"; 8397c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 8407c478bd9Sstevel@tonic-gate 8417c478bd9Sstevel@tonic-gate rp = i_ddi_rnumber_to_regspec(rdip, rnumber); 8427c478bd9Sstevel@tonic-gate if (rp == NULL) { 8437c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 8447c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, out_of_range, rnumber, 8457c478bd9Sstevel@tonic-gate ddi_get_name(rdip)); 8467c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 8477c478bd9Sstevel@tonic-gate return (DDI_ME_RNUMBER_RANGE); 8487c478bd9Sstevel@tonic-gate } 8497c478bd9Sstevel@tonic-gate 8507c478bd9Sstevel@tonic-gate /* 8517c478bd9Sstevel@tonic-gate * Convert the given ddi_map_req_t from rnumber to regspec... 8527c478bd9Sstevel@tonic-gate */ 8537c478bd9Sstevel@tonic-gate 8547c478bd9Sstevel@tonic-gate mp->map_type = DDI_MT_REGSPEC; 8557c478bd9Sstevel@tonic-gate mp->map_obj.rp = rp; 8567c478bd9Sstevel@tonic-gate } 8577c478bd9Sstevel@tonic-gate 8587c478bd9Sstevel@tonic-gate /* 8597c478bd9Sstevel@tonic-gate * Adjust offset and length correspnding to called values... 8607c478bd9Sstevel@tonic-gate * XXX: A non-zero length means override the one in the regspec 8617c478bd9Sstevel@tonic-gate * XXX: (regardless of what's in the parent's range?) 8627c478bd9Sstevel@tonic-gate */ 8637c478bd9Sstevel@tonic-gate 8647c478bd9Sstevel@tonic-gate tmp_reg = *(mp->map_obj.rp); /* Preserve underlying data */ 8657c478bd9Sstevel@tonic-gate rp = mp->map_obj.rp = &tmp_reg; /* Use tmp_reg in request */ 8667c478bd9Sstevel@tonic-gate 8677c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 868843e1988Sjohnlev cmn_err(CE_CONT, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d " 869843e1988Sjohnlev "handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip), 870843e1988Sjohnlev rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, offset, 871843e1988Sjohnlev len, mp->map_handlep); 8727c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 8737c478bd9Sstevel@tonic-gate 8747c478bd9Sstevel@tonic-gate /* 8757c478bd9Sstevel@tonic-gate * I/O or memory mapping: 8767c478bd9Sstevel@tonic-gate * 8777c478bd9Sstevel@tonic-gate * <bustype=0, addr=x, len=x>: memory 8787c478bd9Sstevel@tonic-gate * <bustype=1, addr=x, len=x>: i/o 8797c478bd9Sstevel@tonic-gate * <bustype>1, addr=0, len=x>: x86-compatibility i/o 8807c478bd9Sstevel@tonic-gate */ 8817c478bd9Sstevel@tonic-gate 8827c478bd9Sstevel@tonic-gate if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) { 8837c478bd9Sstevel@tonic-gate cmn_err(CE_WARN, "<%s,%s> invalid register spec" 8847c478bd9Sstevel@tonic-gate " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip), 8857c478bd9Sstevel@tonic-gate ddi_get_name(rdip), rp->regspec_bustype, 8867c478bd9Sstevel@tonic-gate rp->regspec_addr, rp->regspec_size); 8877c478bd9Sstevel@tonic-gate return (DDI_ME_INVAL); 8887c478bd9Sstevel@tonic-gate } 8897c478bd9Sstevel@tonic-gate 8907c478bd9Sstevel@tonic-gate if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) { 8917c478bd9Sstevel@tonic-gate /* 8927c478bd9Sstevel@tonic-gate * compatibility i/o mapping 8937c478bd9Sstevel@tonic-gate */ 8947c478bd9Sstevel@tonic-gate rp->regspec_bustype += (uint_t)offset; 8957c478bd9Sstevel@tonic-gate } else { 8967c478bd9Sstevel@tonic-gate /* 8977c478bd9Sstevel@tonic-gate * Normal memory or i/o mapping 8987c478bd9Sstevel@tonic-gate */ 8997c478bd9Sstevel@tonic-gate rp->regspec_addr += (uint_t)offset; 9007c478bd9Sstevel@tonic-gate } 9017c478bd9Sstevel@tonic-gate 9027c478bd9Sstevel@tonic-gate if (len != 0) 9037c478bd9Sstevel@tonic-gate rp->regspec_size = (uint_t)len; 9047c478bd9Sstevel@tonic-gate 9057c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 906843e1988Sjohnlev cmn_err(CE_CONT, " <%s,%s> <0x%x, 0x%x, 0x%d> offset %d " 907843e1988Sjohnlev "len %d handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip), 9087c478bd9Sstevel@tonic-gate rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, 9097c478bd9Sstevel@tonic-gate offset, len, mp->map_handlep); 9107c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 9117c478bd9Sstevel@tonic-gate 9127c478bd9Sstevel@tonic-gate /* 9137c478bd9Sstevel@tonic-gate * Apply any parent ranges at this level, if applicable. 9147c478bd9Sstevel@tonic-gate * (This is where nexus specific regspec translation takes place. 9157c478bd9Sstevel@tonic-gate * Use of this function is implicit agreement that translation is 9167c478bd9Sstevel@tonic-gate * provided via ddi_apply_range.) 9177c478bd9Sstevel@tonic-gate */ 9187c478bd9Sstevel@tonic-gate 9197c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 9207c478bd9Sstevel@tonic-gate ddi_map_debug("applying range of parent <%s> to child <%s>...\n", 9217c478bd9Sstevel@tonic-gate ddi_get_name(dip), ddi_get_name(rdip)); 9227c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 9237c478bd9Sstevel@tonic-gate 9247c478bd9Sstevel@tonic-gate if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0) 9257c478bd9Sstevel@tonic-gate return (error); 9267c478bd9Sstevel@tonic-gate 9277c478bd9Sstevel@tonic-gate switch (mp->map_op) { 9287c478bd9Sstevel@tonic-gate case DDI_MO_MAP_LOCKED: 9297c478bd9Sstevel@tonic-gate 9307c478bd9Sstevel@tonic-gate /* 9317c478bd9Sstevel@tonic-gate * Set up the locked down kernel mapping to the regspec... 9327c478bd9Sstevel@tonic-gate */ 9337c478bd9Sstevel@tonic-gate 9347c478bd9Sstevel@tonic-gate return (rootnex_map_regspec(mp, vaddrp)); 9357c478bd9Sstevel@tonic-gate 9367c478bd9Sstevel@tonic-gate case DDI_MO_UNMAP: 9377c478bd9Sstevel@tonic-gate 9387c478bd9Sstevel@tonic-gate /* 9397c478bd9Sstevel@tonic-gate * Release mapping... 9407c478bd9Sstevel@tonic-gate */ 9417c478bd9Sstevel@tonic-gate 9427c478bd9Sstevel@tonic-gate return (rootnex_unmap_regspec(mp, vaddrp)); 9437c478bd9Sstevel@tonic-gate 9447c478bd9Sstevel@tonic-gate case DDI_MO_MAP_HANDLE: 9457c478bd9Sstevel@tonic-gate 9467c478bd9Sstevel@tonic-gate return (rootnex_map_handle(mp)); 9477c478bd9Sstevel@tonic-gate 9487c478bd9Sstevel@tonic-gate default: 9497c478bd9Sstevel@tonic-gate return (DDI_ME_UNIMPLEMENTED); 9507c478bd9Sstevel@tonic-gate } 9517c478bd9Sstevel@tonic-gate } 9527c478bd9Sstevel@tonic-gate 9537c478bd9Sstevel@tonic-gate 9547c478bd9Sstevel@tonic-gate /* 95512f080e7Smrj * rootnex_map_fault() 9567c478bd9Sstevel@tonic-gate * 9577c478bd9Sstevel@tonic-gate * fault in mappings for requestors 9587c478bd9Sstevel@tonic-gate */ 9597c478bd9Sstevel@tonic-gate /*ARGSUSED*/ 9607c478bd9Sstevel@tonic-gate static int 96112f080e7Smrj rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat, 96212f080e7Smrj struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot, 96312f080e7Smrj uint_t lock) 9647c478bd9Sstevel@tonic-gate { 9657c478bd9Sstevel@tonic-gate 9667c478bd9Sstevel@tonic-gate #ifdef DDI_MAP_DEBUG 9677c478bd9Sstevel@tonic-gate ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn); 9687c478bd9Sstevel@tonic-gate ddi_map_debug(" Seg <%s>\n", 9697c478bd9Sstevel@tonic-gate seg->s_ops == &segdev_ops ? "segdev" : 9707c478bd9Sstevel@tonic-gate seg == &kvseg ? "segkmem" : "NONE!"); 9717c478bd9Sstevel@tonic-gate #endif /* DDI_MAP_DEBUG */ 9727c478bd9Sstevel@tonic-gate 9737c478bd9Sstevel@tonic-gate /* 9747c478bd9Sstevel@tonic-gate * This is all terribly broken, but it is a start 9757c478bd9Sstevel@tonic-gate * 9767c478bd9Sstevel@tonic-gate * XXX Note that this test means that segdev_ops 9777c478bd9Sstevel@tonic-gate * must be exported from seg_dev.c. 9787c478bd9Sstevel@tonic-gate * XXX What about devices with their own segment drivers? 9797c478bd9Sstevel@tonic-gate */ 9807c478bd9Sstevel@tonic-gate if (seg->s_ops == &segdev_ops) { 981843e1988Sjohnlev struct segdev_data *sdp = (struct segdev_data *)seg->s_data; 9827c478bd9Sstevel@tonic-gate 9837c478bd9Sstevel@tonic-gate if (hat == NULL) { 9847c478bd9Sstevel@tonic-gate /* 9857c478bd9Sstevel@tonic-gate * This is one plausible interpretation of 9867c478bd9Sstevel@tonic-gate * a null hat i.e. use the first hat on the 9877c478bd9Sstevel@tonic-gate * address space hat list which by convention is 9887c478bd9Sstevel@tonic-gate * the hat of the system MMU. At alternative 9897c478bd9Sstevel@tonic-gate * would be to panic .. this might well be better .. 9907c478bd9Sstevel@tonic-gate */ 9917c478bd9Sstevel@tonic-gate ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock)); 9927c478bd9Sstevel@tonic-gate hat = seg->s_as->a_hat; 9937c478bd9Sstevel@tonic-gate cmn_err(CE_NOTE, "rootnex_map_fault: nil hat"); 9947c478bd9Sstevel@tonic-gate } 9957c478bd9Sstevel@tonic-gate hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr, 9967c478bd9Sstevel@tonic-gate (lock ? HAT_LOAD_LOCK : HAT_LOAD)); 9977c478bd9Sstevel@tonic-gate } else if (seg == &kvseg && dp == NULL) { 9987c478bd9Sstevel@tonic-gate hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot, 9997c478bd9Sstevel@tonic-gate HAT_LOAD_LOCK); 10007c478bd9Sstevel@tonic-gate } else 10017c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 10027c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 10037c478bd9Sstevel@tonic-gate } 10047c478bd9Sstevel@tonic-gate 10057c478bd9Sstevel@tonic-gate 10067c478bd9Sstevel@tonic-gate /* 100712f080e7Smrj * rootnex_map_regspec() 100812f080e7Smrj * we don't support mapping of I/O cards above 4Gb 10097c478bd9Sstevel@tonic-gate */ 10107c478bd9Sstevel@tonic-gate static int 101112f080e7Smrj rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp) 10127c478bd9Sstevel@tonic-gate { 1013843e1988Sjohnlev rootnex_addr_t rbase; 101412f080e7Smrj void *cvaddr; 101512f080e7Smrj uint_t npages, pgoffset; 101612f080e7Smrj struct regspec *rp; 101712f080e7Smrj ddi_acc_hdl_t *hp; 101812f080e7Smrj ddi_acc_impl_t *ap; 101912f080e7Smrj uint_t hat_acc_flags; 1020843e1988Sjohnlev paddr_t pbase; 10217c478bd9Sstevel@tonic-gate 102212f080e7Smrj rp = mp->map_obj.rp; 102312f080e7Smrj hp = mp->map_handlep; 102412f080e7Smrj 102512f080e7Smrj #ifdef DDI_MAP_DEBUG 102612f080e7Smrj ddi_map_debug( 102712f080e7Smrj "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n", 102812f080e7Smrj rp->regspec_bustype, rp->regspec_addr, 102912f080e7Smrj rp->regspec_size, mp->map_handlep); 103012f080e7Smrj #endif /* DDI_MAP_DEBUG */ 10317c478bd9Sstevel@tonic-gate 10327c478bd9Sstevel@tonic-gate /* 103312f080e7Smrj * I/O or memory mapping 103412f080e7Smrj * 103512f080e7Smrj * <bustype=0, addr=x, len=x>: memory 103612f080e7Smrj * <bustype=1, addr=x, len=x>: i/o 103712f080e7Smrj * <bustype>1, addr=0, len=x>: x86-compatibility i/o 10387c478bd9Sstevel@tonic-gate */ 103912f080e7Smrj 104012f080e7Smrj if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) { 104112f080e7Smrj cmn_err(CE_WARN, "rootnex: invalid register spec" 104212f080e7Smrj " <0x%x, 0x%x, 0x%x>", rp->regspec_bustype, 104312f080e7Smrj rp->regspec_addr, rp->regspec_size); 104412f080e7Smrj return (DDI_FAILURE); 10457c478bd9Sstevel@tonic-gate } 104612f080e7Smrj 104712f080e7Smrj if (rp->regspec_bustype != 0) { 10487c478bd9Sstevel@tonic-gate /* 104912f080e7Smrj * I/O space - needs a handle. 10507c478bd9Sstevel@tonic-gate */ 10517c478bd9Sstevel@tonic-gate if (hp == NULL) { 105212f080e7Smrj return (DDI_FAILURE); 10537c478bd9Sstevel@tonic-gate } 105412f080e7Smrj ap = (ddi_acc_impl_t *)hp->ah_platform_private; 105512f080e7Smrj ap->ahi_acc_attr |= DDI_ACCATTR_IO_SPACE; 105612f080e7Smrj impl_acc_hdl_init(hp); 10577c478bd9Sstevel@tonic-gate 105812f080e7Smrj if (mp->map_flags & DDI_MF_DEVICE_MAPPING) { 105912f080e7Smrj #ifdef DDI_MAP_DEBUG 1060843e1988Sjohnlev ddi_map_debug("rootnex_map_regspec: mmap() " 1061843e1988Sjohnlev "to I/O space is not supported.\n"); 106212f080e7Smrj #endif /* DDI_MAP_DEBUG */ 106312f080e7Smrj return (DDI_ME_INVAL); 10647c478bd9Sstevel@tonic-gate } else { 10657c478bd9Sstevel@tonic-gate /* 106612f080e7Smrj * 1275-compliant vs. compatibility i/o mapping 10677c478bd9Sstevel@tonic-gate */ 106812f080e7Smrj *vaddrp = 106912f080e7Smrj (rp->regspec_bustype > 1 && rp->regspec_addr == 0) ? 107012f080e7Smrj ((caddr_t)(uintptr_t)rp->regspec_bustype) : 107112f080e7Smrj ((caddr_t)(uintptr_t)rp->regspec_addr); 1072843e1988Sjohnlev #ifdef __xpv 1073843e1988Sjohnlev if (DOMAIN_IS_INITDOMAIN(xen_info)) { 1074843e1988Sjohnlev hp->ah_pfn = xen_assign_pfn( 1075843e1988Sjohnlev mmu_btop((ulong_t)rp->regspec_addr & 1076843e1988Sjohnlev MMU_PAGEMASK)); 1077843e1988Sjohnlev } else { 1078843e1988Sjohnlev hp->ah_pfn = mmu_btop( 1079843e1988Sjohnlev (ulong_t)rp->regspec_addr & MMU_PAGEMASK); 1080843e1988Sjohnlev } 1081843e1988Sjohnlev #else 108200d0963fSdilpreet hp->ah_pfn = mmu_btop((ulong_t)rp->regspec_addr & 1083843e1988Sjohnlev MMU_PAGEMASK); 1084843e1988Sjohnlev #endif 108500d0963fSdilpreet hp->ah_pnum = mmu_btopr(rp->regspec_size + 108600d0963fSdilpreet (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET); 10877c478bd9Sstevel@tonic-gate } 10887c478bd9Sstevel@tonic-gate 108912f080e7Smrj #ifdef DDI_MAP_DEBUG 109012f080e7Smrj ddi_map_debug( 109112f080e7Smrj "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n", 109212f080e7Smrj rp->regspec_size, *vaddrp); 109312f080e7Smrj #endif /* DDI_MAP_DEBUG */ 109412f080e7Smrj return (DDI_SUCCESS); 10957c478bd9Sstevel@tonic-gate } 10967c478bd9Sstevel@tonic-gate 10977c478bd9Sstevel@tonic-gate /* 109812f080e7Smrj * Memory space 109912f080e7Smrj */ 110012f080e7Smrj 110112f080e7Smrj if (hp != NULL) { 110212f080e7Smrj /* 110312f080e7Smrj * hat layer ignores 110412f080e7Smrj * hp->ah_acc.devacc_attr_endian_flags. 110512f080e7Smrj */ 110612f080e7Smrj switch (hp->ah_acc.devacc_attr_dataorder) { 110712f080e7Smrj case DDI_STRICTORDER_ACC: 110812f080e7Smrj hat_acc_flags = HAT_STRICTORDER; 110912f080e7Smrj break; 111012f080e7Smrj case DDI_UNORDERED_OK_ACC: 111112f080e7Smrj hat_acc_flags = HAT_UNORDERED_OK; 111212f080e7Smrj break; 111312f080e7Smrj case DDI_MERGING_OK_ACC: 111412f080e7Smrj hat_acc_flags = HAT_MERGING_OK; 111512f080e7Smrj break; 111612f080e7Smrj case DDI_LOADCACHING_OK_ACC: 111712f080e7Smrj hat_acc_flags = HAT_LOADCACHING_OK; 111812f080e7Smrj break; 111912f080e7Smrj case DDI_STORECACHING_OK_ACC: 112012f080e7Smrj hat_acc_flags = HAT_STORECACHING_OK; 112112f080e7Smrj break; 112212f080e7Smrj } 112312f080e7Smrj ap = (ddi_acc_impl_t *)hp->ah_platform_private; 112412f080e7Smrj ap->ahi_acc_attr |= DDI_ACCATTR_CPU_VADDR; 112512f080e7Smrj impl_acc_hdl_init(hp); 112612f080e7Smrj hp->ah_hat_flags = hat_acc_flags; 112712f080e7Smrj } else { 112812f080e7Smrj hat_acc_flags = HAT_STRICTORDER; 112912f080e7Smrj } 113012f080e7Smrj 1131843e1988Sjohnlev rbase = (rootnex_addr_t)(rp->regspec_addr & MMU_PAGEMASK); 1132843e1988Sjohnlev #ifdef __xpv 1133843e1988Sjohnlev /* 1134843e1988Sjohnlev * If we're dom0, we're using a real device so we need to translate 1135843e1988Sjohnlev * the MA to a PA. 1136843e1988Sjohnlev */ 1137843e1988Sjohnlev if (DOMAIN_IS_INITDOMAIN(xen_info)) { 1138843e1988Sjohnlev pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))); 1139843e1988Sjohnlev } else { 1140843e1988Sjohnlev pbase = rbase; 1141843e1988Sjohnlev } 1142843e1988Sjohnlev #else 1143843e1988Sjohnlev pbase = rbase; 1144843e1988Sjohnlev #endif 1145843e1988Sjohnlev pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; 114612f080e7Smrj 114712f080e7Smrj if (rp->regspec_size == 0) { 114812f080e7Smrj #ifdef DDI_MAP_DEBUG 114912f080e7Smrj ddi_map_debug("rootnex_map_regspec: zero regspec_size\n"); 115012f080e7Smrj #endif /* DDI_MAP_DEBUG */ 115112f080e7Smrj return (DDI_ME_INVAL); 115212f080e7Smrj } 115312f080e7Smrj 115412f080e7Smrj if (mp->map_flags & DDI_MF_DEVICE_MAPPING) { 1155843e1988Sjohnlev /* extra cast to make gcc happy */ 1156843e1988Sjohnlev *vaddrp = (caddr_t)((uintptr_t)mmu_btop(pbase)); 115712f080e7Smrj } else { 115812f080e7Smrj npages = mmu_btopr(rp->regspec_size + pgoffset); 115912f080e7Smrj 116012f080e7Smrj #ifdef DDI_MAP_DEBUG 1161843e1988Sjohnlev ddi_map_debug("rootnex_map_regspec: Mapping %d pages " 1162843e1988Sjohnlev "physical %llx", npages, pbase); 116312f080e7Smrj #endif /* DDI_MAP_DEBUG */ 116412f080e7Smrj 116512f080e7Smrj cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP); 116612f080e7Smrj if (cvaddr == NULL) 116712f080e7Smrj return (DDI_ME_NORESOURCES); 116812f080e7Smrj 116912f080e7Smrj /* 117012f080e7Smrj * Now map in the pages we've allocated... 117112f080e7Smrj */ 1172843e1988Sjohnlev hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), 1173843e1988Sjohnlev mmu_btop(pbase), mp->map_prot | hat_acc_flags, 1174843e1988Sjohnlev HAT_LOAD_LOCK); 117512f080e7Smrj *vaddrp = (caddr_t)cvaddr + pgoffset; 117600d0963fSdilpreet 117700d0963fSdilpreet /* save away pfn and npages for FMA */ 117800d0963fSdilpreet hp = mp->map_handlep; 117900d0963fSdilpreet if (hp) { 1180843e1988Sjohnlev hp->ah_pfn = mmu_btop(pbase); 118100d0963fSdilpreet hp->ah_pnum = npages; 118200d0963fSdilpreet } 118312f080e7Smrj } 118412f080e7Smrj 118512f080e7Smrj #ifdef DDI_MAP_DEBUG 118612f080e7Smrj ddi_map_debug("at virtual 0x%x\n", *vaddrp); 118712f080e7Smrj #endif /* DDI_MAP_DEBUG */ 118812f080e7Smrj return (DDI_SUCCESS); 118912f080e7Smrj } 119012f080e7Smrj 119112f080e7Smrj 119212f080e7Smrj /* 119312f080e7Smrj * rootnex_unmap_regspec() 11947c478bd9Sstevel@tonic-gate * 11957c478bd9Sstevel@tonic-gate */ 11967c478bd9Sstevel@tonic-gate static int 119712f080e7Smrj rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp) 11987c478bd9Sstevel@tonic-gate { 119912f080e7Smrj caddr_t addr = (caddr_t)*vaddrp; 120012f080e7Smrj uint_t npages, pgoffset; 120112f080e7Smrj struct regspec *rp; 12027c478bd9Sstevel@tonic-gate 120312f080e7Smrj if (mp->map_flags & DDI_MF_DEVICE_MAPPING) 120412f080e7Smrj return (0); 12057c478bd9Sstevel@tonic-gate 120612f080e7Smrj rp = mp->map_obj.rp; 12077c478bd9Sstevel@tonic-gate 120812f080e7Smrj if (rp->regspec_size == 0) { 120912f080e7Smrj #ifdef DDI_MAP_DEBUG 121012f080e7Smrj ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n"); 121112f080e7Smrj #endif /* DDI_MAP_DEBUG */ 121212f080e7Smrj return (DDI_ME_INVAL); 12137c478bd9Sstevel@tonic-gate } 12147c478bd9Sstevel@tonic-gate 12157c478bd9Sstevel@tonic-gate /* 121612f080e7Smrj * I/O or memory mapping: 12177c478bd9Sstevel@tonic-gate * 121812f080e7Smrj * <bustype=0, addr=x, len=x>: memory 121912f080e7Smrj * <bustype=1, addr=x, len=x>: i/o 122012f080e7Smrj * <bustype>1, addr=0, len=x>: x86-compatibility i/o 12217c478bd9Sstevel@tonic-gate */ 122212f080e7Smrj if (rp->regspec_bustype != 0) { 12237c478bd9Sstevel@tonic-gate /* 122412f080e7Smrj * This is I/O space, which requires no particular 122512f080e7Smrj * processing on unmap since it isn't mapped in the 122612f080e7Smrj * first place. 12277c478bd9Sstevel@tonic-gate */ 12287c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 12297c478bd9Sstevel@tonic-gate } 12307c478bd9Sstevel@tonic-gate 12317c478bd9Sstevel@tonic-gate /* 123212f080e7Smrj * Memory space 12337c478bd9Sstevel@tonic-gate */ 123412f080e7Smrj pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET; 123512f080e7Smrj npages = mmu_btopr(rp->regspec_size + pgoffset); 123612f080e7Smrj hat_unload(kas.a_hat, addr - pgoffset, ptob(npages), HAT_UNLOAD_UNLOCK); 123712f080e7Smrj device_arena_free(addr - pgoffset, ptob(npages)); 12387c478bd9Sstevel@tonic-gate 12397c478bd9Sstevel@tonic-gate /* 124012f080e7Smrj * Destroy the pointer - the mapping has logically gone 12417c478bd9Sstevel@tonic-gate */ 124212f080e7Smrj *vaddrp = NULL; 12437c478bd9Sstevel@tonic-gate 12447c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 12457c478bd9Sstevel@tonic-gate } 12467c478bd9Sstevel@tonic-gate 124712f080e7Smrj 124812f080e7Smrj /* 124912f080e7Smrj * rootnex_map_handle() 125012f080e7Smrj * 125112f080e7Smrj */ 12527c478bd9Sstevel@tonic-gate static int 125312f080e7Smrj rootnex_map_handle(ddi_map_req_t *mp) 12547c478bd9Sstevel@tonic-gate { 1255843e1988Sjohnlev rootnex_addr_t rbase; 125612f080e7Smrj ddi_acc_hdl_t *hp; 125712f080e7Smrj uint_t pgoffset; 125812f080e7Smrj struct regspec *rp; 1259843e1988Sjohnlev paddr_t pbase; 12607c478bd9Sstevel@tonic-gate 126112f080e7Smrj rp = mp->map_obj.rp; 12627c478bd9Sstevel@tonic-gate 126312f080e7Smrj #ifdef DDI_MAP_DEBUG 126412f080e7Smrj ddi_map_debug( 126512f080e7Smrj "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n", 126612f080e7Smrj rp->regspec_bustype, rp->regspec_addr, 126712f080e7Smrj rp->regspec_size, mp->map_handlep); 126812f080e7Smrj #endif /* DDI_MAP_DEBUG */ 12697c478bd9Sstevel@tonic-gate 12707c478bd9Sstevel@tonic-gate /* 127112f080e7Smrj * I/O or memory mapping: 127212f080e7Smrj * 127312f080e7Smrj * <bustype=0, addr=x, len=x>: memory 127412f080e7Smrj * <bustype=1, addr=x, len=x>: i/o 127512f080e7Smrj * <bustype>1, addr=0, len=x>: x86-compatibility i/o 12767c478bd9Sstevel@tonic-gate */ 127712f080e7Smrj if (rp->regspec_bustype != 0) { 127812f080e7Smrj /* 127912f080e7Smrj * This refers to I/O space, and we don't support "mapping" 128012f080e7Smrj * I/O space to a user. 128112f080e7Smrj */ 12827c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 12837c478bd9Sstevel@tonic-gate } 12847c478bd9Sstevel@tonic-gate 12857c478bd9Sstevel@tonic-gate /* 128612f080e7Smrj * Set up the hat_flags for the mapping. 12877c478bd9Sstevel@tonic-gate */ 128812f080e7Smrj hp = mp->map_handlep; 12897c478bd9Sstevel@tonic-gate 129012f080e7Smrj switch (hp->ah_acc.devacc_attr_endian_flags) { 129112f080e7Smrj case DDI_NEVERSWAP_ACC: 129212f080e7Smrj hp->ah_hat_flags = HAT_NEVERSWAP | HAT_STRICTORDER; 12937c478bd9Sstevel@tonic-gate break; 129412f080e7Smrj case DDI_STRUCTURE_LE_ACC: 129512f080e7Smrj hp->ah_hat_flags = HAT_STRUCTURE_LE; 12967c478bd9Sstevel@tonic-gate break; 129712f080e7Smrj case DDI_STRUCTURE_BE_ACC: 12987c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 12997c478bd9Sstevel@tonic-gate default: 130012f080e7Smrj return (DDI_REGS_ACC_CONFLICT); 13017c478bd9Sstevel@tonic-gate } 13027c478bd9Sstevel@tonic-gate 130312f080e7Smrj switch (hp->ah_acc.devacc_attr_dataorder) { 130412f080e7Smrj case DDI_STRICTORDER_ACC: 13057c478bd9Sstevel@tonic-gate break; 130612f080e7Smrj case DDI_UNORDERED_OK_ACC: 130712f080e7Smrj hp->ah_hat_flags |= HAT_UNORDERED_OK; 13087c478bd9Sstevel@tonic-gate break; 130912f080e7Smrj case DDI_MERGING_OK_ACC: 131012f080e7Smrj hp->ah_hat_flags |= HAT_MERGING_OK; 13117c478bd9Sstevel@tonic-gate break; 131212f080e7Smrj case DDI_LOADCACHING_OK_ACC: 131312f080e7Smrj hp->ah_hat_flags |= HAT_LOADCACHING_OK; 131412f080e7Smrj break; 131512f080e7Smrj case DDI_STORECACHING_OK_ACC: 131612f080e7Smrj hp->ah_hat_flags |= HAT_STORECACHING_OK; 131712f080e7Smrj break; 13187c478bd9Sstevel@tonic-gate default: 13197c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 13207c478bd9Sstevel@tonic-gate } 13217c478bd9Sstevel@tonic-gate 1322843e1988Sjohnlev rbase = (rootnex_addr_t)rp->regspec_addr & 1323843e1988Sjohnlev (~(rootnex_addr_t)MMU_PAGEOFFSET); 1324843e1988Sjohnlev pgoffset = (ulong_t)rp->regspec_addr & MMU_PAGEOFFSET; 13257c478bd9Sstevel@tonic-gate 132612f080e7Smrj if (rp->regspec_size == 0) 132712f080e7Smrj return (DDI_ME_INVAL); 13287c478bd9Sstevel@tonic-gate 1329843e1988Sjohnlev #ifdef __xpv 1330843e1988Sjohnlev /* 1331843e1988Sjohnlev * If we're dom0, we're using a real device so we need to translate 1332843e1988Sjohnlev * the MA to a PA. 1333843e1988Sjohnlev */ 1334843e1988Sjohnlev if (DOMAIN_IS_INITDOMAIN(xen_info)) { 1335843e1988Sjohnlev pbase = pfn_to_pa(xen_assign_pfn(mmu_btop(rbase))) | 1336843e1988Sjohnlev (rbase & MMU_PAGEOFFSET); 1337843e1988Sjohnlev } else { 1338843e1988Sjohnlev pbase = rbase; 1339843e1988Sjohnlev } 1340843e1988Sjohnlev #else 1341843e1988Sjohnlev pbase = rbase; 1342843e1988Sjohnlev #endif 1343843e1988Sjohnlev 1344843e1988Sjohnlev hp->ah_pfn = mmu_btop(pbase); 134512f080e7Smrj hp->ah_pnum = mmu_btopr(rp->regspec_size + pgoffset); 13467c478bd9Sstevel@tonic-gate 13477c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 13487c478bd9Sstevel@tonic-gate } 13497c478bd9Sstevel@tonic-gate 135012f080e7Smrj 135112f080e7Smrj 13527c478bd9Sstevel@tonic-gate /* 135312f080e7Smrj * ************************ 135412f080e7Smrj * interrupt related code 135512f080e7Smrj * ************************ 13567c478bd9Sstevel@tonic-gate */ 13577c478bd9Sstevel@tonic-gate 13587c478bd9Sstevel@tonic-gate /* 135912f080e7Smrj * rootnex_intr_ops() 13607c478bd9Sstevel@tonic-gate * bus_intr_op() function for interrupt support 13617c478bd9Sstevel@tonic-gate */ 13627c478bd9Sstevel@tonic-gate /* ARGSUSED */ 13637c478bd9Sstevel@tonic-gate static int 13647c478bd9Sstevel@tonic-gate rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op, 13657c478bd9Sstevel@tonic-gate ddi_intr_handle_impl_t *hdlp, void *result) 13667c478bd9Sstevel@tonic-gate { 13677c478bd9Sstevel@tonic-gate struct intrspec *ispec; 13687c478bd9Sstevel@tonic-gate 13697c478bd9Sstevel@tonic-gate DDI_INTR_NEXDBG((CE_CONT, 13707c478bd9Sstevel@tonic-gate "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n", 13717c478bd9Sstevel@tonic-gate (void *)pdip, (void *)rdip, intr_op, (void *)hdlp)); 13727c478bd9Sstevel@tonic-gate 13737c478bd9Sstevel@tonic-gate /* Process the interrupt operation */ 13747c478bd9Sstevel@tonic-gate switch (intr_op) { 13757c478bd9Sstevel@tonic-gate case DDI_INTROP_GETCAP: 13767c478bd9Sstevel@tonic-gate /* First check with pcplusmp */ 13777c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL) 13787c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 13797c478bd9Sstevel@tonic-gate 13807c478bd9Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_CAP, result)) { 13817c478bd9Sstevel@tonic-gate *(int *)result = 0; 13827c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 13837c478bd9Sstevel@tonic-gate } 13847c478bd9Sstevel@tonic-gate break; 13857c478bd9Sstevel@tonic-gate case DDI_INTROP_SETCAP: 13867c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL) 13877c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 13887c478bd9Sstevel@tonic-gate 13897c478bd9Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result)) 13907c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 13917c478bd9Sstevel@tonic-gate break; 13927c478bd9Sstevel@tonic-gate case DDI_INTROP_ALLOC: 1393*7ff178cdSJimmy Vetayases ASSERT(hdlp->ih_type == DDI_INTR_TYPE_FIXED); 1394*7ff178cdSJimmy Vetayases return (rootnex_alloc_intr_fixed(rdip, hdlp, result)); 13957c478bd9Sstevel@tonic-gate case DDI_INTROP_FREE: 1396*7ff178cdSJimmy Vetayases ASSERT(hdlp->ih_type == DDI_INTR_TYPE_FIXED); 1397*7ff178cdSJimmy Vetayases return (rootnex_free_intr_fixed(rdip, hdlp)); 13987c478bd9Sstevel@tonic-gate case DDI_INTROP_GETPRI: 13997c478bd9Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 14007c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14017c478bd9Sstevel@tonic-gate *(int *)result = ispec->intrspec_pri; 14027c478bd9Sstevel@tonic-gate break; 14037c478bd9Sstevel@tonic-gate case DDI_INTROP_SETPRI: 14047c478bd9Sstevel@tonic-gate /* Validate the interrupt priority passed to us */ 14057c478bd9Sstevel@tonic-gate if (*(int *)result > LOCK_LEVEL) 14067c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14077c478bd9Sstevel@tonic-gate 14087c478bd9Sstevel@tonic-gate /* Ensure that PSM is all initialized and ispec is ok */ 14097c478bd9Sstevel@tonic-gate if ((psm_intr_ops == NULL) || 14107c478bd9Sstevel@tonic-gate ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL)) 14117c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14127c478bd9Sstevel@tonic-gate 14137c478bd9Sstevel@tonic-gate /* Change the priority */ 14147c478bd9Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) == 14157c478bd9Sstevel@tonic-gate PSM_FAILURE) 14167c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14177c478bd9Sstevel@tonic-gate 14187c478bd9Sstevel@tonic-gate /* update the ispec with the new priority */ 14197c478bd9Sstevel@tonic-gate ispec->intrspec_pri = *(int *)result; 14207c478bd9Sstevel@tonic-gate break; 14217c478bd9Sstevel@tonic-gate case DDI_INTROP_ADDISR: 14227c478bd9Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 14237c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14247c478bd9Sstevel@tonic-gate ispec->intrspec_func = hdlp->ih_cb_func; 14257c478bd9Sstevel@tonic-gate break; 14267c478bd9Sstevel@tonic-gate case DDI_INTROP_REMISR: 14277c478bd9Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 14287c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14297c478bd9Sstevel@tonic-gate ispec->intrspec_func = (uint_t (*)()) 0; 14307c478bd9Sstevel@tonic-gate break; 14317c478bd9Sstevel@tonic-gate case DDI_INTROP_ENABLE: 14327c478bd9Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 14337c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14347c478bd9Sstevel@tonic-gate 14357c478bd9Sstevel@tonic-gate /* Call psmi to translate irq with the dip */ 14367c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL) 14377c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14387c478bd9Sstevel@tonic-gate 14397a364d25Sschwartz ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec; 144086a9c507SGuoli Shu if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, 144186a9c507SGuoli Shu (int *)&hdlp->ih_vector) == PSM_FAILURE) 144286a9c507SGuoli Shu return (DDI_FAILURE); 14437c478bd9Sstevel@tonic-gate 14447c478bd9Sstevel@tonic-gate /* Add the interrupt handler */ 14457c478bd9Sstevel@tonic-gate if (!add_avintr((void *)hdlp, ispec->intrspec_pri, 14467c478bd9Sstevel@tonic-gate hdlp->ih_cb_func, DEVI(rdip)->devi_name, hdlp->ih_vector, 14477a364d25Sschwartz hdlp->ih_cb_arg1, hdlp->ih_cb_arg2, NULL, rdip)) 14487c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14497c478bd9Sstevel@tonic-gate break; 14507c478bd9Sstevel@tonic-gate case DDI_INTROP_DISABLE: 14517c478bd9Sstevel@tonic-gate if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 14527c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14537c478bd9Sstevel@tonic-gate 14547c478bd9Sstevel@tonic-gate /* Call psm_ops() to translate irq with the dip */ 14557c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL) 14567c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14577c478bd9Sstevel@tonic-gate 14587a364d25Sschwartz ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec; 14597c478bd9Sstevel@tonic-gate (void) (*psm_intr_ops)(rdip, hdlp, 14607c478bd9Sstevel@tonic-gate PSM_INTR_OP_XLATE_VECTOR, (int *)&hdlp->ih_vector); 14617c478bd9Sstevel@tonic-gate 14627c478bd9Sstevel@tonic-gate /* Remove the interrupt handler */ 14637c478bd9Sstevel@tonic-gate rem_avintr((void *)hdlp, ispec->intrspec_pri, 14647c478bd9Sstevel@tonic-gate hdlp->ih_cb_func, hdlp->ih_vector); 14657c478bd9Sstevel@tonic-gate break; 14667c478bd9Sstevel@tonic-gate case DDI_INTROP_SETMASK: 14677c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL) 14687c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14697c478bd9Sstevel@tonic-gate 14707c478bd9Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_MASK, NULL)) 14717c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14727c478bd9Sstevel@tonic-gate break; 14737c478bd9Sstevel@tonic-gate case DDI_INTROP_CLRMASK: 14747c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL) 14757c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14767c478bd9Sstevel@tonic-gate 14777c478bd9Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_CLEAR_MASK, NULL)) 14787c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14797c478bd9Sstevel@tonic-gate break; 14807c478bd9Sstevel@tonic-gate case DDI_INTROP_GETPENDING: 14817c478bd9Sstevel@tonic-gate if (psm_intr_ops == NULL) 14827c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14837c478bd9Sstevel@tonic-gate 14847c478bd9Sstevel@tonic-gate if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_GET_PENDING, 14857c478bd9Sstevel@tonic-gate result)) { 14867c478bd9Sstevel@tonic-gate *(int *)result = 0; 14877c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 14887c478bd9Sstevel@tonic-gate } 14897c478bd9Sstevel@tonic-gate break; 1490a54f81fbSanish case DDI_INTROP_NAVAIL: 14917c478bd9Sstevel@tonic-gate case DDI_INTROP_NINTRS: 1492a54f81fbSanish *(int *)result = i_ddi_get_intx_nintrs(rdip); 1493a54f81fbSanish if (*(int *)result == 0) { 14947c478bd9Sstevel@tonic-gate /* 14957c478bd9Sstevel@tonic-gate * Special case for 'pcic' driver' only. This driver 14967c478bd9Sstevel@tonic-gate * driver is a child of 'isa' and 'rootnex' drivers. 14977c478bd9Sstevel@tonic-gate * 14987c478bd9Sstevel@tonic-gate * See detailed comments on this in the function 14997c478bd9Sstevel@tonic-gate * rootnex_get_ispec(). 15007c478bd9Sstevel@tonic-gate * 15017c478bd9Sstevel@tonic-gate * Children of 'pcic' send 'NINITR' request all the 15027c478bd9Sstevel@tonic-gate * way to rootnex driver. But, the 'pdp->par_nintr' 15037c478bd9Sstevel@tonic-gate * field may not initialized. So, we fake it here 15047c478bd9Sstevel@tonic-gate * to return 1 (a la what PCMCIA nexus does). 15057c478bd9Sstevel@tonic-gate */ 15067c478bd9Sstevel@tonic-gate if (strcmp(ddi_get_name(rdip), "pcic") == 0) 15077c478bd9Sstevel@tonic-gate *(int *)result = 1; 1508a54f81fbSanish else 1509a54f81fbSanish return (DDI_FAILURE); 15107c478bd9Sstevel@tonic-gate } 15117c478bd9Sstevel@tonic-gate break; 15127c478bd9Sstevel@tonic-gate case DDI_INTROP_SUPPORTED_TYPES: 1513a54f81fbSanish *(int *)result = DDI_INTR_TYPE_FIXED; /* Always ... */ 15147c478bd9Sstevel@tonic-gate break; 15157c478bd9Sstevel@tonic-gate default: 15167c478bd9Sstevel@tonic-gate return (DDI_FAILURE); 15177c478bd9Sstevel@tonic-gate } 15187c478bd9Sstevel@tonic-gate 15197c478bd9Sstevel@tonic-gate return (DDI_SUCCESS); 15207c478bd9Sstevel@tonic-gate } 15217c478bd9Sstevel@tonic-gate 15227c478bd9Sstevel@tonic-gate 15237c478bd9Sstevel@tonic-gate /* 152412f080e7Smrj * rootnex_get_ispec() 152512f080e7Smrj * convert an interrupt number to an interrupt specification. 152612f080e7Smrj * The interrupt number determines which interrupt spec will be 152712f080e7Smrj * returned if more than one exists. 152812f080e7Smrj * 152912f080e7Smrj * Look into the parent private data area of the 'rdip' to find out 153012f080e7Smrj * the interrupt specification. First check to make sure there is 153112f080e7Smrj * one that matchs "inumber" and then return a pointer to it. 153212f080e7Smrj * 153312f080e7Smrj * Return NULL if one could not be found. 153412f080e7Smrj * 153512f080e7Smrj * NOTE: This is needed for rootnex_intr_ops() 15367c478bd9Sstevel@tonic-gate */ 153712f080e7Smrj static struct intrspec * 153812f080e7Smrj rootnex_get_ispec(dev_info_t *rdip, int inum) 15397c478bd9Sstevel@tonic-gate { 154012f080e7Smrj struct ddi_parent_private_data *pdp = ddi_get_parent_data(rdip); 15417c478bd9Sstevel@tonic-gate 15427c478bd9Sstevel@tonic-gate /* 154312f080e7Smrj * Special case handling for drivers that provide their own 154412f080e7Smrj * intrspec structures instead of relying on the DDI framework. 154512f080e7Smrj * 154612f080e7Smrj * A broken hardware driver in ON could potentially provide its 154712f080e7Smrj * own intrspec structure, instead of relying on the hardware. 154812f080e7Smrj * If these drivers are children of 'rootnex' then we need to 154912f080e7Smrj * continue to provide backward compatibility to them here. 155012f080e7Smrj * 155112f080e7Smrj * Following check is a special case for 'pcic' driver which 155212f080e7Smrj * was found to have broken hardwre andby provides its own intrspec. 155312f080e7Smrj * 155412f080e7Smrj * Verbatim comments from this driver are shown here: 155512f080e7Smrj * "Don't use the ddi_add_intr since we don't have a 155612f080e7Smrj * default intrspec in all cases." 155712f080e7Smrj * 155812f080e7Smrj * Since an 'ispec' may not be always created for it, 155912f080e7Smrj * check for that and create one if so. 156012f080e7Smrj * 156112f080e7Smrj * NOTE: Currently 'pcic' is the only driver found to do this. 15627c478bd9Sstevel@tonic-gate */ 156312f080e7Smrj if (!pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) { 156412f080e7Smrj pdp->par_nintr = 1; 156512f080e7Smrj pdp->par_intr = kmem_zalloc(sizeof (struct intrspec) * 156612f080e7Smrj pdp->par_nintr, KM_SLEEP); 156712f080e7Smrj } 156812f080e7Smrj 156912f080e7Smrj /* Validate the interrupt number */ 157012f080e7Smrj if (inum >= pdp->par_nintr) 157112f080e7Smrj return (NULL); 157212f080e7Smrj 157312f080e7Smrj /* Get the interrupt structure pointer and return that */ 157412f080e7Smrj return ((struct intrspec *)&pdp->par_intr[inum]); 157512f080e7Smrj } 157612f080e7Smrj 1577*7ff178cdSJimmy Vetayases /* 1578*7ff178cdSJimmy Vetayases * Allocate interrupt vector for FIXED (legacy) type. 1579*7ff178cdSJimmy Vetayases */ 1580*7ff178cdSJimmy Vetayases static int 1581*7ff178cdSJimmy Vetayases rootnex_alloc_intr_fixed(dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp, 1582*7ff178cdSJimmy Vetayases void *result) 1583*7ff178cdSJimmy Vetayases { 1584*7ff178cdSJimmy Vetayases struct intrspec *ispec; 1585*7ff178cdSJimmy Vetayases ddi_intr_handle_impl_t info_hdl; 1586*7ff178cdSJimmy Vetayases int ret; 1587*7ff178cdSJimmy Vetayases int free_phdl = 0; 1588*7ff178cdSJimmy Vetayases apic_get_type_t type_info; 1589*7ff178cdSJimmy Vetayases 1590*7ff178cdSJimmy Vetayases if (psm_intr_ops == NULL) 1591*7ff178cdSJimmy Vetayases return (DDI_FAILURE); 1592*7ff178cdSJimmy Vetayases 1593*7ff178cdSJimmy Vetayases if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1594*7ff178cdSJimmy Vetayases return (DDI_FAILURE); 1595*7ff178cdSJimmy Vetayases 1596*7ff178cdSJimmy Vetayases /* 1597*7ff178cdSJimmy Vetayases * If the PSM module is "APIX" then pass the request for it 1598*7ff178cdSJimmy Vetayases * to allocate the vector now. 1599*7ff178cdSJimmy Vetayases */ 1600*7ff178cdSJimmy Vetayases bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t)); 1601*7ff178cdSJimmy Vetayases info_hdl.ih_private = &type_info; 1602*7ff178cdSJimmy Vetayases if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) == 1603*7ff178cdSJimmy Vetayases PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) { 1604*7ff178cdSJimmy Vetayases if (hdlp->ih_private == NULL) { /* allocate phdl structure */ 1605*7ff178cdSJimmy Vetayases free_phdl = 1; 1606*7ff178cdSJimmy Vetayases i_ddi_alloc_intr_phdl(hdlp); 1607*7ff178cdSJimmy Vetayases } 1608*7ff178cdSJimmy Vetayases ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec; 1609*7ff178cdSJimmy Vetayases ret = (*psm_intr_ops)(rdip, hdlp, 1610*7ff178cdSJimmy Vetayases PSM_INTR_OP_ALLOC_VECTORS, result); 1611*7ff178cdSJimmy Vetayases if (free_phdl) { /* free up the phdl structure */ 1612*7ff178cdSJimmy Vetayases free_phdl = 0; 1613*7ff178cdSJimmy Vetayases i_ddi_free_intr_phdl(hdlp); 1614*7ff178cdSJimmy Vetayases hdlp->ih_private = NULL; 1615*7ff178cdSJimmy Vetayases } 1616*7ff178cdSJimmy Vetayases } else { 1617*7ff178cdSJimmy Vetayases /* 1618*7ff178cdSJimmy Vetayases * No APIX module; fall back to the old scheme where the 1619*7ff178cdSJimmy Vetayases * interrupt vector is allocated during ddi_enable_intr() call. 1620*7ff178cdSJimmy Vetayases */ 1621*7ff178cdSJimmy Vetayases hdlp->ih_pri = ispec->intrspec_pri; 1622*7ff178cdSJimmy Vetayases *(int *)result = hdlp->ih_scratch1; 1623*7ff178cdSJimmy Vetayases ret = DDI_SUCCESS; 1624*7ff178cdSJimmy Vetayases } 1625*7ff178cdSJimmy Vetayases 1626*7ff178cdSJimmy Vetayases return (ret); 1627*7ff178cdSJimmy Vetayases } 1628*7ff178cdSJimmy Vetayases 1629*7ff178cdSJimmy Vetayases /* 1630*7ff178cdSJimmy Vetayases * Free up interrupt vector for FIXED (legacy) type. 1631*7ff178cdSJimmy Vetayases */ 1632*7ff178cdSJimmy Vetayases static int 1633*7ff178cdSJimmy Vetayases rootnex_free_intr_fixed(dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp) 1634*7ff178cdSJimmy Vetayases { 1635*7ff178cdSJimmy Vetayases struct intrspec *ispec; 1636*7ff178cdSJimmy Vetayases struct ddi_parent_private_data *pdp; 1637*7ff178cdSJimmy Vetayases ddi_intr_handle_impl_t info_hdl; 1638*7ff178cdSJimmy Vetayases int ret; 1639*7ff178cdSJimmy Vetayases apic_get_type_t type_info; 1640*7ff178cdSJimmy Vetayases 1641*7ff178cdSJimmy Vetayases if (psm_intr_ops == NULL) 1642*7ff178cdSJimmy Vetayases return (DDI_FAILURE); 1643*7ff178cdSJimmy Vetayases 1644*7ff178cdSJimmy Vetayases /* 1645*7ff178cdSJimmy Vetayases * If the PSM module is "APIX" then pass the request for it 1646*7ff178cdSJimmy Vetayases * to free up the vector now. 1647*7ff178cdSJimmy Vetayases */ 1648*7ff178cdSJimmy Vetayases bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t)); 1649*7ff178cdSJimmy Vetayases info_hdl.ih_private = &type_info; 1650*7ff178cdSJimmy Vetayases if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) == 1651*7ff178cdSJimmy Vetayases PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) { 1652*7ff178cdSJimmy Vetayases if ((ispec = rootnex_get_ispec(rdip, hdlp->ih_inum)) == NULL) 1653*7ff178cdSJimmy Vetayases return (DDI_FAILURE); 1654*7ff178cdSJimmy Vetayases ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec; 1655*7ff178cdSJimmy Vetayases ret = (*psm_intr_ops)(rdip, hdlp, 1656*7ff178cdSJimmy Vetayases PSM_INTR_OP_FREE_VECTORS, NULL); 1657*7ff178cdSJimmy Vetayases } else { 1658*7ff178cdSJimmy Vetayases /* 1659*7ff178cdSJimmy Vetayases * No APIX module; fall back to the old scheme where 1660*7ff178cdSJimmy Vetayases * the interrupt vector was already freed during 1661*7ff178cdSJimmy Vetayases * ddi_disable_intr() call. 1662*7ff178cdSJimmy Vetayases */ 1663*7ff178cdSJimmy Vetayases ret = DDI_SUCCESS; 1664*7ff178cdSJimmy Vetayases } 1665*7ff178cdSJimmy Vetayases 1666*7ff178cdSJimmy Vetayases pdp = ddi_get_parent_data(rdip); 1667*7ff178cdSJimmy Vetayases 1668*7ff178cdSJimmy Vetayases /* 1669*7ff178cdSJimmy Vetayases * Special case for 'pcic' driver' only. 1670*7ff178cdSJimmy Vetayases * If an intrspec was created for it, clean it up here 1671*7ff178cdSJimmy Vetayases * See detailed comments on this in the function 1672*7ff178cdSJimmy Vetayases * rootnex_get_ispec(). 1673*7ff178cdSJimmy Vetayases */ 1674*7ff178cdSJimmy Vetayases if (pdp->par_intr && strcmp(ddi_get_name(rdip), "pcic") == 0) { 1675*7ff178cdSJimmy Vetayases kmem_free(pdp->par_intr, sizeof (struct intrspec) * 1676*7ff178cdSJimmy Vetayases pdp->par_nintr); 1677*7ff178cdSJimmy Vetayases /* 1678*7ff178cdSJimmy Vetayases * Set it to zero; so that 1679*7ff178cdSJimmy Vetayases * DDI framework doesn't free it again 1680*7ff178cdSJimmy Vetayases */ 1681*7ff178cdSJimmy Vetayases pdp->par_intr = NULL; 1682*7ff178cdSJimmy Vetayases pdp->par_nintr = 0; 1683*7ff178cdSJimmy Vetayases } 1684*7ff178cdSJimmy Vetayases 1685*7ff178cdSJimmy Vetayases return (ret); 1686*7ff178cdSJimmy Vetayases } 1687*7ff178cdSJimmy Vetayases 168812f080e7Smrj 168912f080e7Smrj /* 169012f080e7Smrj * ****************** 169112f080e7Smrj * dma related code 169212f080e7Smrj * ****************** 169312f080e7Smrj */ 169412f080e7Smrj 169512f080e7Smrj /*ARGSUSED*/ 169612f080e7Smrj static int 169720906b23SVikram Hegde rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip, 169820906b23SVikram Hegde ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg, 169920906b23SVikram Hegde ddi_dma_handle_t *handlep) 170012f080e7Smrj { 170112f080e7Smrj uint64_t maxsegmentsize_ll; 170212f080e7Smrj uint_t maxsegmentsize; 170312f080e7Smrj ddi_dma_impl_t *hp; 170412f080e7Smrj rootnex_dma_t *dma; 170512f080e7Smrj uint64_t count_max; 170612f080e7Smrj uint64_t seg; 170712f080e7Smrj int kmflag; 170812f080e7Smrj int e; 170912f080e7Smrj 171012f080e7Smrj 171112f080e7Smrj /* convert our sleep flags */ 171212f080e7Smrj if (waitfp == DDI_DMA_SLEEP) { 171312f080e7Smrj kmflag = KM_SLEEP; 171412f080e7Smrj } else { 171512f080e7Smrj kmflag = KM_NOSLEEP; 171612f080e7Smrj } 171712f080e7Smrj 171812f080e7Smrj /* 171912f080e7Smrj * We try to do only one memory allocation here. We'll do a little 172012f080e7Smrj * pointer manipulation later. If the bind ends up taking more than 172112f080e7Smrj * our prealloc's space, we'll have to allocate more memory in the 172212f080e7Smrj * bind operation. Not great, but much better than before and the 172312f080e7Smrj * best we can do with the current bind interfaces. 172412f080e7Smrj */ 172512f080e7Smrj hp = kmem_cache_alloc(rootnex_state->r_dmahdl_cache, kmflag); 172612f080e7Smrj if (hp == NULL) { 172712f080e7Smrj if (waitfp != DDI_DMA_DONTWAIT) { 172812f080e7Smrj ddi_set_callback(waitfp, arg, 172912f080e7Smrj &rootnex_state->r_dvma_call_list_id); 173012f080e7Smrj } 173112f080e7Smrj return (DDI_DMA_NORESOURCES); 173212f080e7Smrj } 173312f080e7Smrj 173412f080e7Smrj /* Do our pointer manipulation now, align the structures */ 173512f080e7Smrj hp->dmai_private = (void *)(((uintptr_t)hp + 173612f080e7Smrj (uintptr_t)sizeof (ddi_dma_impl_t) + 0x7) & ~0x7); 173712f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 173812f080e7Smrj dma->dp_prealloc_buffer = (uchar_t *)(((uintptr_t)dma + 173912f080e7Smrj sizeof (rootnex_dma_t) + 0x7) & ~0x7); 174012f080e7Smrj 174112f080e7Smrj /* setup the handle */ 174212f080e7Smrj rootnex_clean_dmahdl(hp); 1743567c0b92SStephen Hanson hp->dmai_error.err_fep = NULL; 1744567c0b92SStephen Hanson hp->dmai_error.err_cf = NULL; 174512f080e7Smrj dma->dp_dip = rdip; 174612f080e7Smrj dma->dp_sglinfo.si_min_addr = attr->dma_attr_addr_lo; 174712f080e7Smrj dma->dp_sglinfo.si_max_addr = attr->dma_attr_addr_hi; 174812f080e7Smrj hp->dmai_minxfer = attr->dma_attr_minxfer; 174912f080e7Smrj hp->dmai_burstsizes = attr->dma_attr_burstsizes; 175012f080e7Smrj hp->dmai_rdip = rdip; 175112f080e7Smrj hp->dmai_attr = *attr; 175212f080e7Smrj 175312f080e7Smrj /* we don't need to worry about the SPL since we do a tryenter */ 175412f080e7Smrj mutex_init(&dma->dp_mutex, NULL, MUTEX_DRIVER, NULL); 175512f080e7Smrj 175612f080e7Smrj /* 175712f080e7Smrj * Figure out our maximum segment size. If the segment size is greater 175812f080e7Smrj * than 4G, we will limit it to (4G - 1) since the max size of a dma 175912f080e7Smrj * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and 176012f080e7Smrj * dma_attr_count_max are size-1 type values. 176112f080e7Smrj * 176212f080e7Smrj * Maximum segment size is the largest physically contiguous chunk of 176312f080e7Smrj * memory that we can return from a bind (i.e. the maximum size of a 176412f080e7Smrj * single cookie). 176512f080e7Smrj */ 176612f080e7Smrj 176712f080e7Smrj /* handle the rollover cases */ 176812f080e7Smrj seg = attr->dma_attr_seg + 1; 176912f080e7Smrj if (seg < attr->dma_attr_seg) { 177012f080e7Smrj seg = attr->dma_attr_seg; 177112f080e7Smrj } 177212f080e7Smrj count_max = attr->dma_attr_count_max + 1; 177312f080e7Smrj if (count_max < attr->dma_attr_count_max) { 177412f080e7Smrj count_max = attr->dma_attr_count_max; 177512f080e7Smrj } 177612f080e7Smrj 177712f080e7Smrj /* 177812f080e7Smrj * granularity may or may not be a power of two. If it isn't, we can't 177912f080e7Smrj * use a simple mask. 178012f080e7Smrj */ 178112f080e7Smrj if (attr->dma_attr_granular & (attr->dma_attr_granular - 1)) { 178212f080e7Smrj dma->dp_granularity_power_2 = B_FALSE; 178312f080e7Smrj } else { 178412f080e7Smrj dma->dp_granularity_power_2 = B_TRUE; 178512f080e7Smrj } 178612f080e7Smrj 178712f080e7Smrj /* 178812f080e7Smrj * maxxfer should be a whole multiple of granularity. If we're going to 178912f080e7Smrj * break up a window because we're greater than maxxfer, we might as 179012f080e7Smrj * well make sure it's maxxfer is a whole multiple so we don't have to 179112f080e7Smrj * worry about triming the window later on for this case. 179212f080e7Smrj */ 179312f080e7Smrj if (attr->dma_attr_granular > 1) { 179412f080e7Smrj if (dma->dp_granularity_power_2) { 179512f080e7Smrj dma->dp_maxxfer = attr->dma_attr_maxxfer - 179612f080e7Smrj (attr->dma_attr_maxxfer & 179712f080e7Smrj (attr->dma_attr_granular - 1)); 179812f080e7Smrj } else { 179912f080e7Smrj dma->dp_maxxfer = attr->dma_attr_maxxfer - 180012f080e7Smrj (attr->dma_attr_maxxfer % attr->dma_attr_granular); 180112f080e7Smrj } 180212f080e7Smrj } else { 180312f080e7Smrj dma->dp_maxxfer = attr->dma_attr_maxxfer; 180412f080e7Smrj } 180512f080e7Smrj 180612f080e7Smrj maxsegmentsize_ll = MIN(seg, dma->dp_maxxfer); 180712f080e7Smrj maxsegmentsize_ll = MIN(maxsegmentsize_ll, count_max); 180812f080e7Smrj if (maxsegmentsize_ll == 0 || (maxsegmentsize_ll > 0xFFFFFFFF)) { 180912f080e7Smrj maxsegmentsize = 0xFFFFFFFF; 181012f080e7Smrj } else { 181112f080e7Smrj maxsegmentsize = maxsegmentsize_ll; 181212f080e7Smrj } 181312f080e7Smrj dma->dp_sglinfo.si_max_cookie_size = maxsegmentsize; 181412f080e7Smrj dma->dp_sglinfo.si_segmask = attr->dma_attr_seg; 181507c6692fSMark Johnson dma->dp_sglinfo.si_flags = attr->dma_attr_flags; 181612f080e7Smrj 181712f080e7Smrj /* check the ddi_dma_attr arg to make sure it makes a little sense */ 181812f080e7Smrj if (rootnex_alloc_check_parms) { 181912f080e7Smrj e = rootnex_valid_alloc_parms(attr, maxsegmentsize); 182012f080e7Smrj if (e != DDI_SUCCESS) { 182112f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ALLOC_FAIL]); 182212f080e7Smrj (void) rootnex_dma_freehdl(dip, rdip, 182312f080e7Smrj (ddi_dma_handle_t)hp); 182412f080e7Smrj return (e); 182512f080e7Smrj } 182612f080e7Smrj } 182712f080e7Smrj 182812f080e7Smrj *handlep = (ddi_dma_handle_t)hp; 182912f080e7Smrj 18300b7ba611SMark Johnson ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 18310b7ba611SMark Johnson ROOTNEX_DPROBE1(rootnex__alloc__handle, uint64_t, 183212f080e7Smrj rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 183312f080e7Smrj 183412f080e7Smrj return (DDI_SUCCESS); 183512f080e7Smrj } 183612f080e7Smrj 183712f080e7Smrj 183812f080e7Smrj /* 183920906b23SVikram Hegde * rootnex_dma_allochdl() 184020906b23SVikram Hegde * called from ddi_dma_alloc_handle(). 184112f080e7Smrj */ 184220906b23SVikram Hegde static int 184320906b23SVikram Hegde rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_attr_t *attr, 184420906b23SVikram Hegde int (*waitfp)(caddr_t), caddr_t arg, ddi_dma_handle_t *handlep) 184520906b23SVikram Hegde { 1846567c0b92SStephen Hanson int retval; 18473a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 184820906b23SVikram Hegde uint_t error = ENOTSUP; 184920906b23SVikram Hegde 185020906b23SVikram Hegde retval = iommulib_nex_open(rdip, &error); 185120906b23SVikram Hegde 185220906b23SVikram Hegde if (retval != DDI_SUCCESS && error == ENOTSUP) { 185320906b23SVikram Hegde /* No IOMMU */ 185420906b23SVikram Hegde return (rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg, 185520906b23SVikram Hegde handlep)); 185620906b23SVikram Hegde } else if (retval != DDI_SUCCESS) { 185720906b23SVikram Hegde return (DDI_FAILURE); 185820906b23SVikram Hegde } 185920906b23SVikram Hegde 1860b51bbbf5SVikram Hegde ASSERT(IOMMU_USED(rdip)); 186120906b23SVikram Hegde 186220906b23SVikram Hegde /* has an IOMMU */ 1863567c0b92SStephen Hanson retval = iommulib_nexdma_allochdl(dip, rdip, attr, 1864567c0b92SStephen Hanson waitfp, arg, handlep); 186520906b23SVikram Hegde #else 1866567c0b92SStephen Hanson retval = rootnex_coredma_allochdl(dip, rdip, attr, waitfp, arg, 1867567c0b92SStephen Hanson handlep); 186820906b23SVikram Hegde #endif 1869567c0b92SStephen Hanson if (retval == DDI_SUCCESS) 1870567c0b92SStephen Hanson ndi_fmc_insert(rdip, DMA_HANDLE, *handlep, NULL); 1871567c0b92SStephen Hanson return (retval); 187220906b23SVikram Hegde } 187320906b23SVikram Hegde 187412f080e7Smrj /*ARGSUSED*/ 187512f080e7Smrj static int 187620906b23SVikram Hegde rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip, 187720906b23SVikram Hegde ddi_dma_handle_t handle) 187812f080e7Smrj { 187912f080e7Smrj ddi_dma_impl_t *hp; 188012f080e7Smrj rootnex_dma_t *dma; 188112f080e7Smrj 188212f080e7Smrj 188312f080e7Smrj hp = (ddi_dma_impl_t *)handle; 188412f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 188512f080e7Smrj 188612f080e7Smrj /* unbind should have been called first */ 188712f080e7Smrj ASSERT(!dma->dp_inuse); 188812f080e7Smrj 188912f080e7Smrj mutex_destroy(&dma->dp_mutex); 189012f080e7Smrj kmem_cache_free(rootnex_state->r_dmahdl_cache, hp); 189112f080e7Smrj 18920b7ba611SMark Johnson ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 18930b7ba611SMark Johnson ROOTNEX_DPROBE1(rootnex__free__handle, uint64_t, 189412f080e7Smrj rootnex_cnt[ROOTNEX_CNT_ACTIVE_HDLS]); 189512f080e7Smrj 189612f080e7Smrj if (rootnex_state->r_dvma_call_list_id) 189712f080e7Smrj ddi_run_callback(&rootnex_state->r_dvma_call_list_id); 189812f080e7Smrj 189912f080e7Smrj return (DDI_SUCCESS); 190012f080e7Smrj } 190112f080e7Smrj 190212f080e7Smrj /* 190320906b23SVikram Hegde * rootnex_dma_freehdl() 190420906b23SVikram Hegde * called from ddi_dma_free_handle(). 190512f080e7Smrj */ 190620906b23SVikram Hegde static int 190720906b23SVikram Hegde rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle) 190820906b23SVikram Hegde { 1909567c0b92SStephen Hanson ndi_fmc_remove(rdip, DMA_HANDLE, handle); 19103a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 1911b51bbbf5SVikram Hegde if (IOMMU_USED(rdip)) { 191220906b23SVikram Hegde return (iommulib_nexdma_freehdl(dip, rdip, handle)); 191320906b23SVikram Hegde } 191420906b23SVikram Hegde #endif 191520906b23SVikram Hegde return (rootnex_coredma_freehdl(dip, rdip, handle)); 191620906b23SVikram Hegde } 191720906b23SVikram Hegde 191812f080e7Smrj /*ARGSUSED*/ 191912f080e7Smrj static int 192020906b23SVikram Hegde rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 192120906b23SVikram Hegde ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 192220906b23SVikram Hegde ddi_dma_cookie_t *cookiep, uint_t *ccountp) 192312f080e7Smrj { 192412f080e7Smrj rootnex_sglinfo_t *sinfo; 192512f080e7Smrj ddi_dma_attr_t *attr; 192612f080e7Smrj ddi_dma_impl_t *hp; 192712f080e7Smrj rootnex_dma_t *dma; 192812f080e7Smrj int kmflag; 192912f080e7Smrj int e; 193012f080e7Smrj 193112f080e7Smrj hp = (ddi_dma_impl_t *)handle; 193212f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 193312f080e7Smrj sinfo = &dma->dp_sglinfo; 193412f080e7Smrj attr = &hp->dmai_attr; 193512f080e7Smrj 193694f1124eSVikram Hegde if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 193794f1124eSVikram Hegde dma->dp_sleep_flags = KM_SLEEP; 193894f1124eSVikram Hegde } else { 193994f1124eSVikram Hegde dma->dp_sleep_flags = KM_NOSLEEP; 194094f1124eSVikram Hegde } 194194f1124eSVikram Hegde 194212f080e7Smrj hp->dmai_rflags = dmareq->dmar_flags & DMP_DDIFLAGS; 194312f080e7Smrj 194412f080e7Smrj /* 194512f080e7Smrj * This is useful for debugging a driver. Not as useful in a production 194612f080e7Smrj * system. The only time this will fail is if you have a driver bug. 194712f080e7Smrj */ 194812f080e7Smrj if (rootnex_bind_check_inuse) { 194912f080e7Smrj /* 195012f080e7Smrj * No one else should ever have this lock unless someone else 195112f080e7Smrj * is trying to use this handle. So contention on the lock 195212f080e7Smrj * is the same as inuse being set. 195312f080e7Smrj */ 195412f080e7Smrj e = mutex_tryenter(&dma->dp_mutex); 195512f080e7Smrj if (e == 0) { 195612f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 195712f080e7Smrj return (DDI_DMA_INUSE); 195812f080e7Smrj } 195912f080e7Smrj if (dma->dp_inuse) { 196012f080e7Smrj mutex_exit(&dma->dp_mutex); 196112f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 196212f080e7Smrj return (DDI_DMA_INUSE); 196312f080e7Smrj } 196412f080e7Smrj dma->dp_inuse = B_TRUE; 196512f080e7Smrj mutex_exit(&dma->dp_mutex); 196612f080e7Smrj } 196712f080e7Smrj 196812f080e7Smrj /* check the ddi_dma_attr arg to make sure it makes a little sense */ 196912f080e7Smrj if (rootnex_bind_check_parms) { 197012f080e7Smrj e = rootnex_valid_bind_parms(dmareq, attr); 197112f080e7Smrj if (e != DDI_SUCCESS) { 197212f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 197312f080e7Smrj rootnex_clean_dmahdl(hp); 197412f080e7Smrj return (e); 197512f080e7Smrj } 197612f080e7Smrj } 197712f080e7Smrj 197812f080e7Smrj /* save away the original bind info */ 197912f080e7Smrj dma->dp_dma = dmareq->dmar_object; 198012f080e7Smrj 19813a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 19823a634bfcSVikram Hegde e = immu_map_sgl(hp, dmareq, rootnex_prealloc_cookies, rdip); 198386c1f4dcSVikram Hegde switch (e) { 19843a634bfcSVikram Hegde case DDI_DMA_MAPPED: 19853a634bfcSVikram Hegde goto out; 19863a634bfcSVikram Hegde case DDI_DMA_USE_PHYSICAL: 19873a634bfcSVikram Hegde break; 19883a634bfcSVikram Hegde case DDI_DMA_PARTIAL: 19893a634bfcSVikram Hegde ddi_err(DER_PANIC, rdip, "Partial DVMA map"); 19903a634bfcSVikram Hegde e = DDI_DMA_NORESOURCES; 19913a634bfcSVikram Hegde /*FALLTHROUGH*/ 199286c1f4dcSVikram Hegde default: 19933a634bfcSVikram Hegde ddi_err(DER_MODE, rdip, "DVMA map failed"); 19943a634bfcSVikram Hegde ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 199586c1f4dcSVikram Hegde rootnex_clean_dmahdl(hp); 19963a634bfcSVikram Hegde return (e); 199786c1f4dcSVikram Hegde } 199820906b23SVikram Hegde #endif 199986c1f4dcSVikram Hegde 200012f080e7Smrj /* 200112f080e7Smrj * Figure out a rough estimate of what maximum number of pages this 200212f080e7Smrj * buffer could use (a high estimate of course). 200312f080e7Smrj */ 200412f080e7Smrj sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1; 200512f080e7Smrj 200612f080e7Smrj /* 200712f080e7Smrj * We'll use the pre-allocated cookies for any bind that will *always* 200812f080e7Smrj * fit (more important to be consistent, we don't want to create 200912f080e7Smrj * additional degenerate cases). 201012f080e7Smrj */ 201112f080e7Smrj if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) { 201212f080e7Smrj dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer; 201312f080e7Smrj dma->dp_need_to_free_cookie = B_FALSE; 201412f080e7Smrj DTRACE_PROBE2(rootnex__bind__prealloc, dev_info_t *, rdip, 201512f080e7Smrj uint_t, sinfo->si_max_pages); 201612f080e7Smrj 201712f080e7Smrj /* 201812f080e7Smrj * For anything larger than that, we'll go ahead and allocate the 201912f080e7Smrj * maximum number of pages we expect to see. Hopefuly, we won't be 202012f080e7Smrj * seeing this path in the fast path for high performance devices very 202112f080e7Smrj * frequently. 202212f080e7Smrj * 202312f080e7Smrj * a ddi bind interface that allowed the driver to provide storage to 202412f080e7Smrj * the bind interface would speed this case up. 202512f080e7Smrj */ 202612f080e7Smrj } else { 202712f080e7Smrj /* convert the sleep flags */ 202812f080e7Smrj if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 202912f080e7Smrj kmflag = KM_SLEEP; 203012f080e7Smrj } else { 203112f080e7Smrj kmflag = KM_NOSLEEP; 203212f080e7Smrj } 203312f080e7Smrj 203412f080e7Smrj /* 203512f080e7Smrj * Save away how much memory we allocated. If we're doing a 203612f080e7Smrj * nosleep, the alloc could fail... 203712f080e7Smrj */ 203812f080e7Smrj dma->dp_cookie_size = sinfo->si_max_pages * 203912f080e7Smrj sizeof (ddi_dma_cookie_t); 204012f080e7Smrj dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag); 204112f080e7Smrj if (dma->dp_cookies == NULL) { 204212f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 204312f080e7Smrj rootnex_clean_dmahdl(hp); 204412f080e7Smrj return (DDI_DMA_NORESOURCES); 204512f080e7Smrj } 204612f080e7Smrj dma->dp_need_to_free_cookie = B_TRUE; 204712f080e7Smrj DTRACE_PROBE2(rootnex__bind__alloc, dev_info_t *, rdip, uint_t, 204812f080e7Smrj sinfo->si_max_pages); 204912f080e7Smrj } 205012f080e7Smrj hp->dmai_cookie = dma->dp_cookies; 205112f080e7Smrj 205212f080e7Smrj /* 205312f080e7Smrj * Get the real sgl. rootnex_get_sgl will fill in cookie array while 20543a634bfcSVikram Hegde * looking at the constraints in the dma structure. It will then put 20553a634bfcSVikram Hegde * some additional state about the sgl in the dma struct (i.e. is 20563a634bfcSVikram Hegde * the sgl clean, or do we need to do some munging; how many pages 20573a634bfcSVikram Hegde * need to be copied, etc.) 205812f080e7Smrj */ 205912f080e7Smrj rootnex_get_sgl(&dmareq->dmar_object, dma->dp_cookies, 206012f080e7Smrj &dma->dp_sglinfo); 206112f080e7Smrj 20623a634bfcSVikram Hegde out: 206386c1f4dcSVikram Hegde ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages); 206412f080e7Smrj /* if we don't need a copy buffer, we don't need to sync */ 206512f080e7Smrj if (sinfo->si_copybuf_req == 0) { 206612f080e7Smrj hp->dmai_rflags |= DMP_NOSYNC; 206712f080e7Smrj } 206812f080e7Smrj 206912f080e7Smrj /* 207012f080e7Smrj * if we don't need the copybuf and we don't need to do a partial, we 207112f080e7Smrj * hit the fast path. All the high performance devices should be trying 207212f080e7Smrj * to hit this path. To hit this path, a device should be able to reach 207312f080e7Smrj * all of memory, shouldn't try to bind more than it can transfer, and 207412f080e7Smrj * the buffer shouldn't require more cookies than the driver/device can 207512f080e7Smrj * handle [sgllen]). 207612f080e7Smrj */ 207712f080e7Smrj if ((sinfo->si_copybuf_req == 0) && 207812f080e7Smrj (sinfo->si_sgl_size <= attr->dma_attr_sgllen) && 207912f080e7Smrj (dma->dp_dma.dmao_size < dma->dp_maxxfer)) { 208012f080e7Smrj /* 208185c8e0e8Sstephh * If the driver supports FMA, insert the handle in the FMA DMA 208285c8e0e8Sstephh * handle cache. 208385c8e0e8Sstephh */ 2084567c0b92SStephen Hanson if (attr->dma_attr_flags & DDI_DMA_FLAGERR) 208585c8e0e8Sstephh hp->dmai_error.err_cf = rootnex_dma_check; 208685c8e0e8Sstephh 208785c8e0e8Sstephh /* 208812f080e7Smrj * copy out the first cookie and ccountp, set the cookie 208912f080e7Smrj * pointer to the second cookie. The first cookie is passed 209012f080e7Smrj * back on the stack. Additional cookies are accessed via 209112f080e7Smrj * ddi_dma_nextcookie() 209212f080e7Smrj */ 209312f080e7Smrj *cookiep = dma->dp_cookies[0]; 209412f080e7Smrj *ccountp = sinfo->si_sgl_size; 209512f080e7Smrj hp->dmai_cookie++; 209612f080e7Smrj hp->dmai_rflags &= ~DDI_DMA_PARTIAL; 20973a634bfcSVikram Hegde ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 20983a634bfcSVikram Hegde DTRACE_PROBE3(rootnex__bind__fast, dev_info_t *, rdip, 20993a634bfcSVikram Hegde uint64_t, rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], 21003a634bfcSVikram Hegde uint_t, dma->dp_dma.dmao_size); 21013a634bfcSVikram Hegde 21023a634bfcSVikram Hegde 210312f080e7Smrj return (DDI_DMA_MAPPED); 210412f080e7Smrj } 210512f080e7Smrj 210612f080e7Smrj /* 210712f080e7Smrj * go to the slow path, we may need to alloc more memory, create 210812f080e7Smrj * multiple windows, and munge up a sgl to make the device happy. 210912f080e7Smrj */ 211012f080e7Smrj e = rootnex_bind_slowpath(hp, dmareq, dma, attr, kmflag); 211112f080e7Smrj if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) { 211212f080e7Smrj if (dma->dp_need_to_free_cookie) { 211312f080e7Smrj kmem_free(dma->dp_cookies, dma->dp_cookie_size); 211412f080e7Smrj } 211512f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]); 211612f080e7Smrj rootnex_clean_dmahdl(hp); /* must be after free cookie */ 211712f080e7Smrj return (e); 211812f080e7Smrj } 211912f080e7Smrj 212085c8e0e8Sstephh /* 212185c8e0e8Sstephh * If the driver supports FMA, insert the handle in the FMA DMA handle 212285c8e0e8Sstephh * cache. 212385c8e0e8Sstephh */ 2124567c0b92SStephen Hanson if (attr->dma_attr_flags & DDI_DMA_FLAGERR) 212585c8e0e8Sstephh hp->dmai_error.err_cf = rootnex_dma_check; 212685c8e0e8Sstephh 212712f080e7Smrj /* if the first window uses the copy buffer, sync it for the device */ 212812f080e7Smrj if ((dma->dp_window[dma->dp_current_win].wd_dosync) && 212912f080e7Smrj (hp->dmai_rflags & DDI_DMA_WRITE)) { 213094f1124eSVikram Hegde (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 213112f080e7Smrj DDI_DMA_SYNC_FORDEV); 213212f080e7Smrj } 213312f080e7Smrj 213412f080e7Smrj /* 213512f080e7Smrj * copy out the first cookie and ccountp, set the cookie pointer to the 213612f080e7Smrj * second cookie. Make sure the partial flag is set/cleared correctly. 213712f080e7Smrj * If we have a partial map (i.e. multiple windows), the number of 213812f080e7Smrj * cookies we return is the number of cookies in the first window. 213912f080e7Smrj */ 214012f080e7Smrj if (e == DDI_DMA_MAPPED) { 214112f080e7Smrj hp->dmai_rflags &= ~DDI_DMA_PARTIAL; 214212f080e7Smrj *ccountp = sinfo->si_sgl_size; 21433a634bfcSVikram Hegde hp->dmai_nwin = 1; 214412f080e7Smrj } else { 214512f080e7Smrj hp->dmai_rflags |= DDI_DMA_PARTIAL; 214612f080e7Smrj *ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt; 214712f080e7Smrj ASSERT(hp->dmai_nwin <= dma->dp_max_win); 214812f080e7Smrj } 214912f080e7Smrj *cookiep = dma->dp_cookies[0]; 215012f080e7Smrj hp->dmai_cookie++; 215112f080e7Smrj 21520b7ba611SMark Johnson ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 21530b7ba611SMark Johnson ROOTNEX_DPROBE3(rootnex__bind__slow, dev_info_t *, rdip, uint64_t, 215412f080e7Smrj rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t, 215512f080e7Smrj dma->dp_dma.dmao_size); 215612f080e7Smrj return (e); 215712f080e7Smrj } 215812f080e7Smrj 215912f080e7Smrj /* 216020906b23SVikram Hegde * rootnex_dma_bindhdl() 216120906b23SVikram Hegde * called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle(). 216212f080e7Smrj */ 216320906b23SVikram Hegde static int 216420906b23SVikram Hegde rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip, 216520906b23SVikram Hegde ddi_dma_handle_t handle, struct ddi_dma_req *dmareq, 216620906b23SVikram Hegde ddi_dma_cookie_t *cookiep, uint_t *ccountp) 216720906b23SVikram Hegde { 21683a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 2169b51bbbf5SVikram Hegde if (IOMMU_USED(rdip)) { 217020906b23SVikram Hegde return (iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq, 217120906b23SVikram Hegde cookiep, ccountp)); 217220906b23SVikram Hegde } 217320906b23SVikram Hegde #endif 217420906b23SVikram Hegde return (rootnex_coredma_bindhdl(dip, rdip, handle, dmareq, 217520906b23SVikram Hegde cookiep, ccountp)); 217620906b23SVikram Hegde } 217720906b23SVikram Hegde 21783a634bfcSVikram Hegde 21793a634bfcSVikram Hegde 218012f080e7Smrj /*ARGSUSED*/ 218112f080e7Smrj static int 218220906b23SVikram Hegde rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 218312f080e7Smrj ddi_dma_handle_t handle) 218412f080e7Smrj { 218512f080e7Smrj ddi_dma_impl_t *hp; 218612f080e7Smrj rootnex_dma_t *dma; 218712f080e7Smrj int e; 218812f080e7Smrj 218912f080e7Smrj hp = (ddi_dma_impl_t *)handle; 219012f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 219112f080e7Smrj 219212f080e7Smrj /* make sure the buffer wasn't free'd before calling unbind */ 219312f080e7Smrj if (rootnex_unbind_verify_buffer) { 219412f080e7Smrj e = rootnex_verify_buffer(dma); 219512f080e7Smrj if (e != DDI_SUCCESS) { 219612f080e7Smrj ASSERT(0); 219712f080e7Smrj return (DDI_FAILURE); 219812f080e7Smrj } 219912f080e7Smrj } 220012f080e7Smrj 220112f080e7Smrj /* sync the current window before unbinding the buffer */ 220212f080e7Smrj if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync && 220312f080e7Smrj (hp->dmai_rflags & DDI_DMA_READ)) { 220494f1124eSVikram Hegde (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 220512f080e7Smrj DDI_DMA_SYNC_FORCPU); 220612f080e7Smrj } 220712f080e7Smrj 220812f080e7Smrj /* 220912f080e7Smrj * cleanup and copy buffer or window state. if we didn't use the copy 221012f080e7Smrj * buffer or windows, there won't be much to do :-) 221112f080e7Smrj */ 221212f080e7Smrj rootnex_teardown_copybuf(dma); 221312f080e7Smrj rootnex_teardown_windows(dma); 221412f080e7Smrj 22153a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 221612f080e7Smrj /* 22173a634bfcSVikram Hegde * Clean up the page tables and free the dvma 221886c1f4dcSVikram Hegde */ 22193a634bfcSVikram Hegde e = immu_unmap_sgl(hp, rdip); 22203a634bfcSVikram Hegde if (e != DDI_DMA_USE_PHYSICAL && e != DDI_SUCCESS) { 22213a634bfcSVikram Hegde return (e); 222286c1f4dcSVikram Hegde } 222320906b23SVikram Hegde #endif 222486c1f4dcSVikram Hegde 222586c1f4dcSVikram Hegde /* 222612f080e7Smrj * If we had to allocate space to for the worse case sgl (it didn't 222712f080e7Smrj * fit into our pre-allocate buffer), free that up now 222812f080e7Smrj */ 222912f080e7Smrj if (dma->dp_need_to_free_cookie) { 223012f080e7Smrj kmem_free(dma->dp_cookies, dma->dp_cookie_size); 223112f080e7Smrj } 223212f080e7Smrj 223312f080e7Smrj /* 223412f080e7Smrj * clean up the handle so it's ready for the next bind (i.e. if the 223512f080e7Smrj * handle is reused). 223612f080e7Smrj */ 223712f080e7Smrj rootnex_clean_dmahdl(hp); 2238567c0b92SStephen Hanson hp->dmai_error.err_cf = NULL; 223912f080e7Smrj 224012f080e7Smrj if (rootnex_state->r_dvma_call_list_id) 224112f080e7Smrj ddi_run_callback(&rootnex_state->r_dvma_call_list_id); 224212f080e7Smrj 22430b7ba611SMark Johnson ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 22440b7ba611SMark Johnson ROOTNEX_DPROBE1(rootnex__unbind, uint64_t, 224512f080e7Smrj rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]); 224612f080e7Smrj 224712f080e7Smrj return (DDI_SUCCESS); 224812f080e7Smrj } 224912f080e7Smrj 225020906b23SVikram Hegde /* 225120906b23SVikram Hegde * rootnex_dma_unbindhdl() 225220906b23SVikram Hegde * called from ddi_dma_unbind_handle() 225320906b23SVikram Hegde */ 225420906b23SVikram Hegde /*ARGSUSED*/ 225520906b23SVikram Hegde static int 225620906b23SVikram Hegde rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip, 225720906b23SVikram Hegde ddi_dma_handle_t handle) 225820906b23SVikram Hegde { 22593a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 2260b51bbbf5SVikram Hegde if (IOMMU_USED(rdip)) { 226120906b23SVikram Hegde return (iommulib_nexdma_unbindhdl(dip, rdip, handle)); 226220906b23SVikram Hegde } 226320906b23SVikram Hegde #endif 226420906b23SVikram Hegde return (rootnex_coredma_unbindhdl(dip, rdip, handle)); 226520906b23SVikram Hegde } 226620906b23SVikram Hegde 22673a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 226894f1124eSVikram Hegde 226994f1124eSVikram Hegde static int 227094f1124eSVikram Hegde rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle) 227194f1124eSVikram Hegde { 227294f1124eSVikram Hegde ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 227394f1124eSVikram Hegde rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 227494f1124eSVikram Hegde 227594f1124eSVikram Hegde if (dma->dp_sleep_flags != KM_SLEEP && 227694f1124eSVikram Hegde dma->dp_sleep_flags != KM_NOSLEEP) 227794f1124eSVikram Hegde cmn_err(CE_PANIC, "kmem sleep flags not set in DMA handle"); 227894f1124eSVikram Hegde return (dma->dp_sleep_flags); 227994f1124eSVikram Hegde } 228020906b23SVikram Hegde /*ARGSUSED*/ 228120906b23SVikram Hegde static void 228220906b23SVikram Hegde rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle) 228320906b23SVikram Hegde { 228420906b23SVikram Hegde ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 228520906b23SVikram Hegde rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 228694f1124eSVikram Hegde rootnex_window_t *window; 228720906b23SVikram Hegde 228894f1124eSVikram Hegde if (dma->dp_window) { 228994f1124eSVikram Hegde window = &dma->dp_window[dma->dp_current_win]; 229094f1124eSVikram Hegde hp->dmai_cookie = window->wd_first_cookie; 229194f1124eSVikram Hegde } else { 229294f1124eSVikram Hegde hp->dmai_cookie = dma->dp_cookies; 229394f1124eSVikram Hegde } 229420906b23SVikram Hegde hp->dmai_cookie++; 229520906b23SVikram Hegde } 229620906b23SVikram Hegde 229720906b23SVikram Hegde /*ARGSUSED*/ 229820906b23SVikram Hegde static int 229920906b23SVikram Hegde rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 230094f1124eSVikram Hegde ddi_dma_cookie_t **cookiepp, uint_t *ccountp) 230120906b23SVikram Hegde { 230294f1124eSVikram Hegde int i; 230394f1124eSVikram Hegde int km_flags; 230420906b23SVikram Hegde ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 230520906b23SVikram Hegde rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 230694f1124eSVikram Hegde rootnex_window_t *window; 230794f1124eSVikram Hegde ddi_dma_cookie_t *cp; 230894f1124eSVikram Hegde ddi_dma_cookie_t *cookie; 230920906b23SVikram Hegde 231094f1124eSVikram Hegde ASSERT(*cookiepp == NULL); 231194f1124eSVikram Hegde ASSERT(*ccountp == 0); 231220906b23SVikram Hegde 231394f1124eSVikram Hegde if (dma->dp_window) { 231494f1124eSVikram Hegde window = &dma->dp_window[dma->dp_current_win]; 231594f1124eSVikram Hegde cp = window->wd_first_cookie; 231694f1124eSVikram Hegde *ccountp = window->wd_cookie_cnt; 231720906b23SVikram Hegde } else { 231894f1124eSVikram Hegde cp = dma->dp_cookies; 231920906b23SVikram Hegde *ccountp = dma->dp_sglinfo.si_sgl_size; 232020906b23SVikram Hegde } 232120906b23SVikram Hegde 232294f1124eSVikram Hegde km_flags = rootnex_coredma_get_sleep_flags(handle); 232394f1124eSVikram Hegde cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) * (*ccountp), km_flags); 232494f1124eSVikram Hegde if (cookie == NULL) { 232594f1124eSVikram Hegde return (DDI_DMA_NORESOURCES); 232694f1124eSVikram Hegde } 232794f1124eSVikram Hegde 232894f1124eSVikram Hegde for (i = 0; i < *ccountp; i++) { 232994f1124eSVikram Hegde cookie[i].dmac_notused = cp[i].dmac_notused; 233094f1124eSVikram Hegde cookie[i].dmac_type = cp[i].dmac_type; 233194f1124eSVikram Hegde cookie[i].dmac_address = cp[i].dmac_address; 233294f1124eSVikram Hegde cookie[i].dmac_size = cp[i].dmac_size; 233394f1124eSVikram Hegde } 233494f1124eSVikram Hegde 233594f1124eSVikram Hegde *cookiepp = cookie; 233620906b23SVikram Hegde 233720906b23SVikram Hegde return (DDI_SUCCESS); 233820906b23SVikram Hegde } 233994f1124eSVikram Hegde 234094f1124eSVikram Hegde /*ARGSUSED*/ 234194f1124eSVikram Hegde static int 234294f1124eSVikram Hegde rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle, 234394f1124eSVikram Hegde ddi_dma_cookie_t *cookiep, uint_t ccount) 234494f1124eSVikram Hegde { 234594f1124eSVikram Hegde ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 234694f1124eSVikram Hegde rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 234794f1124eSVikram Hegde rootnex_window_t *window; 234894f1124eSVikram Hegde ddi_dma_cookie_t *cur_cookiep; 234994f1124eSVikram Hegde 235094f1124eSVikram Hegde ASSERT(cookiep); 235194f1124eSVikram Hegde ASSERT(ccount != 0); 235294f1124eSVikram Hegde ASSERT(dma->dp_need_to_switch_cookies == B_FALSE); 235394f1124eSVikram Hegde 235494f1124eSVikram Hegde if (dma->dp_window) { 235594f1124eSVikram Hegde window = &dma->dp_window[dma->dp_current_win]; 235694f1124eSVikram Hegde dma->dp_saved_cookies = window->wd_first_cookie; 235794f1124eSVikram Hegde window->wd_first_cookie = cookiep; 235894f1124eSVikram Hegde ASSERT(ccount == window->wd_cookie_cnt); 235994f1124eSVikram Hegde cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies) 236094f1124eSVikram Hegde + window->wd_first_cookie; 236194f1124eSVikram Hegde } else { 236294f1124eSVikram Hegde dma->dp_saved_cookies = dma->dp_cookies; 236394f1124eSVikram Hegde dma->dp_cookies = cookiep; 236494f1124eSVikram Hegde ASSERT(ccount == dma->dp_sglinfo.si_sgl_size); 236594f1124eSVikram Hegde cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies) 236694f1124eSVikram Hegde + dma->dp_cookies; 236794f1124eSVikram Hegde } 236894f1124eSVikram Hegde 236994f1124eSVikram Hegde dma->dp_need_to_switch_cookies = B_TRUE; 237094f1124eSVikram Hegde hp->dmai_cookie = cur_cookiep; 237194f1124eSVikram Hegde 237294f1124eSVikram Hegde return (DDI_SUCCESS); 237394f1124eSVikram Hegde } 237494f1124eSVikram Hegde 237594f1124eSVikram Hegde /*ARGSUSED*/ 237694f1124eSVikram Hegde static int 237794f1124eSVikram Hegde rootnex_coredma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle) 237894f1124eSVikram Hegde { 237994f1124eSVikram Hegde ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle; 238094f1124eSVikram Hegde rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private; 238194f1124eSVikram Hegde rootnex_window_t *window; 238294f1124eSVikram Hegde ddi_dma_cookie_t *cur_cookiep; 238394f1124eSVikram Hegde ddi_dma_cookie_t *cookie_array; 238494f1124eSVikram Hegde uint_t ccount; 238594f1124eSVikram Hegde 238694f1124eSVikram Hegde /* check if cookies have not been switched */ 238794f1124eSVikram Hegde if (dma->dp_need_to_switch_cookies == B_FALSE) 238894f1124eSVikram Hegde return (DDI_SUCCESS); 238994f1124eSVikram Hegde 239094f1124eSVikram Hegde ASSERT(dma->dp_saved_cookies); 239194f1124eSVikram Hegde 239294f1124eSVikram Hegde if (dma->dp_window) { 239394f1124eSVikram Hegde window = &dma->dp_window[dma->dp_current_win]; 239494f1124eSVikram Hegde cookie_array = window->wd_first_cookie; 239594f1124eSVikram Hegde window->wd_first_cookie = dma->dp_saved_cookies; 239694f1124eSVikram Hegde dma->dp_saved_cookies = NULL; 239794f1124eSVikram Hegde ccount = window->wd_cookie_cnt; 239894f1124eSVikram Hegde cur_cookiep = (hp->dmai_cookie - cookie_array) 239994f1124eSVikram Hegde + window->wd_first_cookie; 240094f1124eSVikram Hegde } else { 240194f1124eSVikram Hegde cookie_array = dma->dp_cookies; 240294f1124eSVikram Hegde dma->dp_cookies = dma->dp_saved_cookies; 240394f1124eSVikram Hegde dma->dp_saved_cookies = NULL; 240494f1124eSVikram Hegde ccount = dma->dp_sglinfo.si_sgl_size; 240594f1124eSVikram Hegde cur_cookiep = (hp->dmai_cookie - cookie_array) 240694f1124eSVikram Hegde + dma->dp_cookies; 240794f1124eSVikram Hegde } 240894f1124eSVikram Hegde 240994f1124eSVikram Hegde kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount); 241094f1124eSVikram Hegde 241194f1124eSVikram Hegde hp->dmai_cookie = cur_cookiep; 241294f1124eSVikram Hegde 241394f1124eSVikram Hegde dma->dp_need_to_switch_cookies = B_FALSE; 241494f1124eSVikram Hegde 241594f1124eSVikram Hegde return (DDI_SUCCESS); 241694f1124eSVikram Hegde } 241794f1124eSVikram Hegde 24185dfdb46bSVikram Hegde #endif 241912f080e7Smrj 242012f080e7Smrj /* 242112f080e7Smrj * rootnex_verify_buffer() 242212f080e7Smrj * verify buffer wasn't free'd 242312f080e7Smrj */ 242412f080e7Smrj static int 242512f080e7Smrj rootnex_verify_buffer(rootnex_dma_t *dma) 242612f080e7Smrj { 242712f080e7Smrj page_t **pplist; 242812f080e7Smrj caddr_t vaddr; 242912f080e7Smrj uint_t pcnt; 243012f080e7Smrj uint_t poff; 243112f080e7Smrj page_t *pp; 243200d0963fSdilpreet char b; 243312f080e7Smrj int i; 243412f080e7Smrj 243512f080e7Smrj /* Figure out how many pages this buffer occupies */ 243612f080e7Smrj if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) { 243712f080e7Smrj poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET; 243812f080e7Smrj } else { 243912f080e7Smrj vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr; 244012f080e7Smrj poff = (uintptr_t)vaddr & MMU_PAGEOFFSET; 244112f080e7Smrj } 244212f080e7Smrj pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff); 244312f080e7Smrj 244412f080e7Smrj switch (dma->dp_dma.dmao_type) { 244512f080e7Smrj case DMA_OTYP_PAGES: 244612f080e7Smrj /* 244712f080e7Smrj * for a linked list of pp's walk through them to make sure 244812f080e7Smrj * they're locked and not free. 244912f080e7Smrj */ 245012f080e7Smrj pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp; 245112f080e7Smrj for (i = 0; i < pcnt; i++) { 245212f080e7Smrj if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) { 245312f080e7Smrj return (DDI_FAILURE); 245412f080e7Smrj } 24557c478bd9Sstevel@tonic-gate pp = pp->p_next; 24567c478bd9Sstevel@tonic-gate } 24577c478bd9Sstevel@tonic-gate break; 245812f080e7Smrj 24597c478bd9Sstevel@tonic-gate case DMA_OTYP_VADDR: 24607c478bd9Sstevel@tonic-gate case DMA_OTYP_BUFVADDR: 246112f080e7Smrj pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv; 246212f080e7Smrj /* 246312f080e7Smrj * for an array of pp's walk through them to make sure they're 246412f080e7Smrj * not free. It's possible that they may not be locked. 246512f080e7Smrj */ 246612f080e7Smrj if (pplist) { 246712f080e7Smrj for (i = 0; i < pcnt; i++) { 246812f080e7Smrj if (PP_ISFREE(pplist[i])) { 246912f080e7Smrj return (DDI_FAILURE); 247012f080e7Smrj } 247112f080e7Smrj } 247212f080e7Smrj 247312f080e7Smrj /* For a virtual address, try to peek at each page */ 247412f080e7Smrj } else { 247512f080e7Smrj if (dma->dp_sglinfo.si_asp == &kas) { 247612f080e7Smrj for (i = 0; i < pcnt; i++) { 247700d0963fSdilpreet if (ddi_peek8(NULL, vaddr, &b) == 247800d0963fSdilpreet DDI_FAILURE) 247912f080e7Smrj return (DDI_FAILURE); 248000d0963fSdilpreet vaddr += MMU_PAGESIZE; 248112f080e7Smrj } 248212f080e7Smrj } 248312f080e7Smrj } 248412f080e7Smrj break; 248512f080e7Smrj 248612f080e7Smrj default: 248712f080e7Smrj ASSERT(0); 248812f080e7Smrj break; 248912f080e7Smrj } 249012f080e7Smrj 249112f080e7Smrj return (DDI_SUCCESS); 249212f080e7Smrj } 249312f080e7Smrj 249412f080e7Smrj 249512f080e7Smrj /* 249612f080e7Smrj * rootnex_clean_dmahdl() 249712f080e7Smrj * Clean the dma handle. This should be called on a handle alloc and an 249812f080e7Smrj * unbind handle. Set the handle state to the default settings. 249912f080e7Smrj */ 250012f080e7Smrj static void 250112f080e7Smrj rootnex_clean_dmahdl(ddi_dma_impl_t *hp) 250212f080e7Smrj { 250312f080e7Smrj rootnex_dma_t *dma; 250412f080e7Smrj 250512f080e7Smrj 250612f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 250712f080e7Smrj 250812f080e7Smrj hp->dmai_nwin = 0; 250912f080e7Smrj dma->dp_current_cookie = 0; 251012f080e7Smrj dma->dp_copybuf_size = 0; 251112f080e7Smrj dma->dp_window = NULL; 251212f080e7Smrj dma->dp_cbaddr = NULL; 251312f080e7Smrj dma->dp_inuse = B_FALSE; 251412f080e7Smrj dma->dp_need_to_free_cookie = B_FALSE; 251594f1124eSVikram Hegde dma->dp_need_to_switch_cookies = B_FALSE; 251694f1124eSVikram Hegde dma->dp_saved_cookies = NULL; 251794f1124eSVikram Hegde dma->dp_sleep_flags = KM_PANIC; 251812f080e7Smrj dma->dp_need_to_free_window = B_FALSE; 251912f080e7Smrj dma->dp_partial_required = B_FALSE; 252012f080e7Smrj dma->dp_trim_required = B_FALSE; 252112f080e7Smrj dma->dp_sglinfo.si_copybuf_req = 0; 252212f080e7Smrj #if !defined(__amd64) 252312f080e7Smrj dma->dp_cb_remaping = B_FALSE; 252412f080e7Smrj dma->dp_kva = NULL; 252512f080e7Smrj #endif 252612f080e7Smrj 252712f080e7Smrj /* FMA related initialization */ 252812f080e7Smrj hp->dmai_fault = 0; 252912f080e7Smrj hp->dmai_fault_check = NULL; 253012f080e7Smrj hp->dmai_fault_notify = NULL; 253112f080e7Smrj hp->dmai_error.err_ena = 0; 253212f080e7Smrj hp->dmai_error.err_status = DDI_FM_OK; 253312f080e7Smrj hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED; 253412f080e7Smrj hp->dmai_error.err_ontrap = NULL; 253512f080e7Smrj } 253612f080e7Smrj 253712f080e7Smrj 253812f080e7Smrj /* 253912f080e7Smrj * rootnex_valid_alloc_parms() 254012f080e7Smrj * Called in ddi_dma_alloc_handle path to validate its parameters. 254112f080e7Smrj */ 254212f080e7Smrj static int 254312f080e7Smrj rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize) 254412f080e7Smrj { 254512f080e7Smrj if ((attr->dma_attr_seg < MMU_PAGEOFFSET) || 254612f080e7Smrj (attr->dma_attr_count_max < MMU_PAGEOFFSET) || 254712f080e7Smrj (attr->dma_attr_granular > MMU_PAGESIZE) || 254812f080e7Smrj (attr->dma_attr_maxxfer < MMU_PAGESIZE)) { 254912f080e7Smrj return (DDI_DMA_BADATTR); 255012f080e7Smrj } 255112f080e7Smrj 255212f080e7Smrj if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) { 255312f080e7Smrj return (DDI_DMA_BADATTR); 255412f080e7Smrj } 255512f080e7Smrj 255612f080e7Smrj if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET || 255712f080e7Smrj MMU_PAGESIZE & (attr->dma_attr_granular - 1) || 255812f080e7Smrj attr->dma_attr_sgllen <= 0) { 255912f080e7Smrj return (DDI_DMA_BADATTR); 256012f080e7Smrj } 256112f080e7Smrj 256212f080e7Smrj /* We should be able to DMA into every byte offset in a page */ 256312f080e7Smrj if (maxsegmentsize < MMU_PAGESIZE) { 256412f080e7Smrj return (DDI_DMA_BADATTR); 256512f080e7Smrj } 256612f080e7Smrj 256707c6692fSMark Johnson /* if we're bouncing on seg, seg must be <= addr_hi */ 256807c6692fSMark Johnson if ((attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG) && 256907c6692fSMark Johnson (attr->dma_attr_seg > attr->dma_attr_addr_hi)) { 257007c6692fSMark Johnson return (DDI_DMA_BADATTR); 257107c6692fSMark Johnson } 257212f080e7Smrj return (DDI_SUCCESS); 257312f080e7Smrj } 257412f080e7Smrj 257512f080e7Smrj /* 257612f080e7Smrj * rootnex_valid_bind_parms() 257712f080e7Smrj * Called in ddi_dma_*_bind_handle path to validate its parameters. 257812f080e7Smrj */ 257912f080e7Smrj /* ARGSUSED */ 258012f080e7Smrj static int 258112f080e7Smrj rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr) 258212f080e7Smrj { 258312f080e7Smrj #if !defined(__amd64) 258412f080e7Smrj /* 258512f080e7Smrj * we only support up to a 2G-1 transfer size on 32-bit kernels so 258612f080e7Smrj * we can track the offset for the obsoleted interfaces. 258712f080e7Smrj */ 258812f080e7Smrj if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) { 258912f080e7Smrj return (DDI_DMA_TOOBIG); 259012f080e7Smrj } 259112f080e7Smrj #endif 259212f080e7Smrj 259312f080e7Smrj return (DDI_SUCCESS); 259412f080e7Smrj } 259512f080e7Smrj 259612f080e7Smrj 259712f080e7Smrj /* 259807c6692fSMark Johnson * rootnex_need_bounce_seg() 259907c6692fSMark Johnson * check to see if the buffer lives on both side of the seg. 260007c6692fSMark Johnson */ 260107c6692fSMark Johnson static boolean_t 260207c6692fSMark Johnson rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, rootnex_sglinfo_t *sglinfo) 260307c6692fSMark Johnson { 260407c6692fSMark Johnson ddi_dma_atyp_t buftype; 260507c6692fSMark Johnson rootnex_addr_t raddr; 260607c6692fSMark Johnson boolean_t lower_addr; 260707c6692fSMark Johnson boolean_t upper_addr; 260807c6692fSMark Johnson uint64_t offset; 260907c6692fSMark Johnson page_t **pplist; 261007c6692fSMark Johnson uint64_t paddr; 261107c6692fSMark Johnson uint32_t psize; 261207c6692fSMark Johnson uint32_t size; 261307c6692fSMark Johnson caddr_t vaddr; 261407c6692fSMark Johnson uint_t pcnt; 261507c6692fSMark Johnson page_t *pp; 261607c6692fSMark Johnson 261707c6692fSMark Johnson 261807c6692fSMark Johnson /* shortcuts */ 261907c6692fSMark Johnson pplist = dmar_object->dmao_obj.virt_obj.v_priv; 262007c6692fSMark Johnson vaddr = dmar_object->dmao_obj.virt_obj.v_addr; 262107c6692fSMark Johnson buftype = dmar_object->dmao_type; 262207c6692fSMark Johnson size = dmar_object->dmao_size; 262307c6692fSMark Johnson 262407c6692fSMark Johnson lower_addr = B_FALSE; 262507c6692fSMark Johnson upper_addr = B_FALSE; 262607c6692fSMark Johnson pcnt = 0; 262707c6692fSMark Johnson 262807c6692fSMark Johnson /* 262907c6692fSMark Johnson * Process the first page to handle the initial offset of the buffer. 263007c6692fSMark Johnson * We'll use the base address we get later when we loop through all 263107c6692fSMark Johnson * the pages. 263207c6692fSMark Johnson */ 263307c6692fSMark Johnson if (buftype == DMA_OTYP_PAGES) { 263407c6692fSMark Johnson pp = dmar_object->dmao_obj.pp_obj.pp_pp; 263507c6692fSMark Johnson offset = dmar_object->dmao_obj.pp_obj.pp_offset & 263607c6692fSMark Johnson MMU_PAGEOFFSET; 263707c6692fSMark Johnson paddr = pfn_to_pa(pp->p_pagenum) + offset; 263807c6692fSMark Johnson psize = MIN(size, (MMU_PAGESIZE - offset)); 263907c6692fSMark Johnson pp = pp->p_next; 264007c6692fSMark Johnson sglinfo->si_asp = NULL; 264107c6692fSMark Johnson } else if (pplist != NULL) { 264207c6692fSMark Johnson offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 264307c6692fSMark Johnson sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 264407c6692fSMark Johnson if (sglinfo->si_asp == NULL) { 264507c6692fSMark Johnson sglinfo->si_asp = &kas; 264607c6692fSMark Johnson } 264707c6692fSMark Johnson paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 264807c6692fSMark Johnson paddr += offset; 264907c6692fSMark Johnson psize = MIN(size, (MMU_PAGESIZE - offset)); 265007c6692fSMark Johnson pcnt++; 265107c6692fSMark Johnson } else { 265207c6692fSMark Johnson offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 265307c6692fSMark Johnson sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 265407c6692fSMark Johnson if (sglinfo->si_asp == NULL) { 265507c6692fSMark Johnson sglinfo->si_asp = &kas; 265607c6692fSMark Johnson } 265707c6692fSMark Johnson paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr)); 265807c6692fSMark Johnson paddr += offset; 265907c6692fSMark Johnson psize = MIN(size, (MMU_PAGESIZE - offset)); 266007c6692fSMark Johnson vaddr += psize; 266107c6692fSMark Johnson } 266207c6692fSMark Johnson 266307c6692fSMark Johnson #ifdef __xpv 266407c6692fSMark Johnson /* 266507c6692fSMark Johnson * If we're dom0, we're using a real device so we need to load 266607c6692fSMark Johnson * the cookies with MFNs instead of PFNs. 266707c6692fSMark Johnson */ 266807c6692fSMark Johnson raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 266907c6692fSMark Johnson #else 267007c6692fSMark Johnson raddr = paddr; 267107c6692fSMark Johnson #endif 267207c6692fSMark Johnson 267307c6692fSMark Johnson if ((raddr + psize) > sglinfo->si_segmask) { 267407c6692fSMark Johnson upper_addr = B_TRUE; 267507c6692fSMark Johnson } else { 267607c6692fSMark Johnson lower_addr = B_TRUE; 267707c6692fSMark Johnson } 267807c6692fSMark Johnson size -= psize; 267907c6692fSMark Johnson 268007c6692fSMark Johnson /* 268107c6692fSMark Johnson * Walk through the rest of the pages in the buffer. Track to see 268207c6692fSMark Johnson * if we have pages on both sides of the segment boundary. 268307c6692fSMark Johnson */ 268407c6692fSMark Johnson while (size > 0) { 268507c6692fSMark Johnson /* partial or full page */ 268607c6692fSMark Johnson psize = MIN(size, MMU_PAGESIZE); 268707c6692fSMark Johnson 268807c6692fSMark Johnson if (buftype == DMA_OTYP_PAGES) { 268907c6692fSMark Johnson /* get the paddr from the page_t */ 269007c6692fSMark Johnson ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp)); 269107c6692fSMark Johnson paddr = pfn_to_pa(pp->p_pagenum); 269207c6692fSMark Johnson pp = pp->p_next; 269307c6692fSMark Johnson } else if (pplist != NULL) { 269407c6692fSMark Johnson /* index into the array of page_t's to get the paddr */ 269507c6692fSMark Johnson ASSERT(!PP_ISFREE(pplist[pcnt])); 269607c6692fSMark Johnson paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 269707c6692fSMark Johnson pcnt++; 269807c6692fSMark Johnson } else { 269907c6692fSMark Johnson /* call into the VM to get the paddr */ 270007c6692fSMark Johnson paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, 270107c6692fSMark Johnson vaddr)); 270207c6692fSMark Johnson vaddr += psize; 270307c6692fSMark Johnson } 270407c6692fSMark Johnson 270507c6692fSMark Johnson #ifdef __xpv 270607c6692fSMark Johnson /* 270707c6692fSMark Johnson * If we're dom0, we're using a real device so we need to load 270807c6692fSMark Johnson * the cookies with MFNs instead of PFNs. 270907c6692fSMark Johnson */ 271007c6692fSMark Johnson raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 271107c6692fSMark Johnson #else 271207c6692fSMark Johnson raddr = paddr; 271307c6692fSMark Johnson #endif 271407c6692fSMark Johnson 271507c6692fSMark Johnson if ((raddr + psize) > sglinfo->si_segmask) { 271607c6692fSMark Johnson upper_addr = B_TRUE; 271707c6692fSMark Johnson } else { 271807c6692fSMark Johnson lower_addr = B_TRUE; 271907c6692fSMark Johnson } 272007c6692fSMark Johnson /* 272107c6692fSMark Johnson * if the buffer lives both above and below the segment 272207c6692fSMark Johnson * boundary, or the current page is the page immediately 272307c6692fSMark Johnson * after the segment, we will use a copy/bounce buffer for 272407c6692fSMark Johnson * all pages > seg. 272507c6692fSMark Johnson */ 272607c6692fSMark Johnson if ((lower_addr && upper_addr) || 272707c6692fSMark Johnson (raddr == (sglinfo->si_segmask + 1))) { 272807c6692fSMark Johnson return (B_TRUE); 272907c6692fSMark Johnson } 273007c6692fSMark Johnson 273107c6692fSMark Johnson size -= psize; 273207c6692fSMark Johnson } 273307c6692fSMark Johnson 273407c6692fSMark Johnson return (B_FALSE); 273507c6692fSMark Johnson } 273607c6692fSMark Johnson 273707c6692fSMark Johnson 273807c6692fSMark Johnson /* 273912f080e7Smrj * rootnex_get_sgl() 274012f080e7Smrj * Called in bind fastpath to get the sgl. Most of this will be replaced 274112f080e7Smrj * with a call to the vm layer when vm2.0 comes around... 274212f080e7Smrj */ 274312f080e7Smrj static void 274412f080e7Smrj rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl, 274512f080e7Smrj rootnex_sglinfo_t *sglinfo) 274612f080e7Smrj { 274712f080e7Smrj ddi_dma_atyp_t buftype; 2748843e1988Sjohnlev rootnex_addr_t raddr; 274912f080e7Smrj uint64_t last_page; 275012f080e7Smrj uint64_t offset; 275112f080e7Smrj uint64_t addrhi; 275212f080e7Smrj uint64_t addrlo; 275312f080e7Smrj uint64_t maxseg; 275412f080e7Smrj page_t **pplist; 275512f080e7Smrj uint64_t paddr; 275612f080e7Smrj uint32_t psize; 275712f080e7Smrj uint32_t size; 275812f080e7Smrj caddr_t vaddr; 275912f080e7Smrj uint_t pcnt; 276012f080e7Smrj page_t *pp; 276112f080e7Smrj uint_t cnt; 276212f080e7Smrj 276312f080e7Smrj 276412f080e7Smrj /* shortcuts */ 276512f080e7Smrj pplist = dmar_object->dmao_obj.virt_obj.v_priv; 276612f080e7Smrj vaddr = dmar_object->dmao_obj.virt_obj.v_addr; 276712f080e7Smrj maxseg = sglinfo->si_max_cookie_size; 276812f080e7Smrj buftype = dmar_object->dmao_type; 276912f080e7Smrj addrhi = sglinfo->si_max_addr; 277012f080e7Smrj addrlo = sglinfo->si_min_addr; 277112f080e7Smrj size = dmar_object->dmao_size; 277212f080e7Smrj 277312f080e7Smrj pcnt = 0; 277412f080e7Smrj cnt = 0; 277512f080e7Smrj 277607c6692fSMark Johnson 277707c6692fSMark Johnson /* 277807c6692fSMark Johnson * check to see if we need to use the copy buffer for pages over 277907c6692fSMark Johnson * the segment attr. 278007c6692fSMark Johnson */ 278107c6692fSMark Johnson sglinfo->si_bounce_on_seg = B_FALSE; 278207c6692fSMark Johnson if (sglinfo->si_flags & _DDI_DMA_BOUNCE_ON_SEG) { 278307c6692fSMark Johnson sglinfo->si_bounce_on_seg = rootnex_need_bounce_seg( 278407c6692fSMark Johnson dmar_object, sglinfo); 278507c6692fSMark Johnson } 278607c6692fSMark Johnson 278712f080e7Smrj /* 278812f080e7Smrj * if we were passed down a linked list of pages, i.e. pointer to 278912f080e7Smrj * page_t, use this to get our physical address and buf offset. 279012f080e7Smrj */ 279112f080e7Smrj if (buftype == DMA_OTYP_PAGES) { 279212f080e7Smrj pp = dmar_object->dmao_obj.pp_obj.pp_pp; 279312f080e7Smrj ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp)); 279412f080e7Smrj offset = dmar_object->dmao_obj.pp_obj.pp_offset & 279512f080e7Smrj MMU_PAGEOFFSET; 2796843e1988Sjohnlev paddr = pfn_to_pa(pp->p_pagenum) + offset; 279712f080e7Smrj psize = MIN(size, (MMU_PAGESIZE - offset)); 279812f080e7Smrj pp = pp->p_next; 279912f080e7Smrj sglinfo->si_asp = NULL; 280012f080e7Smrj 280112f080e7Smrj /* 280212f080e7Smrj * We weren't passed down a linked list of pages, but if we were passed 280312f080e7Smrj * down an array of pages, use this to get our physical address and buf 280412f080e7Smrj * offset. 280512f080e7Smrj */ 280612f080e7Smrj } else if (pplist != NULL) { 280712f080e7Smrj ASSERT((buftype == DMA_OTYP_VADDR) || 280812f080e7Smrj (buftype == DMA_OTYP_BUFVADDR)); 280912f080e7Smrj 281012f080e7Smrj offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 281112f080e7Smrj sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 281212f080e7Smrj if (sglinfo->si_asp == NULL) { 281312f080e7Smrj sglinfo->si_asp = &kas; 281412f080e7Smrj } 281512f080e7Smrj 281612f080e7Smrj ASSERT(!PP_ISFREE(pplist[pcnt])); 2817843e1988Sjohnlev paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 281812f080e7Smrj paddr += offset; 281912f080e7Smrj psize = MIN(size, (MMU_PAGESIZE - offset)); 282012f080e7Smrj pcnt++; 282112f080e7Smrj 282212f080e7Smrj /* 282312f080e7Smrj * All we have is a virtual address, we'll need to call into the VM 282412f080e7Smrj * to get the physical address. 282512f080e7Smrj */ 282612f080e7Smrj } else { 282712f080e7Smrj ASSERT((buftype == DMA_OTYP_VADDR) || 282812f080e7Smrj (buftype == DMA_OTYP_BUFVADDR)); 282912f080e7Smrj 283012f080e7Smrj offset = (uintptr_t)vaddr & MMU_PAGEOFFSET; 283112f080e7Smrj sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as; 283212f080e7Smrj if (sglinfo->si_asp == NULL) { 283312f080e7Smrj sglinfo->si_asp = &kas; 283412f080e7Smrj } 283512f080e7Smrj 2836843e1988Sjohnlev paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr)); 283712f080e7Smrj paddr += offset; 283812f080e7Smrj psize = MIN(size, (MMU_PAGESIZE - offset)); 283912f080e7Smrj vaddr += psize; 284012f080e7Smrj } 284112f080e7Smrj 2842843e1988Sjohnlev #ifdef __xpv 2843843e1988Sjohnlev /* 2844843e1988Sjohnlev * If we're dom0, we're using a real device so we need to load 2845843e1988Sjohnlev * the cookies with MFNs instead of PFNs. 2846843e1988Sjohnlev */ 2847843e1988Sjohnlev raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 2848843e1988Sjohnlev #else 2849843e1988Sjohnlev raddr = paddr; 2850843e1988Sjohnlev #endif 2851843e1988Sjohnlev 285212f080e7Smrj /* 285312f080e7Smrj * Setup the first cookie with the physical address of the page and the 285412f080e7Smrj * size of the page (which takes into account the initial offset into 285512f080e7Smrj * the page. 285612f080e7Smrj */ 2857843e1988Sjohnlev sgl[cnt].dmac_laddress = raddr; 285812f080e7Smrj sgl[cnt].dmac_size = psize; 285912f080e7Smrj sgl[cnt].dmac_type = 0; 286012f080e7Smrj 286112f080e7Smrj /* 286212f080e7Smrj * Save away the buffer offset into the page. We'll need this later in 286312f080e7Smrj * the copy buffer code to help figure out the page index within the 286412f080e7Smrj * buffer and the offset into the current page. 286512f080e7Smrj */ 286612f080e7Smrj sglinfo->si_buf_offset = offset; 286712f080e7Smrj 286812f080e7Smrj /* 286907c6692fSMark Johnson * If we are using the copy buffer for anything over the segment 287007c6692fSMark Johnson * boundary, and this page is over the segment boundary. 287107c6692fSMark Johnson * OR 287207c6692fSMark Johnson * if the DMA engine can't reach the physical address. 287312f080e7Smrj */ 287407c6692fSMark Johnson if (((sglinfo->si_bounce_on_seg) && 287507c6692fSMark Johnson ((raddr + psize) > sglinfo->si_segmask)) || 287607c6692fSMark Johnson ((raddr < addrlo) || ((raddr + psize) > addrhi))) { 287707c6692fSMark Johnson /* 287807c6692fSMark Johnson * Increase how much copy buffer we use. We always increase by 287907c6692fSMark Johnson * pagesize so we don't have to worry about converting offsets. 288007c6692fSMark Johnson * Set a flag in the cookies dmac_type to indicate that it uses 288107c6692fSMark Johnson * the copy buffer. If this isn't the last cookie, go to the 288207c6692fSMark Johnson * next cookie (since we separate each page which uses the copy 288307c6692fSMark Johnson * buffer in case the copy buffer is not physically contiguous. 288407c6692fSMark Johnson */ 288512f080e7Smrj sglinfo->si_copybuf_req += MMU_PAGESIZE; 288612f080e7Smrj sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF; 288712f080e7Smrj if ((cnt + 1) < sglinfo->si_max_pages) { 288812f080e7Smrj cnt++; 288912f080e7Smrj sgl[cnt].dmac_laddress = 0; 289012f080e7Smrj sgl[cnt].dmac_size = 0; 289112f080e7Smrj sgl[cnt].dmac_type = 0; 289212f080e7Smrj } 289312f080e7Smrj } 289412f080e7Smrj 289512f080e7Smrj /* 289612f080e7Smrj * save this page's physical address so we can figure out if the next 289712f080e7Smrj * page is physically contiguous. Keep decrementing size until we are 289812f080e7Smrj * done with the buffer. 289912f080e7Smrj */ 2900843e1988Sjohnlev last_page = raddr & MMU_PAGEMASK; 290112f080e7Smrj size -= psize; 290212f080e7Smrj 290312f080e7Smrj while (size > 0) { 290412f080e7Smrj /* Get the size for this page (i.e. partial or full page) */ 290512f080e7Smrj psize = MIN(size, MMU_PAGESIZE); 290612f080e7Smrj 290712f080e7Smrj if (buftype == DMA_OTYP_PAGES) { 290812f080e7Smrj /* get the paddr from the page_t */ 290912f080e7Smrj ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp)); 2910843e1988Sjohnlev paddr = pfn_to_pa(pp->p_pagenum); 291112f080e7Smrj pp = pp->p_next; 291212f080e7Smrj } else if (pplist != NULL) { 291312f080e7Smrj /* index into the array of page_t's to get the paddr */ 291412f080e7Smrj ASSERT(!PP_ISFREE(pplist[pcnt])); 2915843e1988Sjohnlev paddr = pfn_to_pa(pplist[pcnt]->p_pagenum); 291612f080e7Smrj pcnt++; 291712f080e7Smrj } else { 291812f080e7Smrj /* call into the VM to get the paddr */ 2919843e1988Sjohnlev paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, 292012f080e7Smrj vaddr)); 292112f080e7Smrj vaddr += psize; 292212f080e7Smrj } 292312f080e7Smrj 2924843e1988Sjohnlev #ifdef __xpv 2925843e1988Sjohnlev /* 2926843e1988Sjohnlev * If we're dom0, we're using a real device so we need to load 2927843e1988Sjohnlev * the cookies with MFNs instead of PFNs. 2928843e1988Sjohnlev */ 2929843e1988Sjohnlev raddr = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 2930843e1988Sjohnlev #else 2931843e1988Sjohnlev raddr = paddr; 2932843e1988Sjohnlev #endif 293307c6692fSMark Johnson 293407c6692fSMark Johnson /* 293507c6692fSMark Johnson * If we are using the copy buffer for anything over the 293607c6692fSMark Johnson * segment boundary, and this page is over the segment 293707c6692fSMark Johnson * boundary. 293807c6692fSMark Johnson * OR 293907c6692fSMark Johnson * if the DMA engine can't reach the physical address. 294007c6692fSMark Johnson */ 294107c6692fSMark Johnson if (((sglinfo->si_bounce_on_seg) && 294207c6692fSMark Johnson ((raddr + psize) > sglinfo->si_segmask)) || 294307c6692fSMark Johnson ((raddr < addrlo) || ((raddr + psize) > addrhi))) { 294407c6692fSMark Johnson 294512f080e7Smrj sglinfo->si_copybuf_req += MMU_PAGESIZE; 294612f080e7Smrj 294712f080e7Smrj /* 294812f080e7Smrj * if there is something in the current cookie, go to 294912f080e7Smrj * the next one. We only want one page in a cookie which 295012f080e7Smrj * uses the copybuf since the copybuf doesn't have to 295112f080e7Smrj * be physically contiguous. 295212f080e7Smrj */ 295312f080e7Smrj if (sgl[cnt].dmac_size != 0) { 295412f080e7Smrj cnt++; 295512f080e7Smrj } 2956843e1988Sjohnlev sgl[cnt].dmac_laddress = raddr; 295712f080e7Smrj sgl[cnt].dmac_size = psize; 295812f080e7Smrj #if defined(__amd64) 295912f080e7Smrj sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF; 296012f080e7Smrj #else 296112f080e7Smrj /* 296212f080e7Smrj * save the buf offset for 32-bit kernel. used in the 296312f080e7Smrj * obsoleted interfaces. 296412f080e7Smrj */ 296512f080e7Smrj sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF | 296612f080e7Smrj (dmar_object->dmao_size - size); 296712f080e7Smrj #endif 296812f080e7Smrj /* if this isn't the last cookie, go to the next one */ 296912f080e7Smrj if ((cnt + 1) < sglinfo->si_max_pages) { 297012f080e7Smrj cnt++; 297112f080e7Smrj sgl[cnt].dmac_laddress = 0; 297212f080e7Smrj sgl[cnt].dmac_size = 0; 297312f080e7Smrj sgl[cnt].dmac_type = 0; 297412f080e7Smrj } 297512f080e7Smrj 297612f080e7Smrj /* 297712f080e7Smrj * this page didn't need the copy buffer, if it's not physically 297812f080e7Smrj * contiguous, or it would put us over a segment boundary, or it 297912f080e7Smrj * puts us over the max cookie size, or the current sgl doesn't 298012f080e7Smrj * have anything in it. 298112f080e7Smrj */ 2982843e1988Sjohnlev } else if (((last_page + MMU_PAGESIZE) != raddr) || 2983843e1988Sjohnlev !(raddr & sglinfo->si_segmask) || 298412f080e7Smrj ((sgl[cnt].dmac_size + psize) > maxseg) || 298512f080e7Smrj (sgl[cnt].dmac_size == 0)) { 298612f080e7Smrj /* 298712f080e7Smrj * if we're not already in a new cookie, go to the next 298812f080e7Smrj * cookie. 298912f080e7Smrj */ 299012f080e7Smrj if (sgl[cnt].dmac_size != 0) { 299112f080e7Smrj cnt++; 299212f080e7Smrj } 299312f080e7Smrj 299412f080e7Smrj /* save the cookie information */ 2995843e1988Sjohnlev sgl[cnt].dmac_laddress = raddr; 299612f080e7Smrj sgl[cnt].dmac_size = psize; 299712f080e7Smrj #if defined(__amd64) 299812f080e7Smrj sgl[cnt].dmac_type = 0; 299912f080e7Smrj #else 300012f080e7Smrj /* 300112f080e7Smrj * save the buf offset for 32-bit kernel. used in the 300212f080e7Smrj * obsoleted interfaces. 300312f080e7Smrj */ 300412f080e7Smrj sgl[cnt].dmac_type = dmar_object->dmao_size - size; 300512f080e7Smrj #endif 300612f080e7Smrj 300712f080e7Smrj /* 300812f080e7Smrj * this page didn't need the copy buffer, it is physically 300912f080e7Smrj * contiguous with the last page, and it's <= the max cookie 301012f080e7Smrj * size. 301112f080e7Smrj */ 301212f080e7Smrj } else { 301312f080e7Smrj sgl[cnt].dmac_size += psize; 301412f080e7Smrj 301512f080e7Smrj /* 301612f080e7Smrj * if this exactly == the maximum cookie size, and 301712f080e7Smrj * it isn't the last cookie, go to the next cookie. 301812f080e7Smrj */ 301912f080e7Smrj if (((sgl[cnt].dmac_size + psize) == maxseg) && 302012f080e7Smrj ((cnt + 1) < sglinfo->si_max_pages)) { 302112f080e7Smrj cnt++; 302212f080e7Smrj sgl[cnt].dmac_laddress = 0; 302312f080e7Smrj sgl[cnt].dmac_size = 0; 302412f080e7Smrj sgl[cnt].dmac_type = 0; 302512f080e7Smrj } 302612f080e7Smrj } 302712f080e7Smrj 302812f080e7Smrj /* 302912f080e7Smrj * save this page's physical address so we can figure out if the 303012f080e7Smrj * next page is physically contiguous. Keep decrementing size 303112f080e7Smrj * until we are done with the buffer. 303212f080e7Smrj */ 3033843e1988Sjohnlev last_page = raddr; 303412f080e7Smrj size -= psize; 303512f080e7Smrj } 303612f080e7Smrj 303712f080e7Smrj /* we're done, save away how many cookies the sgl has */ 303812f080e7Smrj if (sgl[cnt].dmac_size == 0) { 303912f080e7Smrj ASSERT(cnt < sglinfo->si_max_pages); 304012f080e7Smrj sglinfo->si_sgl_size = cnt; 304112f080e7Smrj } else { 304212f080e7Smrj sglinfo->si_sgl_size = cnt + 1; 304312f080e7Smrj } 304412f080e7Smrj } 304512f080e7Smrj 304612f080e7Smrj /* 304712f080e7Smrj * rootnex_bind_slowpath() 304812f080e7Smrj * Call in the bind path if the calling driver can't use the sgl without 304912f080e7Smrj * modifying it. We either need to use the copy buffer and/or we will end up 305012f080e7Smrj * with a partial bind. 305112f080e7Smrj */ 305212f080e7Smrj static int 305312f080e7Smrj rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 305412f080e7Smrj rootnex_dma_t *dma, ddi_dma_attr_t *attr, int kmflag) 305512f080e7Smrj { 305612f080e7Smrj rootnex_sglinfo_t *sinfo; 305712f080e7Smrj rootnex_window_t *window; 305812f080e7Smrj ddi_dma_cookie_t *cookie; 305912f080e7Smrj size_t copybuf_used; 306012f080e7Smrj size_t dmac_size; 306112f080e7Smrj boolean_t partial; 306212f080e7Smrj off_t cur_offset; 306312f080e7Smrj page_t *cur_pp; 306412f080e7Smrj major_t mnum; 306512f080e7Smrj int e; 306612f080e7Smrj int i; 306712f080e7Smrj 306812f080e7Smrj 306912f080e7Smrj sinfo = &dma->dp_sglinfo; 307012f080e7Smrj copybuf_used = 0; 307112f080e7Smrj partial = B_FALSE; 307212f080e7Smrj 307312f080e7Smrj /* 307412f080e7Smrj * If we're using the copybuf, set the copybuf state in dma struct. 307512f080e7Smrj * Needs to be first since it sets the copy buffer size. 307612f080e7Smrj */ 307712f080e7Smrj if (sinfo->si_copybuf_req != 0) { 307812f080e7Smrj e = rootnex_setup_copybuf(hp, dmareq, dma, attr); 307912f080e7Smrj if (e != DDI_SUCCESS) { 308012f080e7Smrj return (e); 308112f080e7Smrj } 308212f080e7Smrj } else { 308312f080e7Smrj dma->dp_copybuf_size = 0; 308412f080e7Smrj } 308512f080e7Smrj 308612f080e7Smrj /* 308712f080e7Smrj * Figure out if we need to do a partial mapping. If so, figure out 308812f080e7Smrj * if we need to trim the buffers when we munge the sgl. 308912f080e7Smrj */ 309012f080e7Smrj if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) || 309112f080e7Smrj (dma->dp_dma.dmao_size > dma->dp_maxxfer) || 309212f080e7Smrj (attr->dma_attr_sgllen < sinfo->si_sgl_size)) { 309312f080e7Smrj dma->dp_partial_required = B_TRUE; 309412f080e7Smrj if (attr->dma_attr_granular != 1) { 309512f080e7Smrj dma->dp_trim_required = B_TRUE; 309612f080e7Smrj } 309712f080e7Smrj } else { 309812f080e7Smrj dma->dp_partial_required = B_FALSE; 309912f080e7Smrj dma->dp_trim_required = B_FALSE; 310012f080e7Smrj } 310112f080e7Smrj 310212f080e7Smrj /* If we need to do a partial bind, make sure the driver supports it */ 310312f080e7Smrj if (dma->dp_partial_required && 310412f080e7Smrj !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) { 310512f080e7Smrj 310612f080e7Smrj mnum = ddi_driver_major(dma->dp_dip); 310712f080e7Smrj /* 310812f080e7Smrj * patchable which allows us to print one warning per major 310912f080e7Smrj * number. 311012f080e7Smrj */ 311112f080e7Smrj if ((rootnex_bind_warn) && 311212f080e7Smrj ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) { 311312f080e7Smrj rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING; 311412f080e7Smrj cmn_err(CE_WARN, "!%s: coding error detected, the " 311512f080e7Smrj "driver is using ddi_dma_attr(9S) incorrectly. " 311612f080e7Smrj "There is a small risk of data corruption in " 311712f080e7Smrj "particular with large I/Os. The driver should be " 311812f080e7Smrj "replaced with a corrected version for proper " 311912f080e7Smrj "system operation. To disable this warning, add " 312012f080e7Smrj "'set rootnex:rootnex_bind_warn=0' to " 312112f080e7Smrj "/etc/system(4).", ddi_driver_name(dma->dp_dip)); 312212f080e7Smrj } 312312f080e7Smrj return (DDI_DMA_TOOBIG); 312412f080e7Smrj } 312512f080e7Smrj 312612f080e7Smrj /* 312712f080e7Smrj * we might need multiple windows, setup state to handle them. In this 312812f080e7Smrj * code path, we will have at least one window. 312912f080e7Smrj */ 313012f080e7Smrj e = rootnex_setup_windows(hp, dma, attr, kmflag); 313112f080e7Smrj if (e != DDI_SUCCESS) { 313212f080e7Smrj rootnex_teardown_copybuf(dma); 313312f080e7Smrj return (e); 313412f080e7Smrj } 313512f080e7Smrj 313612f080e7Smrj window = &dma->dp_window[0]; 313712f080e7Smrj cookie = &dma->dp_cookies[0]; 313812f080e7Smrj cur_offset = 0; 313912f080e7Smrj rootnex_init_win(hp, dma, window, cookie, cur_offset); 314012f080e7Smrj if (dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) { 314112f080e7Smrj cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp; 314212f080e7Smrj } 314312f080e7Smrj 314412f080e7Smrj /* loop though all the cookies we got back from get_sgl() */ 314512f080e7Smrj for (i = 0; i < sinfo->si_sgl_size; i++) { 314612f080e7Smrj /* 314712f080e7Smrj * If we're using the copy buffer, check this cookie and setup 314812f080e7Smrj * its associated copy buffer state. If this cookie uses the 314912f080e7Smrj * copy buffer, make sure we sync this window during dma_sync. 315012f080e7Smrj */ 315112f080e7Smrj if (dma->dp_copybuf_size > 0) { 315212f080e7Smrj rootnex_setup_cookie(&dmareq->dmar_object, dma, cookie, 315312f080e7Smrj cur_offset, ©buf_used, &cur_pp); 315412f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 315512f080e7Smrj window->wd_dosync = B_TRUE; 315612f080e7Smrj } 315712f080e7Smrj } 315812f080e7Smrj 315912f080e7Smrj /* 316012f080e7Smrj * save away the cookie size, since it could be modified in 316112f080e7Smrj * the windowing code. 316212f080e7Smrj */ 316312f080e7Smrj dmac_size = cookie->dmac_size; 316412f080e7Smrj 316512f080e7Smrj /* if we went over max copybuf size */ 316612f080e7Smrj if (dma->dp_copybuf_size && 316712f080e7Smrj (copybuf_used > dma->dp_copybuf_size)) { 316812f080e7Smrj partial = B_TRUE; 316912f080e7Smrj e = rootnex_copybuf_window_boundary(hp, dma, &window, 317012f080e7Smrj cookie, cur_offset, ©buf_used); 317112f080e7Smrj if (e != DDI_SUCCESS) { 317212f080e7Smrj rootnex_teardown_copybuf(dma); 317312f080e7Smrj rootnex_teardown_windows(dma); 317412f080e7Smrj return (e); 317512f080e7Smrj } 317612f080e7Smrj 317712f080e7Smrj /* 317812f080e7Smrj * if the coookie uses the copy buffer, make sure the 317912f080e7Smrj * new window we just moved to is set to sync. 318012f080e7Smrj */ 318112f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 318212f080e7Smrj window->wd_dosync = B_TRUE; 318312f080e7Smrj } 318412f080e7Smrj DTRACE_PROBE1(rootnex__copybuf__window, dev_info_t *, 318512f080e7Smrj dma->dp_dip); 318612f080e7Smrj 318712f080e7Smrj /* if the cookie cnt == max sgllen, move to the next window */ 318812f080e7Smrj } else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) { 318912f080e7Smrj partial = B_TRUE; 319012f080e7Smrj ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen); 319112f080e7Smrj e = rootnex_sgllen_window_boundary(hp, dma, &window, 319212f080e7Smrj cookie, attr, cur_offset); 319312f080e7Smrj if (e != DDI_SUCCESS) { 319412f080e7Smrj rootnex_teardown_copybuf(dma); 319512f080e7Smrj rootnex_teardown_windows(dma); 319612f080e7Smrj return (e); 319712f080e7Smrj } 319812f080e7Smrj 319912f080e7Smrj /* 320012f080e7Smrj * if the coookie uses the copy buffer, make sure the 320112f080e7Smrj * new window we just moved to is set to sync. 320212f080e7Smrj */ 320312f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 320412f080e7Smrj window->wd_dosync = B_TRUE; 320512f080e7Smrj } 320612f080e7Smrj DTRACE_PROBE1(rootnex__sgllen__window, dev_info_t *, 320712f080e7Smrj dma->dp_dip); 320812f080e7Smrj 320912f080e7Smrj /* else if we will be over maxxfer */ 321012f080e7Smrj } else if ((window->wd_size + dmac_size) > 321112f080e7Smrj dma->dp_maxxfer) { 321212f080e7Smrj partial = B_TRUE; 321312f080e7Smrj e = rootnex_maxxfer_window_boundary(hp, dma, &window, 321412f080e7Smrj cookie); 321512f080e7Smrj if (e != DDI_SUCCESS) { 321612f080e7Smrj rootnex_teardown_copybuf(dma); 321712f080e7Smrj rootnex_teardown_windows(dma); 321812f080e7Smrj return (e); 321912f080e7Smrj } 322012f080e7Smrj 322112f080e7Smrj /* 322212f080e7Smrj * if the coookie uses the copy buffer, make sure the 322312f080e7Smrj * new window we just moved to is set to sync. 322412f080e7Smrj */ 322512f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 322612f080e7Smrj window->wd_dosync = B_TRUE; 322712f080e7Smrj } 322812f080e7Smrj DTRACE_PROBE1(rootnex__maxxfer__window, dev_info_t *, 322912f080e7Smrj dma->dp_dip); 323012f080e7Smrj 323112f080e7Smrj /* else this cookie fits in the current window */ 323212f080e7Smrj } else { 323312f080e7Smrj window->wd_cookie_cnt++; 323412f080e7Smrj window->wd_size += dmac_size; 323512f080e7Smrj } 323612f080e7Smrj 323712f080e7Smrj /* track our offset into the buffer, go to the next cookie */ 323812f080e7Smrj ASSERT(dmac_size <= dma->dp_dma.dmao_size); 323912f080e7Smrj ASSERT(cookie->dmac_size <= dmac_size); 324012f080e7Smrj cur_offset += dmac_size; 324112f080e7Smrj cookie++; 324212f080e7Smrj } 324312f080e7Smrj 324412f080e7Smrj /* if we ended up with a zero sized window in the end, clean it up */ 324512f080e7Smrj if (window->wd_size == 0) { 324612f080e7Smrj hp->dmai_nwin--; 324712f080e7Smrj window--; 324812f080e7Smrj } 324912f080e7Smrj 325012f080e7Smrj ASSERT(window->wd_trim.tr_trim_last == B_FALSE); 325112f080e7Smrj 325212f080e7Smrj if (!partial) { 325312f080e7Smrj return (DDI_DMA_MAPPED); 325412f080e7Smrj } 325512f080e7Smrj 325612f080e7Smrj ASSERT(dma->dp_partial_required); 325712f080e7Smrj return (DDI_DMA_PARTIAL_MAP); 325812f080e7Smrj } 325912f080e7Smrj 326012f080e7Smrj 326112f080e7Smrj /* 326212f080e7Smrj * rootnex_setup_copybuf() 326312f080e7Smrj * Called in bind slowpath. Figures out if we're going to use the copy 326412f080e7Smrj * buffer, and if we do, sets up the basic state to handle it. 326512f080e7Smrj */ 326612f080e7Smrj static int 326712f080e7Smrj rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, 326812f080e7Smrj rootnex_dma_t *dma, ddi_dma_attr_t *attr) 326912f080e7Smrj { 327012f080e7Smrj rootnex_sglinfo_t *sinfo; 327112f080e7Smrj ddi_dma_attr_t lattr; 327212f080e7Smrj size_t max_copybuf; 327312f080e7Smrj int cansleep; 327412f080e7Smrj int e; 327512f080e7Smrj #if !defined(__amd64) 327612f080e7Smrj int vmflag; 327712f080e7Smrj #endif 327812f080e7Smrj 327912f080e7Smrj 328012f080e7Smrj sinfo = &dma->dp_sglinfo; 328112f080e7Smrj 328236945f79Smrj /* read this first so it's consistent through the routine */ 328336945f79Smrj max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK; 328412f080e7Smrj 328512f080e7Smrj /* We need to call into the rootnex on ddi_dma_sync() */ 328612f080e7Smrj hp->dmai_rflags &= ~DMP_NOSYNC; 328712f080e7Smrj 328812f080e7Smrj /* make sure the copybuf size <= the max size */ 328912f080e7Smrj dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf); 329012f080e7Smrj ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0); 329112f080e7Smrj 329212f080e7Smrj #if !defined(__amd64) 329312f080e7Smrj /* 329412f080e7Smrj * if we don't have kva space to copy to/from, allocate the KVA space 329512f080e7Smrj * now. We only do this for the 32-bit kernel. We use seg kpm space for 329612f080e7Smrj * the 64-bit kernel. 329712f080e7Smrj */ 329812f080e7Smrj if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) || 329912f080e7Smrj (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) { 330012f080e7Smrj 330112f080e7Smrj /* convert the sleep flags */ 330212f080e7Smrj if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 330312f080e7Smrj vmflag = VM_SLEEP; 330412f080e7Smrj } else { 330512f080e7Smrj vmflag = VM_NOSLEEP; 330612f080e7Smrj } 330712f080e7Smrj 330812f080e7Smrj /* allocate Kernel VA space that we can bcopy to/from */ 330912f080e7Smrj dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size, 331012f080e7Smrj vmflag); 331112f080e7Smrj if (dma->dp_kva == NULL) { 331212f080e7Smrj return (DDI_DMA_NORESOURCES); 331312f080e7Smrj } 331412f080e7Smrj } 331512f080e7Smrj #endif 331612f080e7Smrj 331712f080e7Smrj /* convert the sleep flags */ 331812f080e7Smrj if (dmareq->dmar_fp == DDI_DMA_SLEEP) { 331912f080e7Smrj cansleep = 1; 332012f080e7Smrj } else { 332112f080e7Smrj cansleep = 0; 332212f080e7Smrj } 332312f080e7Smrj 332412f080e7Smrj /* 3325d21b39ddSmrj * Allocate the actual copy buffer. This needs to fit within the DMA 3326d21b39ddSmrj * engine limits, so we can't use kmem_alloc... We don't need 3327d21b39ddSmrj * contiguous memory (sgllen) since we will be forcing windows on 3328d21b39ddSmrj * sgllen anyway. 332912f080e7Smrj */ 333012f080e7Smrj lattr = *attr; 333112f080e7Smrj lattr.dma_attr_align = MMU_PAGESIZE; 3332d21b39ddSmrj /* 3333d21b39ddSmrj * this should be < 0 to indicate no limit, but due to a bug in 3334d21b39ddSmrj * the rootnex, we'll set it to the maximum positive int. 3335d21b39ddSmrj */ 3336d21b39ddSmrj lattr.dma_attr_sgllen = 0x7fffffff; 333707c6692fSMark Johnson /* 333807c6692fSMark Johnson * if we're using the copy buffer because of seg, use that for our 333907c6692fSMark Johnson * upper address limit. 334007c6692fSMark Johnson */ 334107c6692fSMark Johnson if (sinfo->si_bounce_on_seg) { 334207c6692fSMark Johnson lattr.dma_attr_addr_hi = lattr.dma_attr_seg; 334307c6692fSMark Johnson } 334412f080e7Smrj e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep, 334512f080e7Smrj 0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL); 334612f080e7Smrj if (e != DDI_SUCCESS) { 334712f080e7Smrj #if !defined(__amd64) 334812f080e7Smrj if (dma->dp_kva != NULL) { 334912f080e7Smrj vmem_free(heap_arena, dma->dp_kva, 335012f080e7Smrj dma->dp_copybuf_size); 335112f080e7Smrj } 335212f080e7Smrj #endif 335312f080e7Smrj return (DDI_DMA_NORESOURCES); 335412f080e7Smrj } 335512f080e7Smrj 335612f080e7Smrj DTRACE_PROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip, 335712f080e7Smrj size_t, dma->dp_copybuf_size); 335812f080e7Smrj 335912f080e7Smrj return (DDI_SUCCESS); 336012f080e7Smrj } 336112f080e7Smrj 336212f080e7Smrj 336312f080e7Smrj /* 336412f080e7Smrj * rootnex_setup_windows() 336512f080e7Smrj * Called in bind slowpath to setup the window state. We always have windows 336612f080e7Smrj * in the slowpath. Even if the window count = 1. 336712f080e7Smrj */ 336812f080e7Smrj static int 336912f080e7Smrj rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 337012f080e7Smrj ddi_dma_attr_t *attr, int kmflag) 337112f080e7Smrj { 337212f080e7Smrj rootnex_window_t *windowp; 337312f080e7Smrj rootnex_sglinfo_t *sinfo; 337412f080e7Smrj size_t copy_state_size; 337512f080e7Smrj size_t win_state_size; 337612f080e7Smrj size_t state_available; 337712f080e7Smrj size_t space_needed; 337812f080e7Smrj uint_t copybuf_win; 337912f080e7Smrj uint_t maxxfer_win; 338012f080e7Smrj size_t space_used; 338112f080e7Smrj uint_t sglwin; 338212f080e7Smrj 338312f080e7Smrj 338412f080e7Smrj sinfo = &dma->dp_sglinfo; 338512f080e7Smrj 338612f080e7Smrj dma->dp_current_win = 0; 338712f080e7Smrj hp->dmai_nwin = 0; 338812f080e7Smrj 338912f080e7Smrj /* If we don't need to do a partial, we only have one window */ 339012f080e7Smrj if (!dma->dp_partial_required) { 339112f080e7Smrj dma->dp_max_win = 1; 339212f080e7Smrj 339312f080e7Smrj /* 339412f080e7Smrj * we need multiple windows, need to figure out the worse case number 339512f080e7Smrj * of windows. 339612f080e7Smrj */ 33977c478bd9Sstevel@tonic-gate } else { 33987c478bd9Sstevel@tonic-gate /* 339912f080e7Smrj * if we need windows because we need more copy buffer that 340012f080e7Smrj * we allow, the worse case number of windows we could need 340112f080e7Smrj * here would be (copybuf space required / copybuf space that 340212f080e7Smrj * we have) plus one for remainder, and plus 2 to handle the 340312f080e7Smrj * extra pages on the trim for the first and last pages of the 340412f080e7Smrj * buffer (a page is the minimum window size so under the right 340512f080e7Smrj * attr settings, you could have a window for each page). 340612f080e7Smrj * The last page will only be hit here if the size is not a 340712f080e7Smrj * multiple of the granularity (which theoretically shouldn't 340812f080e7Smrj * be the case but never has been enforced, so we could have 340912f080e7Smrj * broken things without it). 34107c478bd9Sstevel@tonic-gate */ 341112f080e7Smrj if (sinfo->si_copybuf_req > dma->dp_copybuf_size) { 341212f080e7Smrj ASSERT(dma->dp_copybuf_size > 0); 341312f080e7Smrj copybuf_win = (sinfo->si_copybuf_req / 341412f080e7Smrj dma->dp_copybuf_size) + 1 + 2; 34157c478bd9Sstevel@tonic-gate } else { 341612f080e7Smrj copybuf_win = 0; 34177c478bd9Sstevel@tonic-gate } 341812f080e7Smrj 341912f080e7Smrj /* 342012f080e7Smrj * if we need windows because we have more cookies than the H/W 342112f080e7Smrj * can handle, the number of windows we would need here would 3422b57cd2d3SMark Johnson * be (cookie count / cookies count H/W supports minus 1[for 3423b57cd2d3SMark Johnson * trim]) plus one for remainder. 342412f080e7Smrj */ 342512f080e7Smrj if (attr->dma_attr_sgllen < sinfo->si_sgl_size) { 3426b57cd2d3SMark Johnson sglwin = (sinfo->si_sgl_size / 3427b57cd2d3SMark Johnson (attr->dma_attr_sgllen - 1)) + 1; 34287c478bd9Sstevel@tonic-gate } else { 342912f080e7Smrj sglwin = 0; 34307c478bd9Sstevel@tonic-gate } 343112f080e7Smrj 343212f080e7Smrj /* 343312f080e7Smrj * if we need windows because we're binding more memory than the 343412f080e7Smrj * H/W can transfer at once, the number of windows we would need 343512f080e7Smrj * here would be (xfer count / max xfer H/W supports) plus one 343612f080e7Smrj * for remainder, and plus 2 to handle the extra pages on the 343712f080e7Smrj * trim (see above comment about trim) 343812f080e7Smrj */ 343912f080e7Smrj if (dma->dp_dma.dmao_size > dma->dp_maxxfer) { 344012f080e7Smrj maxxfer_win = (dma->dp_dma.dmao_size / 344112f080e7Smrj dma->dp_maxxfer) + 1 + 2; 344212f080e7Smrj } else { 344312f080e7Smrj maxxfer_win = 0; 34447c478bd9Sstevel@tonic-gate } 344512f080e7Smrj dma->dp_max_win = copybuf_win + sglwin + maxxfer_win; 344612f080e7Smrj ASSERT(dma->dp_max_win > 0); 344712f080e7Smrj } 344812f080e7Smrj win_state_size = dma->dp_max_win * sizeof (rootnex_window_t); 344912f080e7Smrj 345012f080e7Smrj /* 345112f080e7Smrj * Get space for window and potential copy buffer state. Before we 345212f080e7Smrj * go and allocate memory, see if we can get away with using what's 345312f080e7Smrj * left in the pre-allocted state or the dynamically allocated sgl. 345412f080e7Smrj */ 345512f080e7Smrj space_used = (uintptr_t)(sinfo->si_sgl_size * 345612f080e7Smrj sizeof (ddi_dma_cookie_t)); 345712f080e7Smrj 345812f080e7Smrj /* if we dynamically allocated space for the cookies */ 345912f080e7Smrj if (dma->dp_need_to_free_cookie) { 346012f080e7Smrj /* if we have more space in the pre-allocted buffer, use it */ 346112f080e7Smrj ASSERT(space_used <= dma->dp_cookie_size); 346212f080e7Smrj if ((dma->dp_cookie_size - space_used) <= 346312f080e7Smrj rootnex_state->r_prealloc_size) { 346412f080e7Smrj state_available = rootnex_state->r_prealloc_size; 346512f080e7Smrj windowp = (rootnex_window_t *)dma->dp_prealloc_buffer; 346612f080e7Smrj 346712f080e7Smrj /* 346812f080e7Smrj * else, we have more free space in the dynamically allocated 346912f080e7Smrj * buffer, i.e. the buffer wasn't worse case fragmented so we 347012f080e7Smrj * didn't need a lot of cookies. 347112f080e7Smrj */ 347212f080e7Smrj } else { 347312f080e7Smrj state_available = dma->dp_cookie_size - space_used; 347412f080e7Smrj windowp = (rootnex_window_t *) 347512f080e7Smrj &dma->dp_cookies[sinfo->si_sgl_size]; 347612f080e7Smrj } 347712f080e7Smrj 347812f080e7Smrj /* we used the pre-alloced buffer */ 347912f080e7Smrj } else { 348012f080e7Smrj ASSERT(space_used <= rootnex_state->r_prealloc_size); 348112f080e7Smrj state_available = rootnex_state->r_prealloc_size - space_used; 348212f080e7Smrj windowp = (rootnex_window_t *) 348312f080e7Smrj &dma->dp_cookies[sinfo->si_sgl_size]; 348412f080e7Smrj } 348512f080e7Smrj 348612f080e7Smrj /* 348712f080e7Smrj * figure out how much state we need to track the copy buffer. Add an 348812f080e7Smrj * addition 8 bytes for pointer alignemnt later. 348912f080e7Smrj */ 349012f080e7Smrj if (dma->dp_copybuf_size > 0) { 349112f080e7Smrj copy_state_size = sinfo->si_max_pages * 349212f080e7Smrj sizeof (rootnex_pgmap_t); 349312f080e7Smrj } else { 349412f080e7Smrj copy_state_size = 0; 349512f080e7Smrj } 349612f080e7Smrj /* add an additional 8 bytes for pointer alignment */ 349712f080e7Smrj space_needed = win_state_size + copy_state_size + 0x8; 349812f080e7Smrj 349912f080e7Smrj /* if we have enough space already, use it */ 350012f080e7Smrj if (state_available >= space_needed) { 350112f080e7Smrj dma->dp_window = windowp; 350212f080e7Smrj dma->dp_need_to_free_window = B_FALSE; 350312f080e7Smrj 350412f080e7Smrj /* not enough space, need to allocate more. */ 350512f080e7Smrj } else { 350612f080e7Smrj dma->dp_window = kmem_alloc(space_needed, kmflag); 350712f080e7Smrj if (dma->dp_window == NULL) { 350812f080e7Smrj return (DDI_DMA_NORESOURCES); 350912f080e7Smrj } 351012f080e7Smrj dma->dp_need_to_free_window = B_TRUE; 351112f080e7Smrj dma->dp_window_size = space_needed; 351212f080e7Smrj DTRACE_PROBE2(rootnex__bind__sp__alloc, dev_info_t *, 351312f080e7Smrj dma->dp_dip, size_t, space_needed); 351412f080e7Smrj } 351512f080e7Smrj 351612f080e7Smrj /* 351712f080e7Smrj * we allocate copy buffer state and window state at the same time. 351812f080e7Smrj * setup our copy buffer state pointers. Make sure it's aligned. 351912f080e7Smrj */ 352012f080e7Smrj if (dma->dp_copybuf_size > 0) { 352112f080e7Smrj dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t) 352212f080e7Smrj &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7); 352312f080e7Smrj 352412f080e7Smrj #if !defined(__amd64) 352512f080e7Smrj /* 352612f080e7Smrj * make sure all pm_mapped, pm_vaddr, and pm_pp are set to 352712f080e7Smrj * false/NULL. Should be quicker to bzero vs loop and set. 352812f080e7Smrj */ 352912f080e7Smrj bzero(dma->dp_pgmap, copy_state_size); 353012f080e7Smrj #endif 353112f080e7Smrj } else { 353212f080e7Smrj dma->dp_pgmap = NULL; 353312f080e7Smrj } 353412f080e7Smrj 353512f080e7Smrj return (DDI_SUCCESS); 353612f080e7Smrj } 353712f080e7Smrj 353812f080e7Smrj 353912f080e7Smrj /* 354012f080e7Smrj * rootnex_teardown_copybuf() 354112f080e7Smrj * cleans up after rootnex_setup_copybuf() 354212f080e7Smrj */ 354312f080e7Smrj static void 354412f080e7Smrj rootnex_teardown_copybuf(rootnex_dma_t *dma) 354512f080e7Smrj { 354612f080e7Smrj #if !defined(__amd64) 354712f080e7Smrj int i; 354812f080e7Smrj 354912f080e7Smrj /* 355012f080e7Smrj * if we allocated kernel heap VMEM space, go through all the pages and 355112f080e7Smrj * map out any of the ones that we're mapped into the kernel heap VMEM 355212f080e7Smrj * arena. Then free the VMEM space. 355312f080e7Smrj */ 355412f080e7Smrj if (dma->dp_kva != NULL) { 355512f080e7Smrj for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) { 355612f080e7Smrj if (dma->dp_pgmap[i].pm_mapped) { 355712f080e7Smrj hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr, 355812f080e7Smrj MMU_PAGESIZE, HAT_UNLOAD); 355912f080e7Smrj dma->dp_pgmap[i].pm_mapped = B_FALSE; 356012f080e7Smrj } 356112f080e7Smrj } 356212f080e7Smrj 356312f080e7Smrj vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size); 356412f080e7Smrj } 356512f080e7Smrj 356612f080e7Smrj #endif 356712f080e7Smrj 356812f080e7Smrj /* if we allocated a copy buffer, free it */ 356912f080e7Smrj if (dma->dp_cbaddr != NULL) { 35707b93957cSeota i_ddi_mem_free(dma->dp_cbaddr, NULL); 357112f080e7Smrj } 357212f080e7Smrj } 357312f080e7Smrj 357412f080e7Smrj 357512f080e7Smrj /* 357612f080e7Smrj * rootnex_teardown_windows() 357712f080e7Smrj * cleans up after rootnex_setup_windows() 357812f080e7Smrj */ 357912f080e7Smrj static void 358012f080e7Smrj rootnex_teardown_windows(rootnex_dma_t *dma) 358112f080e7Smrj { 358212f080e7Smrj /* 358312f080e7Smrj * if we had to allocate window state on the last bind (because we 358412f080e7Smrj * didn't have enough pre-allocated space in the handle), free it. 358512f080e7Smrj */ 358612f080e7Smrj if (dma->dp_need_to_free_window) { 358712f080e7Smrj kmem_free(dma->dp_window, dma->dp_window_size); 358812f080e7Smrj } 358912f080e7Smrj } 359012f080e7Smrj 359112f080e7Smrj 359212f080e7Smrj /* 359312f080e7Smrj * rootnex_init_win() 359412f080e7Smrj * Called in bind slow path during creation of a new window. Initializes 359512f080e7Smrj * window state to default values. 359612f080e7Smrj */ 359712f080e7Smrj /*ARGSUSED*/ 359812f080e7Smrj static void 359912f080e7Smrj rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 360012f080e7Smrj rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset) 360112f080e7Smrj { 360212f080e7Smrj hp->dmai_nwin++; 360312f080e7Smrj window->wd_dosync = B_FALSE; 360412f080e7Smrj window->wd_offset = cur_offset; 360512f080e7Smrj window->wd_size = 0; 360612f080e7Smrj window->wd_first_cookie = cookie; 360712f080e7Smrj window->wd_cookie_cnt = 0; 360812f080e7Smrj window->wd_trim.tr_trim_first = B_FALSE; 360912f080e7Smrj window->wd_trim.tr_trim_last = B_FALSE; 361012f080e7Smrj window->wd_trim.tr_first_copybuf_win = B_FALSE; 361112f080e7Smrj window->wd_trim.tr_last_copybuf_win = B_FALSE; 361212f080e7Smrj #if !defined(__amd64) 361312f080e7Smrj window->wd_remap_copybuf = dma->dp_cb_remaping; 361412f080e7Smrj #endif 361512f080e7Smrj } 361612f080e7Smrj 361712f080e7Smrj 361812f080e7Smrj /* 361912f080e7Smrj * rootnex_setup_cookie() 362012f080e7Smrj * Called in the bind slow path when the sgl uses the copy buffer. If any of 362112f080e7Smrj * the sgl uses the copy buffer, we need to go through each cookie, figure 362212f080e7Smrj * out if it uses the copy buffer, and if it does, save away everything we'll 362312f080e7Smrj * need during sync. 362412f080e7Smrj */ 362512f080e7Smrj static void 362612f080e7Smrj rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma, 362712f080e7Smrj ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used, 362812f080e7Smrj page_t **cur_pp) 362912f080e7Smrj { 363012f080e7Smrj boolean_t copybuf_sz_power_2; 363112f080e7Smrj rootnex_sglinfo_t *sinfo; 3632843e1988Sjohnlev paddr_t paddr; 363312f080e7Smrj uint_t pidx; 363412f080e7Smrj uint_t pcnt; 363512f080e7Smrj off_t poff; 363612f080e7Smrj #if defined(__amd64) 363712f080e7Smrj pfn_t pfn; 363812f080e7Smrj #else 363912f080e7Smrj page_t **pplist; 364012f080e7Smrj #endif 364112f080e7Smrj 364212f080e7Smrj sinfo = &dma->dp_sglinfo; 364312f080e7Smrj 364412f080e7Smrj /* 364512f080e7Smrj * Calculate the page index relative to the start of the buffer. The 364612f080e7Smrj * index to the current page for our buffer is the offset into the 364712f080e7Smrj * first page of the buffer plus our current offset into the buffer 364812f080e7Smrj * itself, shifted of course... 364912f080e7Smrj */ 365012f080e7Smrj pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT; 365112f080e7Smrj ASSERT(pidx < sinfo->si_max_pages); 365212f080e7Smrj 365312f080e7Smrj /* if this cookie uses the copy buffer */ 365412f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 365512f080e7Smrj /* 365612f080e7Smrj * NOTE: we know that since this cookie uses the copy buffer, it 365712f080e7Smrj * is <= MMU_PAGESIZE. 365812f080e7Smrj */ 365912f080e7Smrj 366012f080e7Smrj /* 366112f080e7Smrj * get the offset into the page. For the 64-bit kernel, get the 366212f080e7Smrj * pfn which we'll use with seg kpm. 366312f080e7Smrj */ 3664843e1988Sjohnlev poff = cookie->dmac_laddress & MMU_PAGEOFFSET; 366512f080e7Smrj #if defined(__amd64) 3666843e1988Sjohnlev /* mfn_to_pfn() is a NOP on i86pc */ 3667843e1988Sjohnlev pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT); 3668843e1988Sjohnlev #endif /* __amd64 */ 366912f080e7Smrj 367012f080e7Smrj /* figure out if the copybuf size is a power of 2 */ 367112f080e7Smrj if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) { 367212f080e7Smrj copybuf_sz_power_2 = B_FALSE; 367312f080e7Smrj } else { 367412f080e7Smrj copybuf_sz_power_2 = B_TRUE; 367512f080e7Smrj } 367612f080e7Smrj 367712f080e7Smrj /* This page uses the copy buffer */ 367812f080e7Smrj dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE; 367912f080e7Smrj 368012f080e7Smrj /* 368112f080e7Smrj * save the copy buffer KVA that we'll use with this page. 368212f080e7Smrj * if we still fit within the copybuf, it's a simple add. 368312f080e7Smrj * otherwise, we need to wrap over using & or % accordingly. 368412f080e7Smrj */ 368512f080e7Smrj if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) { 368612f080e7Smrj dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr + 368712f080e7Smrj *copybuf_used; 368812f080e7Smrj } else { 368912f080e7Smrj if (copybuf_sz_power_2) { 369012f080e7Smrj dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)( 369112f080e7Smrj (uintptr_t)dma->dp_cbaddr + 369212f080e7Smrj (*copybuf_used & 369312f080e7Smrj (dma->dp_copybuf_size - 1))); 369412f080e7Smrj } else { 369512f080e7Smrj dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)( 369612f080e7Smrj (uintptr_t)dma->dp_cbaddr + 369712f080e7Smrj (*copybuf_used % dma->dp_copybuf_size)); 369812f080e7Smrj } 369912f080e7Smrj } 370012f080e7Smrj 370112f080e7Smrj /* 370212f080e7Smrj * over write the cookie physical address with the address of 370312f080e7Smrj * the physical address of the copy buffer page that we will 370412f080e7Smrj * use. 370512f080e7Smrj */ 3706843e1988Sjohnlev paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, 370712f080e7Smrj dma->dp_pgmap[pidx].pm_cbaddr)) + poff; 370812f080e7Smrj 3709843e1988Sjohnlev #ifdef __xpv 3710843e1988Sjohnlev /* 3711843e1988Sjohnlev * If we're dom0, we're using a real device so we need to load 3712843e1988Sjohnlev * the cookies with MAs instead of PAs. 3713843e1988Sjohnlev */ 3714843e1988Sjohnlev cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 3715843e1988Sjohnlev #else 3716843e1988Sjohnlev cookie->dmac_laddress = paddr; 3717843e1988Sjohnlev #endif 3718843e1988Sjohnlev 371912f080e7Smrj /* if we have a kernel VA, it's easy, just save that address */ 372012f080e7Smrj if ((dmar_object->dmao_type != DMA_OTYP_PAGES) && 372112f080e7Smrj (sinfo->si_asp == &kas)) { 372212f080e7Smrj /* 372312f080e7Smrj * save away the page aligned virtual address of the 372412f080e7Smrj * driver buffer. Offsets are handled in the sync code. 372512f080e7Smrj */ 372612f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t) 372712f080e7Smrj dmar_object->dmao_obj.virt_obj.v_addr + cur_offset) 372812f080e7Smrj & MMU_PAGEMASK); 372912f080e7Smrj #if !defined(__amd64) 373012f080e7Smrj /* 373112f080e7Smrj * we didn't need to, and will never need to map this 373212f080e7Smrj * page. 373312f080e7Smrj */ 373412f080e7Smrj dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 373512f080e7Smrj #endif 373612f080e7Smrj 373712f080e7Smrj /* we don't have a kernel VA. We need one for the bcopy. */ 373812f080e7Smrj } else { 373912f080e7Smrj #if defined(__amd64) 374012f080e7Smrj /* 374112f080e7Smrj * for the 64-bit kernel, it's easy. We use seg kpm to 374212f080e7Smrj * get a Kernel VA for the corresponding pfn. 374312f080e7Smrj */ 374412f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn); 374512f080e7Smrj #else 374612f080e7Smrj /* 374712f080e7Smrj * for the 32-bit kernel, this is a pain. First we'll 374812f080e7Smrj * save away the page_t or user VA for this page. This 374912f080e7Smrj * is needed in rootnex_dma_win() when we switch to a 375012f080e7Smrj * new window which requires us to re-map the copy 375112f080e7Smrj * buffer. 375212f080e7Smrj */ 375312f080e7Smrj pplist = dmar_object->dmao_obj.virt_obj.v_priv; 375412f080e7Smrj if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 375512f080e7Smrj dma->dp_pgmap[pidx].pm_pp = *cur_pp; 375612f080e7Smrj dma->dp_pgmap[pidx].pm_vaddr = NULL; 375712f080e7Smrj } else if (pplist != NULL) { 375812f080e7Smrj dma->dp_pgmap[pidx].pm_pp = pplist[pidx]; 375912f080e7Smrj dma->dp_pgmap[pidx].pm_vaddr = NULL; 376012f080e7Smrj } else { 376112f080e7Smrj dma->dp_pgmap[pidx].pm_pp = NULL; 376212f080e7Smrj dma->dp_pgmap[pidx].pm_vaddr = (caddr_t) 376312f080e7Smrj (((uintptr_t) 376412f080e7Smrj dmar_object->dmao_obj.virt_obj.v_addr + 376512f080e7Smrj cur_offset) & MMU_PAGEMASK); 376612f080e7Smrj } 376712f080e7Smrj 376812f080e7Smrj /* 376912f080e7Smrj * save away the page aligned virtual address which was 377012f080e7Smrj * allocated from the kernel heap arena (taking into 377112f080e7Smrj * account if we need more copy buffer than we alloced 377212f080e7Smrj * and use multiple windows to handle this, i.e. &,%). 377312f080e7Smrj * NOTE: there isn't and physical memory backing up this 377412f080e7Smrj * virtual address space currently. 377512f080e7Smrj */ 377612f080e7Smrj if ((*copybuf_used + MMU_PAGESIZE) <= 377712f080e7Smrj dma->dp_copybuf_size) { 377812f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 377912f080e7Smrj (((uintptr_t)dma->dp_kva + *copybuf_used) & 378012f080e7Smrj MMU_PAGEMASK); 378112f080e7Smrj } else { 378212f080e7Smrj if (copybuf_sz_power_2) { 378312f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 378412f080e7Smrj (((uintptr_t)dma->dp_kva + 378512f080e7Smrj (*copybuf_used & 378612f080e7Smrj (dma->dp_copybuf_size - 1))) & 378712f080e7Smrj MMU_PAGEMASK); 378812f080e7Smrj } else { 378912f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr = (caddr_t) 379012f080e7Smrj (((uintptr_t)dma->dp_kva + 379112f080e7Smrj (*copybuf_used % 379212f080e7Smrj dma->dp_copybuf_size)) & 379312f080e7Smrj MMU_PAGEMASK); 379412f080e7Smrj } 379512f080e7Smrj } 379612f080e7Smrj 379712f080e7Smrj /* 379812f080e7Smrj * if we haven't used up the available copy buffer yet, 379912f080e7Smrj * map the kva to the physical page. 380012f080e7Smrj */ 380112f080e7Smrj if (!dma->dp_cb_remaping && ((*copybuf_used + 380212f080e7Smrj MMU_PAGESIZE) <= dma->dp_copybuf_size)) { 380312f080e7Smrj dma->dp_pgmap[pidx].pm_mapped = B_TRUE; 380412f080e7Smrj if (dma->dp_pgmap[pidx].pm_pp != NULL) { 380512f080e7Smrj i86_pp_map(dma->dp_pgmap[pidx].pm_pp, 380612f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr); 380712f080e7Smrj } else { 380812f080e7Smrj i86_va_map(dma->dp_pgmap[pidx].pm_vaddr, 380912f080e7Smrj sinfo->si_asp, 381012f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr); 381112f080e7Smrj } 381212f080e7Smrj 381312f080e7Smrj /* 381412f080e7Smrj * we've used up the available copy buffer, this page 381512f080e7Smrj * will have to be mapped during rootnex_dma_win() when 381612f080e7Smrj * we switch to a new window which requires a re-map 381712f080e7Smrj * the copy buffer. (32-bit kernel only) 381812f080e7Smrj */ 381912f080e7Smrj } else { 382012f080e7Smrj dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 382112f080e7Smrj } 382212f080e7Smrj #endif 382312f080e7Smrj /* go to the next page_t */ 382412f080e7Smrj if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 382512f080e7Smrj *cur_pp = (*cur_pp)->p_next; 382612f080e7Smrj } 382712f080e7Smrj } 382812f080e7Smrj 382912f080e7Smrj /* add to the copy buffer count */ 383012f080e7Smrj *copybuf_used += MMU_PAGESIZE; 383112f080e7Smrj 383212f080e7Smrj /* 383312f080e7Smrj * This cookie doesn't use the copy buffer. Walk through the pages this 383412f080e7Smrj * cookie occupies to reflect this. 383512f080e7Smrj */ 383612f080e7Smrj } else { 383712f080e7Smrj /* 383812f080e7Smrj * figure out how many pages the cookie occupies. We need to 383912f080e7Smrj * use the original page offset of the buffer and the cookies 384012f080e7Smrj * offset in the buffer to do this. 384112f080e7Smrj */ 384212f080e7Smrj poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET; 384312f080e7Smrj pcnt = mmu_btopr(cookie->dmac_size + poff); 384412f080e7Smrj 384512f080e7Smrj while (pcnt > 0) { 384612f080e7Smrj #if !defined(__amd64) 384712f080e7Smrj /* 384812f080e7Smrj * the 32-bit kernel doesn't have seg kpm, so we need 384912f080e7Smrj * to map in the driver buffer (if it didn't come down 385012f080e7Smrj * with a kernel VA) on the fly. Since this page doesn't 385112f080e7Smrj * use the copy buffer, it's not, or will it ever, have 385212f080e7Smrj * to be mapped in. 385312f080e7Smrj */ 385412f080e7Smrj dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 385512f080e7Smrj #endif 385612f080e7Smrj dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE; 385712f080e7Smrj 385812f080e7Smrj /* 385912f080e7Smrj * we need to update pidx and cur_pp or we'll loose 386012f080e7Smrj * track of where we are. 386112f080e7Smrj */ 386212f080e7Smrj if (dmar_object->dmao_type == DMA_OTYP_PAGES) { 386312f080e7Smrj *cur_pp = (*cur_pp)->p_next; 386412f080e7Smrj } 386512f080e7Smrj pidx++; 386612f080e7Smrj pcnt--; 386712f080e7Smrj } 386812f080e7Smrj } 386912f080e7Smrj } 387012f080e7Smrj 387112f080e7Smrj 387212f080e7Smrj /* 387312f080e7Smrj * rootnex_sgllen_window_boundary() 387412f080e7Smrj * Called in the bind slow path when the next cookie causes us to exceed (in 387512f080e7Smrj * this case == since we start at 0 and sgllen starts at 1) the maximum sgl 387612f080e7Smrj * length supported by the DMA H/W. 387712f080e7Smrj */ 387812f080e7Smrj static int 387912f080e7Smrj rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 388012f080e7Smrj rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr, 388112f080e7Smrj off_t cur_offset) 388212f080e7Smrj { 388312f080e7Smrj off_t new_offset; 388412f080e7Smrj size_t trim_sz; 388512f080e7Smrj off_t coffset; 388612f080e7Smrj 388712f080e7Smrj 388812f080e7Smrj /* 388912f080e7Smrj * if we know we'll never have to trim, it's pretty easy. Just move to 389012f080e7Smrj * the next window and init it. We're done. 389112f080e7Smrj */ 389212f080e7Smrj if (!dma->dp_trim_required) { 389312f080e7Smrj (*windowp)++; 389412f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 389512f080e7Smrj (*windowp)->wd_cookie_cnt++; 389612f080e7Smrj (*windowp)->wd_size = cookie->dmac_size; 389712f080e7Smrj return (DDI_SUCCESS); 389812f080e7Smrj } 389912f080e7Smrj 390012f080e7Smrj /* figure out how much we need to trim from the window */ 390112f080e7Smrj ASSERT(attr->dma_attr_granular != 0); 390212f080e7Smrj if (dma->dp_granularity_power_2) { 390312f080e7Smrj trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1); 390412f080e7Smrj } else { 390512f080e7Smrj trim_sz = (*windowp)->wd_size % attr->dma_attr_granular; 390612f080e7Smrj } 390712f080e7Smrj 390812f080e7Smrj /* The window's a whole multiple of granularity. We're done */ 390912f080e7Smrj if (trim_sz == 0) { 391012f080e7Smrj (*windowp)++; 391112f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 391212f080e7Smrj (*windowp)->wd_cookie_cnt++; 391312f080e7Smrj (*windowp)->wd_size = cookie->dmac_size; 391412f080e7Smrj return (DDI_SUCCESS); 391512f080e7Smrj } 391612f080e7Smrj 391712f080e7Smrj /* 391812f080e7Smrj * The window's not a whole multiple of granularity, since we know this 391912f080e7Smrj * is due to the sgllen, we need to go back to the last cookie and trim 392012f080e7Smrj * that one, add the left over part of the old cookie into the new 392112f080e7Smrj * window, and then add in the new cookie into the new window. 392212f080e7Smrj */ 392312f080e7Smrj 392412f080e7Smrj /* 392512f080e7Smrj * make sure the driver isn't making us do something bad... Trimming and 392612f080e7Smrj * sgllen == 1 don't go together. 392712f080e7Smrj */ 392812f080e7Smrj if (attr->dma_attr_sgllen == 1) { 392912f080e7Smrj return (DDI_DMA_NOMAPPING); 393012f080e7Smrj } 393112f080e7Smrj 393212f080e7Smrj /* 393312f080e7Smrj * first, setup the current window to account for the trim. Need to go 393412f080e7Smrj * back to the last cookie for this. 393512f080e7Smrj */ 393612f080e7Smrj cookie--; 393712f080e7Smrj (*windowp)->wd_trim.tr_trim_last = B_TRUE; 393812f080e7Smrj (*windowp)->wd_trim.tr_last_cookie = cookie; 3939843e1988Sjohnlev (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 394012f080e7Smrj ASSERT(cookie->dmac_size > trim_sz); 394112f080e7Smrj (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 394212f080e7Smrj (*windowp)->wd_size -= trim_sz; 394312f080e7Smrj 394412f080e7Smrj /* save the buffer offsets for the next window */ 394512f080e7Smrj coffset = cookie->dmac_size - trim_sz; 394612f080e7Smrj new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 394712f080e7Smrj 394812f080e7Smrj /* 394912f080e7Smrj * set this now in case this is the first window. all other cases are 395012f080e7Smrj * set in dma_win() 395112f080e7Smrj */ 395212f080e7Smrj cookie->dmac_size = (*windowp)->wd_trim.tr_last_size; 395312f080e7Smrj 395412f080e7Smrj /* 395512f080e7Smrj * initialize the next window using what's left over in the previous 395612f080e7Smrj * cookie. 395712f080e7Smrj */ 395812f080e7Smrj (*windowp)++; 395912f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 396012f080e7Smrj (*windowp)->wd_cookie_cnt++; 396112f080e7Smrj (*windowp)->wd_trim.tr_trim_first = B_TRUE; 3962843e1988Sjohnlev (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset; 396312f080e7Smrj (*windowp)->wd_trim.tr_first_size = trim_sz; 396412f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 396512f080e7Smrj (*windowp)->wd_dosync = B_TRUE; 396612f080e7Smrj } 396712f080e7Smrj 396812f080e7Smrj /* 396912f080e7Smrj * now go back to the current cookie and add it to the new window. set 397012f080e7Smrj * the new window size to the what was left over from the previous 397112f080e7Smrj * cookie and what's in the current cookie. 397212f080e7Smrj */ 397312f080e7Smrj cookie++; 397412f080e7Smrj (*windowp)->wd_cookie_cnt++; 397512f080e7Smrj (*windowp)->wd_size = trim_sz + cookie->dmac_size; 397612f080e7Smrj 397712f080e7Smrj /* 397812f080e7Smrj * trim plus the next cookie could put us over maxxfer (a cookie can be 397912f080e7Smrj * a max size of maxxfer). Handle that case. 398012f080e7Smrj */ 398112f080e7Smrj if ((*windowp)->wd_size > dma->dp_maxxfer) { 398212f080e7Smrj /* 398312f080e7Smrj * maxxfer is already a whole multiple of granularity, and this 398412f080e7Smrj * trim will be <= the previous trim (since a cookie can't be 398512f080e7Smrj * larger than maxxfer). Make things simple here. 398612f080e7Smrj */ 398712f080e7Smrj trim_sz = (*windowp)->wd_size - dma->dp_maxxfer; 398812f080e7Smrj (*windowp)->wd_trim.tr_trim_last = B_TRUE; 398912f080e7Smrj (*windowp)->wd_trim.tr_last_cookie = cookie; 3990843e1988Sjohnlev (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 399112f080e7Smrj (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 399212f080e7Smrj (*windowp)->wd_size -= trim_sz; 399312f080e7Smrj ASSERT((*windowp)->wd_size == dma->dp_maxxfer); 399412f080e7Smrj 399512f080e7Smrj /* save the buffer offsets for the next window */ 399612f080e7Smrj coffset = cookie->dmac_size - trim_sz; 399712f080e7Smrj new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 399812f080e7Smrj 399912f080e7Smrj /* setup the next window */ 400012f080e7Smrj (*windowp)++; 400112f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 400212f080e7Smrj (*windowp)->wd_cookie_cnt++; 400312f080e7Smrj (*windowp)->wd_trim.tr_trim_first = B_TRUE; 4004843e1988Sjohnlev (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + 400512f080e7Smrj coffset; 400612f080e7Smrj (*windowp)->wd_trim.tr_first_size = trim_sz; 400712f080e7Smrj } 400812f080e7Smrj 400912f080e7Smrj return (DDI_SUCCESS); 401012f080e7Smrj } 401112f080e7Smrj 401212f080e7Smrj 401312f080e7Smrj /* 401412f080e7Smrj * rootnex_copybuf_window_boundary() 401512f080e7Smrj * Called in bind slowpath when we get to a window boundary because we used 401612f080e7Smrj * up all the copy buffer that we have. 401712f080e7Smrj */ 401812f080e7Smrj static int 401912f080e7Smrj rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 402012f080e7Smrj rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset, 402112f080e7Smrj size_t *copybuf_used) 402212f080e7Smrj { 402312f080e7Smrj rootnex_sglinfo_t *sinfo; 402412f080e7Smrj off_t new_offset; 402512f080e7Smrj size_t trim_sz; 4026843e1988Sjohnlev paddr_t paddr; 402712f080e7Smrj off_t coffset; 402812f080e7Smrj uint_t pidx; 402912f080e7Smrj off_t poff; 403012f080e7Smrj 403112f080e7Smrj 403212f080e7Smrj sinfo = &dma->dp_sglinfo; 403312f080e7Smrj 403412f080e7Smrj /* 403512f080e7Smrj * the copy buffer should be a whole multiple of page size. We know that 403612f080e7Smrj * this cookie is <= MMU_PAGESIZE. 403712f080e7Smrj */ 403812f080e7Smrj ASSERT(cookie->dmac_size <= MMU_PAGESIZE); 403912f080e7Smrj 404012f080e7Smrj /* 404112f080e7Smrj * from now on, all new windows in this bind need to be re-mapped during 404212f080e7Smrj * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out out copybuf 404312f080e7Smrj * space... 404412f080e7Smrj */ 404512f080e7Smrj #if !defined(__amd64) 404612f080e7Smrj dma->dp_cb_remaping = B_TRUE; 404712f080e7Smrj #endif 404812f080e7Smrj 404912f080e7Smrj /* reset copybuf used */ 405012f080e7Smrj *copybuf_used = 0; 405112f080e7Smrj 405212f080e7Smrj /* 405312f080e7Smrj * if we don't have to trim (since granularity is set to 1), go to the 405412f080e7Smrj * next window and add the current cookie to it. We know the current 405512f080e7Smrj * cookie uses the copy buffer since we're in this code path. 405612f080e7Smrj */ 405712f080e7Smrj if (!dma->dp_trim_required) { 405812f080e7Smrj (*windowp)++; 405912f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 406012f080e7Smrj 406112f080e7Smrj /* Add this cookie to the new window */ 406212f080e7Smrj (*windowp)->wd_cookie_cnt++; 406312f080e7Smrj (*windowp)->wd_size += cookie->dmac_size; 406412f080e7Smrj *copybuf_used += MMU_PAGESIZE; 406512f080e7Smrj return (DDI_SUCCESS); 406612f080e7Smrj } 406712f080e7Smrj 406812f080e7Smrj /* 406912f080e7Smrj * *** may need to trim, figure it out. 407012f080e7Smrj */ 407112f080e7Smrj 407212f080e7Smrj /* figure out how much we need to trim from the window */ 407312f080e7Smrj if (dma->dp_granularity_power_2) { 407412f080e7Smrj trim_sz = (*windowp)->wd_size & 407512f080e7Smrj (hp->dmai_attr.dma_attr_granular - 1); 407612f080e7Smrj } else { 407712f080e7Smrj trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular; 407812f080e7Smrj } 407912f080e7Smrj 408012f080e7Smrj /* 408112f080e7Smrj * if the window's a whole multiple of granularity, go to the next 408212f080e7Smrj * window, init it, then add in the current cookie. We know the current 408312f080e7Smrj * cookie uses the copy buffer since we're in this code path. 408412f080e7Smrj */ 408512f080e7Smrj if (trim_sz == 0) { 408612f080e7Smrj (*windowp)++; 408712f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, cur_offset); 408812f080e7Smrj 408912f080e7Smrj /* Add this cookie to the new window */ 409012f080e7Smrj (*windowp)->wd_cookie_cnt++; 409112f080e7Smrj (*windowp)->wd_size += cookie->dmac_size; 409212f080e7Smrj *copybuf_used += MMU_PAGESIZE; 409312f080e7Smrj return (DDI_SUCCESS); 409412f080e7Smrj } 409512f080e7Smrj 409612f080e7Smrj /* 409712f080e7Smrj * *** We figured it out, we definitly need to trim 409812f080e7Smrj */ 409912f080e7Smrj 410012f080e7Smrj /* 410112f080e7Smrj * make sure the driver isn't making us do something bad... 410212f080e7Smrj * Trimming and sgllen == 1 don't go together. 410312f080e7Smrj */ 410412f080e7Smrj if (hp->dmai_attr.dma_attr_sgllen == 1) { 410512f080e7Smrj return (DDI_DMA_NOMAPPING); 410612f080e7Smrj } 410712f080e7Smrj 410812f080e7Smrj /* 410912f080e7Smrj * first, setup the current window to account for the trim. Need to go 411012f080e7Smrj * back to the last cookie for this. Some of the last cookie will be in 411112f080e7Smrj * the current window, and some of the last cookie will be in the new 411212f080e7Smrj * window. All of the current cookie will be in the new window. 411312f080e7Smrj */ 411412f080e7Smrj cookie--; 411512f080e7Smrj (*windowp)->wd_trim.tr_trim_last = B_TRUE; 411612f080e7Smrj (*windowp)->wd_trim.tr_last_cookie = cookie; 4117843e1988Sjohnlev (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 411812f080e7Smrj ASSERT(cookie->dmac_size > trim_sz); 411912f080e7Smrj (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 412012f080e7Smrj (*windowp)->wd_size -= trim_sz; 412112f080e7Smrj 412212f080e7Smrj /* 412312f080e7Smrj * we're trimming the last cookie (not the current cookie). So that 412412f080e7Smrj * last cookie may have or may not have been using the copy buffer ( 412512f080e7Smrj * we know the cookie passed in uses the copy buffer since we're in 412612f080e7Smrj * this code path). 412712f080e7Smrj * 412812f080e7Smrj * If the last cookie doesn't use the copy buffer, nothing special to 412912f080e7Smrj * do. However, if it does uses the copy buffer, it will be both the 413012f080e7Smrj * last page in the current window and the first page in the next 413112f080e7Smrj * window. Since we are reusing the copy buffer (and KVA space on the 413212f080e7Smrj * 32-bit kernel), this page will use the end of the copy buffer in the 413312f080e7Smrj * current window, and the start of the copy buffer in the next window. 413412f080e7Smrj * Track that info... The cookie physical address was already set to 413512f080e7Smrj * the copy buffer physical address in setup_cookie.. 413612f080e7Smrj */ 413712f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 413812f080e7Smrj pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset + 413912f080e7Smrj (*windowp)->wd_size) >> MMU_PAGESHIFT; 414012f080e7Smrj (*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE; 414112f080e7Smrj (*windowp)->wd_trim.tr_last_pidx = pidx; 414212f080e7Smrj (*windowp)->wd_trim.tr_last_cbaddr = 414312f080e7Smrj dma->dp_pgmap[pidx].pm_cbaddr; 414412f080e7Smrj #if !defined(__amd64) 414512f080e7Smrj (*windowp)->wd_trim.tr_last_kaddr = 414612f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr; 414712f080e7Smrj #endif 414812f080e7Smrj } 414912f080e7Smrj 415012f080e7Smrj /* save the buffer offsets for the next window */ 415112f080e7Smrj coffset = cookie->dmac_size - trim_sz; 415212f080e7Smrj new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 415312f080e7Smrj 415412f080e7Smrj /* 415512f080e7Smrj * set this now in case this is the first window. all other cases are 415612f080e7Smrj * set in dma_win() 415712f080e7Smrj */ 415812f080e7Smrj cookie->dmac_size = (*windowp)->wd_trim.tr_last_size; 415912f080e7Smrj 416012f080e7Smrj /* 416112f080e7Smrj * initialize the next window using what's left over in the previous 416212f080e7Smrj * cookie. 416312f080e7Smrj */ 416412f080e7Smrj (*windowp)++; 416512f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 416612f080e7Smrj (*windowp)->wd_cookie_cnt++; 416712f080e7Smrj (*windowp)->wd_trim.tr_trim_first = B_TRUE; 4168843e1988Sjohnlev (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset; 416912f080e7Smrj (*windowp)->wd_trim.tr_first_size = trim_sz; 417012f080e7Smrj 417112f080e7Smrj /* 417212f080e7Smrj * again, we're tracking if the last cookie uses the copy buffer. 417312f080e7Smrj * read the comment above for more info on why we need to track 417412f080e7Smrj * additional state. 417512f080e7Smrj * 417612f080e7Smrj * For the first cookie in the new window, we need reset the physical 417712f080e7Smrj * address to DMA into to the start of the copy buffer plus any 417812f080e7Smrj * initial page offset which may be present. 417912f080e7Smrj */ 418012f080e7Smrj if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) { 418112f080e7Smrj (*windowp)->wd_dosync = B_TRUE; 418212f080e7Smrj (*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE; 418312f080e7Smrj (*windowp)->wd_trim.tr_first_pidx = pidx; 418412f080e7Smrj (*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr; 418512f080e7Smrj poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET; 4186843e1988Sjohnlev 4187843e1988Sjohnlev paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) + 4188843e1988Sjohnlev poff; 4189843e1988Sjohnlev #ifdef __xpv 4190843e1988Sjohnlev /* 4191843e1988Sjohnlev * If we're dom0, we're using a real device so we need to load 4192843e1988Sjohnlev * the cookies with MAs instead of PAs. 4193843e1988Sjohnlev */ 4194843e1988Sjohnlev (*windowp)->wd_trim.tr_first_paddr = 4195843e1988Sjohnlev ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 4196843e1988Sjohnlev #else 4197843e1988Sjohnlev (*windowp)->wd_trim.tr_first_paddr = paddr; 4198843e1988Sjohnlev #endif 4199843e1988Sjohnlev 420012f080e7Smrj #if !defined(__amd64) 420112f080e7Smrj (*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva; 420212f080e7Smrj #endif 420312f080e7Smrj /* account for the cookie copybuf usage in the new window */ 420412f080e7Smrj *copybuf_used += MMU_PAGESIZE; 420512f080e7Smrj 420612f080e7Smrj /* 420712f080e7Smrj * every piece of code has to have a hack, and here is this 420812f080e7Smrj * ones :-) 420912f080e7Smrj * 421012f080e7Smrj * There is a complex interaction between setup_cookie and the 421112f080e7Smrj * copybuf window boundary. The complexity had to be in either 421212f080e7Smrj * the maxxfer window, or the copybuf window, and I chose the 421312f080e7Smrj * copybuf code. 421412f080e7Smrj * 421512f080e7Smrj * So in this code path, we have taken the last cookie, 421612f080e7Smrj * virtually broken it in half due to the trim, and it happens 421712f080e7Smrj * to use the copybuf which further complicates life. At the 421812f080e7Smrj * same time, we have already setup the current cookie, which 421912f080e7Smrj * is now wrong. More background info: the current cookie uses 422012f080e7Smrj * the copybuf, so it is only a page long max. So we need to 422112f080e7Smrj * fix the current cookies copy buffer address, physical 422212f080e7Smrj * address, and kva for the 32-bit kernel. We due this by 422312f080e7Smrj * bumping them by page size (of course, we can't due this on 422412f080e7Smrj * the physical address since the copy buffer may not be 422512f080e7Smrj * physically contiguous). 422612f080e7Smrj */ 422712f080e7Smrj cookie++; 422812f080e7Smrj dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE; 4229843e1988Sjohnlev poff = cookie->dmac_laddress & MMU_PAGEOFFSET; 4230843e1988Sjohnlev 4231843e1988Sjohnlev paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, 423212f080e7Smrj dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff; 4233843e1988Sjohnlev #ifdef __xpv 4234843e1988Sjohnlev /* 4235843e1988Sjohnlev * If we're dom0, we're using a real device so we need to load 4236843e1988Sjohnlev * the cookies with MAs instead of PAs. 4237843e1988Sjohnlev */ 4238843e1988Sjohnlev cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(xen_info, paddr); 4239843e1988Sjohnlev #else 4240843e1988Sjohnlev cookie->dmac_laddress = paddr; 4241843e1988Sjohnlev #endif 4242843e1988Sjohnlev 424312f080e7Smrj #if !defined(__amd64) 424412f080e7Smrj ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE); 424512f080e7Smrj dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE; 424612f080e7Smrj #endif 424712f080e7Smrj } else { 424812f080e7Smrj /* go back to the current cookie */ 424912f080e7Smrj cookie++; 425012f080e7Smrj } 425112f080e7Smrj 425212f080e7Smrj /* 425312f080e7Smrj * add the current cookie to the new window. set the new window size to 425412f080e7Smrj * the what was left over from the previous cookie and what's in the 425512f080e7Smrj * current cookie. 425612f080e7Smrj */ 425712f080e7Smrj (*windowp)->wd_cookie_cnt++; 425812f080e7Smrj (*windowp)->wd_size = trim_sz + cookie->dmac_size; 425912f080e7Smrj ASSERT((*windowp)->wd_size < dma->dp_maxxfer); 426012f080e7Smrj 426112f080e7Smrj /* 426212f080e7Smrj * we know that the cookie passed in always uses the copy buffer. We 426312f080e7Smrj * wouldn't be here if it didn't. 426412f080e7Smrj */ 426512f080e7Smrj *copybuf_used += MMU_PAGESIZE; 426612f080e7Smrj 426712f080e7Smrj return (DDI_SUCCESS); 426812f080e7Smrj } 426912f080e7Smrj 427012f080e7Smrj 427112f080e7Smrj /* 427212f080e7Smrj * rootnex_maxxfer_window_boundary() 427312f080e7Smrj * Called in bind slowpath when we get to a window boundary because we will 427412f080e7Smrj * go over maxxfer. 427512f080e7Smrj */ 427612f080e7Smrj static int 427712f080e7Smrj rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma, 427812f080e7Smrj rootnex_window_t **windowp, ddi_dma_cookie_t *cookie) 427912f080e7Smrj { 428012f080e7Smrj size_t dmac_size; 428112f080e7Smrj off_t new_offset; 428212f080e7Smrj size_t trim_sz; 428312f080e7Smrj off_t coffset; 428412f080e7Smrj 428512f080e7Smrj 428612f080e7Smrj /* 428712f080e7Smrj * calculate how much we have to trim off of the current cookie to equal 428812f080e7Smrj * maxxfer. We don't have to account for granularity here since our 428912f080e7Smrj * maxxfer already takes that into account. 429012f080e7Smrj */ 429112f080e7Smrj trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer; 429212f080e7Smrj ASSERT(trim_sz <= cookie->dmac_size); 429312f080e7Smrj ASSERT(trim_sz <= dma->dp_maxxfer); 429412f080e7Smrj 429512f080e7Smrj /* save cookie size since we need it later and we might change it */ 429612f080e7Smrj dmac_size = cookie->dmac_size; 429712f080e7Smrj 429812f080e7Smrj /* 429912f080e7Smrj * if we're not trimming the entire cookie, setup the current window to 430012f080e7Smrj * account for the trim. 430112f080e7Smrj */ 430212f080e7Smrj if (trim_sz < cookie->dmac_size) { 430312f080e7Smrj (*windowp)->wd_cookie_cnt++; 430412f080e7Smrj (*windowp)->wd_trim.tr_trim_last = B_TRUE; 430512f080e7Smrj (*windowp)->wd_trim.tr_last_cookie = cookie; 4306843e1988Sjohnlev (*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress; 430712f080e7Smrj (*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz; 430812f080e7Smrj (*windowp)->wd_size = dma->dp_maxxfer; 430912f080e7Smrj 431012f080e7Smrj /* 431112f080e7Smrj * set the adjusted cookie size now in case this is the first 431212f080e7Smrj * window. All other windows are taken care of in get win 431312f080e7Smrj */ 431412f080e7Smrj cookie->dmac_size = (*windowp)->wd_trim.tr_last_size; 431512f080e7Smrj } 431612f080e7Smrj 431712f080e7Smrj /* 431812f080e7Smrj * coffset is the current offset within the cookie, new_offset is the 431912f080e7Smrj * current offset with the entire buffer. 432012f080e7Smrj */ 432112f080e7Smrj coffset = dmac_size - trim_sz; 432212f080e7Smrj new_offset = (*windowp)->wd_offset + (*windowp)->wd_size; 432312f080e7Smrj 432412f080e7Smrj /* initialize the next window */ 432512f080e7Smrj (*windowp)++; 432612f080e7Smrj rootnex_init_win(hp, dma, *windowp, cookie, new_offset); 432712f080e7Smrj (*windowp)->wd_cookie_cnt++; 432812f080e7Smrj (*windowp)->wd_size = trim_sz; 432912f080e7Smrj if (trim_sz < dmac_size) { 433012f080e7Smrj (*windowp)->wd_trim.tr_trim_first = B_TRUE; 4331843e1988Sjohnlev (*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + 433212f080e7Smrj coffset; 433312f080e7Smrj (*windowp)->wd_trim.tr_first_size = trim_sz; 433412f080e7Smrj } 433512f080e7Smrj 433612f080e7Smrj return (DDI_SUCCESS); 433712f080e7Smrj } 433812f080e7Smrj 433912f080e7Smrj 434012f080e7Smrj /*ARGSUSED*/ 434112f080e7Smrj static int 434220906b23SVikram Hegde rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 434312f080e7Smrj off_t off, size_t len, uint_t cache_flags) 434412f080e7Smrj { 434512f080e7Smrj rootnex_sglinfo_t *sinfo; 434612f080e7Smrj rootnex_pgmap_t *cbpage; 434712f080e7Smrj rootnex_window_t *win; 434812f080e7Smrj ddi_dma_impl_t *hp; 434912f080e7Smrj rootnex_dma_t *dma; 435012f080e7Smrj caddr_t fromaddr; 435112f080e7Smrj caddr_t toaddr; 435212f080e7Smrj uint_t psize; 435312f080e7Smrj off_t offset; 435412f080e7Smrj uint_t pidx; 435512f080e7Smrj size_t size; 435612f080e7Smrj off_t poff; 435712f080e7Smrj int e; 435812f080e7Smrj 435912f080e7Smrj 436012f080e7Smrj hp = (ddi_dma_impl_t *)handle; 436112f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 436212f080e7Smrj sinfo = &dma->dp_sglinfo; 436312f080e7Smrj 436412f080e7Smrj /* 436512f080e7Smrj * if we don't have any windows, we don't need to sync. A copybuf 436612f080e7Smrj * will cause us to have at least one window. 436712f080e7Smrj */ 436812f080e7Smrj if (dma->dp_window == NULL) { 436912f080e7Smrj return (DDI_SUCCESS); 437012f080e7Smrj } 437112f080e7Smrj 437212f080e7Smrj /* This window may not need to be sync'd */ 437312f080e7Smrj win = &dma->dp_window[dma->dp_current_win]; 437412f080e7Smrj if (!win->wd_dosync) { 437512f080e7Smrj return (DDI_SUCCESS); 437612f080e7Smrj } 437712f080e7Smrj 437812f080e7Smrj /* handle off and len special cases */ 437912f080e7Smrj if ((off == 0) || (rootnex_sync_ignore_params)) { 438012f080e7Smrj offset = win->wd_offset; 438112f080e7Smrj } else { 438212f080e7Smrj offset = off; 438312f080e7Smrj } 438412f080e7Smrj if ((len == 0) || (rootnex_sync_ignore_params)) { 438512f080e7Smrj size = win->wd_size; 438612f080e7Smrj } else { 438712f080e7Smrj size = len; 438812f080e7Smrj } 438912f080e7Smrj 439012f080e7Smrj /* check the sync args to make sure they make a little sense */ 439112f080e7Smrj if (rootnex_sync_check_parms) { 439212f080e7Smrj e = rootnex_valid_sync_parms(hp, win, offset, size, 439312f080e7Smrj cache_flags); 439412f080e7Smrj if (e != DDI_SUCCESS) { 439512f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]); 439612f080e7Smrj return (DDI_FAILURE); 439712f080e7Smrj } 439812f080e7Smrj } 439912f080e7Smrj 440012f080e7Smrj /* 440112f080e7Smrj * special case the first page to handle the offset into the page. The 440212f080e7Smrj * offset to the current page for our buffer is the offset into the 440312f080e7Smrj * first page of the buffer plus our current offset into the buffer 440412f080e7Smrj * itself, masked of course. 440512f080e7Smrj */ 440612f080e7Smrj poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET; 440712f080e7Smrj psize = MIN((MMU_PAGESIZE - poff), size); 440812f080e7Smrj 440912f080e7Smrj /* go through all the pages that we want to sync */ 441012f080e7Smrj while (size > 0) { 441112f080e7Smrj /* 441212f080e7Smrj * Calculate the page index relative to the start of the buffer. 441312f080e7Smrj * The index to the current page for our buffer is the offset 441412f080e7Smrj * into the first page of the buffer plus our current offset 441512f080e7Smrj * into the buffer itself, shifted of course... 441612f080e7Smrj */ 441712f080e7Smrj pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT; 441812f080e7Smrj ASSERT(pidx < sinfo->si_max_pages); 441912f080e7Smrj 442012f080e7Smrj /* 442112f080e7Smrj * if this page uses the copy buffer, we need to sync it, 442212f080e7Smrj * otherwise, go on to the next page. 442312f080e7Smrj */ 442412f080e7Smrj cbpage = &dma->dp_pgmap[pidx]; 442512f080e7Smrj ASSERT((cbpage->pm_uses_copybuf == B_TRUE) || 442612f080e7Smrj (cbpage->pm_uses_copybuf == B_FALSE)); 442712f080e7Smrj if (cbpage->pm_uses_copybuf) { 442812f080e7Smrj /* cbaddr and kaddr should be page aligned */ 442912f080e7Smrj ASSERT(((uintptr_t)cbpage->pm_cbaddr & 443012f080e7Smrj MMU_PAGEOFFSET) == 0); 443112f080e7Smrj ASSERT(((uintptr_t)cbpage->pm_kaddr & 443212f080e7Smrj MMU_PAGEOFFSET) == 0); 443312f080e7Smrj 443412f080e7Smrj /* 443512f080e7Smrj * if we're copying for the device, we are going to 443612f080e7Smrj * copy from the drivers buffer and to the rootnex 443712f080e7Smrj * allocated copy buffer. 443812f080e7Smrj */ 443912f080e7Smrj if (cache_flags == DDI_DMA_SYNC_FORDEV) { 444012f080e7Smrj fromaddr = cbpage->pm_kaddr + poff; 444112f080e7Smrj toaddr = cbpage->pm_cbaddr + poff; 444212f080e7Smrj DTRACE_PROBE2(rootnex__sync__dev, 444312f080e7Smrj dev_info_t *, dma->dp_dip, size_t, psize); 444412f080e7Smrj 444512f080e7Smrj /* 444612f080e7Smrj * if we're copying for the cpu/kernel, we are going to 444712f080e7Smrj * copy from the rootnex allocated copy buffer to the 444812f080e7Smrj * drivers buffer. 444912f080e7Smrj */ 445012f080e7Smrj } else { 445112f080e7Smrj fromaddr = cbpage->pm_cbaddr + poff; 445212f080e7Smrj toaddr = cbpage->pm_kaddr + poff; 445312f080e7Smrj DTRACE_PROBE2(rootnex__sync__cpu, 445412f080e7Smrj dev_info_t *, dma->dp_dip, size_t, psize); 445512f080e7Smrj } 445612f080e7Smrj 445712f080e7Smrj bcopy(fromaddr, toaddr, psize); 445812f080e7Smrj } 445912f080e7Smrj 446012f080e7Smrj /* 446112f080e7Smrj * decrement size until we're done, update our offset into the 446212f080e7Smrj * buffer, and get the next page size. 446312f080e7Smrj */ 446412f080e7Smrj size -= psize; 446512f080e7Smrj offset += psize; 446612f080e7Smrj psize = MIN(MMU_PAGESIZE, size); 446712f080e7Smrj 446812f080e7Smrj /* page offset is zero for the rest of this loop */ 446912f080e7Smrj poff = 0; 447012f080e7Smrj } 447112f080e7Smrj 447212f080e7Smrj return (DDI_SUCCESS); 447312f080e7Smrj } 447412f080e7Smrj 447520906b23SVikram Hegde /* 447620906b23SVikram Hegde * rootnex_dma_sync() 447720906b23SVikram Hegde * called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags. 447820906b23SVikram Hegde * We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC 447920906b23SVikram Hegde * is set, ddi_dma_sync() returns immediately passing back success. 448020906b23SVikram Hegde */ 448120906b23SVikram Hegde /*ARGSUSED*/ 448220906b23SVikram Hegde static int 448320906b23SVikram Hegde rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 448420906b23SVikram Hegde off_t off, size_t len, uint_t cache_flags) 448520906b23SVikram Hegde { 44863a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 4487b51bbbf5SVikram Hegde if (IOMMU_USED(rdip)) { 448820906b23SVikram Hegde return (iommulib_nexdma_sync(dip, rdip, handle, off, len, 448920906b23SVikram Hegde cache_flags)); 449020906b23SVikram Hegde } 449120906b23SVikram Hegde #endif 449220906b23SVikram Hegde return (rootnex_coredma_sync(dip, rdip, handle, off, len, 449320906b23SVikram Hegde cache_flags)); 449420906b23SVikram Hegde } 449512f080e7Smrj 449612f080e7Smrj /* 449712f080e7Smrj * rootnex_valid_sync_parms() 449812f080e7Smrj * checks the parameters passed to sync to verify they are correct. 449912f080e7Smrj */ 450012f080e7Smrj static int 450112f080e7Smrj rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win, 450212f080e7Smrj off_t offset, size_t size, uint_t cache_flags) 450312f080e7Smrj { 450412f080e7Smrj off_t woffset; 450512f080e7Smrj 450612f080e7Smrj 450712f080e7Smrj /* 450812f080e7Smrj * the first part of the test to make sure the offset passed in is 450912f080e7Smrj * within the window. 451012f080e7Smrj */ 451112f080e7Smrj if (offset < win->wd_offset) { 451212f080e7Smrj return (DDI_FAILURE); 451312f080e7Smrj } 451412f080e7Smrj 451512f080e7Smrj /* 451612f080e7Smrj * second and last part of the test to make sure the offset and length 451712f080e7Smrj * passed in is within the window. 451812f080e7Smrj */ 451912f080e7Smrj woffset = offset - win->wd_offset; 452012f080e7Smrj if ((woffset + size) > win->wd_size) { 452112f080e7Smrj return (DDI_FAILURE); 452212f080e7Smrj } 452312f080e7Smrj 452412f080e7Smrj /* 452512f080e7Smrj * if we are sync'ing for the device, the DDI_DMA_WRITE flag should 452612f080e7Smrj * be set too. 452712f080e7Smrj */ 452812f080e7Smrj if ((cache_flags == DDI_DMA_SYNC_FORDEV) && 452912f080e7Smrj (hp->dmai_rflags & DDI_DMA_WRITE)) { 453012f080e7Smrj return (DDI_SUCCESS); 453112f080e7Smrj } 453212f080e7Smrj 453312f080e7Smrj /* 453412f080e7Smrj * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL 453512f080e7Smrj * should be set. Also DDI_DMA_READ should be set in the flags. 453612f080e7Smrj */ 453712f080e7Smrj if (((cache_flags == DDI_DMA_SYNC_FORCPU) || 453812f080e7Smrj (cache_flags == DDI_DMA_SYNC_FORKERNEL)) && 453912f080e7Smrj (hp->dmai_rflags & DDI_DMA_READ)) { 454012f080e7Smrj return (DDI_SUCCESS); 454112f080e7Smrj } 454212f080e7Smrj 454312f080e7Smrj return (DDI_FAILURE); 454412f080e7Smrj } 454512f080e7Smrj 454612f080e7Smrj 454712f080e7Smrj /*ARGSUSED*/ 454812f080e7Smrj static int 454920906b23SVikram Hegde rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 455012f080e7Smrj uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep, 455112f080e7Smrj uint_t *ccountp) 455212f080e7Smrj { 455312f080e7Smrj rootnex_window_t *window; 455412f080e7Smrj rootnex_trim_t *trim; 455512f080e7Smrj ddi_dma_impl_t *hp; 455612f080e7Smrj rootnex_dma_t *dma; 455712f080e7Smrj #if !defined(__amd64) 455812f080e7Smrj rootnex_sglinfo_t *sinfo; 455912f080e7Smrj rootnex_pgmap_t *pmap; 456012f080e7Smrj uint_t pidx; 456112f080e7Smrj uint_t pcnt; 456212f080e7Smrj off_t poff; 456312f080e7Smrj int i; 456412f080e7Smrj #endif 456512f080e7Smrj 456612f080e7Smrj 456712f080e7Smrj hp = (ddi_dma_impl_t *)handle; 456812f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 456912f080e7Smrj #if !defined(__amd64) 457012f080e7Smrj sinfo = &dma->dp_sglinfo; 457112f080e7Smrj #endif 457212f080e7Smrj 457312f080e7Smrj /* If we try and get a window which doesn't exist, return failure */ 457412f080e7Smrj if (win >= hp->dmai_nwin) { 457512f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]); 457612f080e7Smrj return (DDI_FAILURE); 457712f080e7Smrj } 457812f080e7Smrj 457912f080e7Smrj /* 458012f080e7Smrj * if we don't have any windows, and they're asking for the first 458112f080e7Smrj * window, setup the cookie pointer to the first cookie in the bind. 458212f080e7Smrj * setup our return values, then increment the cookie since we return 458312f080e7Smrj * the first cookie on the stack. 458412f080e7Smrj */ 458512f080e7Smrj if (dma->dp_window == NULL) { 458612f080e7Smrj if (win != 0) { 458712f080e7Smrj ROOTNEX_PROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]); 458812f080e7Smrj return (DDI_FAILURE); 458912f080e7Smrj } 459012f080e7Smrj hp->dmai_cookie = dma->dp_cookies; 459112f080e7Smrj *offp = 0; 459212f080e7Smrj *lenp = dma->dp_dma.dmao_size; 459312f080e7Smrj *ccountp = dma->dp_sglinfo.si_sgl_size; 459412f080e7Smrj *cookiep = hp->dmai_cookie[0]; 459512f080e7Smrj hp->dmai_cookie++; 459612f080e7Smrj return (DDI_SUCCESS); 459712f080e7Smrj } 459812f080e7Smrj 459912f080e7Smrj /* sync the old window before moving on to the new one */ 460012f080e7Smrj window = &dma->dp_window[dma->dp_current_win]; 460112f080e7Smrj if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) { 460294f1124eSVikram Hegde (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 460312f080e7Smrj DDI_DMA_SYNC_FORCPU); 460412f080e7Smrj } 460512f080e7Smrj 460612f080e7Smrj #if !defined(__amd64) 460712f080e7Smrj /* 460812f080e7Smrj * before we move to the next window, if we need to re-map, unmap all 460912f080e7Smrj * the pages in this window. 461012f080e7Smrj */ 461112f080e7Smrj if (dma->dp_cb_remaping) { 461212f080e7Smrj /* 461312f080e7Smrj * If we switch to this window again, we'll need to map in 461412f080e7Smrj * on the fly next time. 461512f080e7Smrj */ 461612f080e7Smrj window->wd_remap_copybuf = B_TRUE; 461712f080e7Smrj 461812f080e7Smrj /* 461912f080e7Smrj * calculate the page index into the buffer where this window 462012f080e7Smrj * starts, and the number of pages this window takes up. 462112f080e7Smrj */ 462212f080e7Smrj pidx = (sinfo->si_buf_offset + window->wd_offset) >> 462312f080e7Smrj MMU_PAGESHIFT; 462412f080e7Smrj poff = (sinfo->si_buf_offset + window->wd_offset) & 462512f080e7Smrj MMU_PAGEOFFSET; 462612f080e7Smrj pcnt = mmu_btopr(window->wd_size + poff); 462712f080e7Smrj ASSERT((pidx + pcnt) <= sinfo->si_max_pages); 462812f080e7Smrj 462912f080e7Smrj /* unmap pages which are currently mapped in this window */ 463012f080e7Smrj for (i = 0; i < pcnt; i++) { 463112f080e7Smrj if (dma->dp_pgmap[pidx].pm_mapped) { 463212f080e7Smrj hat_unload(kas.a_hat, 463312f080e7Smrj dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE, 463412f080e7Smrj HAT_UNLOAD); 463512f080e7Smrj dma->dp_pgmap[pidx].pm_mapped = B_FALSE; 463612f080e7Smrj } 463712f080e7Smrj pidx++; 463812f080e7Smrj } 463912f080e7Smrj } 464012f080e7Smrj #endif 464112f080e7Smrj 464212f080e7Smrj /* 464312f080e7Smrj * Move to the new window. 464412f080e7Smrj * NOTE: current_win must be set for sync to work right 464512f080e7Smrj */ 464612f080e7Smrj dma->dp_current_win = win; 464712f080e7Smrj window = &dma->dp_window[win]; 464812f080e7Smrj 464912f080e7Smrj /* if needed, adjust the first and/or last cookies for trim */ 465012f080e7Smrj trim = &window->wd_trim; 465112f080e7Smrj if (trim->tr_trim_first) { 4652843e1988Sjohnlev window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr; 465312f080e7Smrj window->wd_first_cookie->dmac_size = trim->tr_first_size; 465412f080e7Smrj #if !defined(__amd64) 465512f080e7Smrj window->wd_first_cookie->dmac_type = 465612f080e7Smrj (window->wd_first_cookie->dmac_type & 465712f080e7Smrj ROOTNEX_USES_COPYBUF) + window->wd_offset; 465812f080e7Smrj #endif 465912f080e7Smrj if (trim->tr_first_copybuf_win) { 466012f080e7Smrj dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr = 466112f080e7Smrj trim->tr_first_cbaddr; 466212f080e7Smrj #if !defined(__amd64) 466312f080e7Smrj dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr = 466412f080e7Smrj trim->tr_first_kaddr; 466512f080e7Smrj #endif 466612f080e7Smrj } 466712f080e7Smrj } 466812f080e7Smrj if (trim->tr_trim_last) { 4669843e1988Sjohnlev trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr; 467012f080e7Smrj trim->tr_last_cookie->dmac_size = trim->tr_last_size; 467112f080e7Smrj if (trim->tr_last_copybuf_win) { 467212f080e7Smrj dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr = 467312f080e7Smrj trim->tr_last_cbaddr; 467412f080e7Smrj #if !defined(__amd64) 467512f080e7Smrj dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr = 467612f080e7Smrj trim->tr_last_kaddr; 467712f080e7Smrj #endif 467812f080e7Smrj } 467912f080e7Smrj } 468012f080e7Smrj 468112f080e7Smrj /* 468212f080e7Smrj * setup the cookie pointer to the first cookie in the window. setup 468312f080e7Smrj * our return values, then increment the cookie since we return the 468412f080e7Smrj * first cookie on the stack. 468512f080e7Smrj */ 468612f080e7Smrj hp->dmai_cookie = window->wd_first_cookie; 468712f080e7Smrj *offp = window->wd_offset; 468812f080e7Smrj *lenp = window->wd_size; 468912f080e7Smrj *ccountp = window->wd_cookie_cnt; 469012f080e7Smrj *cookiep = hp->dmai_cookie[0]; 469112f080e7Smrj hp->dmai_cookie++; 469212f080e7Smrj 469312f080e7Smrj #if !defined(__amd64) 469412f080e7Smrj /* re-map copybuf if required for this window */ 469512f080e7Smrj if (dma->dp_cb_remaping) { 469612f080e7Smrj /* 469712f080e7Smrj * calculate the page index into the buffer where this 469812f080e7Smrj * window starts. 469912f080e7Smrj */ 470012f080e7Smrj pidx = (sinfo->si_buf_offset + window->wd_offset) >> 470112f080e7Smrj MMU_PAGESHIFT; 470212f080e7Smrj ASSERT(pidx < sinfo->si_max_pages); 470312f080e7Smrj 470412f080e7Smrj /* 470512f080e7Smrj * the first page can get unmapped if it's shared with the 470612f080e7Smrj * previous window. Even if the rest of this window is already 470712f080e7Smrj * mapped in, we need to still check this one. 470812f080e7Smrj */ 470912f080e7Smrj pmap = &dma->dp_pgmap[pidx]; 471012f080e7Smrj if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) { 471112f080e7Smrj if (pmap->pm_pp != NULL) { 471212f080e7Smrj pmap->pm_mapped = B_TRUE; 471312f080e7Smrj i86_pp_map(pmap->pm_pp, pmap->pm_kaddr); 471412f080e7Smrj } else if (pmap->pm_vaddr != NULL) { 471512f080e7Smrj pmap->pm_mapped = B_TRUE; 471612f080e7Smrj i86_va_map(pmap->pm_vaddr, sinfo->si_asp, 471712f080e7Smrj pmap->pm_kaddr); 471812f080e7Smrj } 471912f080e7Smrj } 472012f080e7Smrj pidx++; 472112f080e7Smrj 472212f080e7Smrj /* map in the rest of the pages if required */ 472312f080e7Smrj if (window->wd_remap_copybuf) { 472412f080e7Smrj window->wd_remap_copybuf = B_FALSE; 472512f080e7Smrj 472612f080e7Smrj /* figure out many pages this window takes up */ 472712f080e7Smrj poff = (sinfo->si_buf_offset + window->wd_offset) & 472812f080e7Smrj MMU_PAGEOFFSET; 472912f080e7Smrj pcnt = mmu_btopr(window->wd_size + poff); 473012f080e7Smrj ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages); 473112f080e7Smrj 473212f080e7Smrj /* map pages which require it */ 473312f080e7Smrj for (i = 1; i < pcnt; i++) { 473412f080e7Smrj pmap = &dma->dp_pgmap[pidx]; 473512f080e7Smrj if (pmap->pm_uses_copybuf) { 473612f080e7Smrj ASSERT(pmap->pm_mapped == B_FALSE); 473712f080e7Smrj if (pmap->pm_pp != NULL) { 473812f080e7Smrj pmap->pm_mapped = B_TRUE; 473912f080e7Smrj i86_pp_map(pmap->pm_pp, 474012f080e7Smrj pmap->pm_kaddr); 474112f080e7Smrj } else if (pmap->pm_vaddr != NULL) { 474212f080e7Smrj pmap->pm_mapped = B_TRUE; 474312f080e7Smrj i86_va_map(pmap->pm_vaddr, 474412f080e7Smrj sinfo->si_asp, 474512f080e7Smrj pmap->pm_kaddr); 474612f080e7Smrj } 474712f080e7Smrj } 474812f080e7Smrj pidx++; 474912f080e7Smrj } 475012f080e7Smrj } 475112f080e7Smrj } 475212f080e7Smrj #endif 475312f080e7Smrj 475412f080e7Smrj /* if the new window uses the copy buffer, sync it for the device */ 475512f080e7Smrj if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) { 475694f1124eSVikram Hegde (void) rootnex_coredma_sync(dip, rdip, handle, 0, 0, 475712f080e7Smrj DDI_DMA_SYNC_FORDEV); 475812f080e7Smrj } 475912f080e7Smrj 476012f080e7Smrj return (DDI_SUCCESS); 476112f080e7Smrj } 476212f080e7Smrj 476320906b23SVikram Hegde /* 476420906b23SVikram Hegde * rootnex_dma_win() 476520906b23SVikram Hegde * called from ddi_dma_getwin() 476620906b23SVikram Hegde */ 476720906b23SVikram Hegde /*ARGSUSED*/ 476820906b23SVikram Hegde static int 476920906b23SVikram Hegde rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 477020906b23SVikram Hegde uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep, 477120906b23SVikram Hegde uint_t *ccountp) 477220906b23SVikram Hegde { 47733a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 4774b51bbbf5SVikram Hegde if (IOMMU_USED(rdip)) { 477520906b23SVikram Hegde return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp, 477620906b23SVikram Hegde cookiep, ccountp)); 477720906b23SVikram Hegde } 477820906b23SVikram Hegde #endif 477912f080e7Smrj 478020906b23SVikram Hegde return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp, 478120906b23SVikram Hegde cookiep, ccountp)); 478220906b23SVikram Hegde } 478312f080e7Smrj 478412f080e7Smrj /* 478512f080e7Smrj * ************************ 478612f080e7Smrj * obsoleted dma routines 478712f080e7Smrj * ************************ 478812f080e7Smrj */ 478912f080e7Smrj 4790b51bbbf5SVikram Hegde /* 4791b51bbbf5SVikram Hegde * rootnex_dma_map() 4792b51bbbf5SVikram Hegde * called from ddi_dma_setup() 4793b51bbbf5SVikram Hegde * NO IOMMU in 32 bit mode. The below routines doesn't work in 64 bit mode. 4794b51bbbf5SVikram Hegde */ 479512f080e7Smrj /* ARGSUSED */ 479612f080e7Smrj static int 4797b51bbbf5SVikram Hegde rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip, 479820906b23SVikram Hegde struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep) 479912f080e7Smrj { 480012f080e7Smrj #if defined(__amd64) 480112f080e7Smrj /* 480212f080e7Smrj * this interface is not supported in 64-bit x86 kernel. See comment in 480312f080e7Smrj * rootnex_dma_mctl() 480412f080e7Smrj */ 480512f080e7Smrj return (DDI_DMA_NORESOURCES); 480612f080e7Smrj 480712f080e7Smrj #else /* 32-bit x86 kernel */ 480812f080e7Smrj ddi_dma_handle_t *lhandlep; 480912f080e7Smrj ddi_dma_handle_t lhandle; 481012f080e7Smrj ddi_dma_cookie_t cookie; 481112f080e7Smrj ddi_dma_attr_t dma_attr; 481212f080e7Smrj ddi_dma_lim_t *dma_lim; 481312f080e7Smrj uint_t ccnt; 481412f080e7Smrj int e; 481512f080e7Smrj 481612f080e7Smrj 481712f080e7Smrj /* 481812f080e7Smrj * if the driver is just testing to see if it's possible to do the bind, 481912f080e7Smrj * we'll use local state. Otherwise, use the handle pointer passed in. 482012f080e7Smrj */ 482112f080e7Smrj if (handlep == NULL) { 482212f080e7Smrj lhandlep = &lhandle; 482312f080e7Smrj } else { 482412f080e7Smrj lhandlep = handlep; 482512f080e7Smrj } 482612f080e7Smrj 482712f080e7Smrj /* convert the limit structure to a dma_attr one */ 482812f080e7Smrj dma_lim = dmareq->dmar_limits; 482912f080e7Smrj dma_attr.dma_attr_version = DMA_ATTR_V0; 483012f080e7Smrj dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo; 483112f080e7Smrj dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi; 483212f080e7Smrj dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer; 483312f080e7Smrj dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max; 483412f080e7Smrj dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max; 483512f080e7Smrj dma_attr.dma_attr_granular = dma_lim->dlim_granular; 483612f080e7Smrj dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen; 483712f080e7Smrj dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize; 483812f080e7Smrj dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes; 483912f080e7Smrj dma_attr.dma_attr_align = MMU_PAGESIZE; 484012f080e7Smrj dma_attr.dma_attr_flags = 0; 484112f080e7Smrj 484212f080e7Smrj e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp, 484312f080e7Smrj dmareq->dmar_arg, lhandlep); 484412f080e7Smrj if (e != DDI_SUCCESS) { 484512f080e7Smrj return (e); 484612f080e7Smrj } 484712f080e7Smrj 484812f080e7Smrj e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt); 484912f080e7Smrj if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) { 485012f080e7Smrj (void) rootnex_dma_freehdl(dip, rdip, *lhandlep); 485112f080e7Smrj return (e); 485212f080e7Smrj } 485312f080e7Smrj 485412f080e7Smrj /* 485512f080e7Smrj * if the driver is just testing to see if it's possible to do the bind, 485612f080e7Smrj * free up the local state and return the result. 485712f080e7Smrj */ 485812f080e7Smrj if (handlep == NULL) { 485912f080e7Smrj (void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep); 486012f080e7Smrj (void) rootnex_dma_freehdl(dip, rdip, *lhandlep); 486112f080e7Smrj if (e == DDI_DMA_MAPPED) { 486212f080e7Smrj return (DDI_DMA_MAPOK); 486312f080e7Smrj } else { 486412f080e7Smrj return (DDI_DMA_NOMAPPING); 486512f080e7Smrj } 486612f080e7Smrj } 486712f080e7Smrj 486812f080e7Smrj return (e); 486912f080e7Smrj #endif /* defined(__amd64) */ 487012f080e7Smrj } 487112f080e7Smrj 487220906b23SVikram Hegde /* 487312f080e7Smrj * rootnex_dma_mctl() 487412f080e7Smrj * 4875b51bbbf5SVikram Hegde * No IOMMU in 32 bit mode. The below routine doesn't work in 64 bit mode. 487612f080e7Smrj */ 487712f080e7Smrj /* ARGSUSED */ 487812f080e7Smrj static int 4879b51bbbf5SVikram Hegde rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle, 488012f080e7Smrj enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp, 488112f080e7Smrj uint_t cache_flags) 488212f080e7Smrj { 488312f080e7Smrj #if defined(__amd64) 488412f080e7Smrj /* 488512f080e7Smrj * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC we're changed to have a 488612f080e7Smrj * common implementation in genunix, so they no longer have x86 488712f080e7Smrj * specific functionality which called into dma_ctl. 488812f080e7Smrj * 488912f080e7Smrj * The rest of the obsoleted interfaces were never supported in the 489012f080e7Smrj * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface 489112f080e7Smrj * was not ported to the x86 64-bit kernel do to serious x86 rootnex 489212f080e7Smrj * implementation issues. 489312f080e7Smrj * 489412f080e7Smrj * If you can't use DDI_DMA_SEGTOC; DDI_DMA_NEXTSEG, DDI_DMA_FREE, and 489512f080e7Smrj * DDI_DMA_NEXTWIN are useless since you can get to the cookie, so we 489612f080e7Smrj * reflect that now too... 489712f080e7Smrj * 489812f080e7Smrj * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are 489912f080e7Smrj * not going to put this functionality into the 64-bit x86 kernel now. 490012f080e7Smrj * It wasn't ported to the 64-bit kernel for s10, no reason to change 490112f080e7Smrj * that in a future release. 490212f080e7Smrj */ 490312f080e7Smrj return (DDI_FAILURE); 490412f080e7Smrj 490512f080e7Smrj #else /* 32-bit x86 kernel */ 490612f080e7Smrj ddi_dma_cookie_t lcookie; 490712f080e7Smrj ddi_dma_cookie_t *cookie; 490812f080e7Smrj rootnex_window_t *window; 490912f080e7Smrj ddi_dma_impl_t *hp; 491012f080e7Smrj rootnex_dma_t *dma; 491112f080e7Smrj uint_t nwin; 491212f080e7Smrj uint_t ccnt; 491312f080e7Smrj size_t len; 491412f080e7Smrj off_t off; 491512f080e7Smrj int e; 491612f080e7Smrj 491712f080e7Smrj 491812f080e7Smrj /* 491912f080e7Smrj * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little 492012f080e7Smrj * hacky since were optimizing for the current interfaces and so we can 492112f080e7Smrj * cleanup the mess in genunix. Hopefully we will remove the this 492212f080e7Smrj * obsoleted routines someday soon. 492312f080e7Smrj */ 492412f080e7Smrj 492512f080e7Smrj switch (request) { 492612f080e7Smrj 492712f080e7Smrj case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */ 492812f080e7Smrj hp = (ddi_dma_impl_t *)handle; 492912f080e7Smrj cookie = (ddi_dma_cookie_t *)objpp; 493012f080e7Smrj 493112f080e7Smrj /* 493212f080e7Smrj * convert segment to cookie. We don't distinguish between the 493312f080e7Smrj * two :-) 493412f080e7Smrj */ 493512f080e7Smrj *cookie = *hp->dmai_cookie; 493612f080e7Smrj *lenp = cookie->dmac_size; 493712f080e7Smrj *offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF; 493812f080e7Smrj return (DDI_SUCCESS); 493912f080e7Smrj 494012f080e7Smrj case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */ 494112f080e7Smrj hp = (ddi_dma_impl_t *)handle; 494212f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 494312f080e7Smrj 494412f080e7Smrj if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) { 494512f080e7Smrj return (DDI_DMA_STALE); 494612f080e7Smrj } 494712f080e7Smrj 494812f080e7Smrj /* handle the case where we don't have any windows */ 494912f080e7Smrj if (dma->dp_window == NULL) { 495012f080e7Smrj /* 495112f080e7Smrj * if seg == NULL, and we don't have any windows, 495212f080e7Smrj * return the first cookie in the sgl. 495312f080e7Smrj */ 495412f080e7Smrj if (*lenp == NULL) { 495512f080e7Smrj dma->dp_current_cookie = 0; 495612f080e7Smrj hp->dmai_cookie = dma->dp_cookies; 495712f080e7Smrj *objpp = (caddr_t)handle; 495812f080e7Smrj return (DDI_SUCCESS); 495912f080e7Smrj 496012f080e7Smrj /* if we have more cookies, go to the next cookie */ 496112f080e7Smrj } else { 496212f080e7Smrj if ((dma->dp_current_cookie + 1) >= 496312f080e7Smrj dma->dp_sglinfo.si_sgl_size) { 496412f080e7Smrj return (DDI_DMA_DONE); 496512f080e7Smrj } 496612f080e7Smrj dma->dp_current_cookie++; 496712f080e7Smrj hp->dmai_cookie++; 496812f080e7Smrj return (DDI_SUCCESS); 496912f080e7Smrj } 497012f080e7Smrj } 497112f080e7Smrj 497212f080e7Smrj /* We have one or more windows */ 497312f080e7Smrj window = &dma->dp_window[dma->dp_current_win]; 497412f080e7Smrj 497512f080e7Smrj /* 497612f080e7Smrj * if seg == NULL, return the first cookie in the current 497712f080e7Smrj * window 497812f080e7Smrj */ 497912f080e7Smrj if (*lenp == NULL) { 498012f080e7Smrj dma->dp_current_cookie = 0; 4981cf4e9a1dSmrj hp->dmai_cookie = window->wd_first_cookie; 498212f080e7Smrj 498312f080e7Smrj /* 498412f080e7Smrj * go to the next cookie in the window then see if we done with 498512f080e7Smrj * this window. 498612f080e7Smrj */ 498712f080e7Smrj } else { 498812f080e7Smrj if ((dma->dp_current_cookie + 1) >= 498912f080e7Smrj window->wd_cookie_cnt) { 499012f080e7Smrj return (DDI_DMA_DONE); 499112f080e7Smrj } 499212f080e7Smrj dma->dp_current_cookie++; 499312f080e7Smrj hp->dmai_cookie++; 499412f080e7Smrj } 499512f080e7Smrj *objpp = (caddr_t)handle; 499612f080e7Smrj return (DDI_SUCCESS); 499712f080e7Smrj 499812f080e7Smrj case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */ 499912f080e7Smrj hp = (ddi_dma_impl_t *)handle; 500012f080e7Smrj dma = (rootnex_dma_t *)hp->dmai_private; 500112f080e7Smrj 500212f080e7Smrj if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) { 500312f080e7Smrj return (DDI_DMA_STALE); 500412f080e7Smrj } 500512f080e7Smrj 500612f080e7Smrj /* if win == NULL, return the first window in the bind */ 500712f080e7Smrj if (*offp == NULL) { 500812f080e7Smrj nwin = 0; 500912f080e7Smrj 501012f080e7Smrj /* 501112f080e7Smrj * else, go to the next window then see if we're done with all 501212f080e7Smrj * the windows. 501312f080e7Smrj */ 501412f080e7Smrj } else { 501512f080e7Smrj nwin = dma->dp_current_win + 1; 501612f080e7Smrj if (nwin >= hp->dmai_nwin) { 501712f080e7Smrj return (DDI_DMA_DONE); 501812f080e7Smrj } 501912f080e7Smrj } 502012f080e7Smrj 502112f080e7Smrj /* switch to the next window */ 502212f080e7Smrj e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len, 502312f080e7Smrj &lcookie, &ccnt); 502412f080e7Smrj ASSERT(e == DDI_SUCCESS); 502512f080e7Smrj if (e != DDI_SUCCESS) { 502612f080e7Smrj return (DDI_DMA_STALE); 502712f080e7Smrj } 502812f080e7Smrj 502912f080e7Smrj /* reset the cookie back to the first cookie in the window */ 503012f080e7Smrj if (dma->dp_window != NULL) { 503112f080e7Smrj window = &dma->dp_window[dma->dp_current_win]; 503212f080e7Smrj hp->dmai_cookie = window->wd_first_cookie; 503312f080e7Smrj } else { 503412f080e7Smrj hp->dmai_cookie = dma->dp_cookies; 503512f080e7Smrj } 503612f080e7Smrj 503712f080e7Smrj *objpp = (caddr_t)handle; 503812f080e7Smrj return (DDI_SUCCESS); 503912f080e7Smrj 504012f080e7Smrj case DDI_DMA_FREE: /* ddi_dma_free() */ 504112f080e7Smrj (void) rootnex_dma_unbindhdl(dip, rdip, handle); 504212f080e7Smrj (void) rootnex_dma_freehdl(dip, rdip, handle); 504312f080e7Smrj if (rootnex_state->r_dvma_call_list_id) { 504412f080e7Smrj ddi_run_callback(&rootnex_state->r_dvma_call_list_id); 504512f080e7Smrj } 504612f080e7Smrj return (DDI_SUCCESS); 504712f080e7Smrj 504812f080e7Smrj case DDI_DMA_IOPB_ALLOC: /* get contiguous DMA-able memory */ 504912f080e7Smrj case DDI_DMA_SMEM_ALLOC: /* get contiguous DMA-able memory */ 505012f080e7Smrj /* should never get here, handled in genunix */ 505112f080e7Smrj ASSERT(0); 505212f080e7Smrj return (DDI_FAILURE); 505312f080e7Smrj 505412f080e7Smrj case DDI_DMA_KVADDR: 505512f080e7Smrj case DDI_DMA_GETERR: 505612f080e7Smrj case DDI_DMA_COFF: 505712f080e7Smrj return (DDI_FAILURE); 505812f080e7Smrj } 505912f080e7Smrj 506012f080e7Smrj return (DDI_FAILURE); 506112f080e7Smrj #endif /* defined(__amd64) */ 50627c478bd9Sstevel@tonic-gate } 50637aec1d6eScindi 506420906b23SVikram Hegde /* 506500d0963fSdilpreet * ********* 506600d0963fSdilpreet * FMA Code 506700d0963fSdilpreet * ********* 506800d0963fSdilpreet */ 506900d0963fSdilpreet 507000d0963fSdilpreet /* 507100d0963fSdilpreet * rootnex_fm_init() 507200d0963fSdilpreet * FMA init busop 507300d0963fSdilpreet */ 50747aec1d6eScindi /* ARGSUSED */ 50757aec1d6eScindi static int 507600d0963fSdilpreet rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap, 507700d0963fSdilpreet ddi_iblock_cookie_t *ibc) 50787aec1d6eScindi { 507900d0963fSdilpreet *ibc = rootnex_state->r_err_ibc; 508000d0963fSdilpreet 508100d0963fSdilpreet return (ddi_system_fmcap); 508200d0963fSdilpreet } 508300d0963fSdilpreet 508400d0963fSdilpreet /* 508500d0963fSdilpreet * rootnex_dma_check() 508600d0963fSdilpreet * Function called after a dma fault occurred to find out whether the 508700d0963fSdilpreet * fault address is associated with a driver that is able to handle faults 508800d0963fSdilpreet * and recover from faults. 508900d0963fSdilpreet */ 509000d0963fSdilpreet /* ARGSUSED */ 509100d0963fSdilpreet static int 509200d0963fSdilpreet rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr, 509300d0963fSdilpreet const void *not_used) 509400d0963fSdilpreet { 509500d0963fSdilpreet rootnex_window_t *window; 509600d0963fSdilpreet uint64_t start_addr; 509700d0963fSdilpreet uint64_t fault_addr; 509800d0963fSdilpreet ddi_dma_impl_t *hp; 509900d0963fSdilpreet rootnex_dma_t *dma; 510000d0963fSdilpreet uint64_t end_addr; 510100d0963fSdilpreet size_t csize; 510200d0963fSdilpreet int i; 510300d0963fSdilpreet int j; 510400d0963fSdilpreet 510500d0963fSdilpreet 510600d0963fSdilpreet /* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */ 510700d0963fSdilpreet hp = (ddi_dma_impl_t *)handle; 510800d0963fSdilpreet ASSERT(hp); 510900d0963fSdilpreet 511000d0963fSdilpreet dma = (rootnex_dma_t *)hp->dmai_private; 511100d0963fSdilpreet 511200d0963fSdilpreet /* Get the address that we need to search for */ 511300d0963fSdilpreet fault_addr = *(uint64_t *)addr; 511400d0963fSdilpreet 511500d0963fSdilpreet /* 511600d0963fSdilpreet * if we don't have any windows, we can just walk through all the 511700d0963fSdilpreet * cookies. 511800d0963fSdilpreet */ 511900d0963fSdilpreet if (dma->dp_window == NULL) { 512000d0963fSdilpreet /* for each cookie */ 512100d0963fSdilpreet for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) { 512200d0963fSdilpreet /* 512300d0963fSdilpreet * if the faulted address is within the physical address 512400d0963fSdilpreet * range of the cookie, return DDI_FM_NONFATAL. 512500d0963fSdilpreet */ 512600d0963fSdilpreet if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) && 512700d0963fSdilpreet (fault_addr <= (dma->dp_cookies[i].dmac_laddress + 512800d0963fSdilpreet dma->dp_cookies[i].dmac_size))) { 512900d0963fSdilpreet return (DDI_FM_NONFATAL); 513000d0963fSdilpreet } 513100d0963fSdilpreet } 513200d0963fSdilpreet 513300d0963fSdilpreet /* fault_addr not within this DMA handle */ 513400d0963fSdilpreet return (DDI_FM_UNKNOWN); 513500d0963fSdilpreet } 513600d0963fSdilpreet 513700d0963fSdilpreet /* we have mutiple windows, walk through each window */ 513800d0963fSdilpreet for (i = 0; i < hp->dmai_nwin; i++) { 513900d0963fSdilpreet window = &dma->dp_window[i]; 514000d0963fSdilpreet 514100d0963fSdilpreet /* Go through all the cookies in the window */ 514200d0963fSdilpreet for (j = 0; j < window->wd_cookie_cnt; j++) { 514300d0963fSdilpreet 514400d0963fSdilpreet start_addr = window->wd_first_cookie[j].dmac_laddress; 514500d0963fSdilpreet csize = window->wd_first_cookie[j].dmac_size; 514600d0963fSdilpreet 514700d0963fSdilpreet /* 514800d0963fSdilpreet * if we are trimming the first cookie in the window, 514900d0963fSdilpreet * and this is the first cookie, adjust the start 515000d0963fSdilpreet * address and size of the cookie to account for the 515100d0963fSdilpreet * trim. 515200d0963fSdilpreet */ 515300d0963fSdilpreet if (window->wd_trim.tr_trim_first && (j == 0)) { 515400d0963fSdilpreet start_addr = window->wd_trim.tr_first_paddr; 515500d0963fSdilpreet csize = window->wd_trim.tr_first_size; 515600d0963fSdilpreet } 515700d0963fSdilpreet 515800d0963fSdilpreet /* 515900d0963fSdilpreet * if we are trimming the last cookie in the window, 516000d0963fSdilpreet * and this is the last cookie, adjust the start 516100d0963fSdilpreet * address and size of the cookie to account for the 516200d0963fSdilpreet * trim. 516300d0963fSdilpreet */ 516400d0963fSdilpreet if (window->wd_trim.tr_trim_last && 516500d0963fSdilpreet (j == (window->wd_cookie_cnt - 1))) { 516600d0963fSdilpreet start_addr = window->wd_trim.tr_last_paddr; 516700d0963fSdilpreet csize = window->wd_trim.tr_last_size; 516800d0963fSdilpreet } 516900d0963fSdilpreet 517000d0963fSdilpreet end_addr = start_addr + csize; 517100d0963fSdilpreet 517200d0963fSdilpreet /* 51733a634bfcSVikram Hegde * if the faulted address is within the physical 51743a634bfcSVikram Hegde * address of the cookie, return DDI_FM_NONFATAL. 517500d0963fSdilpreet */ 517600d0963fSdilpreet if ((fault_addr >= start_addr) && 517700d0963fSdilpreet (fault_addr <= end_addr)) { 517800d0963fSdilpreet return (DDI_FM_NONFATAL); 517900d0963fSdilpreet } 518000d0963fSdilpreet } 518100d0963fSdilpreet } 518200d0963fSdilpreet 518300d0963fSdilpreet /* fault_addr not within this DMA handle */ 518400d0963fSdilpreet return (DDI_FM_UNKNOWN); 51857aec1d6eScindi } 51863a634bfcSVikram Hegde 51873a634bfcSVikram Hegde /*ARGSUSED*/ 51883a634bfcSVikram Hegde static int 51893a634bfcSVikram Hegde rootnex_quiesce(dev_info_t *dip) 51903a634bfcSVikram Hegde { 51913a634bfcSVikram Hegde #if defined(__amd64) && !defined(__xpv) 51923a634bfcSVikram Hegde return (immu_quiesce()); 51933a634bfcSVikram Hegde #else 51943a634bfcSVikram Hegde return (DDI_SUCCESS); 51953a634bfcSVikram Hegde #endif 51963a634bfcSVikram Hegde } 51973a634bfcSVikram Hegde 51983a634bfcSVikram Hegde #if defined(__xpv) 51993a634bfcSVikram Hegde void 52003a634bfcSVikram Hegde immu_init(void) 52013a634bfcSVikram Hegde { 52023a634bfcSVikram Hegde ; 52033a634bfcSVikram Hegde } 52043a634bfcSVikram Hegde 52053a634bfcSVikram Hegde void 52063a634bfcSVikram Hegde immu_startup(void) 52073a634bfcSVikram Hegde { 52083a634bfcSVikram Hegde ; 52093a634bfcSVikram Hegde } 52103a634bfcSVikram Hegde /*ARGSUSED*/ 52113a634bfcSVikram Hegde void 52123a634bfcSVikram Hegde immu_physmem_update(uint64_t addr, uint64_t size) 52133a634bfcSVikram Hegde { 52143a634bfcSVikram Hegde ; 52153a634bfcSVikram Hegde } 52163a634bfcSVikram Hegde #endif 5217