| /linux/drivers/memory/ |
| renesas-rpc-if.c | Renesas RPC-IF core driver. Matches cover the <memory/renesas-rpc-if.h> and "renesas-rpc-if-regs.h" includes, the hw_init(), prepare(), manual_xfer() and dirmap_read() callbacks that take a struct rpcif_priv, and rpcif_reg_read(), which switches on rpc->xfer_size and reads rpc->base + reg with readb() for byte-wide accesses (modelled in the sketch below). |
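The rpcif_reg_read() match above is the regmap-style accessor pattern: pick the MMIO access width from the current transfer size. Below is a stand-alone C model of that dispatch, with an in-memory register file standing in for the ioremapped window; only the byte-wide readb() case appears in the excerpt, so the 16/32-bit branches are assumptions.

```c
/*
 * Stand-alone model of the access pattern visible in rpcif_reg_read():
 * a regmap-style read callback that dispatches on the current transfer
 * size. Only the 1-byte readb() case appears in the excerpt; the 16/32-bit
 * branches and the in-memory "register file" are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

struct rpcif_model {
	volatile uint8_t *base;	/* stands in for the ioremapped register window */
	unsigned int xfer_size;	/* current data-register access width in bytes */
};

static int model_reg_read(void *context, unsigned int reg, unsigned int *val)
{
	struct rpcif_model *rpc = context;

	switch (rpc->xfer_size) {
	case 1:
		*val = *(volatile uint8_t *)(rpc->base + reg);
		break;
	case 2:
		*val = *(volatile uint16_t *)(rpc->base + reg);
		break;
	case 4:
		*val = *(volatile uint32_t *)(rpc->base + reg);
		break;
	default:
		return -1;	/* unsupported access width */
	}
	return 0;
}

int main(void)
{
	static uint8_t regs[256] = { [0x04] = 0x5a };
	struct rpcif_model rpc = { .base = regs, .xfer_size = 1 };
	unsigned int val;

	if (model_reg_read(&rpc, 0x04, &val) == 0)
		printf("reg 0x04 = 0x%02x\n", val);
	return 0;
}
```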
|
| /linux/arch/mips/pci/ |
| pci-rt3883.c | RT3883 PCI host controller. rt3883_pci_r32()/rt3883_pci_w32() wrap ioread32()/iowrite32() on rpc->base + reg, and rt3883_pci_read_cfg32()/rt3883_pci_write_cfg32() reach configuration space indirectly by writing the encoded address to RT3883_PCI_REG_CFGADDR and then accessing RT3883_PCI_REG_CFGDATA (two-step protocol sketched below). |
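The config-space helpers above use the classic address/data indirection. The following sketch models that two-step protocol in plain C; the register offsets, the address encoding and the fake register file are illustrative assumptions, not the RT3883 register map.

```c
/*
 * Model of the CFGADDR/CFGDATA indirection used by pci-rt3883.c: write an
 * encoded address to the CFGADDR register, then read or write CFGDATA.
 * The register offsets, the address encoding and the fake register file
 * below are assumptions for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define REG_CFGADDR	0x20	/* assumed offsets, not the real RT3883 map */
#define REG_CFGDATA	0x24

struct pci_ctrl_model {
	uint32_t regs[64];	/* stands in for the ioremapped window */
};

static uint32_t pci_r32(struct pci_ctrl_model *rpc, unsigned int reg)
{
	return rpc->regs[reg / 4];
}

static void pci_w32(struct pci_ctrl_model *rpc, uint32_t val, unsigned int reg)
{
	rpc->regs[reg / 4] = val;
}

/* Assumed encoding: enable bit | bus | devfn | dword-aligned offset. */
static uint32_t cfg_addr(unsigned int bus, unsigned int devfn, unsigned int where)
{
	return 0x80000000u | (bus << 16) | (devfn << 8) | (where & ~3u);
}

static uint32_t read_cfg32(struct pci_ctrl_model *rpc, unsigned int bus,
			   unsigned int devfn, unsigned int where)
{
	pci_w32(rpc, cfg_addr(bus, devfn, where), REG_CFGADDR);
	return pci_r32(rpc, REG_CFGDATA);
}

int main(void)
{
	struct pci_ctrl_model pc = { .regs = { [REG_CFGDATA / 4] = 0x12345678 } };

	printf("cfg[bus 0, dev 0.0, reg 0x00] = 0x%08x\n", read_cfg32(&pc, 0, 0, 0));
	return 0;
}
```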
|
| /linux/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/rm/r535/ |
| rpc.c | GSP RPC message handling (includes <rm/rpc.h>). The header comment explains that a command is submitted via the GSP command queue and GSP writes the status of the submitted RPC to the status queue, and that each message consists of an RPC message header (struct nvfw_gsp_rpc, holding e.g. the RPC function number) followed by the payload carrying the params of that RPC function; some RPC functions also have their own headers. It defines the terms gsp_msg (element header + GSP RPC header + payload) and gsp_rpc (RPC header + payload); a layout sketch follows below. |
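A rough picture of the message nesting that the comment describes, with placeholder structures; the real element header and struct nvfw_gsp_rpc layouts are not shown in the excerpt, so the field names and sizes here are assumptions.

```c
/*
 * Illustrative layout of the nesting the comment block describes: a queue
 * element wraps a GSP RPC header (struct nvfw_gsp_rpc in the driver), which
 * wraps the per-function payload. The field names and sizes below are
 * placeholders, not the real structure definitions.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct demo_element_hdr {	/* command/status queue element header */
	uint32_t checksum;
	uint32_t elem_count;	/* queue slots occupied by this message */
};

struct demo_gsp_rpc_hdr {	/* stands in for struct nvfw_gsp_rpc */
	uint32_t function;	/* RPC function number */
	uint32_t length;	/* RPC header + payload, in bytes */
};

/* gsp_msg = element header + RPC header + payload; gsp_rpc = RPC header + payload. */
static void *demo_rpc_payload(struct demo_gsp_rpc_hdr *rpc)
{
	return (char *)rpc + sizeof(*rpc);
}

int main(void)
{
	uint32_t buf[16] = { 0 };
	unsigned char *msg = (unsigned char *)buf;
	struct demo_gsp_rpc_hdr *rpc =
		(struct demo_gsp_rpc_hdr *)(msg + sizeof(struct demo_element_hdr));

	rpc->function = 42;	/* made-up function number */
	rpc->length = sizeof(*rpc) + 16;
	printf("payload offset within gsp_msg: %zu\n",
	       (size_t)((char *)demo_rpc_payload(rpc) - (char *)msg));
	return 0;
}
```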
|
| gsp.c | Also includes <rm/rpc.h>. r535_gsp_get_static_info() issues NV_VGPU_MSG_FUNCTION_GET_GSP_STATIC_INFO through nvkm_gsp_rpc_rd(), returns PTR_ERR() if the reply is an error pointer, and otherwise copies hInternalClient, hInternalDevice, hInternalSubdevice, bar1PdeBase and bar2PdeBase out of the reply into the gsp state (call pattern modelled below). |
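The call pattern in r535_gsp_get_static_info() — request a reply buffer, bail out on an error pointer, copy out the fields, release the buffer — is modelled below in plain C, with minimal stand-ins for the kernel's IS_ERR()/PTR_ERR() helpers and a stub RPC layer; the reply structure and the stub are assumptions.

```c
/*
 * Plain C model of the call pattern in r535_gsp_get_static_info():
 * request a reply buffer, bail out on an encoded error pointer, copy the
 * fields you need, then release the buffer. The stub rpc layer and the
 * reply struct are assumptions; only the IS_ERR/PTR_ERR flow mirrors the
 * excerpt.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal stand-ins for the kernel's error-pointer helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long err) { return (void *)(intptr_t)err; }
static inline long PTR_ERR(const void *p) { return (long)(intptr_t)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct demo_static_info { uint32_t hInternalClient, hInternalDevice; };

static struct demo_static_info *demo_rpc_rd(int fail)
{
	if (fail)
		return ERR_PTR(-EIO);

	struct demo_static_info *rpc = calloc(1, sizeof(*rpc));

	if (!rpc)
		return ERR_PTR(-ENOMEM);
	rpc->hInternalClient = 0xc1de;	/* pretend reply contents */
	return rpc;
}

static void demo_rpc_done(void *rpc) { free(rpc); }

int main(void)
{
	struct demo_static_info *rpc = demo_rpc_rd(0);

	if (IS_ERR(rpc))
		return (int)-PTR_ERR(rpc);

	printf("internal client handle: 0x%x\n", rpc->hInternalClient);
	demo_rpc_done(rpc);	/* reply buffers must be released when done */
	return 0;
}
```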
|
| /linux/Documentation/devicetree/bindings/memory-controllers/ |
| renesas,rpc-if.yaml | Devicetree binding for the Renesas Reduced Pin Count Interface (RPC-IF), which lets a SPI flash or HyperFlash connected to the SoC be accessed; the flash chip itself should be represented by a subnode of the RPC-IF node. Listed compatibles include renesas,r8a774a1-rpc-if (RZ/G2M), renesas,r8a774b1-rpc-if (RZ/G2N), renesas,r8a774c0-rpc-if (RZ/G2E), renesas,r8a774e1-rpc-if (RZ/G2H), renesas,r8a7795-rpc-if (R-Car H3) and renesas,r8a7796-rpc-if (R-Car M3-W). |
|
| /linux/drivers/mtd/hyperbus/ |
| rpc-if.c | Linux driver for RPC-IF HyperFlash, which embeds a struct rpcif rpc member. rpcif_hb_prepare_read() and rpcif_hb_prepare_write() build an op and hand it to rpcif_prepare(rpc->dev, &op, NULL, NULL); rpcif_hb_read16() and rpcif_hb_write16() then run the prepared 2-byte transfer with rpcif_manual_xfer(hyperbus->rpc.dev) (two-step flow modelled below). |
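The HyperFlash accessors split every transfer into a prepare step (build an op descriptor) and an execute step (run the manual transfer). A self-contained model of that split, with an in-memory array standing in for the flash and assumed op fields:

```c
/*
 * Plain C model of the two-step flow in the HyperFlash driver: first an op
 * descriptor is prepared (rpcif_hb_prepare_read() + rpcif_prepare() in the
 * excerpt), then the transfer is actually run (rpcif_manual_xfer()). The op
 * fields and the fake flash backing store are assumptions.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_op {		/* stands in for struct rpcif_op */
	uint32_t addr;
	void *buf;
	size_t len;
	int is_read;
};

struct demo_rpcif {
	struct demo_op op;	/* op latched by the "prepare" step */
	uint8_t flash[256];	/* pretend external HyperFlash */
};

static void demo_prepare_read(struct demo_rpcif *rpc, void *to,
			      uint32_t from, size_t len)
{
	rpc->op = (struct demo_op){ .addr = from, .buf = to,
				    .len = len, .is_read = 1 };
}

static void demo_manual_xfer(struct demo_rpcif *rpc)
{
	if (rpc->op.is_read)
		memcpy(rpc->op.buf, rpc->flash + rpc->op.addr, rpc->op.len);
	else
		memcpy(rpc->flash + rpc->op.addr, rpc->op.buf, rpc->op.len);
}

int main(void)
{
	struct demo_rpcif rpc = { .flash = { [0x10] = 0x34, [0x11] = 0x12 } };
	uint16_t data;

	demo_prepare_read(&rpc, &data, 0x10, sizeof(data));	/* like read16 */
	demo_manual_xfer(&rpc);
	printf("read16 @0x10 = 0x%04x (host-endian)\n", data);
	return 0;
}
```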
|
| /linux/drivers/clk/renesas/ |
| rcar-cpg-lib.c | One notifier covers both the RPC and RPCD2 clocks, as they are both derived from the same RPCCKCR register. cpg_rpc_clk_register() allocates a struct rpc_clock with kzalloc() and configures a 3-bit divider (rpc->div.reg = rpcckcr, width 3, table cpg_rpc_div_table, serialized by cpg_lock) plus a gate at bit 8 of the same register (rpc->gate.reg = rpcckcr, bit_idx 8); see the sketch below. |
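A small model of the composite clock described above: one RPCCKCR value yields both the divider and the gate state. The divider table contents and the active-low interpretation of the gate bit are assumptions; only the 3-bit field width and the bit-8 gate position come from the excerpt.

```c
/*
 * Model of the composite RPC clock set up by cpg_rpc_clk_register(): one
 * register (RPCCKCR) supplies both a 3-bit divider field and a gate bit at
 * bit 8. The divider table values and the gate polarity are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define RPC_GATE_BIT	8	/* rpc->gate.bit_idx = 8 in the excerpt */
#define RPC_DIV_MASK	0x7	/* rpc->div.width = 3 */

/* Assumed stand-in for cpg_rpc_div_table: field value -> divider. */
static const unsigned int demo_rpc_div_table[8] = { 2, 4, 6, 8, 1, 1, 1, 1 };

static unsigned long demo_rpc_rate(uint32_t rpcckcr, unsigned long parent_hz)
{
	return parent_hz / demo_rpc_div_table[rpcckcr & RPC_DIV_MASK];
}

static int demo_rpc_enabled(uint32_t rpcckcr)
{
	/* Many CPG gates are active-low "stop" bits; bit 8 is treated that way here. */
	return !(rpcckcr & (1u << RPC_GATE_BIT));
}

int main(void)
{
	uint32_t rpcckcr = 0x1;	/* divider field = 1, gate bit clear */

	printf("RPC clk: %lu Hz, %s\n",
	       demo_rpc_rate(rpcckcr, 320000000UL),
	       demo_rpc_enabled(rpcckcr) ? "enabled" : "gated off");
	return 0;
}
```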
|
| /linux/drivers/greybus/ |
| es2.c | Greybus ES2 USB bridge; @arpc_endpoint_in is the bulk-in endpoint for APBridgeA RPC. arpc_alloc() rejects requests where size + sizeof(*rpc->req) would exceed ARPC_OUT_SIZE_MAX, then kzalloc()s the struct arpc, its request buffer (header plus size bytes of payload) and its response buffer, initialising rpc->list along the way. |
|
| arpc.h | APBridgeA RPC (ARPC) wire format: the request header carries a little-endian 16-bit unique id and an 8-bit RPC type, and the response header carries the matching id plus an 8-bit result of the RPC (struct sketch below). |
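A sketch of those headers as packed C structures; the id, type and result fields come from the excerpt, while the size field, the flexible payload and the matching helper are assumptions added for illustration.

```c
/*
 * Sketch of the ARPC wire headers based on the matched fields: the request
 * carries a little-endian 16-bit unique id and an 8-bit type, the response
 * echoes the id and adds an 8-bit result. The size field, the flexible
 * payload and the helper below go beyond the excerpt and are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_arpc_request {
	uint16_t id;		/* RPC unique id (little-endian on the wire) */
	uint16_t size;		/* assumed: total request size */
	uint8_t type;		/* RPC type */
	uint8_t payload[];	/* type-specific parameters */
} __attribute__((packed));

struct demo_arpc_response {
	uint16_t id;		/* matches the request id */
	uint8_t result;		/* result of the RPC */
} __attribute__((packed));

/* Responses are matched to outstanding requests purely by id. */
static int demo_arpc_matches(const struct demo_arpc_request *req,
			     const struct demo_arpc_response *rsp)
{
	return req->id == rsp->id;
}

int main(void)
{
	struct demo_arpc_request req = { .id = 7, .type = 1 };
	struct demo_arpc_response rsp = { .id = 7, .result = 0 };

	printf("response matches request: %d\n", demo_arpc_matches(&req, &rsp));
	return 0;
}
```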
|
| /linux/include/linux/sunrpc/ |
| metrics.h | Declarations for RPC client per-operation metrics, which provide latency and retry information about each type of RPC procedure in a given RPC program. The counters are maintained in a single array per RPC client, indexed by procedure; matched fields include om_ntrans (count of RPC transmissions), byte counts that cover the RPC and ULP headers plus the request payload (an indication of how much load a procedure places on the transport), the length of time an RPC request waits in queue before transmission, om_rtt (RPC round-trip time) and om_execute (RPC execution time). A counter-update sketch follows below. |
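How such per-operation counters accumulate, in a simplified form: one slot per procedure, bumped when a request completes. The structure layout and the helper below are assumptions that only mirror the counter categories named in the excerpt.

```c
/*
 * Sketch of per-operation counters in the spirit of metrics.h: one slot per
 * procedure, updated when a request completes. Only the counter categories
 * (transmissions, byte counts, queue time, RTT, execute time) come from the
 * excerpt; the layout and helper are simplified assumptions.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_op_metrics {
	uint64_t om_ntrans;	/* count of RPC transmissions */
	uint64_t om_bytes_sent;	/* RPC + ULP headers plus request payload */
	uint64_t om_bytes_recv;
	uint64_t om_queue_ns;	/* time spent queued before transmission */
	uint64_t om_rtt_ns;	/* RPC round-trip time */
	uint64_t om_execute_ns;	/* total RPC execution time */
};

static void demo_op_update(struct demo_op_metrics *m, unsigned int ntrans,
			   uint64_t sent, uint64_t recv, uint64_t queue_ns,
			   uint64_t rtt_ns, uint64_t execute_ns)
{
	m->om_ntrans += ntrans;		/* >1 when the request was retransmitted */
	m->om_bytes_sent += sent;
	m->om_bytes_recv += recv;
	m->om_queue_ns += queue_ns;
	m->om_rtt_ns += rtt_ns;
	m->om_execute_ns += execute_ns;
}

int main(void)
{
	/* One array slot per procedure in the RPC program, as metrics.h describes. */
	static struct demo_op_metrics ops[4];

	demo_op_update(&ops[2], 1, 512, 4096, 10000, 250000, 300000);
	printf("proc 2: %llu transmissions, avg rtt %llu ns\n",
	       (unsigned long long)ops[2].om_ntrans,
	       (unsigned long long)(ops[2].om_rtt_ns / ops[2].om_ntrans));
	return 0;
}
```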
|
| svc.h | RPC server declarations: the RPC service thread pool (normally a single one of these per RPC service, with separate handling on NUMA machines) and the structure describing an RPC service, i.e. a "daemon", possibly multithreaded, which receives and processes incoming RPC messages. Matches also touch the note that more than one RPC program per daemon is not currently supported, the sv_programs (RPC programs) and sv_stats (RPC statistics) fields, and the maximum payload size supported by a kernel RPC server. |
|
| sched.h | Scheduling primitives for kernel Sun RPC: the structure holding the actual RPC procedure call info and the RPC task struct, with its tk_waitqueue (the RPC wait queue the task is on), tk_wait, RPC call state (tk_msg), tk_client, tk_rqstp and tk_start (RPC task init timestamp) members. |
|
| /linux/net/sunrpc/ |
| stats.c | procfs-based user access to generic RPC statistics; the stats files reside in /proc/net/rpc, and an RPC service whose own stats routine appends the generic RPC stats must not exceed PAGE_SIZE. rpc_proc_show() emits the client counters as a "rpc %u %u %u\n" line and svc_seq_show() emits the server counters as "rpc %u %u %u %u %u\n"; other matches register and unregister the RPC proc files and document a @clnt parameter carrying the RPC program, version and xprt (a small reader for the client line is sketched below). |
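A minimal user-space reader for the client-side "rpc %u %u %u" line; the default path assumes the NFS client's stats file under /proc/net/rpc, and the counter names follow the usual nfsstat labels, so treat both as assumptions.

```c
/*
 * Minimal user-space reader for the client "rpc %u %u %u" line that
 * rpc_proc_show() emits. The default path below assumes the NFS client's
 * stats file under /proc/net/rpc; pass a different stats file as argv[1].
 * The calls/retrans/authrefresh labels follow the usual nfsstat naming.
 */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/proc/net/rpc/nfs";
	unsigned int calls, retrans, authrefresh;
	char line[256];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "rpc %u %u %u", &calls, &retrans, &authrefresh) == 3) {
			printf("calls=%u retrans=%u authrefresh=%u\n",
			       calls, retrans, authrefresh);
			break;
		}
	}
	fclose(f);
	return 0;
}
```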
|
| clnt.c | The high-level RPC client interface, including RPC header generation and argument serialization. Matched diagnostics cover pipefs setup ("RPC: pipefs directory doesn't exist", "RPC: Couldn't create pipefs entry") and auth handle creation failures ("RPC: Couldn't create auth handle (flavor %u)"). rpc_create() creates and initializes an RPC transport and an RPC client with one call; by default, a kernel RPC client connects from a reserved port. A further match documents the function that clones the RPC client. |
| Kconfig | SunRPC configuration options: a tristate "Secure RPC: Kerberos V mechanism" (choose Y to enable Secure RPC using the Kerberos version 5 mechanism; Secure RPC calls with Kerberos require an auxiliary user-space daemon), "RPC: Enable dprintk debugging" (logging of different aspects of kernel RPC activity), "RPC: Send dfprintk() output to the trace buffer", and the "RPC-over-RDMA transport" tristate. |
|
| xprt.c | A generic RPC call interface supporting congestion avoidance. The header comment walks the transmit path: the caller puts together the RPC message, and when notified by RPC that a reply arrived it should release the RPC slot and process the reply; async RPC support is built on a set of RPC-specific scheduling primitives. Transports make themselves known to the RPC client with xprt_register_transport() ("RPC: Registered %s transport module.") and the matching unregister call, and xprt_class_find_by_netid() falls back to request_module("rpc%s", netid) when no transport class matches (lookup modelled below). |
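The netid lookup reads naturally as "scan the registry, try to load a module named after the netid, scan again". The sketch below models that fallback; the registry contents and the stubbed module loader are assumptions.

```c
/*
 * Model of the netid lookup described in xprt.c: scan the registered
 * transport classes, and if nothing matches, try to load a module named
 * after the netid ("rpc%s") and scan again. Registry contents and the
 * stubbed module loader are assumptions.
 */
#include <stdio.h>
#include <string.h>

struct demo_xprt_class {
	const char *netid;
	const char *name;
};

static struct demo_xprt_class demo_registry[8] = {
	{ "tcp", "xprtsock-tcp" },
	{ "udp", "xprtsock-udp" },
};

static void demo_request_module(const char *netid)
{
	/* Stub for request_module("rpc%s", netid): pretend rdma got loaded. */
	if (strcmp(netid, "rdma") == 0)
		demo_registry[2] = (struct demo_xprt_class){ "rdma", "xprtrdma" };
}

static const struct demo_xprt_class *demo_find_by_netid(const char *netid)
{
	for (int pass = 0; pass < 2; pass++) {
		for (int i = 0; i < 8; i++)
			if (demo_registry[i].netid &&
			    strcmp(demo_registry[i].netid, netid) == 0)
				return &demo_registry[i];
		if (pass == 0)
			demo_request_module(netid);	/* second chance */
	}
	return NULL;
}

int main(void)
{
	const struct demo_xprt_class *xc = demo_find_by_netid("rdma");

	printf("rdma -> %s\n", xc ? xc->name : "not found");
	return 0;
}
```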
|
| backchannel_rqst.c | Backchannel request management; the matches are dprintk() traces in xprt_free_allocation(), xprt_setup_bc() (setting up the backchannel transport and adding preallocated requests), xprt_destroy_bc() (destroying the backchannel transport and reporting whether the backchannel list is empty) and xprt_get_bc_request() (allocating a backchannel request). |
|
| timer.c | Estimates the RPC request round-trip time. This RTT estimator is used only for RPC over datagram protocols: rpc_init_rtt() initializes an RPC RTT estimator context, rpc_update_rtt() updates it, and the RTO for an NFS RPC sent via an unreliable datagram is estimated from the mean and mean deviation of RTT for the appropriate type of RPC (see the sketch below). |
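A simplified version of such an estimator: keep a smoothed mean and mean deviation of the measured RTT and derive a retransmit timeout from them. The 1/8 and 1/4 gains and the "mean + 4 × deviation" rule are the classic Van Jacobson/Karels choices, used here as an assumption; the kernel's fixed-point arithmetic differs in detail.

```c
/*
 * Simplified RTT/RTO estimator in the spirit of timer.c: keep a smoothed
 * mean and mean deviation of the measured RTT per RPC type and derive a
 * retransmit timeout from them. The gains and the "mean + 4 * deviation"
 * rule are the classic Van Jacobson/Karels values, assumed here.
 */
#include <stdio.h>

struct demo_rtt {
	double srtt;	/* smoothed round-trip time, in ms */
	double sdev;	/* smoothed mean deviation, in ms */
};

static void demo_rtt_update(struct demo_rtt *rt, double sample_ms)
{
	double err = sample_ms - rt->srtt;

	rt->srtt += err / 8.0;
	rt->sdev += ((err < 0 ? -err : err) - rt->sdev) / 4.0;
}

static double demo_rtt_estimate_rto(const struct demo_rtt *rt)
{
	return rt->srtt + 4.0 * rt->sdev;
}

int main(void)
{
	struct demo_rtt rt = { .srtt = 100.0, .sdev = 10.0 };
	const double samples[] = { 95.0, 110.0, 240.0, 105.0 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		demo_rtt_update(&rt, samples[i]);
		printf("sample %.0f ms -> srtt %.1f ms, rto %.1f ms\n",
		       samples[i], rt.srtt, demo_rtt_estimate_rto(&rt));
	}
	return 0;
}
```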
|
| auth.c | Generic RPC client authentication API. The auth_hashtable_size ("RPC credential cache hashtable size") and auth_max_cred_cachesize ("RPC credential maximum total cache size") module parameters tune the credential cache; rpcauth_get_authops() loads missing flavours with request_module("rpc-auth-%u", flavor); other matches cover initializing, clearing and destroying the RPC credential cache, and rpcauth_marshcred(), which appends the RPC credential to the end of an xdr_stream containing the initial portion of the RPC Call header. |
|
| /linux/tools/net/sunrpc/xdrgen/generators/ |
| program.py | xdrgen generator that emits source code for an RPC program's procedures: procedure numbers, declarations, server argument decoders, client result decoders, client argument encoders and server result encoders for each RPC version's procedures. |
|
| /linux/Documentation/filesystems/nfs/ |
| rpc-server-gss.rst | rpcsec_gss support for kernel RPC servers: how RPCGSS authentication is implemented in kernel RPC servers such as the NFS server. The legacy path talks to a custom daemon called rpc.svcgssd, while the newer NFS server upcall mechanism uses RPC over a unix socket to a daemon speaking the gss_proxy RPC protocol (documented externally); this upcall mechanism uses the kernel RPC client and connects to gssproxy. The kernel is told that gss-proxy is in use through /proc/net/rpc/use-gss-proxy (if gss-proxy dies, it must repeat the handshake), and the state can be verified by reading that file and checking its contents (a minimal proc write is sketched below). |
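On the daemon side the handshake amounts to announcing itself through the proc file. The fragment below writes to /proc/net/rpc/use-gss-proxy; the exact value written ("1") is an assumption drawn from common gssproxy behaviour rather than from the excerpt, and the file only exists on kernels with SUNRPC GSS support built in (run as root).

```c
/*
 * Sketch of the gss-proxy side of the handshake the document describes:
 * announce that gss-proxy is available via /proc/net/rpc/use-gss-proxy.
 * Writing the string "1" is an assumption based on usual gssproxy
 * behaviour; the kernel side then reads this file to pick the mechanism.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/net/rpc/use-gss-proxy", "w");

	if (!f) {
		perror("/proc/net/rpc/use-gss-proxy");
		return 1;
	}
	if (fputs("1\n", f) == EOF || fflush(f) == EOF) {
		perror("write");
		fclose(f);
		return 1;
	}
	/* The daemon keeps running; if it dies it must redo this handshake. */
	fclose(f);
	return 0;
}
```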
|
| /linux/net/sunrpc/xprtrdma/ |
| transport.c | The top-level implementation of an RPC RDMA transport for the RPC/RDMA transport switch; all other files in the directory are RPC RDMA internal. Matched comments cover holding off RPC tasks while a fresh connection is being established, xprt_rdma_timer() (invoked when an RPC times out while the transport is still connected), and kernel-doc for entry points taking the rpc transport arguments, the controlling RPC transport, the RPC task that timed out or the (unused) RPC scheduler context. |
|
| svc_rdma_sendto.c | Invoked by the RPC server when an RPC Reply is ready to be transmitted to a client: sendto takes the XDR-encoded RPC Reply message, constructs the RPC-over-RDMA header, and posts a Send WR conveying the transport header and the RPC message itself. The other matches are kernel-doc for svc_rdma_encode_read_list() (RPC-over-RDMA version 1 replies never have a Read list) and svc_rdma_encode_write_segment(), both keyed on the Send context for the RPC Reply. |
| rpc_rdma.c | The guts of the RPC RDMA protocol and its interface to the Linux RPC framework. Helpers return the size of the largest RPC-over-RDMA header in a Call message and in a Reply message, and the max_inline fields hold the maximum size of an RPC message that can be sent inline rather than advertised as a chunk for every RPC. A Read chunk is required if sending the RPC call inline would exceed the transport's inline limit, and the reply side behaves similarly when the non-payload part of the RPC Reply is too large; another match prepares an SGE for the RPC-over-RDMA transport header (inline-vs-chunk decision sketched below). |
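The inline-versus-chunk decision boils down to a size comparison: transport header plus RPC call against the connection's inline threshold. A sketch of that check, with placeholder sizes rather than the driver's computed maxima:

```c
/*
 * Sketch of the decision rpc_rdma.c's comments describe: a Call can go
 * inline only if the RPC-over-RDMA header plus the RPC call message fit
 * under the connection's inline send limit; otherwise the payload has to
 * be advertised as a Read chunk for the server to pull. The fixed sizes
 * below are placeholders, not the driver's computed maxima.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_rdma_conn {
	size_t inline_send_max;	/* negotiated inline threshold, in bytes */
	size_t call_hdr_max;	/* largest RPC-over-RDMA Call header */
};

/* Returns true if the whole Call (transport header + RPC message) fits inline. */
static bool demo_call_fits_inline(const struct demo_rdma_conn *c, size_t rpclen)
{
	return c->call_hdr_max + rpclen <= c->inline_send_max;
}

int main(void)
{
	struct demo_rdma_conn conn = {
		.inline_send_max = 4096,	/* placeholder threshold */
		.call_hdr_max = 112,		/* placeholder header size */
	};
	size_t sizes[] = { 512, 3900, 65536 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%6zu-byte call: %s\n", sizes[i],
		       demo_call_fits_inline(&conn, sizes[i]) ?
		       "send inline" : "register a Read chunk");
	return 0;
}
```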
|
| /linux/drivers/gpu/drm/nouveau/include/nvkm/subdev/ |
| gsp.h | Documents that, when sending a GSP RPC command, the GSP RPC messages (the replies to GSP RPC commands) can be handled in several ways according to the requirements of the callers and the nature of the commands, e.g. whether the reply is handed back to the caller after the command is issued. It also holds a linked list of registry items from which the registry RPC is built (plus the size of that registry RPC), and the inline wrappers nvkm_gsp_rpc_get(), nvkm_gsp_rpc_push() and nvkm_gsp_rpc_done(), which dispatch through gsp->rm->api->rpc. |
|