xref: /linux/drivers/misc/vmw_vmci/vmci_resource.c (revision c4ee0af3fa0dc65f690fc908f02b8355f9576ea0)
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/rculist.h>

#include "vmci_resource.h"
#include "vmci_driver.h"


#define VMCI_RESOURCE_HASH_BITS         7
#define VMCI_RESOURCE_HASH_BUCKETS      (1 << VMCI_RESOURCE_HASH_BITS)

struct vmci_hash_table {
	spinlock_t lock;
	struct hlist_head entries[VMCI_RESOURCE_HASH_BUCKETS];
};

static struct vmci_hash_table vmci_resource_table = {
	.lock = __SPIN_LOCK_UNLOCKED(vmci_resource_table.lock),
};

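/*
 * Hash only the resource ID of a handle; the context ID is not part of
 * the hash, so handles that differ only in context land in the same
 * bucket and are told apart in vmci_resource_lookup().
 */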
static unsigned int vmci_resource_hash(struct vmci_handle handle)
{
	return hash_32(handle.resource, VMCI_RESOURCE_HASH_BITS);
}

/*
 * Gets a resource (if one exists) matching the given handle from the
 * hash table. A resource whose stored context is VMCI_INVALID_ID
 * matches any requested context.
 */
static struct vmci_resource *vmci_resource_lookup(struct vmci_handle handle,
						  enum vmci_resource_type type)
{
	struct vmci_resource *r, *resource = NULL;
	unsigned int idx = vmci_resource_hash(handle);

	rcu_read_lock();
	hlist_for_each_entry_rcu(r,
				 &vmci_resource_table.entries[idx], node) {
		u32 cid = r->handle.context;
		u32 rid = r->handle.resource;

		if (r->type == type &&
		    rid == handle.resource &&
		    (cid == handle.context || cid == VMCI_INVALID_ID)) {
			resource = r;
			break;
		}
	}
	rcu_read_unlock();

	return resource;
}

/*
 * Find an unused resource ID and return it. The first
 * VMCI_RESERVED_RESOURCE_ID_MAX IDs are reserved, so the search starts
 * at VMCI_RESERVED_RESOURCE_ID_MAX + 1.
 * Returns a free VMCI resource ID on success, VMCI_INVALID_ID on failure.
 */
static u32 vmci_resource_find_id(u32 context_id,
				 enum vmci_resource_type resource_type)
{
	static u32 resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
	u32 old_rid = resource_id;
	u32 current_rid;

	/*
	 * Generate a unique resource ID.  Keep on trying until we wrap around
	 * in the RID space.
	 */
	do {
		struct vmci_handle handle;

		current_rid = resource_id;
		resource_id++;
		if (unlikely(resource_id == VMCI_INVALID_ID)) {
			/* Skip the reserved rids. */
			resource_id = VMCI_RESERVED_RESOURCE_ID_MAX + 1;
		}

		handle = vmci_make_handle(context_id, current_rid);
		if (!vmci_resource_lookup(handle, resource_type))
			return current_rid;
	} while (resource_id != old_rid);

	return VMCI_INVALID_ID;
}

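/*
 * Add a resource to the hash table under the given handle. If the
 * handle's resource ID is VMCI_INVALID_ID, a free resource ID is
 * allocated; otherwise the fully specified handle must not already be
 * registered for this resource type. Returns VMCI_SUCCESS, or
 * VMCI_ERROR_NO_HANDLE / VMCI_ERROR_ALREADY_EXISTS on failure.
 */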
int vmci_resource_add(struct vmci_resource *resource,
		      enum vmci_resource_type resource_type,
		      struct vmci_handle handle)
{
	unsigned int idx;
	int result;

	spin_lock(&vmci_resource_table.lock);

	if (handle.resource == VMCI_INVALID_ID) {
		handle.resource = vmci_resource_find_id(handle.context,
			resource_type);
		if (handle.resource == VMCI_INVALID_ID) {
			result = VMCI_ERROR_NO_HANDLE;
			goto out;
		}
	} else if (vmci_resource_lookup(handle, resource_type)) {
		result = VMCI_ERROR_ALREADY_EXISTS;
		goto out;
	}

	resource->handle = handle;
	resource->type = resource_type;
	INIT_HLIST_NODE(&resource->node);
	kref_init(&resource->kref);
	init_completion(&resource->done);

	idx = vmci_resource_hash(resource->handle);
	hlist_add_head_rcu(&resource->node, &vmci_resource_table.entries[idx]);

	result = VMCI_SUCCESS;

out:
	spin_unlock(&vmci_resource_table.lock);
	return result;
}

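/*
 * Unlink the resource from the hash table, wait for any RCU readers
 * still traversing the old list to finish, drop the reference taken by
 * kref_init() in vmci_resource_add(), and block until the last
 * reference is gone (signalled via resource->done by
 * vmci_release_resource()).
 */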
void vmci_resource_remove(struct vmci_resource *resource)
{
	struct vmci_handle handle = resource->handle;
	unsigned int idx = vmci_resource_hash(handle);
	struct vmci_resource *r;

	/* Remove resource from hash table. */
	spin_lock(&vmci_resource_table.lock);

	hlist_for_each_entry(r, &vmci_resource_table.entries[idx], node) {
		if (vmci_handle_is_equal(r->handle, resource->handle)) {
			hlist_del_init_rcu(&r->node);
			break;
		}
	}

	spin_unlock(&vmci_resource_table.lock);
	synchronize_rcu();

	vmci_resource_put(resource);
	wait_for_completion(&resource->done);
}

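/*
 * Look up a resource by handle and type and return it with an extra
 * reference held, or NULL if no match is found. The caller must drop
 * the reference with vmci_resource_put() when done.
 */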
struct vmci_resource *
vmci_resource_by_handle(struct vmci_handle resource_handle,
			enum vmci_resource_type resource_type)
{
	struct vmci_resource *r, *resource = NULL;

	rcu_read_lock();

	r = vmci_resource_lookup(resource_handle, resource_type);
	if (r &&
	    (resource_type == r->type ||
	     resource_type == VMCI_RESOURCE_TYPE_ANY)) {
		resource = vmci_resource_get(r);
	}

	rcu_read_unlock();

	return resource;
}

/*
 * Get a reference to the given resource.
 */
struct vmci_resource *vmci_resource_get(struct vmci_resource *resource)
{
	kref_get(&resource->kref);

	return resource;
}

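/*
 * kref release callback. The resource itself is not freed here;
 * completing resource->done lets the owner (typically blocked in
 * vmci_resource_remove()) free the enclosing structure.
 */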
static void vmci_release_resource(struct kref *kref)
{
	struct vmci_resource *resource =
		container_of(kref, struct vmci_resource, kref);

	/* Verify the resource has been unlinked from hash table */
	WARN_ON(!hlist_unhashed(&resource->node));

	/* Signal that container of this resource can now be destroyed */
	complete(&resource->done);
}

/*
 * The resource's release function is only invoked when the last
 * reference is dropped. At that point the resource has already been
 * removed from the resource hash table, so nobody else can take a new
 * reference, and no locking is needed here.
 */
int vmci_resource_put(struct vmci_resource *resource)
{
	/*
	 * Propagate the result back to the caller in case it wants to know
	 * whether the entry was freed.
	 */
	return kref_put(&resource->kref, vmci_release_resource) ?
		VMCI_SUCCESS_ENTRY_DEAD : VMCI_SUCCESS;
}

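/* Return the handle under which the resource was registered. */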
struct vmci_handle vmci_resource_handle(struct vmci_resource *resource)
{
	return resource->handle;
}
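
/*
 * Illustrative usage sketch (not part of the driver): how a caller that
 * embeds a struct vmci_resource might register, look up and tear down a
 * resource. The names my_object, my_register and my_unregister, and the
 * use of the doorbell type, are assumptions for the example only; the
 * real users of this API are e.g. the datagram and doorbell code.
 *
 *	struct my_object {
 *		struct vmci_resource resource;
 *		// ... payload ...
 *	};
 *
 *	static int my_register(struct my_object *obj, u32 context_id)
 *	{
 *		// Passing VMCI_INVALID_ID as the resource ID makes
 *		// vmci_resource_add() pick a free one.
 *		struct vmci_handle h =
 *			vmci_make_handle(context_id, VMCI_INVALID_ID);
 *
 *		return vmci_resource_add(&obj->resource,
 *					 VMCI_RESOURCE_TYPE_DOORBELL, h);
 *	}
 *
 *	static void my_unregister(struct my_object *obj)
 *	{
 *		// Unlinks, waits for RCU readers and for the last reference.
 *		vmci_resource_remove(&obj->resource);
 *		kfree(obj);	// obj assumed to be kmalloc'ed by the caller
 *	}
 *
 * Lookups return the resource with an extra reference that the caller
 * must drop:
 *
 *	r = vmci_resource_by_handle(handle, VMCI_RESOURCE_TYPE_DOORBELL);
 *	if (r) {
 *		obj = container_of(r, struct my_object, resource);
 *		// ... use obj ...
 *		vmci_resource_put(r);
 *	}
 */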