/*
 * linux/fs/lockd/xdr.c
 *
 * XDR support for lockd and the lock client.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/nfs.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/stats.h>
#include <linux/lockd/lockd.h>

#include <uapi/linux/nfs2.h>

#define NLMDBG_FACILITY		NLMDBG_XDR


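/*
 * NLM versions 1 through 3 (handled by this file) carry 32-bit byte
 * offsets on the wire, while the kernel works with loff_t.  These
 * helpers convert between the two, clamping values that do not fit
 * into the 32-bit on-wire range.
 */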
static inline loff_t
s32_to_loff_t(__s32 offset)
{
	return (loff_t)offset;
}

static inline __s32
loff_t_to_s32(loff_t offset)
{
	__s32 res;
	if (offset >= NLM_OFFSET_MAX)
		res = NLM_OFFSET_MAX;
	else if (offset <= -NLM_OFFSET_MAX)
		res = -NLM_OFFSET_MAX;
	else
		res = offset;
	return res;
}

/*
 * XDR functions for basic NLM types
 */
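/*
 * An NLM cookie is an opaque netobj of at most NLM_MAXCOOKIELEN bytes;
 * a zero-length cookie on the wire is replaced with four zero bytes.
 */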
static __be32 *nlm_decode_cookie(__be32 *p, struct nlm_cookie *c)
{
	unsigned int	len;

	len = ntohl(*p++);

	if (len == 0) {
		c->len = 4;
		memset(c->data, 0, 4);	/* hockeypux brain damage */
	} else if (len <= NLM_MAXCOOKIELEN) {
		c->len = len;
		memcpy(c->data, p, len);
		p += XDR_QUADLEN(len);
	} else {
		dprintk("lockd: bad cookie size %d (only cookies under "
			"%d bytes are supported.)\n",
			len, NLM_MAXCOOKIELEN);
		return NULL;
	}
	return p;
}

static inline __be32 *
nlm_encode_cookie(__be32 *p, struct nlm_cookie *c)
{
	*p++ = htonl(c->len);
	memcpy(p, c->data, c->len);
	p += XDR_QUADLEN(c->len);
	return p;
}

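/*
 * NLM over NFSv2 uses fixed-size file handles; anything other than
 * NFS2_FHSIZE bytes is rejected.
 */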
static __be32 *
nlm_decode_fh(__be32 *p, struct nfs_fh *f)
{
	unsigned int	len;

	if ((len = ntohl(*p++)) != NFS2_FHSIZE) {
		dprintk("lockd: bad fhandle size %d (should be %d)\n",
			len, NFS2_FHSIZE);
		return NULL;
	}
	f->size = NFS2_FHSIZE;
	memset(f->data, 0, sizeof(f->data));
	memcpy(f->data, p, NFS2_FHSIZE);
	return p + XDR_QUADLEN(NFS2_FHSIZE);
}

/*
 * Encode and decode owner handle
 */
static inline __be32 *
nlm_decode_oh(__be32 *p, struct xdr_netobj *oh)
{
	return xdr_decode_netobj(p, oh);
}

static inline __be32 *
nlm_encode_oh(__be32 *p, struct xdr_netobj *oh)
{
	return xdr_encode_netobj(p, oh);
}

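/*
 * Decode an NLM lock argument: caller name, file handle, owner handle,
 * svid and the byte range, converting the on-wire (offset, length) pair
 * into the [fl_start, fl_end] form used by struct file_lock.  A length
 * of zero means "to the end of the file".
 */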
static __be32 *
nlm_decode_lock(__be32 *p, struct nlm_lock *lock)
{
	struct file_lock	*fl = &lock->fl;
	s32			start, len, end;

	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
					    &lock->len,
					    NLM_MAXSTRLEN))
	 || !(p = nlm_decode_fh(p, &lock->fh))
	 || !(p = nlm_decode_oh(p, &lock->oh)))
		return NULL;
	lock->svid  = ntohl(*p++);

	locks_init_lock(fl);
	fl->fl_owner = current->files;
	fl->fl_pid   = (pid_t)lock->svid;
	fl->fl_flags = FL_POSIX;
	fl->fl_type  = F_RDLCK;		/* as good as anything else */
	start = ntohl(*p++);
	len = ntohl(*p++);
	end = start + len - 1;

	fl->fl_start = s32_to_loff_t(start);

	if (len == 0 || end < 0)
		fl->fl_end = OFFSET_MAX;
	else
		fl->fl_end = s32_to_loff_t(end);
	return p;
}

/*
 * Encode result of a TEST/TEST_MSG call
 */
static __be32 *
nlm_encode_testres(__be32 *p, struct nlm_res *resp)
{
	s32		start, len;

	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
		return NULL;
	*p++ = resp->status;

	if (resp->status == nlm_lck_denied) {
		struct file_lock	*fl = &resp->lock.fl;

		*p++ = (fl->fl_type == F_RDLCK) ? xdr_zero : xdr_one;
		*p++ = htonl(resp->lock.svid);

		/* Encode owner handle. */
		if (!(p = xdr_encode_netobj(p, &resp->lock.oh)))
			return NULL;

		start = loff_t_to_s32(fl->fl_start);
		if (fl->fl_end == OFFSET_MAX)
			len = 0;
		else
			len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1);

		*p++ = htonl(start);
		*p++ = htonl(len);
	}

	return p;
}


/*
 * First, the server side XDR functions
 */
int
nlmsvc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	u32	exclusive;

	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
		return 0;

	exclusive = ntohl(*p++);
	if (!(p = nlm_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;

	return xdr_argsize_check(rqstp, p);
}

int
nlmsvc_encode_testres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
	if (!(p = nlm_encode_testres(p, resp)))
		return 0;
	return xdr_ressize_check(rqstp, p);
}

int
nlmsvc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	u32	exclusive;

	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
		return 0;
	argp->block  = ntohl(*p++);
	exclusive    = ntohl(*p++);
	if (!(p = nlm_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;
	argp->reclaim = ntohl(*p++);
	argp->state   = ntohl(*p++);
	argp->monitor = 1;		/* monitor client by default */

	return xdr_argsize_check(rqstp, p);
}

int
nlmsvc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	u32	exclusive;

	if (!(p = nlm_decode_cookie(p, &argp->cookie)))
		return 0;
	argp->block = ntohl(*p++);
	exclusive = ntohl(*p++);
	if (!(p = nlm_decode_lock(p, &argp->lock)))
		return 0;
	if (exclusive)
		argp->lock.fl.fl_type = F_WRLCK;
	return xdr_argsize_check(rqstp, p);
}

int
nlmsvc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	if (!(p = nlm_decode_cookie(p, &argp->cookie))
	 || !(p = nlm_decode_lock(p, &argp->lock)))
		return 0;
	argp->lock.fl.fl_type = F_UNLCK;
	return xdr_argsize_check(rqstp, p);
}

int
nlmsvc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp)
{
	struct nlm_lock	*lock = &argp->lock;

	memset(lock, 0, sizeof(*lock));
	locks_init_lock(&lock->fl);
	lock->svid = ~(u32) 0;
	lock->fl.fl_pid = (pid_t)lock->svid;

	if (!(p = nlm_decode_cookie(p, &argp->cookie))
	 || !(p = xdr_decode_string_inplace(p, &lock->caller,
					    &lock->len, NLM_MAXSTRLEN))
	 || !(p = nlm_decode_fh(p, &lock->fh))
	 || !(p = nlm_decode_oh(p, &lock->oh)))
		return 0;
	argp->fsm_mode = ntohl(*p++);
	argp->fsm_access = ntohl(*p++);
	return xdr_argsize_check(rqstp, p);
}

int
nlmsvc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
		return 0;
	*p++ = resp->status;
	*p++ = xdr_zero;		/* sequence argument */
	return xdr_ressize_check(rqstp, p);
}

int
nlmsvc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
	if (!(p = nlm_encode_cookie(p, &resp->cookie)))
		return 0;
	*p++ = resp->status;
	return xdr_ressize_check(rqstp, p);
}

int
nlmsvc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp)
{
	struct nlm_lock	*lock = &argp->lock;

	if (!(p = xdr_decode_string_inplace(p, &lock->caller,
					    &lock->len, NLM_MAXSTRLEN)))
		return 0;
	argp->state = ntohl(*p++);
	return xdr_argsize_check(rqstp, p);
}

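/*
 * Decode a reboot (SM_NOTIFY) notification: the monitored host name,
 * its new state number and the opaque private data from the status
 * monitor.
 */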
int
nlmsvc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp)
{
	if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN)))
		return 0;
	argp->state = ntohl(*p++);
	memcpy(&argp->priv.data, p, sizeof(argp->priv.data));
	p += XDR_QUADLEN(SM_PRIV_SIZE);
	return xdr_argsize_check(rqstp, p);
}

int
nlmsvc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp)
{
	if (!(p = nlm_decode_cookie(p, &resp->cookie)))
		return 0;
	resp->status = *p++;
	return xdr_argsize_check(rqstp, p);
}

int
nlmsvc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
	return xdr_argsize_check(rqstp, p);
}

int
nlmsvc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy)
{
	return xdr_ressize_check(rqstp, p);
}