summaryrefslogtreecommitdiffstats
path: root/kernel/drivers/acpi/nvs.c
blob: 85287b8fe3aa4923ef9d37739d567e5ed4ef86ad (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
/*
 * nvs.c - Routines for saving and restoring ACPI NVS memory region
 *
 * Copyright (C) 2008-2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
 *
 * This file is released under the GPLv2.
 */

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/acpi.h>

#include "internal.h"

/* ACPI NVS regions, APEI may use it */

/*
 * One firmware-reported NVS region.  Entries live on nvs_region_list and
 * are never freed (registered once at boot via acpi_nvs_register()).
 */
struct nvs_region {
	__u64 phys_start;	/* physical start address of the region */
	__u64 size;		/* region length in bytes */
	struct list_head node;	/* link in nvs_region_list */
};

static LIST_HEAD(nvs_region_list);

#ifdef CONFIG_ACPI_SLEEP
static int suspend_nvs_register(unsigned long start, unsigned long size);
#else
/*
 * Without CONFIG_ACPI_SLEEP there is no suspend/resume path, hence nothing
 * to save; accept the registration as a successful no-op.
 */
static inline int suspend_nvs_register(unsigned long a, unsigned long b)
{
	return 0;
}
#endif

/*
 * acpi_nvs_register - record an ACPI NVS region
 * @start: physical start address of the region
 * @size: length of the region in bytes
 *
 * Adds the region to nvs_region_list (for acpi_nvs_for_each_region()) and,
 * when CONFIG_ACPI_SLEEP is enabled, registers it with the suspend-time
 * save/restore machinery.  Returns 0 on success or a negative errno.
 */
int acpi_nvs_register(__u64 start, __u64 size)
{
	struct nvs_region *rgn = kmalloc(sizeof(*rgn), GFP_KERNEL);

	if (!rgn)
		return -ENOMEM;

	rgn->phys_start = start;
	rgn->size = size;
	list_add_tail(&rgn->node, &nvs_region_list);

	return suspend_nvs_register(start, size);
}

/*
 * acpi_nvs_for_each_region - invoke a callback for every registered NVS region
 * @func: called with each region's start, size and @data; a nonzero return
 *	  value stops the walk and is propagated back to the caller
 * @data: opaque cookie passed through to @func
 *
 * Returns 0 after visiting all regions, or the first nonzero value
 * returned by @func.
 */
int acpi_nvs_for_each_region(int (*func)(__u64 start, __u64 size, void *data),
			     void *data)
{
	struct nvs_region *rgn;

	list_for_each_entry(rgn, &nvs_region_list, node) {
		int ret = func(rgn->phys_start, rgn->size, data);

		if (ret)
			return ret;
	}

	return 0;
}


#ifdef CONFIG_ACPI_SLEEP
/*
 * Platforms, like ACPI, may want us to save some memory used by them during
 * suspend and to restore the contents of this memory during the subsequent
 * resume.  The code below implements a mechanism allowing us to do that.
 */

/*
 * Book-keeping for one page-sized chunk of an NVS region.  Chunks need not
 * start page-aligned and may be shorter than a page; see
 * suspend_nvs_register() for how regions are split.
 */
struct nvs_page {
	unsigned long phys_start;	/* physical address of this chunk */
	unsigned int size;		/* bytes to save/restore (<= PAGE_SIZE) */
	void *kaddr;			/* virtual mapping of phys_start */
	void *data;			/* RAM page holding the saved copy */
	bool unmap;			/* kaddr came from acpi_os_ioremap(),
					 * so suspend_nvs_free() must iounmap */
	struct list_head node;		/* link in nvs_list */
};

static LIST_HEAD(nvs_list);

/**
 *	suspend_nvs_register - register platform NVS memory region to save
 *	@start: physical address of the region
 *	@size: size of the region in bytes
 *
 *	The NVS region need not be page-aligned (both ends) and we arrange
 *	things so that the data from page-aligned addresses in this region will
 *	be copied into separate RAM pages.
 *
 *	Returns 0 on success or -ENOMEM if a book-keeping entry cannot be
 *	allocated, in which case all entries added so far are dropped.
 */
static int suspend_nvs_register(unsigned long start, unsigned long size)
{
	struct nvs_page *entry, *next;

	/* %lu: size is unsigned long (was %ld, a -Wformat mismatch). */
	pr_info("PM: Registering ACPI NVS region [mem %#010lx-%#010lx] (%lu bytes)\n",
		start, start + size - 1, size);

	while (size > 0) {
		unsigned int nr_bytes;

		entry = kzalloc(sizeof(struct nvs_page), GFP_KERNEL);
		if (!entry)
			goto Error;

		list_add_tail(&entry->node, &nvs_list);
		entry->phys_start = start;
		/* Bytes remaining in the page containing start. */
		nr_bytes = PAGE_SIZE - (start & ~PAGE_MASK);
		entry->size = (size < nr_bytes) ? size : nr_bytes;

		start += entry->size;
		size -= entry->size;
	}
	return 0;

 Error:
	/* Allocation failed: unwind everything registered so far. */
	list_for_each_entry_safe(entry, next, &nvs_list, node) {
		list_del(&entry->node);
		kfree(entry);
	}
	return -ENOMEM;
}

/**
 *	suspend_nvs_free - free data pages allocated for saving NVS regions
 */
void suspend_nvs_free(void)
{
	struct nvs_page *entry;

	list_for_each_entry(entry, &nvs_list, node)
		if (entry->data) {
			free_page((unsigned long)entry->data);
			entry->data = NULL;
			if (entry->kaddr) {
				if (entry->unmap) {
					iounmap(entry->kaddr);
					entry->unmap = false;
				} else {
					acpi_os_unmap_iomem(entry->kaddr,
							    entry->size);
				}
				entry->kaddr = NULL;
			}
		}
}

/**
 *	suspend_nvs_alloc - allocate memory necessary for saving NVS regions
 *
 *	Gives every entry on nvs_list a zero-order RAM page to copy its NVS
 *	chunk into.  On failure all pages allocated so far are released and
 *	-ENOMEM is returned.
 */
int suspend_nvs_alloc(void)
{
	struct nvs_page *entry;

	list_for_each_entry(entry, &nvs_list, node) {
		void *page = (void *)__get_free_page(GFP_KERNEL);

		if (!page) {
			/* Roll back the pages handed out so far. */
			suspend_nvs_free();
			return -ENOMEM;
		}
		entry->data = page;
	}
	return 0;
}

/**
 *	suspend_nvs_save - save NVS memory regions
 *
 *	Maps each registered NVS chunk and copies its contents into the RAM
 *	page set up by suspend_nvs_alloc().  Returns 0 on success or -ENOMEM
 *	if a chunk cannot be mapped (all mappings/pages are freed then).
 */
int suspend_nvs_save(void)
{
	struct nvs_page *entry;

	/* pr_info for consistency with suspend_nvs_register(). */
	pr_info("PM: Saving platform NVS memory\n");

	list_for_each_entry(entry, &nvs_list, node)
		if (entry->data) {
			unsigned long phys = entry->phys_start;
			unsigned int size = entry->size;

			/*
			 * Try to reuse an existing ACPI mapping first
			 * (presumably one created by acpi_os_map_iomem() —
			 * confirm against osl.c); otherwise create a private
			 * ioremap that suspend_nvs_free() must undo.
			 */
			entry->kaddr = acpi_os_get_iomem(phys, size);
			if (!entry->kaddr) {
				entry->kaddr = acpi_os_ioremap(phys, size);
				entry->unmap = !!entry->kaddr;
			}
			if (!entry->kaddr) {
				suspend_nvs_free();
				return -ENOMEM;
			}
			memcpy(entry->data, entry->kaddr, entry->size);
		}

	return 0;
}

/**
 *	suspend_nvs_restore - restore NVS memory regions
 *
 *	This function is going to be called with interrupts disabled, so it
 *	cannot iounmap the virtual addresses used to access the NVS region.
 *	It relies on the mappings established by suspend_nvs_save().
 */
void suspend_nvs_restore(void)
{
	struct nvs_page *entry;

	/* pr_info for consistency with suspend_nvs_register(). */
	pr_info("PM: Restoring platform NVS memory\n");

	list_for_each_entry(entry, &nvs_list, node)
		if (entry->data)
			memcpy(entry->kaddr, entry->data, entry->size);
}
#endif
0x00000008 /* Status register bit definitions */ #define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000 #define TW_STATUS_MINOR_VERSION_MASK 0x0F000000 #define TW_STATUS_PCI_PARITY_ERROR 0x00800000 #define TW_STATUS_QUEUE_ERROR 0x00400000 #define TW_STATUS_MICROCONTROLLER_ERROR 0x00200000 #define TW_STATUS_PCI_ABORT 0x00100000 #define TW_STATUS_HOST_INTERRUPT 0x00080000 #define TW_STATUS_ATTENTION_INTERRUPT 0x00040000 #define TW_STATUS_COMMAND_INTERRUPT 0x00020000 #define TW_STATUS_RESPONSE_INTERRUPT 0x00010000 #define TW_STATUS_COMMAND_QUEUE_FULL 0x00008000 #define TW_STATUS_RESPONSE_QUEUE_EMPTY 0x00004000 #define TW_STATUS_MICROCONTROLLER_READY 0x00002000 #define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000 #define TW_STATUS_ALL_INTERRUPTS 0x000F0000 #define TW_STATUS_CLEARABLE_BITS 0x00D00000 #define TW_STATUS_EXPECTED_BITS 0x00002000 #define TW_STATUS_UNEXPECTED_BITS 0x00F00008 #define TW_STATUS_SBUF_WRITE_ERROR 0x00000008 #define TW_STATUS_VALID_INTERRUPT 0x00DF0008 /* RESPONSE QUEUE BIT DEFINITIONS */ #define TW_RESPONSE_ID_MASK 0x00000FF0 /* PCI related defines */ #define TW_IO_ADDRESS_RANGE 0x10 #define TW_DEVICE_NAME "3ware Storage Controller" #define TW_VENDOR_ID (0x13C1) /* 3ware */ #define TW_DEVICE_ID (0x1000) /* Storage Controller */ #define TW_DEVICE_ID2 (0x1001) /* 7000 series controller */ #define TW_NUMDEVICES 2 #define TW_PCI_CLEAR_PARITY_ERRORS 0xc100 #define TW_PCI_CLEAR_PCI_ABORT 0x2000 /* Command packet opcodes */ #define TW_OP_NOP 0x0 #define TW_OP_INIT_CONNECTION 0x1 #define TW_OP_READ 0x2 #define TW_OP_WRITE 0x3 #define TW_OP_VERIFY 0x4 #define TW_OP_GET_PARAM 0x12 #define TW_OP_SET_PARAM 0x13 #define TW_OP_SECTOR_INFO 0x1a #define TW_OP_AEN_LISTEN 0x1c #define TW_OP_FLUSH_CACHE 0x0e #define TW_CMD_PACKET 0x1d #define TW_CMD_PACKET_WITH_DATA 0x1f /* Asynchronous Event Notification (AEN) Codes */ #define TW_AEN_QUEUE_EMPTY 0x0000 #define TW_AEN_SOFT_RESET 0x0001 #define TW_AEN_DEGRADED_MIRROR 0x0002 #define TW_AEN_CONTROLLER_ERROR 0x0003 #define 
TW_AEN_REBUILD_FAIL 0x0004 #define TW_AEN_REBUILD_DONE 0x0005 #define TW_AEN_QUEUE_FULL 0x00ff #define TW_AEN_TABLE_UNDEFINED 0x15 #define TW_AEN_APORT_TIMEOUT 0x0009 #define TW_AEN_DRIVE_ERROR 0x000A #define TW_AEN_SMART_FAIL 0x000F #define TW_AEN_SBUF_FAIL 0x0024 /* Misc defines */ #define TW_ALIGNMENT_6000 64 /* 64 bytes */ #define TW_ALIGNMENT_7000 4 /* 4 bytes */ #define TW_MAX_UNITS 16 #define TW_COMMAND_ALIGNMENT_MASK 0x1ff #define TW_INIT_MESSAGE_CREDITS 0x100 #define TW_INIT_COMMAND_PACKET_SIZE 0x3 #define TW_POLL_MAX_RETRIES 20000 #define TW_MAX_SGL_LENGTH 62 #define TW_ATA_PASS_SGL_MAX 60 #define TW_Q_LENGTH 256 #define TW_Q_START 0 #define TW_MAX_SLOT 32 #define TW_MAX_PCI_BUSES 255 #define TW_MAX_RESET_TRIES 3 #define TW_UNIT_INFORMATION_TABLE_BASE 0x300 #define TW_MAX_CMDS_PER_LUN 254 /* 254 for io, 1 for chrdev ioctl, one for internal aen post */ #define TW_BLOCK_SIZE 0x200 /* 512-byte blocks */ #define TW_IOCTL 0x80 #define TW_UNIT_ONLINE 1 #define TW_IN_INTR 1 #define TW_IN_RESET 2 #define TW_IN_CHRDEV_IOCTL 3 #define TW_MAX_SECTORS 256 #define TW_MAX_IOCTL_SECTORS 512 #define TW_AEN_WAIT_TIME 1000 #define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */ #define TW_ISR_DONT_COMPLETE 2 #define TW_ISR_DONT_RESULT 3 #define TW_IOCTL_TIMEOUT 25 /* 25 seconds */ #define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */ #define TW_IOCTL_CHRDEV_FREE -1 #define TW_DMA_MASK DMA_BIT_MASK(32) #define TW_MAX_CDB_LEN 16 /* Bitmask macros to eliminate bitfields */ /* opcode: 5, sgloffset: 3 */ #define TW_OPSGL_IN(x,y) ((x << 5) | (y & 0x1f)) #define TW_SGL_OUT(x) ((x >> 5) & 0x7) /* reserved_1: 4, response_id: 8, reserved_2: 20 */ #define TW_RESID_OUT(x) ((x >> 4) & 0xff) /* unit: 4, host_id: 4 */ #define TW_UNITHOST_IN(x,y) ((x << 4) | ( y & 0xf)) #define TW_UNIT_OUT(x) (x & 0xf) /* Macros */ #define TW_CONTROL_REG_ADDR(x) (x->base_addr) #define TW_STATUS_REG_ADDR(x) (x->base_addr + 0x4) #define TW_COMMAND_QUEUE_REG_ADDR(x) (x->base_addr + 0x8) #define 
TW_RESPONSE_QUEUE_REG_ADDR(x) (x->base_addr + 0xC) #define TW_CLEAR_ALL_INTERRUPTS(x) (outl(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x))) #define TW_CLEAR_ATTENTION_INTERRUPT(x) (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x))) #define TW_CLEAR_HOST_INTERRUPT(x) (outl(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x))) #define TW_DISABLE_INTERRUPTS(x) (outl(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x))) #define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x))) #define TW_MASK_COMMAND_INTERRUPT(x) (outl(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x))) #define TW_UNMASK_COMMAND_INTERRUPT(x) (outl(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x))) #define TW_SOFT_RESET(x) (outl(TW_CONTROL_ISSUE_SOFT_RESET | \ TW_CONTROL_CLEAR_HOST_INTERRUPT | \ TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \ TW_CONTROL_MASK_COMMAND_INTERRUPT | \ TW_CONTROL_MASK_RESPONSE_INTERRUPT | \ TW_CONTROL_CLEAR_ERROR_STATUS | \ TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x))) #define TW_STATUS_ERRORS(x) \ (((x & TW_STATUS_PCI_ABORT) || \ (x & TW_STATUS_PCI_PARITY_ERROR) || \ (x & TW_STATUS_QUEUE_ERROR) || \ (x & TW_STATUS_MICROCONTROLLER_ERROR)) && \ (x & TW_STATUS_MICROCONTROLLER_READY)) #ifdef TW_DEBUG #define dprintk(msg...) printk(msg) #else #define dprintk(msg...) 
do { } while(0) #endif #pragma pack(1) /* Scatter Gather List Entry */ typedef struct TAG_TW_SG_Entry { u32 address; u32 length; } TW_SG_Entry; typedef unsigned char TW_Sector[512]; /* Command Packet */ typedef struct TW_Command { unsigned char opcode__sgloffset; unsigned char size; unsigned char request_id; unsigned char unit__hostid; /* Second DWORD */ unsigned char status; unsigned char flags; union { unsigned short block_count; unsigned short parameter_count; unsigned short message_credits; } byte6; union { struct { u32 lba; TW_SG_Entry sgl[TW_MAX_SGL_LENGTH]; u32 padding; /* pad to 512 bytes */ } io; struct { TW_SG_Entry sgl[TW_MAX_SGL_LENGTH]; u32 padding[2]; } param; struct { u32 response_queue_pointer; u32 padding[125]; } init_connection; struct { char version[504]; } ioctl_miniport_version; } byte8; } TW_Command; #pragma pack() typedef struct TAG_TW_Ioctl { unsigned char opcode; unsigned short table_id; unsigned char parameter_id; unsigned char parameter_size_bytes; unsigned char unit_index; unsigned char data[1]; } TW_Ioctl; #pragma pack(1) /* Structure for new chardev ioctls */ typedef struct TAG_TW_New_Ioctl { unsigned int data_buffer_length; unsigned char padding [508]; TW_Command firmware_command; char data_buffer[1]; } TW_New_Ioctl; /* GetParam descriptor */ typedef struct { unsigned short table_id; unsigned char parameter_id; unsigned char parameter_size_bytes; unsigned char data[1]; } TW_Param, *PTW_Param; /* Response queue */ typedef union TAG_TW_Response_Queue { u32 response_id; u32 value; } TW_Response_Queue; typedef int TW_Cmd_State; #define TW_S_INITIAL 0x1 /* Initial state */ #define TW_S_STARTED 0x2 /* Id in use */ #define TW_S_POSTED 0x4 /* Posted to the controller */ #define TW_S_PENDING 0x8 /* Waiting to be posted in isr */ #define TW_S_COMPLETED 0x10 /* Completed by isr */ #define TW_S_FINISHED 0x20 /* I/O completely done */ #define TW_START_MASK (TW_S_STARTED | TW_S_POSTED | TW_S_PENDING | TW_S_COMPLETED) /* Command header for ATA 
pass-thru */ typedef struct TAG_TW_Passthru { unsigned char opcode__sgloffset; unsigned char size; unsigned char request_id; unsigned char aport__hostid; unsigned char status; unsigned char flags; unsigned short param; unsigned short features; unsigned short sector_count; unsigned short sector_num; unsigned short cylinder_lo; unsigned short cylinder_hi; unsigned char drive_head; unsigned char command; TW_SG_Entry sg_list[TW_ATA_PASS_SGL_MAX]; unsigned char padding[12]; } TW_Passthru; #pragma pack() typedef struct TAG_TW_Device_Extension { u32 base_addr; unsigned long *alignment_virtual_address[TW_Q_LENGTH]; unsigned long alignment_physical_address[TW_Q_LENGTH]; int is_unit_present[TW_MAX_UNITS]; unsigned long *command_packet_virtual_address[TW_Q_LENGTH]; unsigned long command_packet_physical_address[TW_Q_LENGTH]; struct pci_dev *tw_pci_dev; struct scsi_cmnd *srb[TW_Q_LENGTH]; unsigned char free_queue[TW_Q_LENGTH]; unsigned char free_head; unsigned char free_tail; unsigned char pending_queue[TW_Q_LENGTH]; unsigned char pending_head; unsigned char pending_tail; TW_Cmd_State state[TW_Q_LENGTH]; u32 posted_request_count; u32 max_posted_request_count; u32 request_count_marked_pending; u32 pending_request_count; u32 max_pending_request_count; u32 max_sgl_entries; u32 sgl_entries; u32 num_resets; u32 sector_count; u32 max_sector_count; u32 aen_count; struct Scsi_Host *host; struct mutex ioctl_lock; unsigned short aen_queue[TW_Q_LENGTH]; unsigned char aen_head; unsigned char aen_tail; volatile long flags; /* long req'd for set_bit --RR */ int reset_print; volatile int chrdev_request_id; wait_queue_head_t ioctl_wqueue; } TW_Device_Extension; #endif /* _3W_XXXX_H */