author     Yunhong Jiang <yunhong.jiang@intel.com>    2015-08-04 12:17:53 -0700
committer  Yunhong Jiang <yunhong.jiang@intel.com>    2015-08-04 15:44:42 -0700
commit     9ca8dbcc65cfc63d6f5ef3312a33184e1d726e00 (patch)
tree       1c9cafbcd35f783a87880a10f85d1a060db1a563 /kernel/fs/pstore
parent     98260f3884f4a202f9ca5eabed40b1354c489b29 (diff)
Add the rt linux 4.1.3-rt3 as base
Import the rt linux 4.1.3-rt3 as OPNFV kvm base. It's from
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
linux-4.1.y-rt and the base is:

    commit 0917f823c59692d751951bf5ea699a2d1e2f26a2
    Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Date:   Sat Jul 25 12:13:34 2015 +0200

        Prepare v4.1.3-rt3

        Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>

We lose all the git history this way, which is not good. We should
switch to a separate OPNFV project repo in the future.

Change-Id: I87543d81c9df70d99c5001fbdf646b202c19f423
Signed-off-by: Yunhong Jiang <yunhong.jiang@intel.com>
Diffstat (limited to 'kernel/fs/pstore')
-rw-r--r--   kernel/fs/pstore/Kconfig       62
-rw-r--r--   kernel/fs/pstore/Makefile      13
-rw-r--r--   kernel/fs/pstore/ftrace.c     131
-rw-r--r--   kernel/fs/pstore/inode.c      483
-rw-r--r--   kernel/fs/pstore/internal.h    64
-rw-r--r--   kernel/fs/pstore/platform.c   547
-rw-r--r--   kernel/fs/pstore/pmsg.c       114
-rw-r--r--   kernel/fs/pstore/ram.c        648
-rw-r--r--   kernel/fs/pstore/ram_core.c   539
9 files changed, 2601 insertions, 0 deletions
diff --git a/kernel/fs/pstore/Kconfig b/kernel/fs/pstore/Kconfig
new file mode 100644
index 000000000..916b8e23d
--- /dev/null
+++ b/kernel/fs/pstore/Kconfig
@@ -0,0 +1,62 @@
+config PSTORE
+ bool "Persistent store support"
+ default n
+ select ZLIB_DEFLATE
+ select ZLIB_INFLATE
+ help
+ This option enables generic access to platform level
+ persistent storage via "pstore" filesystem that can
+ be mounted as /dev/pstore. Only useful if you have
+ a platform level driver that registers with pstore to
+ provide the data, so you probably should just go say "Y"
+ (or "M") to a platform specific persistent store driver
+ (e.g. ACPI_APEI on X86) which will select this for you.
+ If you don't have a platform persistent store driver,
+ say N.
+
+config PSTORE_CONSOLE
+ bool "Log kernel console messages"
+ depends on PSTORE
+ help
+ When the option is enabled, pstore will log all kernel
+ messages, even if no oops or panic happened.
+
+config PSTORE_PMSG
+ bool "Log user space messages"
+ depends on PSTORE
+ help
+ When the option is enabled, pstore will export a character
+ interface /dev/pmsg0 to log user space messages. On reboot
+ data can be retrieved from /sys/fs/pstore/pmsg-ramoops-[ID].
+
+ If unsure, say N.
+
+config PSTORE_FTRACE
+ bool "Persistent function tracer"
+ depends on PSTORE
+ depends on FUNCTION_TRACER
+ depends on DEBUG_FS
+ help
+ With this option kernel traces function calls into a persistent
+ ram buffer that can be decoded and dumped after reboot through
+ pstore filesystem. It can be used to determine what function
+ was last called before a reset or panic.
+
+ If unsure, say N.
+
+config PSTORE_RAM
+ tristate "Log panic/oops to a RAM buffer"
+ depends on PSTORE
+ depends on HAS_IOMEM
+ depends on HAVE_MEMBLOCK
+ select REED_SOLOMON
+ select REED_SOLOMON_ENC8
+ select REED_SOLOMON_DEC8
+ help
+ This enables panic and oops messages to be logged to a circular
+ buffer in RAM where it can be read back at some later point.
+
+ Note that for historical reasons, the module will be named
+ "ramoops.ko".
+
+ For more information, see Documentation/ramoops.txt.
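
[Editor's note] The PSTORE help text above refers to "a platform level driver that registers with pstore". As a minimal sketch (not part of this commit), a hypothetical backend module would fill in a struct pstore_info and call pstore_register(), using the callback signatures that ramoops uses later in this diff. The name "example", the buffer size, and the empty callbacks are illustrative only; a real driver would copy records to and from its persistent medium.

/* Hypothetical minimal pstore backend; sketch only. */
#include <linux/module.h>
#include <linux/kmsg_dump.h>
#include <linux/pstore.h>
#include <linux/spinlock.h>
#include <linux/time.h>

static char example_buf[4096];		/* scratch buffer handed to pstore */

/* Called for each record pstore wants to persist (dmesg, console, ...). */
static int example_write_buf(enum pstore_type_id type,
			     enum kmsg_dump_reason reason,
			     u64 *id, unsigned int part, const char *buf,
			     bool compressed, size_t size,
			     struct pstore_info *psi)
{
	/* A real driver would copy 'size' bytes from 'buf' to its medium. */
	return 0;
}

/* Called repeatedly at mount time; return 0 when no more records exist. */
static ssize_t example_read(u64 *id, enum pstore_type_id *type, int *count,
			    struct timespec *time, char **buf,
			    bool *compressed, struct pstore_info *psi)
{
	return 0;			/* no stored records in this sketch */
}

static struct pstore_info example_pstore = {
	.owner		= THIS_MODULE,
	.name		= "example",
	.buf		= example_buf,
	.bufsize	= sizeof(example_buf),
	.read		= example_read,
	.write_buf	= example_write_buf,
};

static int __init example_pstore_init(void)
{
	spin_lock_init(&example_pstore.buf_lock);
	return pstore_register(&example_pstore);
}
module_init(example_pstore_init);
MODULE_LICENSE("GPL");

Because no .write callback is set, pstore_register() in platform.c below falls back to pstore_write_compat(), which forwards dumps through .write_buf.
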
diff --git a/kernel/fs/pstore/Makefile b/kernel/fs/pstore/Makefile
new file mode 100644
index 000000000..e647d8e81
--- /dev/null
+++ b/kernel/fs/pstore/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the linux pstorefs routines.
+#
+
+obj-y += pstore.o
+
+pstore-objs += inode.o platform.o
+obj-$(CONFIG_PSTORE_FTRACE) += ftrace.o
+
+obj-$(CONFIG_PSTORE_PMSG) += pmsg.o
+
+ramoops-objs += ram.o ram_core.o
+obj-$(CONFIG_PSTORE_RAM) += ramoops.o
diff --git a/kernel/fs/pstore/ftrace.c b/kernel/fs/pstore/ftrace.c
new file mode 100644
index 000000000..76a4eeb92
--- /dev/null
+++ b/kernel/fs/pstore/ftrace.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/atomic.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/ftrace.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/cache.h>
+#include <asm/barrier.h>
+#include "internal.h"
+
+static void notrace pstore_ftrace_call(unsigned long ip,
+ unsigned long parent_ip,
+ struct ftrace_ops *op,
+ struct pt_regs *regs)
+{
+ unsigned long flags;
+ struct pstore_ftrace_record rec = {};
+
+ if (unlikely(oops_in_progress))
+ return;
+
+ local_irq_save(flags);
+
+ rec.ip = ip;
+ rec.parent_ip = parent_ip;
+ pstore_ftrace_encode_cpu(&rec, raw_smp_processor_id());
+ psinfo->write_buf(PSTORE_TYPE_FTRACE, 0, NULL, 0, (void *)&rec,
+ 0, sizeof(rec), psinfo);
+
+ local_irq_restore(flags);
+}
+
+static struct ftrace_ops pstore_ftrace_ops __read_mostly = {
+ .func = pstore_ftrace_call,
+};
+
+static DEFINE_MUTEX(pstore_ftrace_lock);
+static bool pstore_ftrace_enabled;
+
+static ssize_t pstore_ftrace_knob_write(struct file *f, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u8 on;
+ ssize_t ret;
+
+ ret = kstrtou8_from_user(buf, count, 2, &on);
+ if (ret)
+ return ret;
+
+ mutex_lock(&pstore_ftrace_lock);
+
+ if (!on ^ pstore_ftrace_enabled)
+ goto out;
+
+ if (on)
+ ret = register_ftrace_function(&pstore_ftrace_ops);
+ else
+ ret = unregister_ftrace_function(&pstore_ftrace_ops);
+ if (ret) {
+ pr_err("%s: unable to %sregister ftrace ops: %zd\n",
+ __func__, on ? "" : "un", ret);
+ goto err;
+ }
+
+ pstore_ftrace_enabled = on;
+out:
+ ret = count;
+err:
+ mutex_unlock(&pstore_ftrace_lock);
+
+ return ret;
+}
+
+static ssize_t pstore_ftrace_knob_read(struct file *f, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char val[] = { '0' + pstore_ftrace_enabled, '\n' };
+
+ return simple_read_from_buffer(buf, count, ppos, val, sizeof(val));
+}
+
+static const struct file_operations pstore_knob_fops = {
+ .open = simple_open,
+ .read = pstore_ftrace_knob_read,
+ .write = pstore_ftrace_knob_write,
+};
+
+void pstore_register_ftrace(void)
+{
+ struct dentry *dir;
+ struct dentry *file;
+
+ if (!psinfo->write_buf)
+ return;
+
+ dir = debugfs_create_dir("pstore", NULL);
+ if (!dir) {
+ pr_err("%s: unable to create pstore directory\n", __func__);
+ return;
+ }
+
+ file = debugfs_create_file("record_ftrace", 0600, dir, NULL,
+ &pstore_knob_fops);
+ if (!file) {
+ pr_err("%s: unable to create record_ftrace file\n", __func__);
+ goto err_file;
+ }
+
+ return;
+err_file:
+ debugfs_remove(dir);
+}
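
[Editor's note] The record_ftrace knob created above is toggled from user space. An illustrative snippet (not part of this commit), assuming debugfs is mounted at /sys/kernel/debug; pstore_ftrace_knob_write() parses the value with kstrtou8_from_user() in base 2, so only "0" and "1" are accepted:

/* User-space sketch: enable persistent function tracing. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/pstore/record_ftrace", O_WRONLY);

	if (fd < 0) {
		perror("open record_ftrace");
		return 1;
	}
	/* "1" registers pstore_ftrace_ops, "0" unregisters it. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

After a reset, the captured calls are decoded by the ftrace seq_file code in inode.c and appear as an ftrace-<backend>-<id> record in the pstore filesystem.
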
diff --git a/kernel/fs/pstore/inode.c b/kernel/fs/pstore/inode.c
new file mode 100644
index 000000000..3adcc4669
--- /dev/null
+++ b/kernel/fs/pstore/inode.c
@@ -0,0 +1,483 @@
+/*
+ * Persistent Storage - ramfs parts.
+ *
+ * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/pagemap.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/string.h>
+#include <linux/mount.h>
+#include <linux/seq_file.h>
+#include <linux/ramfs.h>
+#include <linux/parser.h>
+#include <linux/sched.h>
+#include <linux/magic.h>
+#include <linux/pstore.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/syslog.h>
+
+#include "internal.h"
+
+#define PSTORE_NAMELEN 64
+
+static DEFINE_SPINLOCK(allpstore_lock);
+static LIST_HEAD(allpstore);
+
+struct pstore_private {
+ struct list_head list;
+ struct pstore_info *psi;
+ enum pstore_type_id type;
+ u64 id;
+ int count;
+ ssize_t size;
+ char data[];
+};
+
+struct pstore_ftrace_seq_data {
+ const void *ptr;
+ size_t off;
+ size_t size;
+};
+
+#define REC_SIZE sizeof(struct pstore_ftrace_record)
+
+static void *pstore_ftrace_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct pstore_private *ps = s->private;
+ struct pstore_ftrace_seq_data *data;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return NULL;
+
+ data->off = ps->size % REC_SIZE;
+ data->off += *pos * REC_SIZE;
+ if (data->off + REC_SIZE > ps->size) {
+ kfree(data);
+ return NULL;
+ }
+
+ return data;
+
+}
+
+static void pstore_ftrace_seq_stop(struct seq_file *s, void *v)
+{
+ kfree(v);
+}
+
+static void *pstore_ftrace_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct pstore_private *ps = s->private;
+ struct pstore_ftrace_seq_data *data = v;
+
+ data->off += REC_SIZE;
+ if (data->off + REC_SIZE > ps->size)
+ return NULL;
+
+ (*pos)++;
+ return data;
+}
+
+static int pstore_ftrace_seq_show(struct seq_file *s, void *v)
+{
+ struct pstore_private *ps = s->private;
+ struct pstore_ftrace_seq_data *data = v;
+ struct pstore_ftrace_record *rec = (void *)(ps->data + data->off);
+
+ seq_printf(s, "%d %08lx %08lx %pf <- %pF\n",
+ pstore_ftrace_decode_cpu(rec), rec->ip, rec->parent_ip,
+ (void *)rec->ip, (void *)rec->parent_ip);
+
+ return 0;
+}
+
+static const struct seq_operations pstore_ftrace_seq_ops = {
+ .start = pstore_ftrace_seq_start,
+ .next = pstore_ftrace_seq_next,
+ .stop = pstore_ftrace_seq_stop,
+ .show = pstore_ftrace_seq_show,
+};
+
+static int pstore_check_syslog_permissions(struct pstore_private *ps)
+{
+ switch (ps->type) {
+ case PSTORE_TYPE_DMESG:
+ case PSTORE_TYPE_CONSOLE:
+ return check_syslog_permissions(SYSLOG_ACTION_READ_ALL,
+ SYSLOG_FROM_READER);
+ default:
+ return 0;
+ }
+}
+
+static ssize_t pstore_file_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *sf = file->private_data;
+ struct pstore_private *ps = sf->private;
+
+ if (ps->type == PSTORE_TYPE_FTRACE)
+ return seq_read(file, userbuf, count, ppos);
+ return simple_read_from_buffer(userbuf, count, ppos, ps->data, ps->size);
+}
+
+static int pstore_file_open(struct inode *inode, struct file *file)
+{
+ struct pstore_private *ps = inode->i_private;
+ struct seq_file *sf;
+ int err;
+ const struct seq_operations *sops = NULL;
+
+ err = pstore_check_syslog_permissions(ps);
+ if (err)
+ return err;
+
+ if (ps->type == PSTORE_TYPE_FTRACE)
+ sops = &pstore_ftrace_seq_ops;
+
+ err = seq_open(file, sops);
+ if (err < 0)
+ return err;
+
+ sf = file->private_data;
+ sf->private = ps;
+
+ return 0;
+}
+
+static loff_t pstore_file_llseek(struct file *file, loff_t off, int whence)
+{
+ struct seq_file *sf = file->private_data;
+
+ if (sf->op)
+ return seq_lseek(file, off, whence);
+ return default_llseek(file, off, whence);
+}
+
+static const struct file_operations pstore_file_operations = {
+ .open = pstore_file_open,
+ .read = pstore_file_read,
+ .llseek = pstore_file_llseek,
+ .release = seq_release,
+};
+
+/*
+ * When a file is unlinked from our file system we call the
+ * platform driver to erase the record from persistent store.
+ */
+static int pstore_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct pstore_private *p = d_inode(dentry)->i_private;
+ int err;
+
+ err = pstore_check_syslog_permissions(p);
+ if (err)
+ return err;
+
+ if (p->psi->erase)
+ p->psi->erase(p->type, p->id, p->count,
+ d_inode(dentry)->i_ctime, p->psi);
+ else
+ return -EPERM;
+
+ return simple_unlink(dir, dentry);
+}
+
+static void pstore_evict_inode(struct inode *inode)
+{
+ struct pstore_private *p = inode->i_private;
+ unsigned long flags;
+
+ clear_inode(inode);
+ if (p) {
+ spin_lock_irqsave(&allpstore_lock, flags);
+ list_del(&p->list);
+ spin_unlock_irqrestore(&allpstore_lock, flags);
+ kfree(p);
+ }
+}
+
+static const struct inode_operations pstore_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .unlink = pstore_unlink,
+};
+
+static struct inode *pstore_get_inode(struct super_block *sb)
+{
+ struct inode *inode = new_inode(sb);
+ if (inode) {
+ inode->i_ino = get_next_ino();
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ }
+ return inode;
+}
+
+enum {
+ Opt_kmsg_bytes, Opt_err
+};
+
+static const match_table_t tokens = {
+ {Opt_kmsg_bytes, "kmsg_bytes=%u"},
+ {Opt_err, NULL}
+};
+
+static void parse_options(char *options)
+{
+ char *p;
+ substring_t args[MAX_OPT_ARGS];
+ int option;
+
+ if (!options)
+ return;
+
+ while ((p = strsep(&options, ",")) != NULL) {
+ int token;
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, tokens, args);
+ switch (token) {
+ case Opt_kmsg_bytes:
+ if (!match_int(&args[0], &option))
+ pstore_set_kmsg_bytes(option);
+ break;
+ }
+ }
+}
+
+static int pstore_remount(struct super_block *sb, int *flags, char *data)
+{
+ sync_filesystem(sb);
+ parse_options(data);
+
+ return 0;
+}
+
+static const struct super_operations pstore_ops = {
+ .statfs = simple_statfs,
+ .drop_inode = generic_delete_inode,
+ .evict_inode = pstore_evict_inode,
+ .remount_fs = pstore_remount,
+ .show_options = generic_show_options,
+};
+
+static struct super_block *pstore_sb;
+
+int pstore_is_mounted(void)
+{
+ return pstore_sb != NULL;
+}
+
+/*
+ * Make a regular file in the root directory of our file system.
+ * Load it up with "size" bytes of data from "buf".
+ * Set the mtime & ctime to the date that this record was originally stored.
+ */
+int pstore_mkfile(enum pstore_type_id type, char *psname, u64 id, int count,
+ char *data, bool compressed, size_t size,
+ struct timespec time, struct pstore_info *psi)
+{
+ struct dentry *root = pstore_sb->s_root;
+ struct dentry *dentry;
+ struct inode *inode;
+ int rc = 0;
+ char name[PSTORE_NAMELEN];
+ struct pstore_private *private, *pos;
+ unsigned long flags;
+
+ spin_lock_irqsave(&allpstore_lock, flags);
+ list_for_each_entry(pos, &allpstore, list) {
+ if (pos->type == type &&
+ pos->id == id &&
+ pos->psi == psi) {
+ rc = -EEXIST;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&allpstore_lock, flags);
+ if (rc)
+ return rc;
+
+ rc = -ENOMEM;
+ inode = pstore_get_inode(pstore_sb);
+ if (!inode)
+ goto fail;
+ inode->i_mode = S_IFREG | 0444;
+ inode->i_fop = &pstore_file_operations;
+ private = kmalloc(sizeof *private + size, GFP_KERNEL);
+ if (!private)
+ goto fail_alloc;
+ private->type = type;
+ private->id = id;
+ private->count = count;
+ private->psi = psi;
+
+ switch (type) {
+ case PSTORE_TYPE_DMESG:
+ scnprintf(name, sizeof(name), "dmesg-%s-%lld%s",
+ psname, id, compressed ? ".enc.z" : "");
+ break;
+ case PSTORE_TYPE_CONSOLE:
+ scnprintf(name, sizeof(name), "console-%s-%lld", psname, id);
+ break;
+ case PSTORE_TYPE_FTRACE:
+ scnprintf(name, sizeof(name), "ftrace-%s-%lld", psname, id);
+ break;
+ case PSTORE_TYPE_MCE:
+ scnprintf(name, sizeof(name), "mce-%s-%lld", psname, id);
+ break;
+ case PSTORE_TYPE_PPC_RTAS:
+ scnprintf(name, sizeof(name), "rtas-%s-%lld", psname, id);
+ break;
+ case PSTORE_TYPE_PPC_OF:
+ scnprintf(name, sizeof(name), "powerpc-ofw-%s-%lld",
+ psname, id);
+ break;
+ case PSTORE_TYPE_PPC_COMMON:
+ scnprintf(name, sizeof(name), "powerpc-common-%s-%lld",
+ psname, id);
+ break;
+ case PSTORE_TYPE_PMSG:
+ scnprintf(name, sizeof(name), "pmsg-%s-%lld", psname, id);
+ break;
+ case PSTORE_TYPE_PPC_OPAL:
+ sprintf(name, "powerpc-opal-%s-%lld", psname, id);
+ break;
+ case PSTORE_TYPE_UNKNOWN:
+ scnprintf(name, sizeof(name), "unknown-%s-%lld", psname, id);
+ break;
+ default:
+ scnprintf(name, sizeof(name), "type%d-%s-%lld",
+ type, psname, id);
+ break;
+ }
+
+ mutex_lock(&d_inode(root)->i_mutex);
+
+ dentry = d_alloc_name(root, name);
+ if (!dentry)
+ goto fail_lockedalloc;
+
+ memcpy(private->data, data, size);
+ inode->i_size = private->size = size;
+
+ inode->i_private = private;
+
+ if (time.tv_sec)
+ inode->i_mtime = inode->i_ctime = time;
+
+ d_add(dentry, inode);
+
+ spin_lock_irqsave(&allpstore_lock, flags);
+ list_add(&private->list, &allpstore);
+ spin_unlock_irqrestore(&allpstore_lock, flags);
+
+ mutex_unlock(&d_inode(root)->i_mutex);
+
+ return 0;
+
+fail_lockedalloc:
+ mutex_unlock(&d_inode(root)->i_mutex);
+ kfree(private);
+fail_alloc:
+ iput(inode);
+
+fail:
+ return rc;
+}
+
+static int pstore_fill_super(struct super_block *sb, void *data, int silent)
+{
+ struct inode *inode;
+
+ save_mount_options(sb, data);
+
+ pstore_sb = sb;
+
+ sb->s_maxbytes = MAX_LFS_FILESIZE;
+ sb->s_blocksize = PAGE_CACHE_SIZE;
+ sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
+ sb->s_magic = PSTOREFS_MAGIC;
+ sb->s_op = &pstore_ops;
+ sb->s_time_gran = 1;
+
+ parse_options(data);
+
+ inode = pstore_get_inode(sb);
+ if (inode) {
+ inode->i_mode = S_IFDIR | 0755;
+ inode->i_op = &pstore_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inc_nlink(inode);
+ }
+ sb->s_root = d_make_root(inode);
+ if (!sb->s_root)
+ return -ENOMEM;
+
+ pstore_get_records(0);
+
+ return 0;
+}
+
+static struct dentry *pstore_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ return mount_single(fs_type, flags, data, pstore_fill_super);
+}
+
+static void pstore_kill_sb(struct super_block *sb)
+{
+ kill_litter_super(sb);
+ pstore_sb = NULL;
+}
+
+static struct file_system_type pstore_fs_type = {
+ .name = "pstore",
+ .mount = pstore_mount,
+ .kill_sb = pstore_kill_sb,
+};
+
+static int __init init_pstore_fs(void)
+{
+ int err;
+
+ /* Create a convenient mount point for people to access pstore */
+ err = sysfs_create_mount_point(fs_kobj, "pstore");
+ if (err)
+ goto out;
+
+ err = register_filesystem(&pstore_fs_type);
+ if (err < 0)
+ sysfs_remove_mount_point(fs_kobj, "pstore");
+
+out:
+ return err;
+}
+module_init(init_pstore_fs)
+
+MODULE_AUTHOR("Tony Luck <tony.luck@intel.com>");
+MODULE_LICENSE("GPL");
diff --git a/kernel/fs/pstore/internal.h b/kernel/fs/pstore/internal.h
new file mode 100644
index 000000000..c36ba2cd0
--- /dev/null
+++ b/kernel/fs/pstore/internal.h
@@ -0,0 +1,64 @@
+#ifndef __PSTORE_INTERNAL_H__
+#define __PSTORE_INTERNAL_H__
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/pstore.h>
+
+#if NR_CPUS <= 2 && defined(CONFIG_ARM_THUMB)
+#define PSTORE_CPU_IN_IP 0x1
+#elif NR_CPUS <= 4 && defined(CONFIG_ARM)
+#define PSTORE_CPU_IN_IP 0x3
+#endif
+
+struct pstore_ftrace_record {
+ unsigned long ip;
+ unsigned long parent_ip;
+#ifndef PSTORE_CPU_IN_IP
+ unsigned int cpu;
+#endif
+};
+
+static inline void
+pstore_ftrace_encode_cpu(struct pstore_ftrace_record *rec, unsigned int cpu)
+{
+#ifndef PSTORE_CPU_IN_IP
+ rec->cpu = cpu;
+#else
+ rec->ip |= cpu;
+#endif
+}
+
+static inline unsigned int
+pstore_ftrace_decode_cpu(struct pstore_ftrace_record *rec)
+{
+#ifndef PSTORE_CPU_IN_IP
+ return rec->cpu;
+#else
+ return rec->ip & PSTORE_CPU_IN_IP;
+#endif
+}
+
+#ifdef CONFIG_PSTORE_FTRACE
+extern void pstore_register_ftrace(void);
+#else
+static inline void pstore_register_ftrace(void) {}
+#endif
+
+#ifdef CONFIG_PSTORE_PMSG
+extern void pstore_register_pmsg(void);
+#else
+static inline void pstore_register_pmsg(void) {}
+#endif
+
+extern struct pstore_info *psinfo;
+
+extern void pstore_set_kmsg_bytes(int);
+extern void pstore_get_records(int);
+extern int pstore_mkfile(enum pstore_type_id, char *psname, u64 id,
+ int count, char *data, bool compressed,
+ size_t size, struct timespec time,
+ struct pstore_info *psi);
+extern int pstore_is_mounted(void);
+
+#endif
diff --git a/kernel/fs/pstore/platform.c b/kernel/fs/pstore/platform.c
new file mode 100644
index 000000000..c4c9a10c5
--- /dev/null
+++ b/kernel/fs/pstore/platform.c
@@ -0,0 +1,547 @@
+/*
+ * Persistent Storage - platform driver interface parts.
+ *
+ * Copyright (C) 2007-2008 Google, Inc.
+ * Copyright (C) 2010 Intel Corporation <tony.luck@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#define pr_fmt(fmt) "pstore: " fmt
+
+#include <linux/atomic.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kmsg_dump.h>
+#include <linux/console.h>
+#include <linux/module.h>
+#include <linux/pstore.h>
+#include <linux/zlib.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/hardirq.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+
+#include "internal.h"
+
+/*
+ * We defer making "oops" entries appear in pstore - see
+ * whether the system is actually still running well enough
+ * to let someone see the entry
+ */
+static int pstore_update_ms = -1;
+module_param_named(update_ms, pstore_update_ms, int, 0600);
+MODULE_PARM_DESC(update_ms, "milliseconds before pstore updates its content "
+ "(default is -1, which means runtime updates are disabled; "
+ "enabling this option is not safe, it may lead to further "
+ "corruption on Oopses)");
+
+static int pstore_new_entry;
+
+static void pstore_timefunc(unsigned long);
+static DEFINE_TIMER(pstore_timer, pstore_timefunc, 0, 0);
+
+static void pstore_dowork(struct work_struct *);
+static DECLARE_WORK(pstore_work, pstore_dowork);
+
+/*
+ * pstore_lock just protects "psinfo" during
+ * calls to pstore_register()
+ */
+static DEFINE_SPINLOCK(pstore_lock);
+struct pstore_info *psinfo;
+
+static char *backend;
+
+/* Compression parameters */
+#define COMPR_LEVEL 6
+#define WINDOW_BITS 12
+#define MEM_LEVEL 4
+static struct z_stream_s stream;
+
+static char *big_oops_buf;
+static size_t big_oops_buf_sz;
+
+/* How much of the console log to snapshot */
+static unsigned long kmsg_bytes = 10240;
+
+void pstore_set_kmsg_bytes(int bytes)
+{
+ kmsg_bytes = bytes;
+}
+
+/* Tag each group of saved records with a sequence number */
+static int oopscount;
+
+static const char *get_reason_str(enum kmsg_dump_reason reason)
+{
+ switch (reason) {
+ case KMSG_DUMP_PANIC:
+ return "Panic";
+ case KMSG_DUMP_OOPS:
+ return "Oops";
+ case KMSG_DUMP_EMERG:
+ return "Emergency";
+ case KMSG_DUMP_RESTART:
+ return "Restart";
+ case KMSG_DUMP_HALT:
+ return "Halt";
+ case KMSG_DUMP_POWEROFF:
+ return "Poweroff";
+ default:
+ return "Unknown";
+ }
+}
+
+bool pstore_cannot_block_path(enum kmsg_dump_reason reason)
+{
+ /*
+ * In case of NMI path, pstore shouldn't be blocked
+ * regardless of reason.
+ */
+ if (in_nmi())
+ return true;
+
+ switch (reason) {
+ /* In panic case, other cpus are stopped by smp_send_stop(). */
+ case KMSG_DUMP_PANIC:
+ /* Emergency restart shouldn't be blocked by spin lock. */
+ case KMSG_DUMP_EMERG:
+ return true;
+ default:
+ return false;
+ }
+}
+EXPORT_SYMBOL_GPL(pstore_cannot_block_path);
+
+/* Derived from logfs_compress() */
+static int pstore_compress(const void *in, void *out, size_t inlen,
+ size_t outlen)
+{
+ int err, ret;
+
+ ret = -EIO;
+ err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
+ MEM_LEVEL, Z_DEFAULT_STRATEGY);
+ if (err != Z_OK)
+ goto error;
+
+ stream.next_in = in;
+ stream.avail_in = inlen;
+ stream.total_in = 0;
+ stream.next_out = out;
+ stream.avail_out = outlen;
+ stream.total_out = 0;
+
+ err = zlib_deflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END)
+ goto error;
+
+ err = zlib_deflateEnd(&stream);
+ if (err != Z_OK)
+ goto error;
+
+ if (stream.total_out >= stream.total_in)
+ goto error;
+
+ ret = stream.total_out;
+error:
+ return ret;
+}
+
+/* Derived from logfs_uncompress */
+static int pstore_decompress(void *in, void *out, size_t inlen, size_t outlen)
+{
+ int err, ret;
+
+ ret = -EIO;
+ err = zlib_inflateInit2(&stream, WINDOW_BITS);
+ if (err != Z_OK)
+ goto error;
+
+ stream.next_in = in;
+ stream.avail_in = inlen;
+ stream.total_in = 0;
+ stream.next_out = out;
+ stream.avail_out = outlen;
+ stream.total_out = 0;
+
+ err = zlib_inflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END)
+ goto error;
+
+ err = zlib_inflateEnd(&stream);
+ if (err != Z_OK)
+ goto error;
+
+ ret = stream.total_out;
+error:
+ return ret;
+}
+
+static void allocate_buf_for_compression(void)
+{
+ size_t size;
+ size_t cmpr;
+
+ switch (psinfo->bufsize) {
+ /* buffer range for efivars */
+ case 1000 ... 2000:
+ cmpr = 56;
+ break;
+ case 2001 ... 3000:
+ cmpr = 54;
+ break;
+ case 3001 ... 3999:
+ cmpr = 52;
+ break;
+ /* buffer range for nvram, erst */
+ case 4000 ... 10000:
+ cmpr = 45;
+ break;
+ default:
+ cmpr = 60;
+ break;
+ }
+
+ big_oops_buf_sz = (psinfo->bufsize * 100) / cmpr;
+ big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
+ if (big_oops_buf) {
+ size = max(zlib_deflate_workspacesize(WINDOW_BITS, MEM_LEVEL),
+ zlib_inflate_workspacesize());
+ stream.workspace = kmalloc(size, GFP_KERNEL);
+ if (!stream.workspace) {
+ pr_err("No memory for compression workspace; skipping compression\n");
+ kfree(big_oops_buf);
+ big_oops_buf = NULL;
+ }
+ } else {
+ pr_err("No memory for uncompressed data; skipping compression\n");
+ stream.workspace = NULL;
+ }
+
+}
+
+/*
+ * Called when compression fails, since the printk buffer
+ * would be fetched for compression calling it again when
+ * compression fails would have moved the iterator of
+ * printk buffer which results in fetching old contents.
+ * Copy the recent messages from big_oops_buf to psinfo->buf
+ */
+static size_t copy_kmsg_to_buffer(int hsize, size_t len)
+{
+ size_t total_len;
+ size_t diff;
+
+ total_len = hsize + len;
+
+ if (total_len > psinfo->bufsize) {
+ diff = total_len - psinfo->bufsize + hsize;
+ memcpy(psinfo->buf, big_oops_buf, hsize);
+ memcpy(psinfo->buf + hsize, big_oops_buf + diff,
+ psinfo->bufsize - hsize);
+ total_len = psinfo->bufsize;
+ } else
+ memcpy(psinfo->buf, big_oops_buf, total_len);
+
+ return total_len;
+}
+
+/*
+ * callback from kmsg_dump. (s2,l2) has the most recently
+ * written bytes, older bytes are in (s1,l1). Save as much
+ * as we can from the end of the buffer.
+ */
+static void pstore_dump(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason)
+{
+ unsigned long total = 0;
+ const char *why;
+ u64 id;
+ unsigned int part = 1;
+ unsigned long flags = 0;
+ int is_locked = 0;
+ int ret;
+
+ why = get_reason_str(reason);
+
+ if (pstore_cannot_block_path(reason)) {
+ is_locked = spin_trylock_irqsave(&psinfo->buf_lock, flags);
+ if (!is_locked) {
+ pr_err("pstore dump routine blocked in %s path, may corrupt error record\n"
+ , in_nmi() ? "NMI" : why);
+ }
+ } else
+ spin_lock_irqsave(&psinfo->buf_lock, flags);
+ oopscount++;
+ while (total < kmsg_bytes) {
+ char *dst;
+ unsigned long size;
+ int hsize;
+ int zipped_len = -1;
+ size_t len;
+ bool compressed;
+ size_t total_len;
+
+ if (big_oops_buf) {
+ dst = big_oops_buf;
+ hsize = sprintf(dst, "%s#%d Part%u\n", why,
+ oopscount, part);
+ size = big_oops_buf_sz - hsize;
+
+ if (!kmsg_dump_get_buffer(dumper, true, dst + hsize,
+ size, &len))
+ break;
+
+ zipped_len = pstore_compress(dst, psinfo->buf,
+ hsize + len, psinfo->bufsize);
+
+ if (zipped_len > 0) {
+ compressed = true;
+ total_len = zipped_len;
+ } else {
+ compressed = false;
+ total_len = copy_kmsg_to_buffer(hsize, len);
+ }
+ } else {
+ dst = psinfo->buf;
+ hsize = sprintf(dst, "%s#%d Part%u\n", why, oopscount,
+ part);
+ size = psinfo->bufsize - hsize;
+ dst += hsize;
+
+ if (!kmsg_dump_get_buffer(dumper, true, dst,
+ size, &len))
+ break;
+
+ compressed = false;
+ total_len = hsize + len;
+ }
+
+ ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
+ oopscount, compressed, total_len, psinfo);
+ if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
+ pstore_new_entry = 1;
+
+ total += total_len;
+ part++;
+ }
+ if (pstore_cannot_block_path(reason)) {
+ if (is_locked)
+ spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ } else
+ spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+}
+
+static struct kmsg_dumper pstore_dumper = {
+ .dump = pstore_dump,
+};
+
+#ifdef CONFIG_PSTORE_CONSOLE
+static void pstore_console_write(struct console *con, const char *s, unsigned c)
+{
+ const char *e = s + c;
+
+ while (s < e) {
+ unsigned long flags;
+ u64 id;
+
+ if (c > psinfo->bufsize)
+ c = psinfo->bufsize;
+
+ if (oops_in_progress) {
+ if (!spin_trylock_irqsave(&psinfo->buf_lock, flags))
+ break;
+ } else {
+ spin_lock_irqsave(&psinfo->buf_lock, flags);
+ }
+ memcpy(psinfo->buf, s, c);
+ psinfo->write(PSTORE_TYPE_CONSOLE, 0, &id, 0, 0, 0, c, psinfo);
+ spin_unlock_irqrestore(&psinfo->buf_lock, flags);
+ s += c;
+ c = e - s;
+ }
+}
+
+static struct console pstore_console = {
+ .name = "pstore",
+ .write = pstore_console_write,
+ .flags = CON_PRINTBUFFER | CON_ENABLED | CON_ANYTIME,
+ .index = -1,
+};
+
+static void pstore_register_console(void)
+{
+ register_console(&pstore_console);
+}
+#else
+static void pstore_register_console(void) {}
+#endif
+
+static int pstore_write_compat(enum pstore_type_id type,
+ enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part, int count,
+ bool compressed, size_t size,
+ struct pstore_info *psi)
+{
+ return psi->write_buf(type, reason, id, part, psinfo->buf, compressed,
+ size, psi);
+}
+
+/*
+ * platform specific persistent storage driver registers with
+ * us here. If pstore is already mounted, call the platform
+ * read function right away to populate the file system. If not
+ * then the pstore mount code will call us later to fill out
+ * the file system.
+ *
+ * Register with kmsg_dump to save last part of console log on panic.
+ */
+int pstore_register(struct pstore_info *psi)
+{
+ struct module *owner = psi->owner;
+
+ if (backend && strcmp(backend, psi->name))
+ return -EPERM;
+
+ spin_lock(&pstore_lock);
+ if (psinfo) {
+ spin_unlock(&pstore_lock);
+ return -EBUSY;
+ }
+
+ if (!psi->write)
+ psi->write = pstore_write_compat;
+ psinfo = psi;
+ mutex_init(&psinfo->read_mutex);
+ spin_unlock(&pstore_lock);
+
+ if (owner && !try_module_get(owner)) {
+ psinfo = NULL;
+ return -EINVAL;
+ }
+
+ allocate_buf_for_compression();
+
+ if (pstore_is_mounted())
+ pstore_get_records(0);
+
+ kmsg_dump_register(&pstore_dumper);
+
+ if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
+ pstore_register_console();
+ pstore_register_ftrace();
+ pstore_register_pmsg();
+ }
+
+ if (pstore_update_ms >= 0) {
+ pstore_timer.expires = jiffies +
+ msecs_to_jiffies(pstore_update_ms);
+ add_timer(&pstore_timer);
+ }
+
+ pr_info("Registered %s as persistent store backend\n", psi->name);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pstore_register);
+
+/*
+ * Read all the records from the persistent store. Create
+ * files in our filesystem. Don't warn about -EEXIST errors
+ * when we are re-scanning the backing store looking to add new
+ * error records.
+ */
+void pstore_get_records(int quiet)
+{
+ struct pstore_info *psi = psinfo;
+ char *buf = NULL;
+ ssize_t size;
+ u64 id;
+ int count;
+ enum pstore_type_id type;
+ struct timespec time;
+ int failed = 0, rc;
+ bool compressed;
+ int unzipped_len = -1;
+
+ if (!psi)
+ return;
+
+ mutex_lock(&psi->read_mutex);
+ if (psi->open && psi->open(psi))
+ goto out;
+
+ while ((size = psi->read(&id, &type, &count, &time, &buf, &compressed,
+ psi)) > 0) {
+ if (compressed && (type == PSTORE_TYPE_DMESG)) {
+ if (big_oops_buf)
+ unzipped_len = pstore_decompress(buf,
+ big_oops_buf, size,
+ big_oops_buf_sz);
+
+ if (unzipped_len > 0) {
+ kfree(buf);
+ buf = big_oops_buf;
+ size = unzipped_len;
+ compressed = false;
+ } else {
+ pr_err("decompression failed;returned %d\n",
+ unzipped_len);
+ compressed = true;
+ }
+ }
+ rc = pstore_mkfile(type, psi->name, id, count, buf,
+ compressed, (size_t)size, time, psi);
+ if (unzipped_len < 0) {
+ /* Free buffer other than big oops */
+ kfree(buf);
+ buf = NULL;
+ } else
+ unzipped_len = -1;
+ if (rc && (rc != -EEXIST || !quiet))
+ failed++;
+ }
+ if (psi->close)
+ psi->close(psi);
+out:
+ mutex_unlock(&psi->read_mutex);
+
+ if (failed)
+ pr_warn("failed to load %d record(s) from '%s'\n",
+ failed, psi->name);
+}
+
+static void pstore_dowork(struct work_struct *work)
+{
+ pstore_get_records(1);
+}
+
+static void pstore_timefunc(unsigned long dummy)
+{
+ if (pstore_new_entry) {
+ pstore_new_entry = 0;
+ schedule_work(&pstore_work);
+ }
+
+ mod_timer(&pstore_timer, jiffies + msecs_to_jiffies(pstore_update_ms));
+}
+
+module_param(backend, charp, 0444);
+MODULE_PARM_DESC(backend, "Pstore backend to use");
diff --git a/kernel/fs/pstore/pmsg.c b/kernel/fs/pstore/pmsg.c
new file mode 100644
index 000000000..feb5dd294
--- /dev/null
+++ b/kernel/fs/pstore/pmsg.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright 2014 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include "internal.h"
+
+static DEFINE_MUTEX(pmsg_lock);
+#define PMSG_MAX_BOUNCE_BUFFER_SIZE (2*PAGE_SIZE)
+
+static ssize_t write_pmsg(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ size_t i, buffer_size;
+ char *buffer;
+
+ if (!count)
+ return 0;
+
+ if (!access_ok(VERIFY_READ, buf, count))
+ return -EFAULT;
+
+ buffer_size = count;
+ if (buffer_size > PMSG_MAX_BOUNCE_BUFFER_SIZE)
+ buffer_size = PMSG_MAX_BOUNCE_BUFFER_SIZE;
+ buffer = vmalloc(buffer_size);
+
+ mutex_lock(&pmsg_lock);
+ for (i = 0; i < count; ) {
+ size_t c = min(count - i, buffer_size);
+ u64 id;
+ long ret;
+
+ ret = __copy_from_user(buffer, buf + i, c);
+ if (unlikely(ret != 0)) {
+ mutex_unlock(&pmsg_lock);
+ vfree(buffer);
+ return -EFAULT;
+ }
+ psinfo->write_buf(PSTORE_TYPE_PMSG, 0, &id, 0, buffer, 0, c,
+ psinfo);
+
+ i += c;
+ }
+
+ mutex_unlock(&pmsg_lock);
+ vfree(buffer);
+ return count;
+}
+
+static const struct file_operations pmsg_fops = {
+ .owner = THIS_MODULE,
+ .llseek = noop_llseek,
+ .write = write_pmsg,
+};
+
+static struct class *pmsg_class;
+static int pmsg_major;
+#define PMSG_NAME "pmsg"
+#undef pr_fmt
+#define pr_fmt(fmt) PMSG_NAME ": " fmt
+
+static char *pmsg_devnode(struct device *dev, umode_t *mode)
+{
+ if (mode)
+ *mode = 0220;
+ return NULL;
+}
+
+void pstore_register_pmsg(void)
+{
+ struct device *pmsg_device;
+
+ pmsg_major = register_chrdev(0, PMSG_NAME, &pmsg_fops);
+ if (pmsg_major < 0) {
+ pr_err("register_chrdev failed\n");
+ goto err;
+ }
+
+ pmsg_class = class_create(THIS_MODULE, PMSG_NAME);
+ if (IS_ERR(pmsg_class)) {
+ pr_err("device class file already in use\n");
+ goto err_class;
+ }
+ pmsg_class->devnode = pmsg_devnode;
+
+ pmsg_device = device_create(pmsg_class, NULL, MKDEV(pmsg_major, 0),
+ NULL, "%s%d", PMSG_NAME, 0);
+ if (IS_ERR(pmsg_device)) {
+ pr_err("failed to create device\n");
+ goto err_device;
+ }
+ return;
+
+err_device:
+ class_destroy(pmsg_class);
+err_class:
+ unregister_chrdev(pmsg_major, PMSG_NAME);
+err:
+ return;
+}
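
[Editor's note] The PSTORE_PMSG help text earlier describes the /dev/pmsg0 character device that pstore_register_pmsg() creates above (write-only, mode 0220). An illustrative user-space sketch, not part of this commit:

/* User-space sketch: persist a marker message across reboot. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "user space marker before reboot\n";
	int fd = open("/dev/pmsg0", O_WRONLY);

	if (fd < 0) {
		perror("open /dev/pmsg0");
		return 1;
	}
	if (write(fd, msg, strlen(msg)) < 0)
		perror("write");
	close(fd);
	return 0;
}

The data is passed to psinfo->write_buf() as PSTORE_TYPE_PMSG and, with the ramoops backend, can be read back after reboot from /sys/fs/pstore/pmsg-ramoops-[ID].
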
diff --git a/kernel/fs/pstore/ram.c b/kernel/fs/pstore/ram.c
new file mode 100644
index 000000000..44a549bee
--- /dev/null
+++ b/kernel/fs/pstore/ram.c
@@ -0,0 +1,648 @@
+/*
+ * RAM Oops/Panic logger
+ *
+ * Copyright (C) 2010 Marco Stornelli <marco.stornelli@gmail.com>
+ * Copyright (C) 2011 Kees Cook <keescook@chromium.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/pstore.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/compiler.h>
+#include <linux/pstore_ram.h>
+
+#define RAMOOPS_KERNMSG_HDR "===="
+#define MIN_MEM_SIZE 4096UL
+
+static ulong record_size = MIN_MEM_SIZE;
+module_param(record_size, ulong, 0400);
+MODULE_PARM_DESC(record_size,
+ "size of each dump done on oops/panic");
+
+static ulong ramoops_console_size = MIN_MEM_SIZE;
+module_param_named(console_size, ramoops_console_size, ulong, 0400);
+MODULE_PARM_DESC(console_size, "size of kernel console log");
+
+static ulong ramoops_ftrace_size = MIN_MEM_SIZE;
+module_param_named(ftrace_size, ramoops_ftrace_size, ulong, 0400);
+MODULE_PARM_DESC(ftrace_size, "size of ftrace log");
+
+static ulong ramoops_pmsg_size = MIN_MEM_SIZE;
+module_param_named(pmsg_size, ramoops_pmsg_size, ulong, 0400);
+MODULE_PARM_DESC(pmsg_size, "size of user space message log");
+
+static ulong mem_address;
+module_param(mem_address, ulong, 0400);
+MODULE_PARM_DESC(mem_address,
+ "start of reserved RAM used to store oops/panic logs");
+
+static ulong mem_size;
+module_param(mem_size, ulong, 0400);
+MODULE_PARM_DESC(mem_size,
+ "size of reserved RAM used to store oops/panic logs");
+
+static unsigned int mem_type;
+module_param(mem_type, uint, 0600);
+MODULE_PARM_DESC(mem_type,
+ "set to 1 to try to use unbuffered memory (default 0)");
+
+static int dump_oops = 1;
+module_param(dump_oops, int, 0600);
+MODULE_PARM_DESC(dump_oops,
+ "set to 1 to dump oopses, 0 to only dump panics (default 1)");
+
+static int ramoops_ecc;
+module_param_named(ecc, ramoops_ecc, int, 0600);
+MODULE_PARM_DESC(ramoops_ecc,
+ "if non-zero, the option enables ECC support and specifies "
+ "ECC buffer size in bytes (1 is a special value, means 16 "
+ "bytes ECC)");
+
+struct ramoops_context {
+ struct persistent_ram_zone **przs;
+ struct persistent_ram_zone *cprz;
+ struct persistent_ram_zone *fprz;
+ struct persistent_ram_zone *mprz;
+ phys_addr_t phys_addr;
+ unsigned long size;
+ unsigned int memtype;
+ size_t record_size;
+ size_t console_size;
+ size_t ftrace_size;
+ size_t pmsg_size;
+ int dump_oops;
+ struct persistent_ram_ecc_info ecc_info;
+ unsigned int max_dump_cnt;
+ unsigned int dump_write_cnt;
+ /* _read_cnt need clear on ramoops_pstore_open */
+ unsigned int dump_read_cnt;
+ unsigned int console_read_cnt;
+ unsigned int ftrace_read_cnt;
+ unsigned int pmsg_read_cnt;
+ struct pstore_info pstore;
+};
+
+static struct platform_device *dummy;
+static struct ramoops_platform_data *dummy_data;
+
+static int ramoops_pstore_open(struct pstore_info *psi)
+{
+ struct ramoops_context *cxt = psi->data;
+
+ cxt->dump_read_cnt = 0;
+ cxt->console_read_cnt = 0;
+ cxt->ftrace_read_cnt = 0;
+ cxt->pmsg_read_cnt = 0;
+ return 0;
+}
+
+static struct persistent_ram_zone *
+ramoops_get_next_prz(struct persistent_ram_zone *przs[], uint *c, uint max,
+ u64 *id,
+ enum pstore_type_id *typep, enum pstore_type_id type,
+ bool update)
+{
+ struct persistent_ram_zone *prz;
+ int i = (*c)++;
+
+ if (i >= max)
+ return NULL;
+
+ prz = przs[i];
+ if (!prz)
+ return NULL;
+
+ /* Update old/shadowed buffer. */
+ if (update)
+ persistent_ram_save_old(prz);
+
+ if (!persistent_ram_old_size(prz))
+ return NULL;
+
+ *typep = type;
+ *id = i;
+
+ return prz;
+}
+
+static int ramoops_read_kmsg_hdr(char *buffer, struct timespec *time,
+ bool *compressed)
+{
+ char data_type;
+ int header_length = 0;
+
+ if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lu.%lu-%c\n%n", &time->tv_sec,
+ &time->tv_nsec, &data_type, &header_length) == 3) {
+ if (data_type == 'C')
+ *compressed = true;
+ else
+ *compressed = false;
+ } else if (sscanf(buffer, RAMOOPS_KERNMSG_HDR "%lu.%lu\n%n",
+ &time->tv_sec, &time->tv_nsec, &header_length) == 2) {
+ *compressed = false;
+ } else {
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ *compressed = false;
+ }
+ return header_length;
+}
+
+static bool prz_ok(struct persistent_ram_zone *prz)
+{
+ return !!prz && !!(persistent_ram_old_size(prz) +
+ persistent_ram_ecc_string(prz, NULL, 0));
+}
+
+static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
+ int *count, struct timespec *time,
+ char **buf, bool *compressed,
+ struct pstore_info *psi)
+{
+ ssize_t size;
+ ssize_t ecc_notice_size;
+ struct ramoops_context *cxt = psi->data;
+ struct persistent_ram_zone *prz;
+ int header_length;
+
+ prz = ramoops_get_next_prz(cxt->przs, &cxt->dump_read_cnt,
+ cxt->max_dump_cnt, id, type,
+ PSTORE_TYPE_DMESG, 1);
+ if (!prz_ok(prz))
+ prz = ramoops_get_next_prz(&cxt->cprz, &cxt->console_read_cnt,
+ 1, id, type, PSTORE_TYPE_CONSOLE, 0);
+ if (!prz_ok(prz))
+ prz = ramoops_get_next_prz(&cxt->fprz, &cxt->ftrace_read_cnt,
+ 1, id, type, PSTORE_TYPE_FTRACE, 0);
+ if (!prz_ok(prz))
+ prz = ramoops_get_next_prz(&cxt->mprz, &cxt->pmsg_read_cnt,
+ 1, id, type, PSTORE_TYPE_PMSG, 0);
+ if (!prz_ok(prz))
+ return 0;
+
+ if (!persistent_ram_old(prz))
+ return 0;
+
+ size = persistent_ram_old_size(prz);
+ header_length = ramoops_read_kmsg_hdr(persistent_ram_old(prz), time,
+ compressed);
+ size -= header_length;
+
+ /* ECC correction notice */
+ ecc_notice_size = persistent_ram_ecc_string(prz, NULL, 0);
+
+ *buf = kmalloc(size + ecc_notice_size + 1, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+
+ memcpy(*buf, (char *)persistent_ram_old(prz) + header_length, size);
+ persistent_ram_ecc_string(prz, *buf + size, ecc_notice_size + 1);
+
+ return size + ecc_notice_size;
+}
+
+static size_t ramoops_write_kmsg_hdr(struct persistent_ram_zone *prz,
+ bool compressed)
+{
+ char *hdr;
+ struct timespec timestamp;
+ size_t len;
+
+ /* Report zeroed timestamp if called before timekeeping has resumed. */
+ if (__getnstimeofday(&timestamp)) {
+ timestamp.tv_sec = 0;
+ timestamp.tv_nsec = 0;
+ }
+ hdr = kasprintf(GFP_ATOMIC, RAMOOPS_KERNMSG_HDR "%lu.%lu-%c\n",
+ (long)timestamp.tv_sec, (long)(timestamp.tv_nsec / 1000),
+ compressed ? 'C' : 'D');
+ WARN_ON_ONCE(!hdr);
+ len = hdr ? strlen(hdr) : 0;
+ persistent_ram_write(prz, hdr, len);
+ kfree(hdr);
+
+ return len;
+}
+
+static int notrace ramoops_pstore_write_buf(enum pstore_type_id type,
+ enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part,
+ const char *buf,
+ bool compressed, size_t size,
+ struct pstore_info *psi)
+{
+ struct ramoops_context *cxt = psi->data;
+ struct persistent_ram_zone *prz;
+ size_t hlen;
+
+ if (type == PSTORE_TYPE_CONSOLE) {
+ if (!cxt->cprz)
+ return -ENOMEM;
+ persistent_ram_write(cxt->cprz, buf, size);
+ return 0;
+ } else if (type == PSTORE_TYPE_FTRACE) {
+ if (!cxt->fprz)
+ return -ENOMEM;
+ persistent_ram_write(cxt->fprz, buf, size);
+ return 0;
+ } else if (type == PSTORE_TYPE_PMSG) {
+ if (!cxt->mprz)
+ return -ENOMEM;
+ persistent_ram_write(cxt->mprz, buf, size);
+ return 0;
+ }
+
+ if (type != PSTORE_TYPE_DMESG)
+ return -EINVAL;
+
+ /* Out of the various dmesg dump types, ramoops is currently designed
+ * to only store crash logs, rather than storing general kernel logs.
+ */
+ if (reason != KMSG_DUMP_OOPS &&
+ reason != KMSG_DUMP_PANIC)
+ return -EINVAL;
+
+ /* Skip oopses when configured to do so. */
+ if (reason == KMSG_DUMP_OOPS && !cxt->dump_oops)
+ return -EINVAL;
+
+ /* Explicitly only take the first part of any new crash.
+ * If our buffer is larger than kmsg_bytes, this can never happen,
+ * and if our buffer is smaller than kmsg_bytes, we don't want the
+ * report split across multiple records.
+ */
+ if (part != 1)
+ return -ENOSPC;
+
+ if (!cxt->przs)
+ return -ENOSPC;
+
+ prz = cxt->przs[cxt->dump_write_cnt];
+
+ hlen = ramoops_write_kmsg_hdr(prz, compressed);
+ if (size + hlen > prz->buffer_size)
+ size = prz->buffer_size - hlen;
+ persistent_ram_write(prz, buf, size);
+
+ cxt->dump_write_cnt = (cxt->dump_write_cnt + 1) % cxt->max_dump_cnt;
+
+ return 0;
+}
+
+static int ramoops_pstore_erase(enum pstore_type_id type, u64 id, int count,
+ struct timespec time, struct pstore_info *psi)
+{
+ struct ramoops_context *cxt = psi->data;
+ struct persistent_ram_zone *prz;
+
+ switch (type) {
+ case PSTORE_TYPE_DMESG:
+ if (id >= cxt->max_dump_cnt)
+ return -EINVAL;
+ prz = cxt->przs[id];
+ break;
+ case PSTORE_TYPE_CONSOLE:
+ prz = cxt->cprz;
+ break;
+ case PSTORE_TYPE_FTRACE:
+ prz = cxt->fprz;
+ break;
+ case PSTORE_TYPE_PMSG:
+ prz = cxt->mprz;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ persistent_ram_free_old(prz);
+ persistent_ram_zap(prz);
+
+ return 0;
+}
+
+static struct ramoops_context oops_cxt = {
+ .pstore = {
+ .owner = THIS_MODULE,
+ .name = "ramoops",
+ .open = ramoops_pstore_open,
+ .read = ramoops_pstore_read,
+ .write_buf = ramoops_pstore_write_buf,
+ .erase = ramoops_pstore_erase,
+ },
+};
+
+static void ramoops_free_przs(struct ramoops_context *cxt)
+{
+ int i;
+
+ cxt->max_dump_cnt = 0;
+ if (!cxt->przs)
+ return;
+
+ for (i = 0; !IS_ERR_OR_NULL(cxt->przs[i]); i++)
+ persistent_ram_free(cxt->przs[i]);
+ kfree(cxt->przs);
+}
+
+static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
+ phys_addr_t *paddr, size_t dump_mem_sz)
+{
+ int err = -ENOMEM;
+ int i;
+
+ if (!cxt->record_size)
+ return 0;
+
+ if (*paddr + dump_mem_sz - cxt->phys_addr > cxt->size) {
+ dev_err(dev, "no room for dumps\n");
+ return -ENOMEM;
+ }
+
+ cxt->max_dump_cnt = dump_mem_sz / cxt->record_size;
+ if (!cxt->max_dump_cnt)
+ return -ENOMEM;
+
+ cxt->przs = kzalloc(sizeof(*cxt->przs) * cxt->max_dump_cnt,
+ GFP_KERNEL);
+ if (!cxt->przs) {
+ dev_err(dev, "failed to initialize a prz array for dumps\n");
+ goto fail_prz;
+ }
+
+ for (i = 0; i < cxt->max_dump_cnt; i++) {
+ size_t sz = cxt->record_size;
+
+ cxt->przs[i] = persistent_ram_new(*paddr, sz, 0,
+ &cxt->ecc_info,
+ cxt->memtype);
+ if (IS_ERR(cxt->przs[i])) {
+ err = PTR_ERR(cxt->przs[i]);
+ dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
+ sz, (unsigned long long)*paddr, err);
+ goto fail_prz;
+ }
+ *paddr += sz;
+ }
+
+ return 0;
+fail_prz:
+ ramoops_free_przs(cxt);
+ return err;
+}
+
+static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
+ struct persistent_ram_zone **prz,
+ phys_addr_t *paddr, size_t sz, u32 sig)
+{
+ if (!sz)
+ return 0;
+
+ if (*paddr + sz - cxt->phys_addr > cxt->size) {
+ dev_err(dev, "no room for mem region (0x%zx@0x%llx) in (0x%lx@0x%llx)\n",
+ sz, (unsigned long long)*paddr,
+ cxt->size, (unsigned long long)cxt->phys_addr);
+ return -ENOMEM;
+ }
+
+ *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
+ if (IS_ERR(*prz)) {
+ int err = PTR_ERR(*prz);
+
+ dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
+ sz, (unsigned long long)*paddr, err);
+ return err;
+ }
+
+ persistent_ram_zap(*prz);
+
+ *paddr += sz;
+
+ return 0;
+}
+
+static int ramoops_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ramoops_platform_data *pdata = pdev->dev.platform_data;
+ struct ramoops_context *cxt = &oops_cxt;
+ size_t dump_mem_sz;
+ phys_addr_t paddr;
+ int err = -EINVAL;
+
+ /* Only a single ramoops area allowed at a time, so fail extra
+ * probes.
+ */
+ if (cxt->max_dump_cnt)
+ goto fail_out;
+
+ if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size &&
+ !pdata->ftrace_size && !pdata->pmsg_size)) {
+ pr_err("The memory size and the record/console size must be "
+ "non-zero\n");
+ goto fail_out;
+ }
+
+ if (pdata->record_size && !is_power_of_2(pdata->record_size))
+ pdata->record_size = rounddown_pow_of_two(pdata->record_size);
+ if (pdata->console_size && !is_power_of_2(pdata->console_size))
+ pdata->console_size = rounddown_pow_of_two(pdata->console_size);
+ if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size))
+ pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size);
+ if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size))
+ pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size);
+
+ cxt->size = pdata->mem_size;
+ cxt->phys_addr = pdata->mem_address;
+ cxt->memtype = pdata->mem_type;
+ cxt->record_size = pdata->record_size;
+ cxt->console_size = pdata->console_size;
+ cxt->ftrace_size = pdata->ftrace_size;
+ cxt->pmsg_size = pdata->pmsg_size;
+ cxt->dump_oops = pdata->dump_oops;
+ cxt->ecc_info = pdata->ecc_info;
+
+ paddr = cxt->phys_addr;
+
+ dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size
+ - cxt->pmsg_size;
+ err = ramoops_init_przs(dev, cxt, &paddr, dump_mem_sz);
+ if (err)
+ goto fail_out;
+
+ err = ramoops_init_prz(dev, cxt, &cxt->cprz, &paddr,
+ cxt->console_size, 0);
+ if (err)
+ goto fail_init_cprz;
+
+ err = ramoops_init_prz(dev, cxt, &cxt->fprz, &paddr, cxt->ftrace_size,
+ LINUX_VERSION_CODE);
+ if (err)
+ goto fail_init_fprz;
+
+ err = ramoops_init_prz(dev, cxt, &cxt->mprz, &paddr, cxt->pmsg_size, 0);
+ if (err)
+ goto fail_init_mprz;
+
+ cxt->pstore.data = cxt;
+ /*
+ * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we
+ * have to handle dumps, we must have at least record_size buffer. And
+ * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be
+ * ZERO_SIZE_PTR).
+ */
+ if (cxt->console_size)
+ cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */
+ cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize);
+ cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL);
+ spin_lock_init(&cxt->pstore.buf_lock);
+ if (!cxt->pstore.buf) {
+ pr_err("cannot allocate pstore buffer\n");
+ err = -ENOMEM;
+ goto fail_clear;
+ }
+
+ err = pstore_register(&cxt->pstore);
+ if (err) {
+ pr_err("registering with pstore failed\n");
+ goto fail_buf;
+ }
+
+ /*
+ * Update the module parameter variables as well so they are visible
+ * through /sys/module/ramoops/parameters/
+ */
+ mem_size = pdata->mem_size;
+ mem_address = pdata->mem_address;
+ record_size = pdata->record_size;
+ dump_oops = pdata->dump_oops;
+ ramoops_console_size = pdata->console_size;
+ ramoops_pmsg_size = pdata->pmsg_size;
+ ramoops_ftrace_size = pdata->ftrace_size;
+
+ pr_info("attached 0x%lx@0x%llx, ecc: %d/%d\n",
+ cxt->size, (unsigned long long)cxt->phys_addr,
+ cxt->ecc_info.ecc_size, cxt->ecc_info.block_size);
+
+ return 0;
+
+fail_buf:
+ kfree(cxt->pstore.buf);
+fail_clear:
+ cxt->pstore.bufsize = 0;
+ kfree(cxt->mprz);
+fail_init_mprz:
+ kfree(cxt->fprz);
+fail_init_fprz:
+ kfree(cxt->cprz);
+fail_init_cprz:
+ ramoops_free_przs(cxt);
+fail_out:
+ return err;
+}
+
+static int __exit ramoops_remove(struct platform_device *pdev)
+{
+#if 0
+ /* TODO(kees): We cannot unload ramoops since pstore doesn't support
+ * unregistering yet.
+ */
+ struct ramoops_context *cxt = &oops_cxt;
+
+ iounmap(cxt->virt_addr);
+ release_mem_region(cxt->phys_addr, cxt->size);
+ cxt->max_dump_cnt = 0;
+
+ /* TODO(kees): When pstore supports unregistering, call it here. */
+ kfree(cxt->pstore.buf);
+ cxt->pstore.bufsize = 0;
+
+ return 0;
+#endif
+ return -EBUSY;
+}
+
+static struct platform_driver ramoops_driver = {
+ .probe = ramoops_probe,
+ .remove = __exit_p(ramoops_remove),
+ .driver = {
+ .name = "ramoops",
+ },
+};
+
+static void ramoops_register_dummy(void)
+{
+ if (!mem_size)
+ return;
+
+ pr_info("using module parameters\n");
+
+ dummy_data = kzalloc(sizeof(*dummy_data), GFP_KERNEL);
+ if (!dummy_data) {
+ pr_info("could not allocate pdata\n");
+ return;
+ }
+
+ dummy_data->mem_size = mem_size;
+ dummy_data->mem_address = mem_address;
+ dummy_data->mem_type = 0;
+ dummy_data->record_size = record_size;
+ dummy_data->console_size = ramoops_console_size;
+ dummy_data->ftrace_size = ramoops_ftrace_size;
+ dummy_data->pmsg_size = ramoops_pmsg_size;
+ dummy_data->dump_oops = dump_oops;
+ /*
+ * For backwards compatibility ramoops.ecc=1 means 16 bytes ECC
+ * (using 1 byte for ECC isn't much of use anyway).
+ */
+ dummy_data->ecc_info.ecc_size = ramoops_ecc == 1 ? 16 : ramoops_ecc;
+
+ dummy = platform_device_register_data(NULL, "ramoops", -1,
+ dummy_data, sizeof(struct ramoops_platform_data));
+ if (IS_ERR(dummy)) {
+ pr_info("could not create platform device: %ld\n",
+ PTR_ERR(dummy));
+ }
+}
+
+static int __init ramoops_init(void)
+{
+ ramoops_register_dummy();
+ return platform_driver_register(&ramoops_driver);
+}
+postcore_initcall(ramoops_init);
+
+static void __exit ramoops_exit(void)
+{
+ platform_driver_unregister(&ramoops_driver);
+ platform_device_unregister(dummy);
+ kfree(dummy_data);
+}
+module_exit(ramoops_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Marco Stornelli <marco.stornelli@gmail.com>");
+MODULE_DESCRIPTION("RAM Oops/Panic logger/driver");
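
[Editor's note] Besides the module parameters handled by ramoops_register_dummy() above, a platform can hand ramoops its reserved RAM region directly via platform data. A minimal sketch (not part of this commit); the physical address, sizes, and module name "example_ramoops" are illustrative, and the region must be reserved so the kernel does not otherwise use it:

/* Hypothetical board-code sketch: register a ramoops platform device. */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pstore_ram.h>
#include <linux/sizes.h>

static struct ramoops_platform_data example_ramoops_data = {
	.mem_size	= SZ_1M,		/* assumed reserved region size */
	.mem_address	= 0x8f000000,		/* assumed reserved physical base */
	.record_size	= SZ_64K,		/* per oops/panic dump */
	.console_size	= SZ_64K,
	.ftrace_size	= SZ_64K,
	.pmsg_size	= SZ_64K,
	.dump_oops	= 1,
	.ecc_info	= { .ecc_size = 16 },	/* same effect as ramoops.ecc=1 */
};

static struct platform_device example_ramoops_dev = {
	.name	= "ramoops",
	.dev	= {
		.platform_data = &example_ramoops_data,
	},
};

static int __init example_ramoops_init(void)
{
	return platform_device_register(&example_ramoops_dev);
}
module_init(example_ramoops_init);
MODULE_LICENSE("GPL");

With these numbers, ramoops_probe() carves the remaining 832 KiB into thirteen 64 KiB dump zones plus one zone each for console, ftrace, and pmsg.
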
diff --git a/kernel/fs/pstore/ram_core.c b/kernel/fs/pstore/ram_core.c
new file mode 100644
index 000000000..76c3f80ef
--- /dev/null
+++ b/kernel/fs/pstore/ram_core.c
@@ -0,0 +1,539 @@
+/*
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "persistent_ram: " fmt
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/rslib.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/pstore_ram.h>
+#include <asm/page.h>
+
+struct persistent_ram_buffer {
+ uint32_t sig;
+ atomic_t start;
+ atomic_t size;
+ uint8_t data[0];
+};
+
+#define PERSISTENT_RAM_SIG (0x43474244) /* DBGC */
+
+static inline size_t buffer_size(struct persistent_ram_zone *prz)
+{
+ return atomic_read(&prz->buffer->size);
+}
+
+static inline size_t buffer_start(struct persistent_ram_zone *prz)
+{
+ return atomic_read(&prz->buffer->start);
+}
+
+/* increase and wrap the start pointer, returning the old value */
+static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
+{
+ int old;
+ int new;
+
+ do {
+ old = atomic_read(&prz->buffer->start);
+ new = old + a;
+ while (unlikely(new >= prz->buffer_size))
+ new -= prz->buffer_size;
+ } while (atomic_cmpxchg(&prz->buffer->start, old, new) != old);
+
+ return old;
+}
+
+/* increase the size counter until it hits the max size */
+static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
+{
+ size_t old;
+ size_t new;
+
+ if (atomic_read(&prz->buffer->size) == prz->buffer_size)
+ return;
+
+ do {
+ old = atomic_read(&prz->buffer->size);
+ new = old + a;
+ if (new > prz->buffer_size)
+ new = prz->buffer_size;
+ } while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
+}
+
+static DEFINE_RAW_SPINLOCK(buffer_lock);
+
+/* increase and wrap the start pointer, returning the old value */
+static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+ int old;
+ int new;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&buffer_lock, flags);
+
+ old = atomic_read(&prz->buffer->start);
+ new = old + a;
+ while (unlikely(new >= prz->buffer_size))
+ new -= prz->buffer_size;
+ atomic_set(&prz->buffer->start, new);
+
+ raw_spin_unlock_irqrestore(&buffer_lock, flags);
+
+ return old;
+}
+
+/* increase the size counter until it hits the max size */
+static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+ size_t old;
+ size_t new;
+ unsigned long flags;
+
+ raw_spin_lock_irqsave(&buffer_lock, flags);
+
+ old = atomic_read(&prz->buffer->size);
+ if (old == prz->buffer_size)
+ goto exit;
+
+ new = old + a;
+ if (new > prz->buffer_size)
+ new = prz->buffer_size;
+ atomic_set(&prz->buffer->size, new);
+
+exit:
+ raw_spin_unlock_irqrestore(&buffer_lock, flags);
+}
+
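+/*
+ * Default to the cmpxchg-based helpers; persistent_ram_iomap() switches
+ * these to the spinlock-protected variants.
+ */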
+static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
+static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
+
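+/* Compute Reed-Solomon parity for one data block and store it in ecc[]. */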
+static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
+ uint8_t *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[prz->ecc_info.ecc_size];
+
+ /* Initialize the parity buffer */
+ memset(par, 0, sizeof(par));
+ encode_rs8(prz->rs_decoder, data, len, par, 0);
+ for (i = 0; i < prz->ecc_info.ecc_size; i++)
+ ecc[i] = par[i];
+}
+
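+/*
+ * Decode one data block against its stored parity.  Returns the number
+ * of corrected symbols, or a negative value if the block is
+ * uncorrectable.
+ */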
+static int persistent_ram_decode_rs8(struct persistent_ram_zone *prz,
+ void *data, size_t len, uint8_t *ecc)
+{
+ int i;
+ uint16_t par[prz->ecc_info.ecc_size];
+
+ for (i = 0; i < prz->ecc_info.ecc_size; i++)
+ par[i] = ecc[i];
+ return decode_rs8(prz->rs_decoder, data, par, len,
+ NULL, 0, NULL, 0, NULL);
+}
+
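+/* Recompute parity for every ECC block touched by the updated range. */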
+static void notrace persistent_ram_update_ecc(struct persistent_ram_zone *prz,
+ unsigned int start, unsigned int count)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ uint8_t *buffer_end = buffer->data + prz->buffer_size;
+ uint8_t *block;
+ uint8_t *par;
+ int ecc_block_size = prz->ecc_info.block_size;
+ int ecc_size = prz->ecc_info.ecc_size;
+ int size = ecc_block_size;
+
+ if (!ecc_size)
+ return;
+
+ block = buffer->data + (start & ~(ecc_block_size - 1));
+ par = prz->par_buffer + (start / ecc_block_size) * ecc_size;
+
+ do {
+ if (block + ecc_block_size > buffer_end)
+ size = buffer_end - block;
+ persistent_ram_encode_rs8(prz, block, size, par);
+ block += ecc_block_size;
+ par += ecc_size;
+ } while (block < buffer->data + start + count);
+}
+
+static void persistent_ram_update_header_ecc(struct persistent_ram_zone *prz)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+
+ if (!prz->ecc_info.ecc_size)
+ return;
+
+ persistent_ram_encode_rs8(prz, (uint8_t *)buffer, sizeof(*buffer),
+ prz->par_header);
+}
+
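+/*
+ * Run error correction over the stored log before it is copied out,
+ * accumulating the corrected-byte and bad-block statistics.
+ */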
+static void persistent_ram_ecc_old(struct persistent_ram_zone *prz)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ uint8_t *block;
+ uint8_t *par;
+
+ if (!prz->ecc_info.ecc_size)
+ return;
+
+ block = buffer->data;
+ par = prz->par_buffer;
+ while (block < buffer->data + buffer_size(prz)) {
+ int numerr;
+ int size = prz->ecc_info.block_size;
+ if (block + size > buffer->data + prz->buffer_size)
+ size = buffer->data + prz->buffer_size - block;
+ numerr = persistent_ram_decode_rs8(prz, block, size, par);
+ if (numerr > 0) {
+ pr_devel("error in block %p, %d\n", block, numerr);
+ prz->corrected_bytes += numerr;
+ } else if (numerr < 0) {
+ pr_devel("uncorrectable error in block %p\n", block);
+ prz->bad_blocks++;
+ }
+ block += prz->ecc_info.block_size;
+ par += prz->ecc_info.ecc_size;
+ }
+}
+
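+/*
+ * Carve the parity area (ecc_size bytes per data block, plus one set for
+ * the header) off the end of the buffer, initialize the Reed-Solomon
+ * codec and verify the header.  Shrinks prz->buffer_size accordingly.
+ */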
+static int persistent_ram_init_ecc(struct persistent_ram_zone *prz,
+ struct persistent_ram_ecc_info *ecc_info)
+{
+ int numerr;
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ int ecc_blocks;
+ size_t ecc_total;
+
+ if (!ecc_info || !ecc_info->ecc_size)
+ return 0;
+
+ prz->ecc_info.block_size = ecc_info->block_size ?: 128;
+ prz->ecc_info.ecc_size = ecc_info->ecc_size ?: 16;
+ prz->ecc_info.symsize = ecc_info->symsize ?: 8;
+ prz->ecc_info.poly = ecc_info->poly ?: 0x11d;
+
+ ecc_blocks = DIV_ROUND_UP(prz->buffer_size - prz->ecc_info.ecc_size,
+ prz->ecc_info.block_size +
+ prz->ecc_info.ecc_size);
+ ecc_total = (ecc_blocks + 1) * prz->ecc_info.ecc_size;
+ if (ecc_total >= prz->buffer_size) {
+ pr_err("%s: invalid ecc_size %u (total %zu, buffer size %zu)\n",
+ __func__, prz->ecc_info.ecc_size,
+ ecc_total, prz->buffer_size);
+ return -EINVAL;
+ }
+
+ prz->buffer_size -= ecc_total;
+ prz->par_buffer = buffer->data + prz->buffer_size;
+ prz->par_header = prz->par_buffer +
+ ecc_blocks * prz->ecc_info.ecc_size;
+
+ /*
+ * first consecutive root is 0
+ * primitive element to generate roots = 1
+ */
+ prz->rs_decoder = init_rs(prz->ecc_info.symsize, prz->ecc_info.poly,
+ 0, 1, prz->ecc_info.ecc_size);
+ if (prz->rs_decoder == NULL) {
+ pr_info("init_rs failed\n");
+ return -EINVAL;
+ }
+
+ prz->corrected_bytes = 0;
+ prz->bad_blocks = 0;
+
+ numerr = persistent_ram_decode_rs8(prz, buffer, sizeof(*buffer),
+ prz->par_header);
+ if (numerr > 0) {
+ pr_info("error in header, %d\n", numerr);
+ prz->corrected_bytes += numerr;
+ } else if (numerr < 0) {
+ pr_info("uncorrectable error in header\n");
+ prz->bad_blocks++;
+ }
+
+ return 0;
+}
+
+ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
+ char *str, size_t len)
+{
+ ssize_t ret;
+
+ if (!prz->ecc_info.ecc_size)
+ return 0;
+
+ if (prz->corrected_bytes || prz->bad_blocks)
+ ret = snprintf(str, len,
+ "\n%d Corrected bytes, %d unrecoverable blocks\n",
+ prz->corrected_bytes, prz->bad_blocks);
+ else
+ ret = snprintf(str, len, "\nNo errors detected\n");
+
+ return ret;
+}
+
+static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
+ const void *s, unsigned int start, unsigned int count)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ memcpy(buffer->data + start, s, count);
+ persistent_ram_update_ecc(prz, start, count);
+}
+
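+/* Snapshot the (possibly wrapped) log into a linear old_log buffer. */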
+void persistent_ram_save_old(struct persistent_ram_zone *prz)
+{
+ struct persistent_ram_buffer *buffer = prz->buffer;
+ size_t size = buffer_size(prz);
+ size_t start = buffer_start(prz);
+
+ if (!size)
+ return;
+
+ if (!prz->old_log) {
+ persistent_ram_ecc_old(prz);
+ prz->old_log = kmalloc(size, GFP_KERNEL);
+ }
+ if (!prz->old_log) {
+ pr_err("failed to allocate buffer\n");
+ return;
+ }
+
+ prz->old_log_size = size;
+ memcpy(prz->old_log, &buffer->data[start], size - start);
+ memcpy(prz->old_log + size - start, &buffer->data[0], start);
+}
+
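+/*
+ * Append to the ring buffer.  Writes larger than the buffer keep only
+ * the trailing bytes, and the copy is split in two when it wraps around
+ * the end of the buffer.
+ */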
+int notrace persistent_ram_write(struct persistent_ram_zone *prz,
+ const void *s, unsigned int count)
+{
+ int rem;
+ int c = count;
+ size_t start;
+
+ if (unlikely(c > prz->buffer_size)) {
+ s += c - prz->buffer_size;
+ c = prz->buffer_size;
+ }
+
+ buffer_size_add(prz, c);
+
+ start = buffer_start_add(prz, c);
+
+ rem = prz->buffer_size - start;
+ if (unlikely(rem < c)) {
+ persistent_ram_update(prz, s, start, rem);
+ s += rem;
+ c -= rem;
+ start = 0;
+ }
+ persistent_ram_update(prz, s, start, c);
+
+ persistent_ram_update_header_ecc(prz);
+
+ return count;
+}
+
+size_t persistent_ram_old_size(struct persistent_ram_zone *prz)
+{
+ return prz->old_log_size;
+}
+
+void *persistent_ram_old(struct persistent_ram_zone *prz)
+{
+ return prz->old_log;
+}
+
+void persistent_ram_free_old(struct persistent_ram_zone *prz)
+{
+ kfree(prz->old_log);
+ prz->old_log = NULL;
+ prz->old_log_size = 0;
+}
+
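+/* Reset the buffer to empty and refresh the header ECC. */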
+void persistent_ram_zap(struct persistent_ram_zone *prz)
+{
+ atomic_set(&prz->buffer->start, 0);
+ atomic_set(&prz->buffer->size, 0);
+ persistent_ram_update_header_ecc(prz);
+}
+
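+/*
+ * Map a region that is part of system RAM via vmap(), non-cached or
+ * write-combined depending on memtype.
+ */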
+static void *persistent_ram_vmap(phys_addr_t start, size_t size,
+ unsigned int memtype)
+{
+ struct page **pages;
+ phys_addr_t page_start;
+ unsigned int page_count;
+ pgprot_t prot;
+ unsigned int i;
+ void *vaddr;
+
+ page_start = start - offset_in_page(start);
+ page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
+
+ if (memtype)
+ prot = pgprot_noncached(PAGE_KERNEL);
+ else
+ prot = pgprot_writecombine(PAGE_KERNEL);
+
+ pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
+ if (!pages) {
+ pr_err("%s: Failed to allocate array for %u pages\n",
+ __func__, page_count);
+ return NULL;
+ }
+
+ for (i = 0; i < page_count; i++) {
+ phys_addr_t addr = page_start + i * PAGE_SIZE;
+ pages[i] = pfn_to_page(addr >> PAGE_SHIFT);
+ }
+ vaddr = vmap(pages, page_count, VM_MAP, prot);
+ kfree(pages);
+
+ return vaddr;
+}
+
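+/*
+ * Map a region outside system RAM via request_mem_region()/ioremap()
+ * and switch the buffer helpers to their spinlock-protected variants.
+ */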
+static void *persistent_ram_iomap(phys_addr_t start, size_t size,
+ unsigned int memtype)
+{
+ void *va;
+
+ if (!request_mem_region(start, size, "persistent_ram")) {
+ pr_err("request mem region (0x%llx@0x%llx) failed\n",
+ (unsigned long long)size, (unsigned long long)start);
+ return NULL;
+ }
+
+ buffer_start_add = buffer_start_add_locked;
+ buffer_size_add = buffer_size_add_locked;
+
+ if (memtype)
+ va = ioremap(start, size);
+ else
+ va = ioremap_wc(start, size);
+
+ return va;
+}
+
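+/*
+ * Map the physical range with vmap() when it is real RAM, otherwise via
+ * ioremap().  The usable buffer_size excludes the header.
+ */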
+static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
+ struct persistent_ram_zone *prz, int memtype)
+{
+ prz->paddr = start;
+ prz->size = size;
+
+ if (pfn_valid(start >> PAGE_SHIFT))
+ prz->vaddr = persistent_ram_vmap(start, size, memtype);
+ else
+ prz->vaddr = persistent_ram_iomap(start, size, memtype);
+
+ if (!prz->vaddr) {
+ pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
+ (unsigned long long)size, (unsigned long long)start);
+ return -ENOMEM;
+ }
+
+ prz->buffer = prz->vaddr + offset_in_page(start);
+ prz->buffer_size = size - sizeof(struct persistent_ram_buffer);
+
+ return 0;
+}
+
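+/*
+ * Initialize ECC, then check the signature (XORed with the caller's
+ * sig).  Sane existing contents are preserved via
+ * persistent_ram_save_old(); anything else reinitializes the zone.
+ */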
+static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
+ struct persistent_ram_ecc_info *ecc_info)
+{
+ int ret;
+
+ ret = persistent_ram_init_ecc(prz, ecc_info);
+ if (ret)
+ return ret;
+
+ sig ^= PERSISTENT_RAM_SIG;
+
+ if (prz->buffer->sig == sig) {
+ if (buffer_size(prz) > prz->buffer_size ||
+ buffer_start(prz) > buffer_size(prz))
+ pr_info("found existing invalid buffer, size %zu, start %zu\n",
+ buffer_size(prz), buffer_start(prz));
+ else {
+ pr_debug("found existing buffer, size %zu, start %zu\n",
+ buffer_size(prz), buffer_start(prz));
+ persistent_ram_save_old(prz);
+ return 0;
+ }
+ } else {
+ pr_debug("no valid data in buffer (sig = 0x%08x)\n",
+ prz->buffer->sig);
+ }
+
+ prz->buffer->sig = sig;
+ persistent_ram_zap(prz);
+
+ return 0;
+}
+
+void persistent_ram_free(struct persistent_ram_zone *prz)
+{
+ if (!prz)
+ return;
+
+ if (prz->vaddr) {
+ if (pfn_valid(prz->paddr >> PAGE_SHIFT)) {
+ vunmap(prz->vaddr);
+ } else {
+ iounmap(prz->vaddr);
+ release_mem_region(prz->paddr, prz->size);
+ }
+ prz->vaddr = NULL;
+ }
+ persistent_ram_free_old(prz);
+ kfree(prz);
+}
+
+struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+ u32 sig, struct persistent_ram_ecc_info *ecc_info,
+ unsigned int memtype)
+{
+ struct persistent_ram_zone *prz;
+ int ret = -ENOMEM;
+
+ prz = kzalloc(sizeof(struct persistent_ram_zone), GFP_KERNEL);
+ if (!prz) {
+ pr_err("failed to allocate persistent ram zone\n");
+ goto err;
+ }
+
+ ret = persistent_ram_buffer_map(start, size, prz, memtype);
+ if (ret)
+ goto err;
+
+ ret = persistent_ram_post_init(prz, sig, ecc_info);
+ if (ret)
+ goto err;
+
+ return prz;
+err:
+ persistent_ram_free(prz);
+ return ERR_PTR(ret);
+}