author    | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-11 10:41:07 +0300
committer | José Pekkarinen <jose.pekkarinen@nokia.com> | 2016-04-13 08:17:18 +0300
commit    | e09b41010ba33a20a87472ee821fa407a5b8da36 (patch)
tree      | d10dc367189862e7ca5c592f033dc3726e1df4e3 /kernel/ipc
parent    | f93b97fd65072de626c074dbe099a1fff05ce060 (diff)
These changes are the raw update to linux-4.4.6-rt14. Kernel sources
are taken from kernel.org, and rt patch from the rt wiki download page.
During the rebase, the following patch collided:
Force tick interrupt and get rid of softirq magic (I70131fb85).
The colliding hunks were dropped, since their logic was
already present in the source.
Change-Id: I7f57a4081d9deaa0d9ccfc41a6c8daccdee3b769
Signed-off-by: José Pekkarinen <jose.pekkarinen@nokia.com>
Diffstat (limited to 'kernel/ipc')
-rw-r--r-- | kernel/ipc/msg.c     | 105
-rw-r--r-- | kernel/ipc/msgutil.c |   1
-rw-r--r-- | kernel/ipc/sem.c     |  10
-rw-r--r-- | kernel/ipc/shm.c     |  74
-rw-r--r-- | kernel/ipc/util.c    |  36
-rw-r--r-- | kernel/ipc/util.h    |   2
6 files changed, 116 insertions, 112 deletions
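
The dominant change in the kernel/ipc/msg.c diff below is replacing the RT-specific preempt_disable_rt()/wake_up_process()/smp_mb() sequences with the generic wake_q API. The following is a minimal sketch of that pattern, assuming the 4.4-era interface from <linux/sched.h>; wake_one_waiter() is a hypothetical helper for illustration, not code from this commit:

```c
/*
 * Hypothetical sketch of the wake_q pattern adopted in ipc/msg.c below.
 * Wakeups are queued while the object lock is held and issued only
 * after it is dropped, so a woken (possibly higher-priority) task can
 * never immediately block on a lock its waker still holds -- the
 * property the removed preempt_disable_rt() dance used to provide.
 */
#include <linux/sched.h>	/* WAKE_Q, wake_q_add(), wake_up_q() */
#include <linux/spinlock.h>

static void wake_one_waiter(spinlock_t *lock, struct task_struct *waiter)
{
	WAKE_Q(wake_q);			/* on-stack queue of tasks to wake */

	spin_lock(lock);
	/* ... update the state the waiter is blocked on ... */
	wake_q_add(&wake_q, waiter);	/* defer the wakeup, takes a task ref */
	spin_unlock(lock);

	wake_up_q(&wake_q);		/* perform the wakeups lock-free */
}
```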
diff --git a/kernel/ipc/msg.c b/kernel/ipc/msg.c
index cedbf5f50..b8c5e7f2b 100644
--- a/kernel/ipc/msg.c
+++ b/kernel/ipc/msg.c
@@ -76,7 +76,7 @@ struct msg_sender {
 static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id)
 {
-	struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id);
+	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&msg_ids(ns), id);
 
 	if (IS_ERR(ipcp))
 		return ERR_CAST(ipcp);
@@ -137,13 +137,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 		return retval;
 	}
 
-	/* ipc_addid() locks msq upon success. */
-	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
-	if (id < 0) {
-		ipc_rcu_putref(msq, msg_rcu_free);
-		return id;
-	}
-
 	msq->q_stime = msq->q_rtime = 0;
 	msq->q_ctime = get_seconds();
 	msq->q_cbytes = msq->q_qnum = 0;
@@ -153,6 +146,13 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params)
 	INIT_LIST_HEAD(&msq->q_receivers);
 	INIT_LIST_HEAD(&msq->q_senders);
 
+	/* ipc_addid() locks msq upon success. */
+	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
+	if (id < 0) {
+		ipc_rcu_putref(msq, msg_rcu_free);
+		return id;
+	}
+
 	ipc_unlock_object(&msq->q_perm);
 	rcu_read_unlock();
@@ -183,29 +183,15 @@ static void ss_wakeup(struct list_head *h, int kill)
 	}
 }
 
-static void expunge_all(struct msg_queue *msq, int res)
+static void expunge_all(struct msg_queue *msq, int res,
+			struct wake_q_head *wake_q)
 {
 	struct msg_receiver *msr, *t;
 
 	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
-		/*
-		 * Make sure that the wakeup doesnt preempt
-		 * this CPU prematurely. (on PREEMPT_RT)
-		 */
-		preempt_disable_rt();
-
-		msr->r_msg = NULL; /* initialize expunge ordering */
-		wake_up_process(msr->r_tsk);
-		/*
-		 * Ensure that the wakeup is visible before setting r_msg as
-		 * the receiving end depends on it: either spinning on a nil,
-		 * or dealing with -EAGAIN cases. See lockless receive part 1
-		 * and 2 in do_msgrcv().
-		 */
-		smp_mb();
+		wake_q_add(wake_q, msr->r_tsk);
 		msr->r_msg = ERR_PTR(res);
-
-		preempt_enable_rt();
 	}
 }
@@ -221,11 +207,13 @@ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 {
 	struct msg_msg *msg, *t;
 	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
+	WAKE_Q(wake_q);
 
-	expunge_all(msq, -EIDRM);
+	expunge_all(msq, -EIDRM, &wake_q);
 	ss_wakeup(&msq->q_senders, 1);
 	msg_rmid(ns, msq);
 	ipc_unlock_object(&msq->q_perm);
+	wake_up_q(&wake_q);
 	rcu_read_unlock();
 
 	list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) {
@@ -350,6 +338,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
 	struct kern_ipc_perm *ipcp;
 	struct msqid64_ds uninitialized_var(msqid64);
 	struct msg_queue *msq;
+	WAKE_Q(wake_q);
 	int err;
 
 	if (cmd == IPC_SET) {
@@ -397,7 +386,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
 		/* sleeping receivers might be excluded by
 		 * stricter permissions.
 		 */
-		expunge_all(msq, -EAGAIN);
+		expunge_all(msq, -EAGAIN, &wake_q);
 		/* sleeping senders might be able to send
 		 * due to a larger queue size.
 		 */
@@ -410,6 +399,7 @@ static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd,
 
 out_unlock0:
 	ipc_unlock_object(&msq->q_perm);
+	wake_up_q(&wake_q);
 out_unlock1:
 	rcu_read_unlock();
 out_up:
@@ -574,7 +564,8 @@ static int testmsg(struct msg_msg *msg, long type, int mode)
 	return 0;
 }
 
-static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
+static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg,
+				 struct wake_q_head *wake_q)
 {
 	struct msg_receiver *msr, *t;
 
@@ -582,39 +573,21 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
 		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
 		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
 					       msr->r_msgtype, msr->r_mode)) {
-			/*
-			 * Make sure that the wakeup doesnt preempt
-			 * this CPU prematurely. (on PREEMPT_RT)
-			 */
-			preempt_disable_rt();
 
 			list_del(&msr->r_list);
 			if (msr->r_maxsize < msg->m_ts) {
-				/* initialize pipelined send ordering */
-				msr->r_msg = NULL;
-				wake_up_process(msr->r_tsk);
-				smp_mb(); /* see barrier comment below */
+				wake_q_add(wake_q, msr->r_tsk);
 				msr->r_msg = ERR_PTR(-E2BIG);
 			} else {
-				msr->r_msg = NULL;
 				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
 				msq->q_rtime = get_seconds();
-				wake_up_process(msr->r_tsk);
-				/*
-				 * Ensure that the wakeup is visible before
-				 * setting r_msg, as the receiving end depends
-				 * on it. See lockless receive part 1 and 2 in
-				 * do_msgrcv().
-				 */
-				smp_mb();
+				wake_q_add(wake_q, msr->r_tsk);
 				msr->r_msg = msg;
-				preempt_enable_rt();
-
 				return 1;
 			}
-			preempt_enable_rt();
 		}
 	}
+
 	return 0;
 }
@@ -625,6 +598,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 	struct msg_msg *msg;
 	int err;
 	struct ipc_namespace *ns;
+	WAKE_Q(wake_q);
 
 	ns = current->nsproxy->ipc_ns;
 
@@ -710,7 +684,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 		msq->q_lspid = task_tgid_vnr(current);
 		msq->q_stime = get_seconds();
 
-		if (!pipelined_send(msq, msg)) {
+		if (!pipelined_send(msq, msg, &wake_q)) {
 			/* no one is waiting for this message, enqueue it */
 			list_add_tail(&msg->m_list, &msq->q_messages);
 			msq->q_cbytes += msgsz;
@@ -724,6 +698,7 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
 
 out_unlock0:
 	ipc_unlock_object(&msq->q_perm);
+	wake_up_q(&wake_q);
 out_unlock1:
 	rcu_read_unlock();
 	if (msg != NULL)
@@ -944,31 +919,25 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
 		rcu_read_lock();
 
 		/* Lockless receive, part 2:
-		 * Wait until pipelined_send or expunge_all are outside of
-		 * wake_up_process(). There is a race with exit(), see
-		 * ipc/mqueue.c for the details.
+		 * The work in pipelined_send() and expunge_all():
+		 * - Set pointer to message
+		 * - Queue the receiver task for later wakeup
+		 * - Wake up the process after the lock is dropped.
+		 *
+		 * Should the process wake up before this wakeup (due to a
+		 * signal) it will either see the message and continue …
 		 */
-		msg = (struct msg_msg *)msr_d.r_msg;
-		while (msg == NULL) {
-			cpu_relax();
-			msg = (struct msg_msg *)msr_d.r_msg;
-		}
-
-		/* Lockless receive, part 3:
-		 * If there is a message or an error then accept it without
-		 * locking.
-		 */
+		msg = (struct msg_msg *)msr_d.r_msg;
 		if (msg != ERR_PTR(-EAGAIN))
 			goto out_unlock1;
 
-		/* Lockless receive, part 3:
-		 * Acquire the queue spinlock.
-		 */
+		/*
+		 * … or see -EAGAIN, acquire the lock to check the message
+		 * again.
+		 */
 		ipc_lock_object(&msq->q_perm);
 
-		/* Lockless receive, part 4:
-		 * Repeat test after acquiring the spinlock.
-		 */
 		msg = (struct msg_msg *)msr_d.r_msg;
 		if (msg != ERR_PTR(-EAGAIN))
 			goto out_unlock0;
diff --git a/kernel/ipc/msgutil.c b/kernel/ipc/msgutil.c
index 2b491590e..ed81aafd2 100644
--- a/kernel/ipc/msgutil.c
+++ b/kernel/ipc/msgutil.c
@@ -123,7 +123,6 @@ struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst)
 	size_t len = src->m_ts;
 	size_t alen;
 
-	BUG_ON(dst == NULL);
 	if (src->m_ts > dst->m_ts)
 		return ERR_PTR(-EINVAL);
diff --git a/kernel/ipc/sem.c b/kernel/ipc/sem.c
index d6261bef5..dc67f53c0 100644
--- a/kernel/ipc/sem.c
+++ b/kernel/ipc/sem.c
@@ -401,7 +401,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
 	struct kern_ipc_perm *ipcp;
 	struct sem_array *sma;
 
-	ipcp = ipc_obtain_object(&sem_ids(ns), id);
+	ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
 	if (IS_ERR(ipcp))
 		return ERR_CAST(ipcp);
 
@@ -420,7 +420,7 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
 static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
 {
-	struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);
+	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&sem_ids(ns), id);
 
 	if (IS_ERR(ipcp))
 		return ERR_CAST(ipcp);
@@ -2143,9 +2143,11 @@ void exit_sem(struct task_struct *tsk)
 		ipc_assert_locked_object(&sma->sem_perm);
 		list_del(&un->list_id);
 
-		spin_lock(&ulp->lock);
+		/* we are the last process using this ulp, acquiring ulp->lock
+		 * isn't required. Besides that, we are also protected against
+		 * IPC_RMID as we hold sma->sem_perm lock now
+		 */
 		list_del_rcu(&un->list_proc);
-		spin_unlock(&ulp->lock);
 
 		/* perform adjustments registered in un */
 		for (i = 0; i < sma->sem_nsems; i++) {
diff --git a/kernel/ipc/shm.c b/kernel/ipc/shm.c
index 6d767071c..3174634ca 100644
--- a/kernel/ipc/shm.c
+++ b/kernel/ipc/shm.c
@@ -129,7 +129,7 @@ void __init shm_init(void)
 static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
 {
-	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);
+	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);
 
 	if (IS_ERR(ipcp))
 		return ERR_CAST(ipcp);
@@ -155,9 +155,13 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
 {
 	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
 
+	/*
+	 * Callers of shm_lock() must validate the status of the returned ipc
+	 * object pointer (as returned by ipc_lock()), and error out as
+	 * appropriate.
+	 */
 	if (IS_ERR(ipcp))
-		return (struct shmid_kernel *)ipcp;
-
+		return (void *)ipcp;
 	return container_of(ipcp, struct shmid_kernel, shm_perm);
 }
 
@@ -183,19 +187,33 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
 }
 
-/* This is called by fork, once for every shm attach. */
-static void shm_open(struct vm_area_struct *vma)
+static int __shm_open(struct vm_area_struct *vma)
 {
 	struct file *file = vma->vm_file;
 	struct shm_file_data *sfd = shm_file_data(file);
 	struct shmid_kernel *shp;
 
 	shp = shm_lock(sfd->ns, sfd->id);
-	BUG_ON(IS_ERR(shp));
+
+	if (IS_ERR(shp))
+		return PTR_ERR(shp);
+
 	shp->shm_atim = get_seconds();
 	shp->shm_lprid = task_tgid_vnr(current);
 	shp->shm_nattch++;
 	shm_unlock(shp);
+	return 0;
+}
+
+/* This is called by fork, once for every shm attach. */
+static void shm_open(struct vm_area_struct *vma)
+{
+	int err = __shm_open(vma);
+	/*
+	 * We raced in the idr lookup or with shm_destroy().
+	 * Either way, the ID is busted.
+	 */
+	WARN_ON_ONCE(err);
+}
 
 /*
@@ -258,7 +276,14 @@ static void shm_close(struct vm_area_struct *vma)
 	down_write(&shm_ids(ns).rwsem);
 	/* remove from the list of attaches of the shm segment */
 	shp = shm_lock(ns, sfd->id);
-	BUG_ON(IS_ERR(shp));
+
+	/*
+	 * We raced in the idr lookup or with shm_destroy().
+	 * Either way, the ID is busted.
+	 */
+	if (WARN_ON_ONCE(IS_ERR(shp)))
+		goto done; /* no-op */
+
 	shp->shm_lprid = task_tgid_vnr(current);
 	shp->shm_dtim = get_seconds();
 	shp->shm_nattch--;
@@ -266,6 +291,7 @@ static void shm_close(struct vm_area_struct *vma)
 		shm_destroy(ns, shp);
 	else
 		shm_unlock(shp);
+done:
 	up_write(&shm_ids(ns).rwsem);
 }
 
@@ -387,17 +413,25 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
 	struct shm_file_data *sfd = shm_file_data(file);
 	int ret;
 
+	/*
+	 * In case of remap_file_pages() emulation, the file can represent
+	 * removed IPC ID: propogate shm_lock() error to caller.
+	 */
+	ret = __shm_open(vma);
+	if (ret)
+		return ret;
+
 	ret = sfd->file->f_op->mmap(sfd->file, vma);
-	if (ret != 0)
+	if (ret) {
+		shm_close(vma);
 		return ret;
+	}
 	sfd->vm_ops = vma->vm_ops;
 #ifdef CONFIG_MMU
-	BUG_ON(!sfd->vm_ops->fault);
+	WARN_ON(!sfd->vm_ops->fault);
 #endif
 	vma->vm_ops = &shm_vm_ops;
-	shm_open(vma);
-
-	return ret;
+	return 0;
 }
 
 static int shm_release(struct inode *ino, struct file *file)
@@ -544,18 +578,12 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 		if ((shmflg & SHM_NORESERVE) &&
 				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
 			acctflag = VM_NORESERVE;
-		file = shmem_file_setup(name, size, acctflag);
+		file = shmem_kernel_file_setup(name, size, acctflag);
 	}
 	error = PTR_ERR(file);
 	if (IS_ERR(file))
 		goto no_file;
 
-	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
-	if (id < 0) {
-		error = id;
-		goto no_id;
-	}
-
 	shp->shm_cprid = task_tgid_vnr(current);
 	shp->shm_lprid = 0;
 	shp->shm_atim = shp->shm_dtim = 0;
@@ -564,6 +592,13 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
 	shp->shm_nattch = 0;
 	shp->shm_file = file;
 	shp->shm_creator = current;
+
+	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
+	if (id < 0) {
+		error = id;
+		goto no_id;
+	}
+
 	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);
 
 	/*
@@ -1191,7 +1226,6 @@ out_fput:
 out_nattch:
 	down_write(&shm_ids(ns).rwsem);
 	shp = shm_lock(ns, shmid);
-	BUG_ON(IS_ERR(shp));
 	shp->shm_nattch--;
 	if (shm_may_destroy(ns, shp))
 		shm_destroy(ns, shp);
diff --git a/kernel/ipc/util.c b/kernel/ipc/util.c
index ff3323ef8..0f401d94b 100644
--- a/kernel/ipc/util.c
+++ b/kernel/ipc/util.c
@@ -237,6 +237,10 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
 	rcu_read_lock();
 	spin_lock(&new->lock);
 
+	current_euid_egid(&euid, &egid);
+	new->cuid = new->uid = euid;
+	new->gid = new->cgid = egid;
+
 	id = idr_alloc(&ids->ipcs_idr, new,
 		       (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
 		       GFP_NOWAIT);
@@ -249,10 +253,6 @@ int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
 
 	ids->in_use++;
 
-	current_euid_egid(&euid, &egid);
-	new->cuid = new->uid = euid;
-	new->gid = new->cgid = egid;
-
 	if (next_id < 0) {
 		new->seq = ids->seq++;
 		if (ids->seq > IPCID_SEQ_MAX)
@@ -467,10 +467,7 @@ void ipc_rcu_free(struct rcu_head *head)
 {
 	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
 
-	if (is_vmalloc_addr(p))
-		vfree(p);
-	else
-		kfree(p);
+	kvfree(p);
 }
 
 /**
@@ -558,7 +555,7 @@ void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out)
  * Call inside the RCU critical section.
  * The ipc object is *not* locked on exit.
  */
-struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
+struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id)
 {
 	struct kern_ipc_perm *out;
 	int lid = ipcid_to_idx(id);
@@ -584,21 +581,24 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
 	struct kern_ipc_perm *out;
 
 	rcu_read_lock();
-	out = ipc_obtain_object(ids, id);
+	out = ipc_obtain_object_idr(ids, id);
 	if (IS_ERR(out))
-		goto err1;
+		goto err;
 
 	spin_lock(&out->lock);
 
-	/* ipc_rmid() may have already freed the ID while ipc_lock
-	 * was spinning: here verify that the structure is still valid
+	/*
+	 * ipc_rmid() may have already freed the ID while ipc_lock()
+	 * was spinning: here verify that the structure is still valid.
+	 * Upon races with RMID, return -EIDRM, thus indicating that
+	 * the ID points to a removed identifier.
 	 */
 	if (ipc_valid_object(out))
 		return out;
 
 	spin_unlock(&out->lock);
-	out = ERR_PTR(-EINVAL);
-err1:
+	out = ERR_PTR(-EIDRM);
+err:
 	rcu_read_unlock();
 	return out;
 }
@@ -608,7 +608,7 @@ err1:
  * @ids: ipc identifier set
  * @id: ipc id to look for
  *
- * Similar to ipc_obtain_object() but also checks
+ * Similar to ipc_obtain_object_idr() but also checks
 * the ipc object reference counter.
 *
 * Call inside the RCU critical section.
@@ -616,13 +616,13 @@
 */
struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id)
{
-	struct kern_ipc_perm *out = ipc_obtain_object(ids, id);
+	struct kern_ipc_perm *out = ipc_obtain_object_idr(ids, id);
 
 	if (IS_ERR(out))
 		goto out;
 
 	if (ipc_checkid(out, id))
-		return ERR_PTR(-EIDRM);
+		return ERR_PTR(-EINVAL);
out:
 	return out;
}
diff --git a/kernel/ipc/util.h b/kernel/ipc/util.h
index 1a5a0fcd0..3a8a5a0ec 100644
--- a/kernel/ipc/util.h
+++ b/kernel/ipc/util.h
@@ -132,7 +132,7 @@ void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head));
 void ipc_rcu_free(struct rcu_head *head);
 
 struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int);
-struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id);
+struct kern_ipc_perm *ipc_obtain_object_idr(struct ipc_ids *ids, int id);
 
 void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out);
 void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out);
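
A second pattern worth noting across the newque() and newseg() hunks above: ipc_addid() now runs only after the object is fully initialized, because it is the call that publishes the object in the IDR where concurrent lookups can find it. The following is a minimal sketch of that ordering; it uses the real ipc_addid()/ipc_unlock_object() helpers from ipc/util.h, but struct obj and new_obj() are hypothetical illustrations, not code from this commit:

```c
#include "util.h"	/* ipc_addid(), ipc_unlock_object() */

/* Hypothetical example type; only the embedded kern_ipc_perm is real. */
struct obj {
	struct kern_ipc_perm perm;	/* must be first, as in msg/shm */
	int payload;
};

static int new_obj(struct ipc_ids *ids, struct obj *o, int maxids)
{
	int id;

	/* 1. Initialize every field while the object is still private. */
	o->payload = 0;

	/*
	 * 2. Only then insert it into the IDR. Concurrent lookups can
	 *    find the object from here on, but ipc_addid() returns with
	 *    o->perm locked (and under rcu_read_lock()), so they block
	 *    until the unlock below.
	 */
	id = ipc_addid(ids, &o->perm, maxids);
	if (id < 0)
		return id;	/* never published; caller may free it */

	/* 3. Publish: drop the object lock and leave the RCU section. */
	ipc_unlock_object(&o->perm);
	rcu_read_unlock();
	return id;
}
```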