@@ -243,6 +243,7 @@ extern int set_signals_trace(int enable);
extern int os_is_signal_stack(void);
extern void deliver_alarm(void);
extern void register_pm_wake_signal(void);
+extern void set_pending_signals(int sig);
/* util.c */
extern void stack_protections(unsigned long address);
@@ -21,6 +21,7 @@
#include <irq_user.h>
#include <irq_kern.h>
#include <as-layout.h>
+#include <asm/cpu.h>
extern void free_irqs(void);
@@ -563,6 +564,11 @@ unsigned long to_irq_stack(unsigned long *mask_out)
unsigned long mask, old;
int nested;
+#ifdef CONFIG_UMMODE_LIB
+ if (!lkl_irq_enter(ffs(*mask_out) - 1))
+ return 1;
+#endif
+
mask = xchg(&pending_mask, *mask_out);
if (mask != 0) {
/*
@@ -579,6 +585,10 @@ unsigned long to_irq_stack(unsigned long *mask_out)
old |= mask;
mask = xchg(&pending_mask, old);
} while (mask != old);
+
+#ifdef CONFIG_UMMODE_LIB
+ lkl_irq_exit();
+#endif
return 1;
}
@@ -616,6 +626,9 @@ unsigned long from_irq_stack(int nested)
*to = *ti;
mask = xchg(&pending_mask, 0);
+#ifdef CONFIG_UMMODE_LIB
+ lkl_irq_exit();
+#endif
return mask & ~1;
}
@@ -659,11 +659,13 @@ static struct clock_event_device timer_clockevent = {
static irqreturn_t um_timer(int irq, void *dev)
{
+#ifndef CONFIG_UMMODE_LIB
if (get_current()->mm != NULL)
{
/* userspace - relay signal, results in correct userspace timers */
os_alarm_process(get_current()->mm->context.id.u.pid);
}
+#endif
(*timer_clockevent.event_handler)(&timer_clockevent);
@@ -8,4 +8,8 @@ int lkl_cpu_init(void);
void lkl_cpu_wait_shutdown(void);
void lkl_cpu_change_owner(lkl_thread_t owner);
+int lkl_cpu_try_run_irq(int irq);
+void lkl_irq_exit(void);
+int lkl_irq_enter(int sig);
+
#endif
@@ -8,6 +8,7 @@
#include <asm/sched.h>
#include <asm/syscalls.h>
#include <init.h>
+#include <os.h>
/*
* This structure is used to get access to the "LKL CPU" that allows us to run
@@ -41,6 +42,7 @@ static struct lkl_cpu {
*/
#define MAX_THREADS 1000000
unsigned int shutdown_gate;
+ bool irqs_pending;
/* no of threads waiting the CPU */
unsigned int sleepers;
/* no of times the current thread got the CPU */
@@ -53,6 +55,16 @@ static struct lkl_cpu {
struct lkl_sem *shutdown_sem;
} cpu;
+static void run_irqs(void)
+{
+ unblock_signals();
+}
+
+static void set_irq_pending(int sig)
+{
+ set_pending_signals(sig);
+}
+
/*
* internal routine to acquire LKL CPU's lock
*/
@@ -132,6 +144,16 @@ void lkl_cpu_put(void)
!lkl_thread_equal(cpu.owner, lkl_thread_self()))
lkl_bug("%s: unbalanced put\n", __func__);
+ /* we're going to trigger irq handlers if there are any pending
+ * interrupts, and not irq_disabled.
+ */
+ while (cpu.irqs_pending && !irqs_disabled()) {
+ cpu.irqs_pending = false;
+ lkl_mutex_unlock(cpu.lock);
+ run_irqs();
+ lkl_mutex_lock(cpu.lock);
+ }
+
/* switch to userspace code if current is host task (TIF_HOST_THREAD),
* AND, there are other running tasks.
*/
@@ -163,6 +185,30 @@ void lkl_cpu_put(void)
lkl_mutex_unlock(cpu.lock);
}
+int lkl_cpu_try_run_irq(int irq)
+{
+ int ret;
+
+ ret = __cpu_try_get_lock(1);
+ if (!ret) {
+ set_irq_pending(irq);
+ cpu.irqs_pending = true;
+ }
+ __cpu_try_get_unlock(ret, 1);
+
+ return ret;
+}
+
+int lkl_irq_enter(int sig)
+{
+ return lkl_cpu_try_run_irq(sig);
+}
+
+void lkl_irq_exit(void)
+{
+	/* releases the LKL CPU obtained in lkl_irq_enter() */
+	lkl_cpu_put();
+}
+
static void lkl_cpu_shutdown(void)
{
__sync_fetch_and_add(&cpu.shutdown_gate, MAX_THREADS);
@@ -36,6 +36,8 @@ static void __init *lkl_run_kernel(void *arg)
panic_blink = lkl_panic_blink;
+ /* signal should be received at this thread (main and idle threads) */
+ init_new_thread_signals();
threads_init();
lkl_cpu_get();
start_kernel();
@@ -58,6 +60,9 @@ int __init lkl_start_kernel(struct lkl_host_operations *ops,
if (ret)
goto out_free_init_sem;
+ change_sig(SIGALRM, 0);
+ change_sig(SIGIO, 0);
+
ret = lkl_thread_create(lkl_run_kernel, NULL);
if (!ret) {
ret = -ENOMEM;
@@ -152,6 +152,9 @@ static void *thread_bootstrap(void *_tba)
int (*f)(void *) = tba->f;
void *arg = tba->arg;
+ change_sig(SIGALRM, 0);
+ change_sig(SIGIO, 0);
+
lkl_sem_down(ti->task->thread.arch.sched_sem);
kfree(tba);
if (ti->task->thread.prev_sched)
@@ -230,8 +230,8 @@ void set_handler(int sig)
 	sigemptyset(&sig_mask);
 	sigaddset(&sig_mask, sig);
-	if (sigprocmask(SIG_UNBLOCK, &sig_mask, NULL) < 0)
-		panic("sigprocmask failed - errno = %d\n", errno);
+	if (pthread_sigmask(SIG_UNBLOCK, &sig_mask, NULL) != 0)
+		panic("pthread_sigmask failed\n");
 }
void send_sigio_to_self(void)
@@ -375,3 +375,11 @@ int os_is_signal_stack(void)
return ss.ss_flags & SS_ONSTACK;
}
+
+void set_pending_signals(int sig)
+{
+ if (sig == SIGIO)
+ signals_pending |= SIGIO_MASK;
+ else if (sig == SIGALRM)
+ signals_pending |= SIGALRM_MASK;
+}
In order to cooperate with UML's irq infrastructure and LKL threads based on host threads, irq handlers shall synchronize with our scheduler, which is controlled by struct lkl_cpu. To do that, the irq infra marks entry into its handlers by obtaining CPU access from the thread scheduler (lkl_cpu_try_run_irq) and marks exit by releasing it (lkl_cpu_put). In addition to that, in order to pin the signal handler's thread to the idle thread, several required configurations of the (per-thread) signal mask are added: otherwise handlers running on an arbitrary thread cannot obtain CPU access and immediately fall back to a pending interrupt, which may slow down the delivery of signals. Signed-off-by: Hajime Tazaki <thehajime@gmail.com> --- arch/um/include/shared/os.h | 1 + arch/um/kernel/irq.c | 13 ++++++++++ arch/um/kernel/time.c | 2 ++ arch/um/lkl/include/asm/cpu.h | 4 +++ arch/um/lkl/um/cpu.c | 46 +++++++++++++++++++++++++++++++++++ arch/um/lkl/um/setup.c | 5 ++++ arch/um/lkl/um/threads.c | 3 +++ tools/um/uml/signal.c | 12 +++++++-- 8 files changed, 84 insertions(+), 2 deletions(-)