diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 09a737c..4293aae 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2744,7 +2744,7 @@ static void regulator_bulk_enable_async(void *data, async_cookie_t cookie)
 int regulator_bulk_enable(int num_consumers,
			   struct regulator_bulk_data *consumers)
 {
-	LIST_HEAD(async_domain);
+	ASYNC_DOMAIN_EXCLUSIVE(async_domain);
	int i;
	int ret = 0;
 
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 6a6c80f..3f5b620 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -742,7 +742,7 @@ static void async_sas_ata_eh(void *data, async_cookie_t cookie)
 void sas_ata_strategy_handler(struct Scsi_Host *shost)
 {
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
-	LIST_HEAD(async);
+	ASYNC_DOMAIN_EXCLUSIVE(async);
	int i;
 
	/* it's ok to defer revalidation events during ata eh, these
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index bbbc9c9..2936b44 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -54,6 +54,7 @@
 #include <linux/notifier.h>
 #include <linux/cpu.h>
 #include <linux/mutex.h>
+#include <linux/async.h>
 #include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
@@ -91,7 +92,7 @@ EXPORT_SYMBOL(scsi_logging_level);
 #endif
 
 /* sd, scsi core and power management need to coordinate flushing async actions */
-LIST_HEAD(scsi_sd_probe_domain);
+ASYNC_DOMAIN(scsi_sd_probe_domain);
 EXPORT_SYMBOL(scsi_sd_probe_domain);
 
 /* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
@@ -1354,6 +1355,7 @@ static void __exit exit_scsi(void)
	scsi_exit_devinfo();
	scsi_exit_procfs();
	scsi_exit_queue();
+	async_unregister_domain(&scsi_sd_probe_domain);
 }
 
 subsys_initcall(init_scsi);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index 13d74da..8f9a0ca 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -2,6 +2,7 @@
 #define _SCSI_PRIV_H
 
 #include <linux/device.h>
+#include <linux/async.h>
 #include <scsi/scsi_device.h>
 
 struct request_queue;
@@ -163,7 +164,7 @@ static inline int scsi_autopm_get_host(struct Scsi_Host *h) { return 0; }
 static inline void scsi_autopm_put_host(struct Scsi_Host *h) {}
 #endif /* CONFIG_PM_RUNTIME */
 
-extern struct list_head scsi_sd_probe_domain;
+extern struct async_domain scsi_sd_probe_domain;
 
 /*
  * internal scsi timeout functions: for use by mid-layer and transport
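
The LIST_HEAD conversions in regulator_bulk_enable() and sas_ata_strategy_handler() above share one pattern: a function-local domain that only its owner ever waits on. For reference, a minimal sketch of that pattern with the new API (function and variable names here are illustrative, not taken from the patch):

#include <linux/async.h>

static void my_work(void *data, async_cookie_t cookie)
{
	/* per-item work; runs in process context */
}

static void my_batch(void *data)
{
	/* stack-lived, so it must stay invisible to
	 * async_synchronize_full(); hence ASYNC_DOMAIN_EXCLUSIVE
	 * rather than ASYNC_DOMAIN */
	ASYNC_DOMAIN_EXCLUSIVE(domain);

	async_schedule_domain(my_work, data, &domain);

	/* flush only the work scheduled into this domain */
	async_synchronize_full_domain(&domain);
}
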
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index f55e5f1..56a9379 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -147,7 +147,7 @@ int scsi_complete_async_scans(void)
 
	do {
		if (list_empty(&scanning_hosts))
-			goto out;
+			return 0;
		/* If we can't get memory immediately, that's OK.  Just
		 * sleep a little.  Even if we never get memory, the async
		 * scans will finish eventually.
@@ -179,26 +179,11 @@ int scsi_complete_async_scans(void)
	}
  done:
	spin_unlock(&async_scan_lock);
 
-	kfree(data);
-
- out:
-	async_synchronize_full_domain(&scsi_sd_probe_domain);
+	kfree(data);
	return 0;
 }
 
-/* Only exported for the benefit of scsi_wait_scan */
-EXPORT_SYMBOL_GPL(scsi_complete_async_scans);
-
-#ifndef MODULE
-/*
- * For async scanning we need to wait for all the scans to complete before
- * trying to mount the root fs.  Otherwise non-modular drivers may not be ready
- * yet.
- */
-late_initcall(scsi_complete_async_scans);
-#endif
-
 /**
  * scsi_unlock_floptical - unlock device via a special MODE SENSE command
  * @sdev: scsi device to send command to
@@ -1845,14 +1830,13 @@ static void do_scsi_scan_host(struct Scsi_Host *shost)
	}
 }
 
-static int do_scan_async(void *_data)
+static void do_scan_async(void *_data, async_cookie_t c)
 {
	struct async_scan_data *data = _data;
	struct Scsi_Host *shost = data->shost;
 
	do_scsi_scan_host(shost);
	scsi_finish_async_scan(data);
-	return 0;
 }
 
 /**
@@ -1861,7 +1845,6 @@ static int do_scan_async(void *_data)
  **/
 void scsi_scan_host(struct Scsi_Host *shost)
 {
-	struct task_struct *p;
	struct async_scan_data *data;
 
	if (strncmp(scsi_scan_type, "none", 4) == 0)
@@ -1876,9 +1859,11 @@ void scsi_scan_host(struct Scsi_Host *shost)
		return;
	}
 
-	p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no);
-	if (IS_ERR(p))
-		do_scan_async(data);
+	/* register with the async subsystem so wait_for_device_probe()
+	 * will flush this work
+	 */
+	async_schedule(do_scan_async, data);
+
	/* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */
 }
 EXPORT_SYMBOL(scsi_scan_host);
diff --git a/include/linux/async.h b/include/linux/async.h
index 68a9530..7a24fe9 100644
--- a/include/linux/async.h
+++ b/include/linux/async.h
@@ -9,19 +9,47 @@
  * as published by the Free Software Foundation; version 2
  * of the License.
  */
+#ifndef __ASYNC_H__
+#define __ASYNC_H__
 
 #include <linux/types.h>
 #include <linux/list.h>
 
 typedef u64 async_cookie_t;
 typedef void (async_func_ptr) (void *data, async_cookie_t cookie);
+struct async_domain {
+	struct list_head node;
+	struct list_head domain;
+	int count;
+	unsigned registered:1;
+};
+
+/*
+ * domain participates in global async_synchronize_full
+ */
+#define ASYNC_DOMAIN(_name) \
+	struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \
+				      .domain = LIST_HEAD_INIT(_name.domain), \
+				      .count = 0, \
+				      .registered = 1 }
+
+/*
+ * domain is free to go out of scope as soon as all pending work is
+ * complete, this domain does not participate in async_synchronize_full
+ */
+#define ASYNC_DOMAIN_EXCLUSIVE(_name) \
+	struct async_domain _name = { .node = LIST_HEAD_INIT(_name.node), \
+				      .domain = LIST_HEAD_INIT(_name.domain), \
+				      .count = 0, \
+				      .registered = 0 }
 
 extern async_cookie_t async_schedule(async_func_ptr *ptr, void *data);
 extern async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
-					    struct list_head *list);
+					    struct async_domain *domain);
+void async_unregister_domain(struct async_domain *domain);
 extern void async_synchronize_full(void);
-extern void async_synchronize_full_domain(struct list_head *list);
+extern void async_synchronize_full_domain(struct async_domain *domain);
 extern void async_synchronize_cookie(async_cookie_t cookie);
 extern void async_synchronize_cookie_domain(async_cookie_t cookie,
-					    struct list_head *list);
-
+					    struct async_domain *domain);
+#endif
diff --git a/include/scsi/scsi_scan.h b/include/scsi/scsi_scan.h
deleted file mode 100644
index 7889888..0000000
--- a/include/scsi/scsi_scan.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _SCSI_SCSI_SCAN_H
-#define _SCSI_SCSI_SCAN_H
-
-#ifdef CONFIG_SCSI
-/* drivers/scsi/scsi_scan.c */
-extern int scsi_complete_async_scans(void);
-#else
-static inline int scsi_complete_async_scans(void) { return 0; }
-#endif
-
-#endif /* _SCSI_SCSI_SCAN_H */
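
For comparison, the shape of the scsi_scan_host() conversion above, reduced to a sketch (names are illustrative, not from the patch): a kthread function that returned int becomes an async_func_ptr taking a cookie and returning void, and going through async_schedule() is what lets wait_for_device_probe() flush the work.

#include <linux/async.h>

/* was: static int my_scan_thread(void *data), run via kthread_run() */
static void my_scan(void *data, async_cookie_t cookie)
{
	/* slow discovery work; the cookie orders it against other
	 * entries in the default domain */
}

static void my_kickoff(void *data)
{
	/* anonymous waiters, e.g. wait_for_device_probe(), will now
	 * flush this via async_synchronize_full() */
	async_schedule(my_scan, data);
}
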
diff --git a/kernel/async.c b/kernel/async.c
index bd0c168..9d31183 100644
--- a/kernel/async.c
+++ b/kernel/async.c
@@ -62,8 +62,10 @@ static async_cookie_t next_cookie = 1;
 #define MAX_WORK	32768
 
 static LIST_HEAD(async_pending);
-static LIST_HEAD(async_running);
+static ASYNC_DOMAIN(async_running);
+static LIST_HEAD(async_domains);
 static DEFINE_SPINLOCK(async_lock);
+static DEFINE_MUTEX(async_register_mutex);
 
 struct async_entry {
	struct list_head	list;
@@ -71,7 +73,7 @@ struct async_entry {
	async_cookie_t		cookie;
	async_func_ptr		*func;
	void			*data;
-	struct list_head	*running;
+	struct async_domain	*running;
 };
 
 static DECLARE_WAIT_QUEUE_HEAD(async_done);
@@ -82,13 +84,12 @@ static atomic_t entry_count;
 /*
  * MUST be called with the lock held!
  */
-static async_cookie_t __lowest_in_progress(struct list_head *running)
+static async_cookie_t __lowest_in_progress(struct async_domain *running)
 {
	struct async_entry *entry;
 
-	if (!list_empty(running)) {
-		entry = list_first_entry(running,
-			struct async_entry, list);
+	if (!list_empty(&running->domain)) {
+		entry = list_first_entry(&running->domain, typeof(*entry), list);
		return entry->cookie;
	}
 
@@ -99,7 +100,7 @@ static async_cookie_t __lowest_in_progress(struct list_head *running)
	return next_cookie;	/* "infinity" value */
 }
 
-static async_cookie_t lowest_in_progress(struct list_head *running)
+static async_cookie_t lowest_in_progress(struct async_domain *running)
 {
	unsigned long flags;
	async_cookie_t ret;
@@ -119,10 +120,11 @@ static void async_run_entry_fn(struct work_struct *work)
		container_of(work, struct async_entry, work);
	unsigned long flags;
	ktime_t uninitialized_var(calltime), delta, rettime;
+	struct async_domain *running = entry->running;
 
	/* 1) move self to the running queue */
	spin_lock_irqsave(&async_lock, flags);
-	list_move_tail(&entry->list, entry->running);
+	list_move_tail(&entry->list, &running->domain);
	spin_unlock_irqrestore(&async_lock, flags);
 
	/* 2) run (and print duration) */
@@ -145,6 +147,8 @@ static void async_run_entry_fn(struct work_struct *work)
	/* 3) remove self from the running queue */
	spin_lock_irqsave(&async_lock, flags);
	list_del(&entry->list);
+	if (running->registered && --running->count == 0)
+		list_del_init(&running->node);
 
	/* 4) free the entry */
	kfree(entry);
@@ -156,7 +160,7 @@ static void async_run_entry_fn(struct work_struct *work)
	wake_up(&async_done);
 }
 
-static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct list_head *running)
+static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct async_domain *running)
 {
	struct async_entry *entry;
	unsigned long flags;
@@ -187,6 +191,8 @@ static async_cookie_t __async_schedule(async_func_ptr *ptr, void *data, struct l
	spin_lock_irqsave(&async_lock, flags);
	newcookie = entry->cookie = next_cookie++;
	list_add_tail(&entry->list, &async_pending);
+	if (running->registered && running->count++ == 0)
+		list_add_tail(&running->node, &async_domains);
	atomic_inc(&entry_count);
	spin_unlock_irqrestore(&async_lock, flags);
 
@@ -223,7 +229,7 @@ EXPORT_SYMBOL_GPL(async_schedule);
  * Note: This function may be called from atomic or non-atomic contexts.
  */
 async_cookie_t async_schedule_domain(async_func_ptr *ptr, void *data,
-				     struct list_head *running)
+				     struct async_domain *running)
 {
	return __async_schedule(ptr, data, running);
 }
@@ -236,22 +242,52 @@ EXPORT_SYMBOL_GPL(async_schedule_domain);
  */
 void async_synchronize_full(void)
 {
+	mutex_lock(&async_register_mutex);
	do {
-		async_synchronize_cookie(next_cookie);
-	} while (!list_empty(&async_running) || !list_empty(&async_pending));
+		struct async_domain *domain = NULL;
+
+		spin_lock_irq(&async_lock);
+		if (!list_empty(&async_domains))
+			domain = list_first_entry(&async_domains, typeof(*domain), node);
+		spin_unlock_irq(&async_lock);
+
+		async_synchronize_cookie_domain(next_cookie, domain);
+	} while (!list_empty(&async_domains));
+	mutex_unlock(&async_register_mutex);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full);
 
 /**
+ * async_unregister_domain - ensure no more anonymous waiters on this domain
+ * @domain: idle domain to flush out of any async_synchronize_full instances
+ *
+ * async_synchronize_{cookie|full}_domain() are not flushed since callers
+ * of these routines should know the lifetime of @domain
+ *
+ * Prefer ASYNC_DOMAIN_EXCLUSIVE() declarations over flushing
+ */
+void async_unregister_domain(struct async_domain *domain)
+{
+	mutex_lock(&async_register_mutex);
+	spin_lock_irq(&async_lock);
+	WARN_ON(!domain->registered || !list_empty(&domain->node) ||
+		!list_empty(&domain->domain));
+	domain->registered = 0;
+	spin_unlock_irq(&async_lock);
+	mutex_unlock(&async_register_mutex);
+}
+EXPORT_SYMBOL_GPL(async_unregister_domain);
+
+/**
  * async_synchronize_full_domain - synchronize all asynchronous function within a certain domain
- * @list: running list to synchronize on
+ * @domain: running list to synchronize on
  *
  * This function waits until all asynchronous function calls for the
- * synchronization domain specified by the running list @list have been done.
+ * synchronization domain specified by the running list @domain have been done.
  */
-void async_synchronize_full_domain(struct list_head *list)
+void async_synchronize_full_domain(struct async_domain *domain)
 {
-	async_synchronize_cookie_domain(next_cookie, list);
+	async_synchronize_cookie_domain(next_cookie, domain);
 }
 EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
 
@@ -261,14 +297,16 @@ EXPORT_SYMBOL_GPL(async_synchronize_full_domain);
  * @running: running list to synchronize on
  *
  * This function waits until all asynchronous function calls for the
- * synchronization domain specified by the running list @list submitted
+ * synchronization domain specified by running list @running submitted
  * prior to @cookie have been done.
  */
-void async_synchronize_cookie_domain(async_cookie_t cookie,
-				     struct list_head *running)
+void async_synchronize_cookie_domain(async_cookie_t cookie, struct async_domain *running)
 {
	ktime_t uninitialized_var(starttime), delta, endtime;
 
+	if (!running)
+		return;
+
	if (initcall_debug && system_state == SYSTEM_BOOTING) {
		printk(KERN_DEBUG "async_waiting @ %i\n", task_pid_nr(current));
		starttime = ktime_get();
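
Note the bookkeeping above: a registered domain goes onto async_domains when its first entry is scheduled and comes off when its last entry completes, which is what lets the async_synchronize_full() loop terminate. It also means async_unregister_domain() expects an idle domain, so a plausible teardown order (sketch only; the helper name is illustrative) is:

static void my_domain_teardown(struct async_domain *domain)
{
	/* drain first: count drops to zero and node leaves async_domains */
	async_synchronize_full_domain(domain);

	/* now the WARN_ON in async_unregister_domain() sees an idle,
	 * registered domain and the unregister is safe */
	async_unregister_domain(domain);
}
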
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 8b53db3..238025f 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -27,7 +27,6 @@
 #include <linux/syscore_ops.h>
 #include <linux/ctype.h>
 #include <linux/genhd.h>
-#include <scsi/scsi_scan.h>
 
 #include "power.h"
 
@@ -748,13 +747,6 @@ static int software_resume(void)
		async_synchronize_full();
	}
 
-	/*
-	 * We can't depend on SCSI devices being available after loading
-	 * one of their modules until scsi_complete_async_scans() is
-	 * called and the resume device usually is a SCSI one.
-	 */
-	scsi_complete_async_scans();
-
	swsusp_resume_device = name_to_dev_t(resume_file);
	if (!swsusp_resume_device) {
		error = -ENODEV;
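
The deletion in software_resume() is safe because the async_synchronize_full() call retained a few lines earlier now covers SCSI as well: scsi_sd_probe_domain is a registered domain. A sketch of the guarantee being relied on (illustrative helper name, not in the patch):

static void my_resume_wait(void)
{
	/* flushes the pending queue and every registered domain,
	 * scsi_sd_probe_domain included, so the explicit
	 * scsi_complete_async_scans() call became redundant */
	async_synchronize_full();
}
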
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 91b0fd0..4ed81e7 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -24,7 +24,6 @@
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/freezer.h>
-#include <scsi/scsi_scan.h>
 
 #include <asm/uaccess.h>
 
@@ -84,7 +83,6 @@ static int snapshot_open(struct inode *inode, struct file *filp)
		 * appear.
		 */
		wait_for_device_probe();
-		scsi_complete_async_scans();
		data->swap = -1;
		data->mode = O_WRONLY;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 89eae93..fa1e312 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1545,7 +1545,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
	struct snd_soc_dapm_context *d;
	LIST_HEAD(up_list);
	LIST_HEAD(down_list);
-	LIST_HEAD(async_domain);
+	ASYNC_DOMAIN_EXCLUSIVE(async_domain);
	enum snd_soc_bias_level bias;
 
	trace_snd_soc_dapm_start(card);
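
Finally, the registered (non-exclusive) variant end to end, mirroring the scsi_sd_probe_domain lifetime in scsi.c above: declared at file scope so other subsystems can flush it anonymously, then unregistered on module exit. A sketch with illustrative names only:

#include <linux/async.h>
#include <linux/module.h>

/* .registered = 1: visible to async_synchronize_full() and hence to
 * wait_for_device_probe() */
ASYNC_DOMAIN(my_probe_domain);

static void my_probe(void *dev, async_cookie_t cookie)
{
	/* slow probe work */
}

static int __init my_init(void)
{
	async_schedule_domain(my_probe, NULL, &my_probe_domain);
	return 0;
}

static void __exit my_exit(void)
{
	/* drain, then drop the domain from the global list */
	async_synchronize_full_domain(&my_probe_domain);
	async_unregister_domain(&my_probe_domain);
}
module_init(my_init);
module_exit(my_exit);
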