[.32-rc3] scheduler: iwlagn consistently high in "waiting for CPU"

Message ID: 1255001062.7500.1.camel@marge.simson.net (mailing list archive)
State: Not Applicable, archived

Commit Message

Mike Galbraith, Oct. 8, 2009, 11:24 a.m. UTC

Patch

Index: linux-2.6/include/linux/latencytop.h
===================================================================
--- linux-2.6.orig/include/linux/latencytop.h
+++ linux-2.6/include/linux/latencytop.h
@@ -26,12 +26,12 @@  struct latency_record {
 struct task_struct;
 
 extern int latencytop_enabled;
-void __account_scheduler_latency(struct task_struct *task, int usecs, int inter);
+void __account_scheduler_latency(struct task_struct *task, unsigned long usecs);
 static inline void
-account_scheduler_latency(struct task_struct *task, int usecs, int inter)
+account_scheduler_latency(struct task_struct *task, unsigned long usecs)
 {
 	if (unlikely(latencytop_enabled))
-		__account_scheduler_latency(task, usecs, inter);
+		__account_scheduler_latency(task, usecs);
 }
 
 void clear_all_latency_tracing(struct task_struct *p);
Index: linux-2.6/kernel/latencytop.c
===================================================================
--- linux-2.6.orig/kernel/latencytop.c
+++ linux-2.6/kernel/latencytop.c
@@ -157,34 +157,17 @@  static inline void store_stacktrace(stru
  * __account_scheduler_latency - record an occured latency
  * @tsk - the task struct of the task hitting the latency
  * @usecs - the duration of the latency in microseconds
- * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
  *
  * This function is the main entry point for recording latency entries
  * as called by the scheduler.
- *
- * This function has a few special cases to deal with normal 'non-latency'
- * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
- * since this usually is caused by waiting for events via select() and co.
- *
- * Negative latencies (caused by time going backwards) are also explicitly
- * skipped.
  */
 void __sched
-__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
+__account_scheduler_latency(struct task_struct *tsk, unsigned long usecs)
 {
 	unsigned long flags;
 	int i, q;
 	struct latency_record lat;
 
-	/* Long interruptible waits are generally user requested... */
-	if (inter && usecs > 5000)
-		return;
-
-	/* Negative sleeps are time going backwards */
-	/* Zero-time sleeps are non-interesting */
-	if (usecs <= 0)
-		return;
-
 	memset(&lat, 0, sizeof(lat));
 	lat.count = 1;
 	lat.time = usecs;
Index: linux-2.6/kernel/sched_fair.c
===================================================================
--- linux-2.6.orig/kernel/sched_fair.c
+++ linux-2.6/kernel/sched_fair.c
@@ -495,8 +495,10 @@  static void update_curr(struct cfs_rq *c
 	u64 now = rq_of(cfs_rq)->clock;
 	unsigned long delta_exec;
 
-	if (unlikely(!curr))
+	if (unlikely(!curr)) {
+		update_rq_clock(rq_of(cfs_rq));
 		return;
+	}
 
 	/*
 	 * Get the amount of time the current task was running
@@ -548,8 +550,11 @@  update_stats_wait_end(struct cfs_rq *cfs
 			rq_of(cfs_rq)->clock - se->wait_start);
 #ifdef CONFIG_SCHEDSTATS
 	if (entity_is_task(se)) {
-		trace_sched_stat_wait(task_of(se),
-			rq_of(cfs_rq)->clock - se->wait_start);
+		struct task_struct *tsk = task_of(se);
+		u64 delta = rq_of(cfs_rq)->clock - se->wait_start;
+
+		trace_sched_stat_wait(tsk, delta);
+		account_scheduler_latency(tsk, delta >> 10);
 	}
 #endif
 	schedstat_set(se->wait_start, 0);
@@ -643,10 +648,8 @@  static void enqueue_sleeper(struct cfs_r
 		se->sleep_start = 0;
 		se->sum_sleep_runtime += delta;
 
-		if (tsk) {
-			account_scheduler_latency(tsk, delta >> 10, 1);
+		if (tsk)
 			trace_sched_stat_sleep(tsk, delta);
-		}
 	}
 	if (se->block_start) {
 		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
@@ -677,7 +680,6 @@  static void enqueue_sleeper(struct cfs_r
 						(void *)get_wchan(tsk),
 						delta >> 20);
 			}
-			account_scheduler_latency(tsk, delta >> 10, 0);
 		}
 	}
 #endif
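
Not part of the patch itself, but for context: with this change the latencytop record is taken in update_stats_wait_end(), i.e. when a task finally gets the CPU after sitting on the runqueue, and "delta >> 10" cheaply approximates the nanosecond-to-microsecond conversion latencytop expects. A minimal user-space sketch of inspecting those records via /proc/latency_stats is below; it assumes a kernel built with CONFIG_LATENCYTOP and collection enabled with "echo 1 > /proc/sys/kernel/latencytop".

/*
 * Hypothetical helper, not part of the patch: dump /proc/latency_stats so the
 * new "waiting for CPU" entries accounted in update_stats_wait_end() can be
 * inspected.  Values in that file are in microseconds, matching the
 * "delta >> 10" conversion above.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/latency_stats", "r");
	char line[512];

	if (!f) {
		perror("/proc/latency_stats");
		return 1;
	}

	/* Each record: count, total usecs, max usecs, then backtrace symbols. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return 0;
}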