/*
 * init_timers_cpu - set up the per-CPU timer-wheel base for @cpu.
 *
 * Returns 0 on success (in the non-visible tail) or -ENOMEM on
 * allocation failure. Called once per CPU as it is brought up.
 *
 * NOTE(review): this definition is truncated in this view — the
 * trailing wheel-list initialisation and final return are not visible,
 * and the token spacing is extraction-mangled. Comments only; code
 * left byte-identical.
 */
staticintinit_timers_cpu(int cpu) { int j; structtvec_base *base; structtvec_base *base; staticchar tvec_base_done[NR_CPUS];
/* First-time setup for this CPU? tvec_base_done[] records which CPUs already have a base. */
if (!tvec_base_done[cpu]) { staticchar boot_done;
/* APs (secondary CPUs) come up after the allocators work, so their base is kmalloc'd on the CPU's own NUMA node. */
if (boot_done) { /* * The APs use this path later in boot */ base = kmalloc_node(sizeof(*base), GFP_KERNEL | __GFP_ZERO, cpu_to_node(cpu)); if (!base) return -ENOMEM;
/*
 * The low bits of the base pointer are reused as flag bits
 * (see tbase_get_deferrable()), so an insufficiently aligned
 * allocation cannot be used — warn and bail out.
 * The boot CPU instead uses the static boot_tvec_bases, because
 * neither per-cpu memory nor the allocators exist that early.
 */
/* Make sure that tvec_base is 2 byte aligned */ if (tbase_get_deferrable(base)) { WARN_ON(1); kfree(base); return -ENOMEM; } per_cpu(tvec_bases, cpu) = base; } else { /* * This is for the boot CPU - we use compile-time * static initialisation because per-cpu memory isn't * ready yet and because the memory allocators are not * initialised either. */ boot_done = 1; base = &boot_tvec_bases; } spin_lock_init(&base->lock); tvec_base_done[cpu] = 1; } else { base = per_cpu(tvec_bases, cpu); }
/*
 * struct timer_list - a kernel timer.
 *
 * NOTE(review): definition truncated in this view — the callback
 * (->function/->data) fields and the closing brace are not visible.
 * Token spacing is extraction-mangled; code left byte-identical,
 * comments translated to English.
 */
structtimer_list { /* * All fields that change during normal runtime grouped to the * same cacheline */
// List node used to link this timer into a tvec_base wheel list.
structlist_headentry;
// Absolute expiry time, in jiffies (ticks).
unsignedlong expires;
// Identifies the owning tvec_base, i.e. which CPU's wheel this
// timer belongs to. Because tvec_base is 4-byte aligned, the low
// 2 bits of this pointer are free to carry the flag bits
// TIMER_DEFERRABLE and TIMER_IRQSAFE.
structtvec_base *base;
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	/* Starting an already-pending timer is a caller bug. */
	BUG_ON(timer_pending(timer));
	/* mod_timer() activates an inactive timer at timer->expires. */
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
/*
 * NOTE(review): this definition is truncated in this view — the tail
 * of mod_timer (presumably the __mod_timer() call that does the real
 * re-queueing, and the closing brace) is not visible, and token
 * spacing is extraction-mangled ("intmod_timer", "return1").
 * Comments only; code left byte-identical.
 */
/** * mod_timer - modify a timer's timeout * @timer: the timer to be modified * @expires: new timeout in jiffies * * mod_timer() is a more efficient way to update the expire field of an * active timer (if the timer is inactive it will be activated) * * mod_timer(timer, expires) is equivalent to: * * del_timer(timer); timer->expires = expires; add_timer(timer); * * Note that if there are multiple unserialized concurrent users of the * same timer, then mod_timer() is the only safe way to modify the timeout, * since add_timer() cannot modify an already running timer. * * The function returns whether it has modified a pending timer or not. * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an * active timer returns 1.) */ intmod_timer(struct timer_list *timer, unsignedlong expires) { expires = apply_slack(timer, expires);
/*
 * Fast path: if the timer is already pending with exactly this
 * (slack-adjusted) expiry, there is nothing to re-queue — report
 * "modified a pending timer" without taking the base lock.
 */
/* * This is a common optimization triggered by the * networking code - if the timer is re-modified * to be the same thing then just return: */ if (timer_pending(timer) && timer->expires == expires) return1;
/*
 * NOTE(review): this definition is truncated in this view — the body
 * is cut after the 'long delta' declaration (the bit-mask rounding
 * described in the algorithm comment is not visible), and token
 * spacing is extraction-mangled. Comments only; code left
 * byte-identical.
 */
/* * Decide where to put the timer while taking the slack into account * * Algorithm: * 1) calculate the maximum (absolute) time * 2) calculate the highest bit where the expires and new max are different * 3) use this bit to make a mask * 4) use the bitmask to round down the maximum time, so that all last * bits are zeros */ staticinline unsignedlongapply_slack(struct timer_list *timer, unsignedlong expires) { unsignedlong expires_limit, mask; int bit;
/*
 * A non-negative ->slack is an explicit allowance in jiffies; the
 * latest acceptable expiry is simply expires + slack. Otherwise the
 * allowance is derived from the delay length (relative to jiffies),
 * in the non-visible remainder of this function.
 */
if (timer->slack >= 0) { expires_limit = expires + timer->slack; } else { long delta = expires - jiffies;
/* * This function runs timers and the timer-tq in bottom half context. */ staticvoidrun_timer_softirq(struct softirq_action *h) { structtvec_base *base = __this_cpu_read(tvec_bases);
if (time_after_eq(jiffies, base->timer_jiffies)) __run_timers(base); }