linux-kernel-cpufreq_interactive1

This article takes a deep dive into intelligent frequency scaling on the processor: how dynamically adjusting the CPU frequency strikes the best balance between performance and energy efficiency. It focuses on the core concepts, the working principle, the key tunable parameters, and practical usage scenarios, and also covers the advanced features tied to frequency scaling, such as dynamic load estimation, the frequency-selection algorithm, and the integration with input-pulse boost support, with the aim of presenting an efficient, power-aware CPU frequency management strategy.
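
Before walking through the source, here is a minimal user-space sketch of the governor's core idea, with an invented frequency table and sample values for illustration only (the real governor works against the kernel's cpufreq frequency table, as the listing below shows): measure how busy the CPU was over the last sample window, scale that by the current frequency, and pick the lowest frequency whose projected load would stay at or below the target load.

#include <stdio.h>

/* Hypothetical frequency table in kHz, ascending -- illustration only. */
static const unsigned int freq_table[] = { 300000, 600000, 1000000, 1500000 };
#define NFREQ (sizeof(freq_table) / sizeof(freq_table[0]))

#define TARGET_LOAD 95 /* mirrors DEFAULT_TARGET_LOAD in the source below */

/*
 * Return the lowest table frequency whose projected load does not exceed
 * TARGET_LOAD, given busy/wall times (usecs) measured at cur_freq -- the
 * same idea choose_freq() implements against the real cpufreq table.
 */
static unsigned int pick_freq(unsigned long busy_us, unsigned long wall_us,
                              unsigned int cur_freq)
{
    /* loadadjfreq = %busy * cur_freq, as in cpufreq_interactive_timer() */
    unsigned long loadadjfreq = busy_us * 100 / wall_us * cur_freq;
    unsigned int i;

    for (i = 0; i < NFREQ; i++)
        if (loadadjfreq / freq_table[i] <= TARGET_LOAD)
            return freq_table[i];
    return freq_table[NFREQ - 1];
}

int main(void)
{
    /* 70% busy over a 20 ms window at 600 MHz -> needs ~442 MHz at 95% */
    printf("next freq: %u kHz\n", pick_freq(14000, 20000, 600000));
    return 0;
}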


/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/input.h>
#include "cpufreq_governor.h"

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

#ifdef CONFIG_HI6XXX_LP_TRACE
#include "pwrctrl_multi_memcfg.h"
#include "pwrctrl_multi_def.h"
#endif

#ifdef CONFIG_HISI_HMPTH_SET
#define DEFAULT_HMP_UP_THRESHOLD (768)
#define DEFAULT_HMP_DOWN_THRESHOLD (448)
#define HMP_OFF   (0)
#define HMP_ON   (1)
#define HMP_PRIO  (1)
#define HMP_NAME  "boostpulse"
extern int set_hmp_policy(const char *pname, int prio, int state, int up_thresholds, int down_thresholds);
#endif

#ifdef CONFIG_ARCH_HIXXX
static int cpufreq_interactive_initialized;
#endif

struct cpufreq_interactive_cpuinfo {
 struct timer_list cpu_timer;
 struct timer_list cpu_slack_timer;
 spinlock_t load_lock; /* protects the next 4 fields */
 u64 time_in_idle;
 u64 time_in_idle_timestamp;
 u64 cputime_speedadj;
 u64 cputime_speedadj_timestamp;
 struct cpufreq_policy *policy;
 struct cpufreq_frequency_table *freq_table;
 unsigned int target_freq;
 unsigned int floor_freq;
 u64 floor_validate_time;
 u64 hispeed_validate_time;
 struct rw_semaphore enable_sem;
 int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
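
/*
 * Locking note: every hot path (sample timer, idle callbacks, cpufreq
 * notifier, speedchange task) begins with
 * down_read_trylock(&pcpu->enable_sem) and then checks governor_enabled;
 * the write side of enable_sem belongs to governor start/stop (not part of
 * this excerpt), so a CPU whose governor is being torn down is never raced
 * by a late timer or notifier.
 */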

#ifdef CONFIG_INPUT_PULSE_SUPPORT
struct cpufreq_interactive_inputopen {
 struct input_handle *handle;
 struct work_struct inputopen_work;
};

static struct cpufreq_interactive_inputopen inputopen;
static struct workqueue_struct *down_wq;
#endif

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 95
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
 DEFAULT_ABOVE_HISPEED_DELAY };
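
/*
 * target_loads and above_hispeed_delay are flat arrays of
 * {value, freq, value, freq, ...} pairs: even indices hold the value, odd
 * indices the frequency boundary at which the next value takes over (hence
 * the i += 2 walk in freq_to_targetload() and
 * freq_to_above_hispeed_delay() below). In the mainline interactive
 * governor these are written through sysfs in the form
 * "85 1000000:90 1700000:97": target load 85 below 1.0 GHz, 90 from
 * 1.0 GHz, 97 from 1.7 GHz. The defaults above are the single-entry case.
 */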

struct cpufreq_interactive_tunables {
 int usage_count;
 /* High speed to bump to from low speed when load bursts (default: max) */
 unsigned int hispeed_freq;
 /* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
 unsigned long go_hispeed_load;
 /* Target load. Lower values result in higher CPU speeds. */
 spinlock_t target_loads_lock;
 unsigned int *target_loads;
 int ntarget_loads;
 /*
  * The minimum amount of time to spend at a frequency before we can ramp
  * down.
  */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
 unsigned long min_sample_time;
 /*
  * The sample rate of the timer used to increase frequency
  */
 unsigned long timer_rate;
 /*
  * Wait this long before raising speed above hispeed, by default a
  * single timer interval.
  */
 spinlock_t above_hispeed_delay_lock;
 unsigned int *above_hispeed_delay;
 int nabove_hispeed_delay;
 /* Non-zero means indefinite speed boost active */
 int boost_val;
 /* Duration of a boost pulse in usecs */
 int boostpulse_duration_val;
 /* End time of boost pulse in ktime converted to usecs */
 u64 boostpulse_endtime;

#ifdef CONFIG_ARCH_HIXXX
 /* Minimum boostpulse interval */
#define DEFAULT_MIN_BOOSTPULSE_INTERVAL (500 * USEC_PER_MSEC)
 int boostpulse_min_interval;
#endif
 /*
  * Max additional time to wait in idle, beyond timer_rate, at speeds
  * above minimum before wakeup to reduce speed, or -1 if unnecessary.
  */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
 int timer_slack_val;
 bool io_is_busy;

#ifdef CONFIG_HISI_HMPTH_SET
 /* Non-zero means HMP boost active */
 int boost_hmp_val;
 bool hmp_boosted;
 /* HMP up-threshold while boosting (default 768) */
 int boost_hmp_upthreshold;
 /* HMP down-threshold while boosting (default 448) */
 int boost_hmp_downthreshold;
#endif
};

/* For cases where we have a single governor instance for the whole system */
struct cpufreq_interactive_tunables *common_tunables;

#ifdef CONFIG_HI6XXX_LP_TRACE
static void __iomem *g_freqdump_addr;

static volatile unsigned int g_print_flag = 0;
#endif

static struct attribute_group *get_sysfs_attr(void);

static void cpufreq_interactive_timer_resched(
 struct cpufreq_interactive_cpuinfo *pcpu)
{
 struct cpufreq_interactive_tunables *tunables =
  pcpu->policy->governor_data;
 unsigned long expires;
 unsigned long flags;

 spin_lock_irqsave(&pcpu->load_lock, flags);
 pcpu->time_in_idle =
  get_cpu_idle_time(smp_processor_id(),
      &pcpu->time_in_idle_timestamp,
      tunables->io_is_busy);
 pcpu->cputime_speedadj = 0;
 pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
 expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
 mod_timer_pinned(&pcpu->cpu_timer, expires);

 if (tunables->timer_slack_val >= 0 &&
     pcpu->target_freq > pcpu->policy->min) {
  expires += usecs_to_jiffies(tunables->timer_slack_val);
  mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
 }

 spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(
 struct cpufreq_interactive_tunables *tunables, int cpu)
{
 struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
 unsigned long expires = jiffies +
  usecs_to_jiffies(tunables->timer_rate);
 unsigned long flags;

 spin_lock_irqsave(&pcpu->load_lock, flags);

 pcpu->cpu_timer.expires = expires;
 add_timer_on(&pcpu->cpu_timer, cpu);
 if (tunables->timer_slack_val >= 0 &&
     pcpu->target_freq > pcpu->policy->min) {
  expires += usecs_to_jiffies(tunables->timer_slack_val);
  pcpu->cpu_slack_timer.expires = expires;
  add_timer_on(&pcpu->cpu_slack_timer, cpu);
 }

 pcpu->time_in_idle =
  get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
      tunables->io_is_busy);
 pcpu->cputime_speedadj = 0;
 pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
 spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(
 struct cpufreq_interactive_tunables *tunables,
 unsigned int freq)
{
 int i;
 unsigned int ret;
 unsigned long flags;

 spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

 for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
   freq >= tunables->above_hispeed_delay[i+1]; i += 2)
  ;

 ret = tunables->above_hispeed_delay[i];
 spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
 return ret;
}

static unsigned int freq_to_targetload(
 struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
 int i;
 unsigned int ret;
 unsigned long flags;

 spin_lock_irqsave(&tunables->target_loads_lock, flags);

 for (i = 0; i < tunables->ntarget_loads - 1 &&
      freq >= tunables->target_loads[i+1]; i += 2)
  ;

 ret = tunables->target_loads[i];
 spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
 return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
  unsigned int loadadjfreq)
{
 unsigned int freq = pcpu->policy->cur;
 unsigned int prevfreq, freqmin, freqmax;
 unsigned int tl;
 int index;

 freqmin = 0;
 freqmax = UINT_MAX;

 do {
  prevfreq = freq;
  tl = freq_to_targetload(pcpu->policy->governor_data, freq);

  /*
   * Find the lowest frequency where the computed load is less
   * than or equal to the target load.
   */

  if (cpufreq_frequency_table_target(
       pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
       CPUFREQ_RELATION_L, &index))
   break;
  freq = pcpu->freq_table[index].frequency;

  if (freq > prevfreq) {
   /* The previous frequency is too low. */
   freqmin = prevfreq;

   if (freq >= freqmax) {
    /*
     * Find the highest frequency that is less
     * than freqmax.
     */
    if (cpufreq_frequency_table_target(
         pcpu->policy, pcpu->freq_table,
         freqmax - 1, CPUFREQ_RELATION_H,
         &index))
     break;
    freq = pcpu->freq_table[index].frequency;

    if (freq == freqmin) {
     /*
      * The first frequency below freqmax
      * has already been found to be too
      * low.  freqmax is the lowest speed
      * we found that is fast enough.
      */
     freq = freqmax;
     break;
    }
   }
  } else if (freq < prevfreq) {
   /* The previous frequency is high enough. */
   freqmax = prevfreq;

   if (freq <= freqmin) {
    /*
     * Find the lowest frequency that is higher
     * than freqmin.
     */
    if (cpufreq_frequency_table_target(
         pcpu->policy, pcpu->freq_table,
         freqmin + 1, CPUFREQ_RELATION_L,
         &index))
     break;
    freq = pcpu->freq_table[index].frequency;

    /*
     * If freqmax is the first frequency above
     * freqmin then we have already found that
     * this speed is fast enough.
     */
    if (freq == freqmax)
     break;
   }
  }

  /* If same frequency chosen as previous then done. */
 } while (freq != prevfreq);

 return freq;
}
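
/*
 * choose_freq() converges by bisection: each pass divides the load-adjusted
 * frequency by the target load of the current candidate to get the lowest
 * frequency that would run at or below that target, then narrows the
 * [freqmin, freqmax] bracket until the candidate repeats. The loop exists
 * because target_loads may differ per frequency, so the target has to be
 * re-evaluated at every candidate rather than solved in one division.
 */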

static u64 update_load(int cpu)
{
 struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
 struct cpufreq_interactive_tunables *tunables =
  pcpu->policy->governor_data;
 u64 now;
 u64 now_idle;
 u64 delta_idle;
 u64 delta_time;
 u64 active_time;

 now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
 delta_idle = (now_idle - pcpu->time_in_idle);
 delta_time = (now - pcpu->time_in_idle_timestamp);

 if (delta_time <= delta_idle)
  active_time = 0;
 else
  active_time = delta_time - delta_idle;

 pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

 pcpu->time_in_idle = now_idle;
 pcpu->time_in_idle_timestamp = now;
 return now;
}
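
/*
 * cputime_speedadj accumulates (active time x current frequency) between
 * samples, so cputime_speedadj / delta_time is the average frequency the
 * CPU actually consumed over the window. The timer multiplies this by 100
 * to form loadadjfreq; dividing loadadjfreq by any frequency then yields
 * the load percentage the CPU would show if it ran at that frequency.
 */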

#ifdef CONFIG_HI6XXX_LP_TRACE
void cpufreq_set_print_flag(unsigned int flag)
{
    if (flag == 1)
        g_print_flag = 1;
    else if (flag == 0)
        g_print_flag = 0;
    else
        pr_err("cpufreq_set_print_flag: invalid input parameter!\n");
}
EXPORT_SYMBOL_GPL(cpufreq_set_print_flag);
#endif
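
/*
 * Per-CPU sample timer, the heart of the governor. Each run:
 *  1. computes the load over the last window (update_load());
 *  2. jumps to hispeed_freq on a load burst (cpu_load >= go_hispeed_load)
 *     or while a boost/boostpulse is active;
 *  3. otherwise picks the lowest adequate frequency via choose_freq();
 *  4. defers further increases above hispeed_freq until the
 *     above_hispeed_delay for the current speed has expired;
 *  5. refuses to drop below floor_freq until min_sample_time has passed;
 *  6. finally flags this CPU in speedchange_cpumask and wakes the
 *     speedchange task to apply the new target frequency.
 */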

static void cpufreq_interactive_timer(unsigned long data)
{
 u64 now;
 unsigned int delta_time;
 u64 cputime_speedadj;
 int cpu_load;
 struct cpufreq_interactive_cpuinfo *pcpu =
  &per_cpu(cpuinfo, data);
 struct cpufreq_interactive_tunables *tunables =
  pcpu->policy->governor_data;
 unsigned int new_freq;
 unsigned int loadadjfreq;
 unsigned int index;
 unsigned long flags;
 bool boosted;
#ifdef CONFIG_HI6XXX_LP_TRACE
    struct freqdump* freqdump_temp;
    freqdump_temp = (struct freqdump*)g_freqdump_addr;
#endif

 if (!down_read_trylock(&pcpu->enable_sem))
  return;
 if (!pcpu->governor_enabled)
  goto exit;

 spin_lock_irqsave(&pcpu->load_lock, flags);
 now = update_load(data);
 delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
 cputime_speedadj = pcpu->cputime_speedadj;
 spin_unlock_irqrestore(&pcpu->load_lock, flags);

 if (WARN_ON_ONCE(!delta_time))
  goto rearm;

 do_div(cputime_speedadj, delta_time);
 loadadjfreq = (unsigned int)cputime_speedadj * 100;
 cpu_load = loadadjfreq / pcpu->target_freq;
 boosted = tunables->boost_val || now < tunables->boostpulse_endtime;

#ifdef CONFIG_HI6XXX_LP_TRACE
    freqdump_temp->freq_acpu_load = (unsigned int)cpu_load;

    if(g_print_flag)
    {
        pr_err("acpu dfs: cpuload = %d\n", cpu_load);
    }
#endif
#ifdef CONFIG_HISI_HMPTH_SET
 if (tunables->hmp_boosted && !boosted) {
  set_hmp_policy(HMP_NAME, HMP_PRIO, HMP_OFF, tunables->boost_hmp_upthreshold,
    tunables->boost_hmp_downthreshold);
  tunables->hmp_boosted = false;
 }
#endif

 if (cpu_load >= tunables->go_hispeed_load || boosted) {
  if (pcpu->target_freq < tunables->hispeed_freq) {
   new_freq = tunables->hispeed_freq;
  } else {
   new_freq = choose_freq(pcpu, loadadjfreq);

   if (new_freq < tunables->hispeed_freq)
    new_freq = tunables->hispeed_freq;
  }
 } else {
  new_freq = choose_freq(pcpu, loadadjfreq);
 }

 if (pcpu->target_freq >= tunables->hispeed_freq &&
     new_freq > pcpu->target_freq &&
     now - pcpu->hispeed_validate_time <
     freq_to_above_hispeed_delay(tunables, pcpu->target_freq)) {
  trace_cpufreq_interactive_notyet(
   data, cpu_load, pcpu->target_freq,
   pcpu->policy->cur, new_freq);
  goto rearm;
 }

 pcpu->hispeed_validate_time = now;

 if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
        new_freq, CPUFREQ_RELATION_L,
        &index))
  goto rearm;

 new_freq = pcpu->freq_table[index].frequency;

 /*
  * Do not scale below floor_freq unless we have been at or above the
  * floor frequency for the minimum sample time since last validated.
  */
 if (new_freq < pcpu->floor_freq) {
  if (now - pcpu->floor_validate_time <
    tunables->min_sample_time) {
   trace_cpufreq_interactive_notyet(
    data, cpu_load, pcpu->target_freq,
    pcpu->policy->cur, new_freq);
   goto rearm;
  }
 }

 /*
  * Update the timestamp for checking whether speed has been held at
  * or above the selected frequency for a minimum of min_sample_time,
  * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
  * allow the speed to drop as soon as the boostpulse duration expires
  * (or the indefinite boost is turned off).
  */

 if (!boosted || new_freq > tunables->hispeed_freq) {
  pcpu->floor_freq = new_freq;
  pcpu->floor_validate_time = now;
 }

 if (pcpu->target_freq == new_freq) {
  trace_cpufreq_interactive_already(
   data, cpu_load, pcpu->target_freq,
   pcpu->policy->cur, new_freq);
  goto rearm_if_notmax;
 }

 trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
      pcpu->policy->cur, new_freq);

#ifdef CONFIG_HI6XXX_LP_TRACE
    if(g_print_flag)
    {
        pr_err("acpu dfs: target freq = %d\n", new_freq);
    }
#endif

 pcpu->target_freq = new_freq;
 spin_lock_irqsave(&speedchange_cpumask_lock, flags);
 cpumask_set_cpu(data, &speedchange_cpumask);
 spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
 wake_up_process(speedchange_task);

rearm_if_notmax:
 /*
  * Already set max speed and don't see a need to change that,
  * wait until next idle to re-evaluate, don't need timer.
  */
 if (pcpu->target_freq == pcpu->policy->max)
  goto exit;

rearm:
 if (!timer_pending(&pcpu->cpu_timer))
  cpufreq_interactive_timer_resched(pcpu);

exit:
 up_read(&pcpu->enable_sem);
 return;
}

static void cpufreq_interactive_idle_start(void)
{
 struct cpufreq_interactive_cpuinfo *pcpu =
  &per_cpu(cpuinfo, smp_processor_id());
 int pending;

 if (!down_read_trylock(&pcpu->enable_sem))
  return;
 if (!pcpu->governor_enabled) {
  up_read(&pcpu->enable_sem);
  return;
 }

 pending = timer_pending(&pcpu->cpu_timer);

 if (pcpu->target_freq != pcpu->policy->min) {
  /*
   * Entering idle while not at lowest speed.  On some
   * platforms this can hold the other CPU(s) at that speed
   * even though the CPU is idle. Set a timer to re-evaluate
   * speed so this idle CPU doesn't hold the other CPUs above
   * min indefinitely.  This should probably be a quirk of
   * the CPUFreq driver.
   */
  if (!pending)
   cpufreq_interactive_timer_resched(pcpu);
 }

 up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
 struct cpufreq_interactive_cpuinfo *pcpu =
  &per_cpu(cpuinfo, smp_processor_id());

 if (!down_read_trylock(&pcpu->enable_sem))
  return;
 if (!pcpu->governor_enabled) {
  up_read(&pcpu->enable_sem);
  return;
 }

 /* Arm the timer for 1-2 ticks later if not already. */
 if (!timer_pending(&pcpu->cpu_timer)) {
  cpufreq_interactive_timer_resched(pcpu);
 } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
  del_timer(&pcpu->cpu_timer);
  del_timer(&pcpu->cpu_slack_timer);
  cpufreq_interactive_timer(smp_processor_id());
 }

 up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
 unsigned int cpu;
 cpumask_t tmp_mask;
 unsigned long flags;
 struct cpufreq_interactive_cpuinfo *pcpu;

 while (1) {
  set_current_state(TASK_INTERRUPTIBLE);
  spin_lock_irqsave(&speedchange_cpumask_lock, flags);

  if (cpumask_empty(&speedchange_cpumask)) {
   spin_unlock_irqrestore(&speedchange_cpumask_lock,
            flags);
   schedule();

   if (kthread_should_stop())
    break;

   spin_lock_irqsave(&speedchange_cpumask_lock, flags);
  }

  set_current_state(TASK_RUNNING);
  tmp_mask = speedchange_cpumask;
  cpumask_clear(&speedchange_cpumask);
  spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

  for_each_cpu(cpu, &tmp_mask) {
   unsigned int j;
   unsigned int max_freq = 0;

   pcpu = &per_cpu(cpuinfo, cpu);
   if (!down_read_trylock(&pcpu->enable_sem))
    continue;
   if (!pcpu->governor_enabled) {
    up_read(&pcpu->enable_sem);
    continue;
   }

   for_each_cpu(j, pcpu->policy->cpus) {
    struct cpufreq_interactive_cpuinfo *pjcpu =
     &per_cpu(cpuinfo, j);

    if (pjcpu->target_freq > max_freq)
     max_freq = pjcpu->target_freq;
   }

   if (max_freq != pcpu->policy->cur)
    __cpufreq_driver_target(pcpu->policy,
       max_freq,
       CPUFREQ_RELATION_H);
   trace_cpufreq_interactive_setspeed(cpu,
           pcpu->target_freq,
           pcpu->policy->cur);

   up_read(&pcpu->enable_sem);
  }
 }

 return 0;
}
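
/*
 * The speedchange task is a realtime kthread because the actual frequency
 * transition (__cpufreq_driver_target()) may sleep and therefore cannot be
 * issued from the timer. For each CPU flagged in speedchange_cpumask it
 * applies the highest target_freq among all CPUs of that policy, since
 * CPUs sharing a policy share one clock.
 */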

static void cpufreq_interactive_boost(void)
{
 int i;
 int anyboost = 0;
 unsigned long flags;
 struct cpufreq_interactive_cpuinfo *pcpu;
 struct cpufreq_interactive_tunables *tunables;

 spin_lock_irqsave(&speedchange_cpumask_lock, flags);

 for_each_online_cpu(i) {
  pcpu = &per_cpu(cpuinfo, i);
  if (!pcpu->governor_enabled)
   continue;

  tunables = pcpu->policy->governor_data;

  if (pcpu->target_freq < tunables->hispeed_freq) {
   pcpu->target_freq = tunables->hispeed_freq;
   cpumask_set_cpu(i, &speedchange_cpumask);
   pcpu->hispeed_validate_time =
    ktime_to_us(ktime_get());
   anyboost = 1;
  }

  /*
   * Set floor freq and (re)start timer for when last
   * validated.
   */

  pcpu->floor_freq = tunables->hispeed_freq;
  pcpu->floor_validate_time = ktime_to_us(ktime_get());
 }

 spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

 if (anyboost)
  wake_up_process(speedchange_task);
}
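
/*
 * cpufreq_interactive_boost() raises every online CPU with an active
 * governor to at least hispeed_freq and pins floor_freq there, so the
 * speed cannot fall below hispeed_freq again until the boost ends and
 * min_sample_time elapses. In the mainline interactive governor this is
 * triggered by the "boost" (indefinite) and "boostpulse" (one-shot) sysfs
 * attributes, and below by input events.
 */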

#ifdef CONFIG_ARCH_HIXXX
#define MAX_LITTLE_CPU_NR 4

int hisi_little_cluster_boost(void)
{
 int i;
 int anyboost = 0;
 unsigned long flags;
 struct cpufreq_interactive_cpuinfo *pcpu;
 struct cpufreq_interactive_tunables *tunables;

 if (!cpufreq_interactive_initialized)
  return -ENODEV;

 spin_lock_irqsave(&speedchange_cpumask_lock, flags);

 for_each_online_cpu(i) {
  if (i == MAX_LITTLE_CPU_NR)
   break;

  pcpu = &per_cpu(cpuinfo, i);
  if (!pcpu->governor_enabled)
   continue;

  tunables = pcpu->policy->governor_data;

  if (pcpu->target_freq < tunables->hispeed_freq) {
   pcpu->target_freq = tunables->hispeed_freq;
   cpumask_set_cpu(i, &speedchange_cpumask);
   pcpu->hispeed_validate_time =
    ktime_to_us(ktime_get());
   anyboost = 1;
  }

  /*
   * Set floor freq and (re)start timer for when last
   * validated.
   */

  pcpu->floor_freq = tunables->hispeed_freq;
  pcpu->floor_validate_time = ktime_to_us(ktime_get());
 }

 spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

 if (anyboost)
  wake_up_process(speedchange_task);

 return 0;
}
EXPORT_SYMBOL(hisi_little_cluster_boost);
#endif

static int cpufreq_interactive_notifier(
 struct notifier_block *nb, unsigned long val, void *data)
{
 struct cpufreq_freqs *freq = data;
 struct cpufreq_interactive_cpuinfo *pcpu;
 int cpu;
 unsigned long flags;

 if (val == CPUFREQ_POSTCHANGE) {
  pcpu = &per_cpu(cpuinfo, freq->cpu);
  if (!down_read_trylock(&pcpu->enable_sem))
   return 0;
  if (!pcpu->governor_enabled) {
   up_read(&pcpu->enable_sem);
   return 0;
  }

  for_each_cpu(cpu, pcpu->policy->cpus) {
   struct cpufreq_interactive_cpuinfo *pjcpu =
    &per_cpu(cpuinfo, cpu);
   if (cpu != freq->cpu) {
    if (!down_read_trylock(&pjcpu->enable_sem))
     continue;
    if (!pjcpu->governor_enabled) {
     up_read(&pjcpu->enable_sem);
     continue;
    }
   }
   spin_lock_irqsave(&pjcpu->load_lock, flags);
   update_load(cpu);
   spin_unlock_irqrestore(&pjcpu->load_lock, flags);
   if (cpu != freq->cpu)
    up_read(&pjcpu->enable_sem);
  }

  up_read(&pcpu->enable_sem);
 }
 return 0;
}

static struct notifier_block cpufreq_notifier_block = {
 .notifier_call = cpufreq_interactive_notifier,
};


#ifdef CONFIG_INPUT_PULSE_SUPPORT
/*
 * Pulsed boost on input event raises CPUs to hispeed_freq and lets the
 * usual min_sample_time logic decide when to allow the speed to drop.
 */

static void cpufreq_interactive_input_event(struct input_handle *handle,
         unsigned int type,
         unsigned int code, int value)
{
 struct cpufreq_policy *policy;
 struct cpufreq_interactive_tunables *tunables;
 unsigned int cpu = smp_processor_id();

 if (type == EV_SYN && code == SYN_REPORT) {
  trace_cpufreq_interactive_boost("input");

  policy = cpufreq_cpu_get(cpu);
  if (policy != NULL) {
   tunables = policy->governor_data;
   tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
    tunables->boostpulse_duration_val;

   cpufreq_interactive_boost();
   cpufreq_cpu_put(policy);
  }
 }
}

static void cpufreq_interactive_input_open(struct work_struct *w)
{
 struct cpufreq_interactive_inputopen *io =
  container_of(w, struct cpufreq_interactive_inputopen,
        inputopen_work);
 int error;

 error = input_open_device(io->handle);
 if (error)
  input_unregister_handle(io->handle);
}

static int cpufreq_interactive_input_connect(struct input_handler *handler,
          struct input_dev *dev,
          const struct input_device_id *id)
{
 struct input_handle *handle;
 int error;

 pr_info("%s: connect to %s\n", __func__, dev->name);
 handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
 if (!handle)
  return -ENOMEM;

 handle->dev = dev;
 handle->handler = handler;
 handle->name = "cpufreq_interactive";

 error = input_register_handle(handle);
 if (error)
  goto err;

 inputopen.handle = handle;
 queue_work(down_wq, &inputopen.inputopen_work);
 return 0;
err:
 kfree(handle);
 return error;
}

static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
{
 input_close_device(handle);
 input_unregister_handle(handle);
 kfree(handle);
}

static const struct input_device_id cpufreq_interactive_ids[] = {
 {
  .flags = INPUT_DEVICE_ID_MATCH_EVBIT |
    INPUT_DEVICE_ID_MATCH_ABSBIT,
  .evbit = { BIT_MASK(EV_ABS) },
  .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
       BIT_MASK(ABS_MT_POSITION_X) |
       BIT_MASK(ABS_MT_POSITION_Y) },
 }, /* multi-touch touchscreen */
 {
  .flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
    INPUT_DEVICE_ID_MATCH_ABSBIT,
  .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
  .absbit = { [BIT_WORD(ABS_X)] =
       BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
 }, /* touchpad */
 { },
};

static struct input_handler cpufreq_interactive_input_handler = {
 .event          = cpufreq_interactive_input_event,
 .connect        = cpufreq_interactive_input_connect,
 .disconnect     = cpufreq_interactive_input_disconnect,
 .name           = "cpufreq_interactive",
 .id_table       = cpufreq_interactive_ids,
};

#endif /*CONFIG_INPUT_PULSE_SUPPORT*/
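
/*
 * Input-pulse flow: a SYN_REPORT from a matched touch device reaches
 * cpufreq_interactive_input_event(), which stamps boostpulse_endtime
 * (now + boostpulse_duration_val) and calls cpufreq_interactive_boost(),
 * ramping the CPU to hispeed_freq before the touch-induced load is even
 * visible in the sampled statistics; the normal min_sample_time machinery
 * then decides when the speed may drop again.
 */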
