Lines Matching refs:sample

235 struct cpu_sample *sample; in pid_put_sample() local
247 sample = zalloc(sizeof(*sample)); in pid_put_sample()
248 assert(sample != NULL); in pid_put_sample()
249 sample->start_time = start; in pid_put_sample()
250 sample->end_time = end; in pid_put_sample()
251 sample->type = type; in pid_put_sample()
252 sample->next = c->samples; in pid_put_sample()
253 sample->cpu = cpu; in pid_put_sample()
254 c->samples = sample; in pid_put_sample()
256 if (sample->type == TYPE_RUNNING && end > start && start > 0) { in pid_put_sample()
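The block above (lines 235-256) is from pid_put_sample() in tools/perf/builtin-timechart.c: it records one scheduling interval by allocating a cpu_sample and pushing it onto the head of the per-process list. A minimal standalone sketch of that push, with calloc() standing in for perf's zalloc() and a struct layout assumed from the fields used in the listing:

/* Sketch of the cpu_sample push seen at lines 247-254 above.
 * calloc() replaces perf's zalloc(); the struct layout and the
 * put_sample() helper are simplified assumptions. */
#include <assert.h>
#include <stdlib.h>

typedef unsigned long long u64;

enum sample_type { TYPE_NONE, TYPE_RUNNING, TYPE_WAITING, TYPE_BLOCKED };

struct cpu_sample {
	u64 start_time;            /* interval start, ns */
	u64 end_time;              /* interval end, ns */
	int type;                  /* TYPE_RUNNING, TYPE_BLOCKED, ... */
	int cpu;
	struct cpu_sample *next;   /* newest sample sits at the head */
};

static void put_sample(struct cpu_sample **head, int type, int cpu,
		       u64 start, u64 end)
{
	struct cpu_sample *sample = calloc(1, sizeof(*sample));

	assert(sample != NULL);
	sample->start_time = start;
	sample->end_time = end;
	sample->type = type;
	sample->cpu = cpu;
	sample->next = *head;      /* prepend: O(1) per sample */
	*head = sample;
}

int main(void)
{
	struct cpu_sample *samples = NULL;

	put_sample(&samples, TYPE_RUNNING, 0, 1000, 2000);
	put_sample(&samples, TYPE_BLOCKED, 0, 2000, 2500);
	/* the most recently added sample is now at the head */
	return samples->type == TYPE_BLOCKED ? 0 : 1;
}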
276 struct perf_sample *sample __maybe_unused, in process_comm_event()
285 struct perf_sample *sample __maybe_unused, in process_fork_event()
294 struct perf_sample *sample __maybe_unused, in process_exit_event()
467 struct perf_sample *sample);
471 struct perf_sample *sample, in process_sample_event() argument
476 if (!first_time || first_time > sample->time) in process_sample_event()
477 first_time = sample->time; in process_sample_event()
478 if (last_time < sample->time) in process_sample_event()
479 last_time = sample->time; in process_sample_event()
482 if (sample->cpu > numcpus) in process_sample_event()
483 numcpus = sample->cpu; in process_sample_event()
487 return f(evsel, sample); in process_sample_event()
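Lines 467-487 are the generic sample callback: process_sample_event() widens the recorded first_time/last_time window, tracks the highest CPU index seen, and only then forwards the sample to the tracepoint-specific handler f. A hedged sketch of that dispatch step; struct perf_sample is reduced to the fields touched here and the evsel type is left opaque:

/* Sketch of the bookkeeping-then-dispatch pattern of
 * process_sample_event() (lines 471-487). Types are trimmed-down
 * assumptions, not the real perf definitions. */
#include <stddef.h>

typedef unsigned long long u64;
typedef unsigned int u32;

struct perf_evsel;                     /* opaque in this sketch */

struct perf_sample {                   /* only the fields used below */
	u64 time;
	u32 cpu;
	u32 pid;
	void *raw_data;
};

typedef int (*tracepoint_handler)(struct perf_evsel *evsel,
				  struct perf_sample *sample);

static u64 first_time, last_time;
static u32 numcpus;

static int dispatch_sample(tracepoint_handler f,
			   struct perf_evsel *evsel,
			   struct perf_sample *sample)
{
	/* grow the recorded time window */
	if (!first_time || first_time > sample->time)
		first_time = sample->time;
	if (last_time < sample->time)
		last_time = sample->time;

	/* remember the highest CPU number seen so far */
	if (sample->cpu > numcpus)
		numcpus = sample->cpu;

	return f ? f(evsel, sample) : 0;
}

static int noop_handler(struct perf_evsel *evsel, struct perf_sample *sample)
{
	(void)evsel;
	(void)sample;
	return 0;
}

int main(void)
{
	struct perf_sample s = { .time = 42, .cpu = 3, .pid = 1, .raw_data = NULL };

	return dispatch_sample(noop_handler, NULL, &s);
}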
495 struct perf_sample *sample) in process_sample_cpu_idle() argument
497 struct power_processor_entry *ppe = sample->raw_data; in process_sample_cpu_idle()
500 c_state_end(ppe->cpu_id, sample->time); in process_sample_cpu_idle()
502 c_state_start(ppe->cpu_id, sample->time, ppe->state); in process_sample_cpu_idle()
508 struct perf_sample *sample) in process_sample_cpu_frequency() argument
510 struct power_processor_entry *ppe = sample->raw_data; in process_sample_cpu_frequency()
512 p_state_change(ppe->cpu_id, sample->time, ppe->state); in process_sample_cpu_frequency()
518 struct perf_sample *sample) in process_sample_sched_wakeup() argument
520 struct trace_entry *te = sample->raw_data; in process_sample_sched_wakeup()
522 sched_wakeup(sample->cpu, sample->time, sample->pid, te); in process_sample_sched_wakeup()
528 struct perf_sample *sample) in process_sample_sched_switch() argument
530 struct trace_entry *te = sample->raw_data; in process_sample_sched_switch()
532 sched_switch(sample->cpu, sample->time, te); in process_sample_sched_switch()
539 struct perf_sample *sample) in process_sample_power_start() argument
541 struct power_entry_old *peo = sample->raw_data; in process_sample_power_start()
543 c_state_start(peo->cpu_id, sample->time, peo->value); in process_sample_power_start()
549 struct perf_sample *sample) in process_sample_power_end() argument
551 c_state_end(sample->cpu, sample->time); in process_sample_power_end()
557 struct perf_sample *sample) in process_sample_power_frequency() argument
559 struct power_entry_old *peo = sample->raw_data; in process_sample_power_frequency()
561 p_state_change(peo->cpu_id, sample->time, peo->value); in process_sample_power_frequency()
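Lines 495-561 are the per-tracepoint handlers. Each one casts sample->raw_data to its payload struct (power_processor_entry for the power:cpu_idle and power:cpu_frequency events, power_entry_old or trace_entry for the legacy power and sched events) and hands the CPU id, timestamp, and state on to the c_state_*/p_state_change/sched_* bookkeeping. A sketch in the style of process_sample_cpu_idle(); the payload layout, the exit sentinel, and the print stubs are assumptions for illustration only:

/* Sketch of a raw_data handler modelled on process_sample_cpu_idle()
 * (lines 495-502). The payload layout, the PWR_EVENT_EXIT sentinel and
 * the stubbed c_state helpers are assumptions, not the real perf code. */
#include <stdio.h>

typedef unsigned long long u64;
typedef unsigned int u32;

struct sample_view {                 /* stand-in for struct perf_sample */
	u64 time;
	void *raw_data;
};

struct power_processor_entry {       /* assumed tracepoint payload */
	u32 state;
	u32 cpu_id;
};

#define PWR_EVENT_EXIT ((u32)-1)     /* assumed "leaving idle" marker */

static void c_state_start(u32 cpu, u64 t, u32 state)
{
	printf("cpu%u enters C-state %u at %llu\n", cpu, state, t);
}

static void c_state_end(u32 cpu, u64 t)
{
	printf("cpu%u leaves idle at %llu\n", cpu, t);
}

static int handle_cpu_idle(struct sample_view *sample)
{
	struct power_processor_entry *ppe = sample->raw_data;

	if (ppe->state == PWR_EVENT_EXIT)
		c_state_end(ppe->cpu_id, sample->time);
	else
		c_state_start(ppe->cpu_id, sample->time, ppe->state);
	return 0;
}

int main(void)
{
	struct power_processor_entry enter_idle = { .state = 2, .cpu_id = 1 };
	struct sample_view s = { .time = 123456789ULL, .raw_data = &enter_idle };

	return handle_cpu_idle(&s);
}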
757 struct cpu_sample *sample; in draw_cpu_usage() local
762 sample = c->samples; in draw_cpu_usage()
763 while (sample) { in draw_cpu_usage()
764 if (sample->type == TYPE_RUNNING) in draw_cpu_usage()
765 svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm); in draw_cpu_usage()
767 sample = sample->next; in draw_cpu_usage()
779 struct cpu_sample *sample; in draw_process_bars() local
795 sample = c->samples; in draw_process_bars()
796 while (sample) { in draw_process_bars()
797 if (sample->type == TYPE_RUNNING) in draw_process_bars()
798 svg_sample(Y, sample->cpu, sample->start_time, sample->end_time); in draw_process_bars()
799 if (sample->type == TYPE_BLOCKED) in draw_process_bars()
800 svg_box(Y, sample->start_time, sample->end_time, "blocked"); in draw_process_bars()
801 if (sample->type == TYPE_WAITING) in draw_process_bars()
802 svg_waiting(Y, sample->start_time, sample->end_time); in draw_process_bars()
803 sample = sample->next; in draw_process_bars()
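Lines 757-803 consume those per-process lists when the SVG is rendered: draw_cpu_usage() emits one svg_process() box per TYPE_RUNNING sample, while draw_process_bars() picks svg_sample(), svg_box("blocked"), or svg_waiting() by sample type. A sketch of the draw_process_bars() walk with the svg_* emitters stubbed out as prints:

/* Sketch of the list walk in draw_process_bars() (lines 795-803).
 * The svg_* emitters below are print stubs; in perf they write
 * SVG fragments for the timechart. */
#include <stdio.h>

typedef unsigned long long u64;

enum sample_type { TYPE_NONE, TYPE_RUNNING, TYPE_WAITING, TYPE_BLOCKED };

struct cpu_sample {
	u64 start_time, end_time;
	int type, cpu;
	struct cpu_sample *next;
};

static void svg_sample(int y, int cpu, u64 s, u64 e)
{
	printf("running  y=%d cpu=%d %llu..%llu\n", y, cpu, s, e);
}

static void svg_box(int y, u64 s, u64 e, const char *type)
{
	printf("%s  y=%d %llu..%llu\n", type, y, s, e);
}

static void svg_waiting(int y, u64 s, u64 e)
{
	printf("waiting  y=%d %llu..%llu\n", y, s, e);
}

static void draw_bars_for(int Y, struct cpu_sample *sample)
{
	while (sample) {
		if (sample->type == TYPE_RUNNING)
			svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
		if (sample->type == TYPE_BLOCKED)
			svg_box(Y, sample->start_time, sample->end_time, "blocked");
		if (sample->type == TYPE_WAITING)
			svg_waiting(Y, sample->start_time, sample->end_time);
		sample = sample->next;
	}
}

int main(void)
{
	struct cpu_sample blocked = { 2000, 2500, TYPE_BLOCKED, 0, NULL };
	struct cpu_sample running = { 1000, 2000, TYPE_RUNNING, 0, &blocked };

	draw_bars_for(0, &running);
	return 0;
}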
979 .sample = process_sample_event, in __cmd_timechart()
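Line 979 shows where the dispatcher is registered: __cmd_timechart() installs process_sample_event as the .sample callback of its perf tool structure, next to the comm/fork/exit handlers seen at lines 276-294. A sketch of that wiring against a simplified stand-in callback table (the real table is perf's struct perf_tool, whose full layout is not shown here):

/* Sketch of the callback-table wiring behind line 979. The struct
 * below is an assumed, reduced stand-in for perf's struct perf_tool,
 * and the handler body is an empty stub. */
struct sample_view;                          /* stand-in for struct perf_sample */

struct timechart_tool {                      /* assumed callback table */
	int (*sample)(struct sample_view *sample);
	int (*comm)(struct sample_view *sample);
	int (*fork)(struct sample_view *sample);
	int (*exit)(struct sample_view *sample);
};

static int process_sample_event(struct sample_view *sample)
{
	(void)sample;                        /* would dispatch to the handlers above */
	return 0;
}

static const struct timechart_tool timechart = {
	.sample = process_sample_event,      /* every sample event lands here */
};

int main(void)
{
	/* no real sample is available in this sketch, so pass a null view */
	return timechart.sample((struct sample_view *)0);
}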