 * Zhaoxin PerfMon, used on Lujiazui and later.
 */
2424static u64 zx_pmon_event_map [PERF_COUNT_HW_MAX ] __read_mostly = {
25- [PERF_COUNT_HW_CPU_CYCLES ] = 0x0082 ,
26- [PERF_COUNT_HW_INSTRUCTIONS ] = 0x00c0 ,
27- [PERF_COUNT_HW_BUS_CYCLES ] = 0x0083 ,
28- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS ] = 0x0028 ,
29- [PERF_COUNT_HW_BRANCH_MISSES ] = 0x0029 ,
25+ [PERF_COUNT_HW_CPU_CYCLES ] = 0x0082 ,
26+ [PERF_COUNT_HW_INSTRUCTIONS ] = 0x00c0 ,
27+ [PERF_COUNT_HW_BUS_CYCLES ] = 0x0083 ,
28+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS ] = 0x0028 ,
29+ [PERF_COUNT_HW_BRANCH_MISSES ] = 0x0029 ,
3030};
3131
3232static struct event_constraint zxc_event_constraints [] __read_mostly = {
33-
3433 FIXED_EVENT_CONSTRAINT (0x0082 , 1 ), /* unhalted core clock cycles */
3534 EVENT_CONSTRAINT_END
3635};
3736
3837static struct event_constraint wudaokou_event_constraints [] __read_mostly = {
39-
4038 FIXED_EVENT_CONSTRAINT (0x00c0 , 0 ), /* retired instructions */
4139 FIXED_EVENT_CONSTRAINT (0x0082 , 1 ), /* unhalted core clock cycles */
4240 FIXED_EVENT_CONSTRAINT (0x0083 , 2 ), /* unhalted bus clock cycles */
@@ -445,9 +443,8 @@ static u64 zhaoxin_pmu_event_map(int hw_event)
445443 return zx_pmon_event_map [hw_event ];
446444}
447445
448- static struct event_constraint *
449- zhaoxin_get_event_constraints (struct cpu_hw_events * cpuc , int idx ,
450- struct perf_event * event )
446+ static struct event_constraint * zhaoxin_get_event_constraints (struct cpu_hw_events * cpuc , int idx ,
447+ struct perf_event * event )
451448{
452449 struct event_constraint * c ;
453450
@@ -461,11 +458,11 @@ zhaoxin_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
461458 return & unconstrained ;
462459}
463460
464- PMU_FORMAT_ATTR (event , "config:0-7" );
465- PMU_FORMAT_ATTR (umask , "config:8-15" );
466- PMU_FORMAT_ATTR (edge , "config:18" );
467- PMU_FORMAT_ATTR (inv , "config:23" );
468- PMU_FORMAT_ATTR (cmask , "config:24-31" );
461+ PMU_FORMAT_ATTR (event , "config:0-7" );
462+ PMU_FORMAT_ATTR (umask , "config:8-15" );
463+ PMU_FORMAT_ATTR (edge , "config:18" );
464+ PMU_FORMAT_ATTR (inv , "config:23" );
465+ PMU_FORMAT_ATTR (cmask , "config:24-31" );
469466
470467static struct attribute * zx_arch_formats_attr [] = {
471468 & format_attr_event .attr ,
@@ -497,32 +494,34 @@ static struct perf_guest_switch_msr *zhaoxin_guest_get_msrs(int *nr, void *data)
497494}
498495
499496static const struct x86_pmu zhaoxin_pmu __initconst = {
500- .name = "zhaoxin" ,
501- .handle_irq = zhaoxin_pmu_handle_irq ,
502- .disable_all = zhaoxin_pmu_disable_all ,
503- .enable_all = zhaoxin_pmu_enable_all ,
504- .enable = zhaoxin_pmu_enable_event ,
505- .disable = zhaoxin_pmu_disable_event ,
506- .hw_config = x86_pmu_hw_config ,
507- .schedule_events = x86_schedule_events ,
508- .eventsel = MSR_ARCH_PERFMON_EVENTSEL0 ,
509- .perfctr = MSR_ARCH_PERFMON_PERFCTR0 ,
510- .event_map = zhaoxin_pmu_event_map ,
511- .max_events = ARRAY_SIZE (zx_pmon_event_map ),
512- .apic = 1 ,
497+ .name = "zhaoxin" ,
498+ .handle_irq = zhaoxin_pmu_handle_irq ,
499+ .disable_all = zhaoxin_pmu_disable_all ,
500+ .enable_all = zhaoxin_pmu_enable_all ,
501+ .enable = zhaoxin_pmu_enable_event ,
502+ .disable = zhaoxin_pmu_disable_event ,
503+ .hw_config = x86_pmu_hw_config ,
504+ .schedule_events = x86_schedule_events ,
505+ .eventsel = MSR_ARCH_PERFMON_EVENTSEL0 ,
506+ .perfctr = MSR_ARCH_PERFMON_PERFCTR0 ,
507+ .event_map = zhaoxin_pmu_event_map ,
508+ .max_events = ARRAY_SIZE (zx_pmon_event_map ),
509+ .apic = 1 ,
513510 /*
514511 * For wudaokou/lujiazui, read/write operation for PMCx MSR is 48 bits.
515512 */
516- .max_period = (1ULL << 47 ) - 1 ,
517- .get_event_constraints = zhaoxin_get_event_constraints ,
513+ .max_period = (1ULL << 47 ) - 1 ,
514+ .get_event_constraints = zhaoxin_get_event_constraints ,
518515
519- .format_attrs = zx_arch_formats_attr ,
520- .events_sysfs_show = zhaoxin_event_sysfs_show ,
521-
522- .guest_get_msrs = zhaoxin_guest_get_msrs ,
516+ .format_attrs = zx_arch_formats_attr ,
517+ .events_sysfs_show = zhaoxin_event_sysfs_show ,
518+ .guest_get_msrs = zhaoxin_guest_get_msrs ,
523519};
524520
525- static const struct { int id ; char * name ; } zx_arch_events_map [] __initconst = {
521+ static struct {
522+ int id ;
523+ char * name ;
524+ } const zx_arch_events_map [] __initconst = {
526525 { PERF_COUNT_HW_CPU_CYCLES , "cpu cycles" },
527526 { PERF_COUNT_HW_INSTRUCTIONS , "instructions" },
528527 { PERF_COUNT_HW_BUS_CYCLES , "bus cycles" },
@@ -539,8 +538,7 @@ static __init void zhaoxin_arch_events_quirk(void)
539538 /* disable event that reported as not present by cpuid */
540539 for_each_set_bit (bit , x86_pmu .events_mask , ARRAY_SIZE (zx_arch_events_map )) {
541540 zx_pmon_event_map [zx_arch_events_map [bit ].id ] = 0 ;
542- pr_warn ("CPUID marked event: \'%s\' unavailable\n" ,
543- zx_arch_events_map [bit ].name );
541+ pr_warn ("CPUID marked event: \'%s\' unavailable\n" , zx_arch_events_map [bit ].name );
544542 }
545543}
546544
@@ -571,12 +569,12 @@ __init int zhaoxin_pmu_init(void)
571569 x86_pmu = zhaoxin_pmu ;
572570 pr_info ("Version check pass!\n" );
573571
574- x86_pmu .version = version ;
575- x86_pmu .num_counters = eax .split .num_counters ;
576- x86_pmu .cntval_bits = eax .split .bit_width ;
577- x86_pmu .cntval_mask = (1ULL << eax .split .bit_width ) - 1 ;
578- x86_pmu .events_maskl = ebx .full ;
579- x86_pmu .events_mask_len = eax .split .mask_length ;
572+ x86_pmu .version = version ;
573+ x86_pmu .num_counters = eax .split .num_counters ;
574+ x86_pmu .cntval_bits = eax .split .bit_width ;
575+ x86_pmu .cntval_mask = (1ULL << eax .split .bit_width ) - 1 ;
576+ x86_pmu .events_maskl = ebx .full ;
577+ x86_pmu .events_mask_len = eax .split .mask_length ;
580578
581579 x86_pmu .num_counters_fixed = edx .split .num_counters_fixed ;
582580 x86_add_quirk (zhaoxin_arch_events_quirk );
@@ -589,8 +587,7 @@ __init int zhaoxin_pmu_init(void)
589587 * ZXC FMS: Family=6, Model=F, Stepping=E-F OR Family=6, Model=0x19, Stepping=0-3
590588 */
591589 if ((boot_cpu_data .x86_model == 0x0f && boot_cpu_data .x86_stepping >= 0x0e ) ||
592- boot_cpu_data .x86_model == 0x19 ) {
593-
590+ boot_cpu_data .x86_model == 0x19 ) {
594591 x86_pmu .max_period = x86_pmu .cntval_mask >> 1 ;
595592
596593 /* Clearing status works only if the global control is enable on zxc. */
@@ -644,6 +641,7 @@ __init int zhaoxin_pmu_init(void)
644641 break ;
645642 case 0x5b :
646643 case 0x6b :
644+ case 0x7b :
647645 zx_pmon_event_map [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND ] =
648646 X86_CONFIG (.event = 0x02 , .umask = 0x01 , .inv = 0x01 ,
649647 .cmask = 0x01 );
@@ -659,7 +657,7 @@ __init int zhaoxin_pmu_init(void)
659657 if (boot_cpu_data .x86_model == 0x5b )
660658 pr_cont ("Yongfeng events, " );
661659
662- if (boot_cpu_data .x86_model == 0x6b )
660+ if (boot_cpu_data .x86_model == 0x6b || boot_cpu_data . x86_model == 0x7b )
663661 pr_cont ("Shijidadao events, " );
664662
665663 break ;
@@ -673,7 +671,7 @@ __init int zhaoxin_pmu_init(void)
673671 }
674672
675673 x86_pmu .intel_ctrl = (1 << (x86_pmu .num_counters )) - 1 ;
676- x86_pmu .intel_ctrl |= ((1LL << x86_pmu .num_counters_fixed )- 1 ) << INTEL_PMC_IDX_FIXED ;
674+ x86_pmu .intel_ctrl |= ((1LL << x86_pmu .num_counters_fixed ) - 1 ) << INTEL_PMC_IDX_FIXED ;
677675
678676 if (x86_pmu .event_constraints ) {
679677 for_each_event_constraint (c , x86_pmu .event_constraints ) {
0 commit comments