#!/usr/bin/env python3

# requires https://github.com/bright-tools/ccsm

import os
import sys
import csv

folders = [
    'src',
    'src/ble',
    'src/ble/gatt-service',
    'src/classic',
]

metrics = {}
targets = {}

targets['PATH'] = 1000
targets['GOTO'] = 0
targets['CCN'] = 20
targets['CALLS'] = 12
targets['PARAM'] = 7
targets['STMT'] = 100
targets['LEVEL'] = 6
targets['RETURN'] = 1

excluded_functions = [
    # deprecated functions
    'src/l2cap.c:l2cap_le_register_service',
    'src/l2cap.c:l2cap_le_unregister_service',
    'src/l2cap.c:l2cap_le_accept_connection',
    'src/l2cap.c:l2cap_le_decline_connection',
    'src/l2cap.c:l2cap_le_provide_credits',
    'src/l2cap.c:l2cap_le_create_channel',
    'src/l2cap.c:l2cap_le_can_send_now',
    'src/l2cap.c:l2cap_le_request_can_send_now_event',
    'src/l2cap.c:l2cap_le_send_data',
    'src/l2cap.c:l2cap_le_disconnect',
    'src/l2cap.c:l2cap_cbm_can_send_now',
    'src/l2cap.c:l2cap_cbm_request_can_send_now_event'
]

# add value to the running total stored under 'name'
def metric_sum(name, value):
    global metrics
    old = 0
    if name in metrics:
        old = metrics[name]
    metrics[name] = old + value

# append item to the list stored under 'name'
def metric_list(name, item):
    global metrics
    value = []
    if name in metrics:
        value = metrics[name]
    value.append(item)
    metrics[name] = value

# keep the largest value seen under 'name'
def metric_max(name, value):
    global metrics
    if name in metrics:
        if metrics[name] > value:
            return
    metrics[name] = value

# update max, sum, and deviation count for one metric of one function
def metric_measure(metric_name, function_name, actual):
    metric_max(metric_name + '_MAX', actual)
    if metric_name in targets:
        metric_sum(metric_name + '_SUM', actual)
        if actual > targets[metric_name]:
            metric_sum(metric_name + '_DEVIATIONS', 1)
            # metric_list(metric_name + '_LIST', function_name + '(%u)' % actual)
            metric_list(metric_name + '_LIST', function_name)

def analyze_folders(btstack_root, folders):
    global excluded_functions

    # File,Name,"'goto' keyword count (raw source)","Return points","Statement count (raw source)(local)",
    # "Statement count (raw source)(cumulative)","Comment density","McCabe complexity (raw source)",
    # "Number of paths through the function","No. different functions called","Function Parameters",
    # "Nesting Level","VOCF","Number of functions which call this function",
    fields = ['file', 'function', 'GOTO', 'RETURN', '_', 'STMT', '_', 'CCN', 'PATH', 'CALLS', 'PARAM', 'LEVEL', '_', '_', '_']

    # init deviations
    for key in fields:
        metrics[key + '_DEVIATIONS'] = 0

    # for now, just read the file
    with open("metrics.tsv") as fd:
        rd = csv.reader(fd, delimiter="\t")
        for row in rd:
            file = ''
            function_metrics = {}
            for key, value in zip(fields, row):
                if key == 'file':
                    # get rid of directory traversal on buildbot
                    pos_metrics_folder = value.find('tool/metrics/')
                    if pos_metrics_folder > 0:
                        value = value[pos_metrics_folder + len('tool/metrics/'):]
                    # streamline path
                    file = value.replace('../../', '')
                    continue
                if key == 'function':
                    function_name = value
                    continue
                if key == '_':
                    continue
                function_metrics[key] = value
            # skip functions defined in headers
            if file.endswith('.h'):
                continue
            qualified_function_name = file + ':' + function_name
            # excluded functions
            if qualified_function_name in excluded_functions:
                continue
            metric_sum('FUNC', 1)
            for key, value in function_metrics.items():
                metric_measure(key, qualified_function_name, int(value))

def analyze(folders):
    # print("\nAnalyzing:")
    # for path in folders:
    #     print('- %s' % path)
    #     analyze_folder(btstack_root + "/" + path)
    btstack_root = os.path.abspath(os.path.dirname(sys.argv[0]) + '/../..')
    analyze_folders(btstack_root, folders)

def list_targets():
    print("Targets:")
    for key, value in sorted(targets.items()):
        print('- %-20s: %u' % (key, value))

def list_metrics():
    print("\nResult:")
    num_funcs = metrics['FUNC']
    for key, value in sorted(metrics.items()):
        if key.endswith('LIST'):
            continue
        if key.endswith('_SUM'):
            average = 1.0 * value / num_funcs
            metric = key.replace('_SUM', '_AVERAGE')
            print('- %-20s: %4.3f' % (metric, average))
        else:
            print('- %-20s: %5u' % (key, value))

def list_metrics_table():
    row = "%-11s |%11s |%11s |%11s"

    print(row % ('Name', 'Target', 'Deviations', 'Max value'))
    print("------------|------------|------------|------------")

    ordered_metrics = ['PATH', 'GOTO', 'CCN', 'CALLS', 'PARAM', 'STMT', 'LEVEL', 'RETURN', 'FUNC']
    for metric_name in ordered_metrics:
        if metric_name in targets:
            target = targets[metric_name]
            deviations = metrics[metric_name + '_DEVIATIONS']
            max_value = metrics[metric_name + '_MAX']
            print(row % (metric_name, target, deviations, max_value))
        else:
            max_value = metrics[metric_name]
            print(row % (metric_name, '', '', max_value))

def list_deviations():
    global metrics
    for key, value in sorted(metrics.items()):
        if not key.endswith('LIST'):
            continue
        print("\n%s" % key)
        print('\n'.join(value))

def main(argv):
    analyze(folders)
    list_metrics_table()
    # list_targets()
    # list_metrics()
    # list_deviations()

if __name__ == "__main__":
    main(sys.argv[1:])