diff --git a/src/apic.c b/src/apic.c index 6c3c439..8ed183b 100644 --- a/src/apic.c +++ b/src/apic.c @@ -83,7 +83,7 @@ bool bind_to_cpu(int cpu_id) { #endif } -bool fill_topo_masks_apic(struct topology** topo) { +bool fill_topo_masks_apic(struct topology* topo) { uint32_t eax = 0x00000001; uint32_t ebx = 0; uint32_t ecx = 0; @@ -103,16 +103,16 @@ bool fill_topo_masks_apic(struct topology** topo) { core_id_max_cnt = (eax >> 26) + 1; smt_id_per_core_max_cnt = core_plus_smt_id_max_cnt / core_id_max_cnt; - (*topo)->apic->smt_mask = create_mask(smt_id_per_core_max_cnt, &((*topo)->apic->smt_mask_width)); - (*topo)->apic->core_mask = create_mask(core_id_max_cnt,&((*topo)->apic->pkg_mask_shift)); - (*topo)->apic->pkg_mask_shift += (*topo)->apic->smt_mask_width; - (*topo)->apic->core_mask <<= (*topo)->apic->smt_mask_width; - (*topo)->apic->pkg_mask = (-1) ^ ((*topo)->apic->core_mask | (*topo)->apic->smt_mask); + topo->apic->smt_mask = create_mask(smt_id_per_core_max_cnt, &(topo->apic->smt_mask_width)); + topo->apic->core_mask = create_mask(core_id_max_cnt,&(topo->apic->pkg_mask_shift)); + topo->apic->pkg_mask_shift += topo->apic->smt_mask_width; + topo->apic->core_mask <<= topo->apic->smt_mask_width; + topo->apic->pkg_mask = (-1) ^ (topo->apic->core_mask | topo->apic->smt_mask); return true; } -bool fill_topo_masks_x2apic(struct topology** topo) { +bool fill_topo_masks_x2apic(struct topology* topo) { int32_t level_type; int32_t level_shift; @@ -137,15 +137,15 @@ bool fill_topo_masks_x2apic(struct topology** topo) { switch(level_type) { case 1: // SMT - (*topo)->apic->smt_mask = ~(0xFFFFFFFF << level_shift); - (*topo)->apic->smt_mask_width = level_shift; - (*topo)->smt_supported = ebx & 0xFFFF; + topo->apic->smt_mask = ~(0xFFFFFFFF << level_shift); + topo->apic->smt_mask_width = level_shift; + topo->smt_supported = ebx & 0xFFFF; level1 = true; break; case 2: // Core coreplus_smt_mask = ~(0xFFFFFFFF << level_shift); - (*topo)->apic->pkg_mask_shift = level_shift; - 
(*topo)->apic->pkg_mask = (-1) ^ coreplus_smt_mask; + topo->apic->pkg_mask_shift = level_shift; + topo->apic->pkg_mask = (-1) ^ coreplus_smt_mask; level2 = true; break; default: @@ -157,12 +157,12 @@ bool fill_topo_masks_x2apic(struct topology** topo) { } if (level1 && level2) { - (*topo)->apic->core_mask = coreplus_smt_mask ^ (*topo)->apic->smt_mask; + topo->apic->core_mask = coreplus_smt_mask ^ topo->apic->smt_mask; } else if (!level2 && level1) { - (*topo)->apic->core_mask = 0; - (*topo)->apic->pkg_mask_shift = (*topo)->apic->smt_mask_width; - (*topo)->apic->pkg_mask = (-1) ^ (*topo)->apic->smt_mask; + topo->apic->core_mask = 0; + topo->apic->pkg_mask_shift = topo->apic->smt_mask_width; + topo->apic->pkg_mask = (-1) ^ topo->apic->smt_mask; } else { printErr("SMT level was not found when querying topology"); @@ -172,28 +172,23 @@ bool fill_topo_masks_x2apic(struct topology** topo) { return true; } -bool arr_contains_value(uint32_t* arr, uint32_t value, uint32_t arr_size) { - for(uint32_t i=0; i < arr_size; i++) { - if(arr[i] == value) return true; - } - return false; -} - -uint32_t max_apic_id_size(uint32_t** cache_id_apic, struct topology** topo) { +// Not a very elegant solution. 
The width should always be as long +// as the number of cores, but in the case of Xeon Phi KNL it is not +uint32_t max_apic_id_size(uint32_t** cache_id_apic, struct topology* topo) { uint32_t max = 0; - for(int i=0; i < (*topo)->cach->max_cache_level; i++) { - for(int j=0; j < (*topo)->total_cores; j++) { + for(int i=0; i < topo->cach->max_cache_level; i++) { + for(int j=0; j < topo->total_cores; j++) { if(cache_id_apic[j][i] > max) max = cache_id_apic[j][i]; } } max++; - if(max > (*topo)->total_cores) return max; - return (*topo)->total_cores; + if(max > topo->total_cores) return max; + return topo->total_cores; } -bool build_topo_from_apic(uint32_t* apic_pkg, uint32_t* apic_smt, uint32_t** cache_id_apic, struct topology** topo) { +bool build_topo_from_apic(uint32_t* apic_pkg, uint32_t* apic_smt, uint32_t** cache_id_apic, struct topology* topo) { uint32_t size = max_apic_id_size(cache_id_apic, topo); uint32_t* sockets = malloc(sizeof(uint32_t) * size); uint32_t* smt = malloc(sizeof(uint32_t) * size); @@ -204,32 +199,34 @@ bool build_topo_from_apic(uint32_t* apic_pkg, uint32_t* apic_smt, uint32_t** cac memset(smt, 0, sizeof(uint32_t) * size); memset(apic_id, 0, sizeof(uint32_t) * size); - for(int i=0; i < (*topo)->total_cores; i++) { + // System topology + for(int i=0; i < topo->total_cores; i++) { sockets[apic_pkg[i]] = 1; smt[apic_smt[i]] = 1; } - for(int i=0; i < (*topo)->total_cores; i++) { + for(int i=0; i < topo->total_cores; i++) { if(sockets[i] != 0) - (*topo)->sockets++; + topo->sockets++; if(smt[i] != 0) - (*topo)->smt_available++; + topo->smt_available++; } - (*topo)->logical_cores = (*topo)->total_cores / (*topo)->sockets; - (*topo)->physical_cores = (*topo)->logical_cores / (*topo)->smt_available; + topo->logical_cores = topo->total_cores / topo->sockets; + topo->physical_cores = topo->logical_cores / topo->smt_available; - for(int i=0; i < (*topo)->cach->max_cache_level; i++) { + // Cache topology + for(int i=0; i < topo->cach->max_cache_level; i++) { 
num_caches = 0; memset(apic_id, 0, sizeof(uint32_t) * size); - for(int c=0; c < (*topo)->total_cores; c++) { + for(int c=0; c < topo->total_cores; c++) { apic_id[cache_id_apic[c][i]]++; } for(uint32_t c=0; c < size; c++) { if(apic_id[c] > 0) num_caches++; } - (*topo)->cach->cach_arr[i]->num_caches = num_caches; + topo->cach->cach_arr[i]->num_caches = num_caches; } free(sockets); @@ -239,41 +236,39 @@ bool build_topo_from_apic(uint32_t* apic_pkg, uint32_t* apic_smt, uint32_t** cac return true; } -bool get_cache_topology_from_apic(struct topology** topo) { +void get_cache_topology_from_apic(struct topology* topo) { uint32_t eax = 0x00000004; uint32_t ebx = 0; uint32_t ecx = 0; uint32_t edx = 0; - for(int i=0; i < (*topo)->cach->max_cache_level; i++) { + for(int i=0; i < topo->cach->max_cache_level; i++) { eax = 0x00000004; ecx = i; cpuid(&eax, &ebx, &ecx, &edx); uint32_t SMTMaxCntPerEachCache = ((eax >> 14) & 0x7FF) + 1; - uint32_t EachCacheMaskWidth_targ_subleaf; - (*topo)->apic->cache_select_mask[i] = create_mask(SMTMaxCntPerEachCache,&EachCacheMaskWidth_targ_subleaf); + uint32_t dummy; + topo->apic->cache_select_mask[i] = create_mask(SMTMaxCntPerEachCache,&dummy); } - - return true; } -bool get_topology_from_apic(struct cpuInfo* cpu, struct topology** topo) { +bool get_topology_from_apic(struct cpuInfo* cpu, struct topology* topo) { uint32_t apic_id; - uint32_t* apic_pkg = malloc(sizeof(uint32_t) * (*topo)->total_cores); - uint32_t* apic_core = malloc(sizeof(uint32_t) * (*topo)->total_cores); - uint32_t* apic_smt = malloc(sizeof(uint32_t) * (*topo)->total_cores); - uint32_t** cache_smt_id_apic = malloc(sizeof(uint32_t*) * (*topo)->total_cores); - uint32_t** cache_id_apic = malloc(sizeof(uint32_t*) * (*topo)->total_cores); + uint32_t* apic_pkg = malloc(sizeof(uint32_t) * topo->total_cores); + uint32_t* apic_core = malloc(sizeof(uint32_t) * topo->total_cores); + uint32_t* apic_smt = malloc(sizeof(uint32_t) * topo->total_cores); + uint32_t** cache_smt_id_apic = 
malloc(sizeof(uint32_t*) * topo->total_cores); + uint32_t** cache_id_apic = malloc(sizeof(uint32_t*) * topo->total_cores); bool x2apic_id = cpu->maxLevels >= 0x0000000B; - for(int i=0; i < (*topo)->total_cores; i++) { - cache_smt_id_apic[i] = malloc(sizeof(uint32_t) * ((*topo)->cach->max_cache_level)); - cache_id_apic[i] = malloc(sizeof(uint32_t) * ((*topo)->cach->max_cache_level)); + for(int i=0; i < topo->total_cores; i++) { + cache_smt_id_apic[i] = malloc(sizeof(uint32_t) * (topo->cach->max_cache_level)); + cache_id_apic[i] = malloc(sizeof(uint32_t) * (topo->cach->max_cache_level)); } - (*topo)->apic->cache_select_mask = malloc(sizeof(uint32_t) * ((*topo)->cach->max_cache_level)); - (*topo)->apic->cache_id_apic = malloc(sizeof(uint32_t) * ((*topo)->cach->max_cache_level)); + topo->apic->cache_select_mask = malloc(sizeof(uint32_t) * (topo->cach->max_cache_level)); + topo->apic->cache_id_apic = malloc(sizeof(uint32_t) * (topo->cach->max_cache_level)); if(x2apic_id) { if(!fill_topo_masks_x2apic(topo)) @@ -286,47 +281,58 @@ bool get_topology_from_apic(struct cpuInfo* cpu, struct topology** topo) { get_cache_topology_from_apic(topo); - for(int i=0; i < (*topo)->total_cores; i++) { + for(int i=0; i < topo->total_cores; i++) { if(!bind_to_cpu(i)) { printErr("Failed binding to CPU %d", i); return false; } apic_id = get_apic_id(x2apic_id); - apic_pkg[i] = (apic_id & (*topo)->apic->pkg_mask) >> (*topo)->apic->pkg_mask_shift; - apic_core[i] = (apic_id & (*topo)->apic->core_mask) >> (*topo)->apic->smt_mask_width; - apic_smt[i] = apic_id & (*topo)->apic->smt_mask; + apic_pkg[i] = (apic_id & topo->apic->pkg_mask) >> topo->apic->pkg_mask_shift; + apic_core[i] = (apic_id & topo->apic->core_mask) >> topo->apic->smt_mask_width; + apic_smt[i] = apic_id & topo->apic->smt_mask; - for(int c=0; c < (*topo)->cach->max_cache_level; c++) { - cache_smt_id_apic[i][c] = apic_id & (*topo)->apic->cache_select_mask[c]; - cache_id_apic[i][c] = apic_id & (-1 ^ 
(*topo)->apic->cache_select_mask[c]); + for(int c=0; c < topo->cach->max_cache_level; c++) { + cache_smt_id_apic[i][c] = apic_id & topo->apic->cache_select_mask[c]; + cache_id_apic[i][c] = apic_id & (-1 ^ topo->apic->cache_select_mask[c]); } } /* DEBUG - for(int i=0; i < (*topo)->cach->max_cache_level; i++) { + for(int i=0; i < topo->cach->max_cache_level; i++) { printf("[CACH %1d]", i); - for(int j=0; j < (*topo)->total_cores; j++) + for(int j=0; j < topo->total_cores; j++) printf("[%03d]", cache_id_apic[j][i]); printf("\n"); } - for(int i=0; i < (*topo)->total_cores; i++) + for(int i=0; i < topo->total_cores; i++) printf("[%2d] 0x%.8X\n", i, apic_pkg[i]); printf("\n"); - for(int i=0; i < (*topo)->total_cores; i++) + for(int i=0; i < topo->total_cores; i++) printf("[%2d] 0x%.8X\n", i, apic_core[i]); printf("\n"); - for(int i=0; i < (*topo)->total_cores; i++) + for(int i=0; i < topo->total_cores; i++) printf("[%2d] 0x%.8X\n", i, apic_smt[i]);*/ bool ret = build_topo_from_apic(apic_pkg, apic_smt, cache_id_apic, topo); // Assumption: If we cant get smt_available, we assume it is equal to smt_supported... - if (!x2apic_id) (*topo)->smt_supported = (*topo)->smt_available; - - //TODO: free + if (!x2apic_id) { + printWarn("Can't read SMT from cpuid (needed level is 0x%.8X, max is 0x%.8X)", 0x0000000B, cpu->maxLevels); + topo->smt_supported = topo->smt_available; + } + free(apic_pkg); + free(apic_core); + free(apic_smt); + for(int i=0; i < topo->total_cores; i++) { + free(cache_smt_id_apic[i]); + free(cache_id_apic[i]); + } + free(cache_smt_id_apic); + free(cache_id_apic); + return ret; } @@ -339,7 +345,7 @@ uint32_t is_smt_enabled_amd(struct topology* topo) { return false; } id = get_apic_id(false) & 1; // get the last bit - if(id == 1) return 2; // We assume there isn't any AMD CPU with more than 2th per core. TODO: Fix + if(id == 1) return 2; // We assume there isn't any AMD CPU with more than 2 threads per core. 
} return 1; diff --git a/src/apic.h b/src/apic.h index 201373b..7e2b907 100644 --- a/src/apic.h +++ b/src/apic.h @@ -14,7 +14,7 @@ struct apic { uint32_t* cache_id_apic; }; -bool get_topology_from_apic(struct cpuInfo* cpu, struct topology** topo); +bool get_topology_from_apic(struct cpuInfo* cpu, struct topology* topo); uint32_t is_smt_enabled_amd(struct topology* topo); #endif diff --git a/src/cpuid.c b/src/cpuid.c index 2264e35..cbd71f6 100644 --- a/src/cpuid.c +++ b/src/cpuid.c @@ -58,15 +58,46 @@ void init_cpu_info(struct cpuInfo* cpu) { cpu->AES = false; cpu->SHA = false; } - -void init_topology_struct(struct topology** topo) { +/* +void init_topology_struct(struct topology* topo, struct cache* cach) { (*topo)->total_cores = 0; (*topo)->physical_cores = 0; (*topo)->logical_cores = 0; (*topo)->smt_available = 0; (*topo)->smt_supported = 0; (*topo)->sockets = 0; - // TODO: The other fields... + (*topo)->apic = malloc(sizeof(struct apic)); + (*topo)->cach = cach; +}*/ + +void init_topology_struct(struct topology* topo, struct cache* cach) { + topo->total_cores = 0; + topo->physical_cores = 0; + topo->logical_cores = 0; + topo->smt_available = 0; + topo->smt_supported = 0; + topo->sockets = 0; + topo->apic = malloc(sizeof(struct apic)); + topo->cach = cach; +} + +void init_cache_struct(struct cache* cach) { + cach->L1i = malloc(sizeof(struct cach)); + cach->L1d = malloc(sizeof(struct cach)); + cach->L2 = malloc(sizeof(struct cach)); + cach->L3 = malloc(sizeof(struct cach)); + + cach->cach_arr = malloc(sizeof(struct cach*) * 4); + cach->cach_arr[0] = cach->L1i; + cach->cach_arr[1] = cach->L1d; + cach->cach_arr[2] = cach->L2; + cach->cach_arr[3] = cach->L3; + + cach->max_cache_level = 0; + cach->L1i->exists = false; + cach->L1d->exists = false; + cach->L2->exists = false; + cach->L3->exists = false; } void get_cpu_vendor_internal(char* name, uint32_t ebx,uint32_t ecx,uint32_t edx) { @@ -245,18 +276,18 @@ uint8_t get_number_llc_amd(struct topology* topo) { return 
topo->logical_cores / num_sharing_cache; } -void get_cache_topology(struct cpuInfo* cpu, struct topology** topo) { - (*topo)->cach->L1i->num_caches = (*topo)->physical_cores; - (*topo)->cach->L1d->num_caches = (*topo)->physical_cores; - (*topo)->cach->L2->num_caches = (*topo)->physical_cores; +void guess_cache_topology_amd(struct cpuInfo* cpu, struct topology* topo) { + topo->cach->L1i->num_caches = topo->physical_cores; + topo->cach->L1d->num_caches = topo->physical_cores; + topo->cach->L2->num_caches = topo->physical_cores; - if((*topo)->cach->L3->size != UNKNOWN) { + if(topo->cach->L3->exists) { if(cpu->maxExtendedLevels >= 0x8000001D) { - (*topo)->cach->L3->num_caches = get_number_llc_amd(*topo); + topo->cach->L3->num_caches = get_number_llc_amd(topo); } else { printWarn("Can't read topology information from cpuid (needed extended level is 0x%.8X, max is 0x%.8X)", 0x8000001D, cpu->maxExtendedLevels); - (*topo)->cach->L3->num_caches = 1; + topo->cach->L3->num_caches = 1; } } } @@ -264,10 +295,8 @@ void get_cache_topology(struct cpuInfo* cpu, struct topology** topo) { // Main reference: https://software.intel.com/content/www/us/en/develop/articles/intel-64-architecture-processor-topology-enumeration.html // Very interesting resource: https://wiki.osdev.org/Detecting_CPU_Topology_(80x86) struct topology* get_topology_info(struct cpuInfo* cpu, struct cache* cach) { - struct topology* topo = malloc(sizeof(struct topology)); - topo->apic = malloc(sizeof(struct apic)); - topo->cach = cach; - init_topology_struct(&topo); + struct topology* topo = malloc(sizeof(struct topology)); + init_topology_struct(topo, cach); uint32_t eax = 0; uint32_t ebx = 0; @@ -292,7 +321,7 @@ struct topology* get_topology_info(struct cpuInfo* cpu, struct cache* cach) { switch(cpu->cpu_vendor) { case VENDOR_INTEL: if (cpu->maxLevels >= 0x00000004) { - get_topology_from_apic(cpu, &topo); + get_topology_from_apic(cpu, topo); } else { printErr("Can't read topology information from cpuid (needed 
level is 0x%.8X, max is 0x%.8X)", 0x00000001, cpu->maxLevels); @@ -339,7 +368,7 @@ struct topology* get_topology_info(struct cpuInfo* cpu, struct cache* cach) { else topo->sockets = topo->total_cores / topo->physical_cores; - get_cache_topology(cpu, &topo); + guess_cache_topology_amd(cpu, topo); break; @@ -353,16 +382,7 @@ struct topology* get_topology_info(struct cpuInfo* cpu, struct cache* cach) { struct cache* get_cache_info(struct cpuInfo* cpu) { struct cache* cach = malloc(sizeof(struct cache)); - cach->L1i = malloc(sizeof(struct cach)); - cach->L1d = malloc(sizeof(struct cach)); - cach->L2 = malloc(sizeof(struct cach)); - cach->L3 = malloc(sizeof(struct cach)); - cach->cach_arr = malloc(sizeof(struct cach*) * 4); - cach->cach_arr[0] = cach->L1i; - cach->cach_arr[1] = cach->L1d; - cach->cach_arr[2] = cach->L2; - cach->cach_arr[3] = cach->L3; - cach->max_cache_level = 0; + init_cache_struct(cach); uint32_t eax = 0; uint32_t ebx = 0; @@ -387,8 +407,9 @@ struct cache* get_cache_info(struct cpuInfo* cpu) { } } - // We suppose there are 4 caches (at most) - for(int i=0; i < 4; i++) { + int i=0; + int32_t cache_type; + do { eax = level; // get cache info ebx = 0; ecx = i; // cache id @@ -396,10 +417,10 @@ struct cache* get_cache_info(struct cpuInfo* cpu) { cpuid(&eax, &ebx, &ecx, &edx); - int32_t cache_type = eax & 0x1F; + cache_type = eax & 0x1F; // If its 0, we tried fetching a non existing cache - if (cache_type > 0) { // TODO: Change to while not == 0 + if (cache_type > 0) { int32_t cache_level = (eax >>= 5) & 0x7; uint32_t cache_sets = ecx + 1; uint32_t cache_coherency_line_size = (ebx & 0xFFF) + 1; @@ -415,7 +436,8 @@ struct cache* get_cache_info(struct cpuInfo* cpu) { printBug("Found data cache at level %d (expected 1)", cache_level); return NULL; } - cach->L1d->size = cache_total_size; + cach->L1d->size = cache_total_size; + cach->L1d->exists = true; break; case 2: // Instruction Cache (We assume this is L1i) @@ -424,11 +446,18 @@ struct cache* 
get_cache_info(struct cpuInfo* cpu) { return NULL; } cach->L1i->size = cache_total_size; + cach->L1i->exists = true; break; case 3: // Unified Cache (This may be L2 or L3) - if(cache_level == 2) cach->L2->size = cache_total_size; - else if(cache_level == 3) cach->L3->size = cache_total_size; + if(cache_level == 2) { + cach->L2->size = cache_total_size; + cach->L2->exists = true; + } + else if(cache_level == 3) { + cach->L3->size = cache_total_size; + cach->L3->exists = true; + } else { printBug("Found unified cache at level %d (expected == 2 or 3)", cache_level); return NULL; @@ -440,17 +469,9 @@ struct cache* get_cache_info(struct cpuInfo* cpu) { return NULL; } } - else if(i == 2) { - cach->L2->size = UNKNOWN; - } - else if(i == 3) { - cach->L3->size = UNKNOWN; - } - else { - printBug("Could not find cache ID %d", i); - return NULL; - } - } + + i++; + } while (cache_type > 0); // Sanity checks. If we read values greater than this, they can't be valid ones // The values were chosen by me @@ -462,8 +483,8 @@ struct cache* get_cache_info(struct cpuInfo* cpu) { printBug("Invalid L1d size: %dKB", cach->L1d->size/1024); return NULL; } - if(cach->L2->size != UNKNOWN) { - if(cach->L3->size != UNKNOWN && cach->L2->size > 2 * 1048576) { + if(cach->L2->exists) { + if(cach->L3->exists && cach->L2->size > 2 * 1048576) { printBug("Invalid L2 size: %dMB", cach->L2->size/(1048576)); return NULL; } @@ -472,11 +493,11 @@ struct cache* get_cache_info(struct cpuInfo* cpu) { return NULL; } } - if(cach->L3->size != UNKNOWN && cach->L3->size > 100 * 1048576) { + if(cach->L3->exists && cach->L3->size > 100 * 1048576) { printBug("Invalid L3 size: %dMB", cach->L3->size/(1048576)); return NULL; } - if(cach->L2->size == UNKNOWN) { + if(!cach->L2->exists) { printBug("Could not find L2 cache"); return NULL; } @@ -490,11 +511,11 @@ struct frequency* get_frequency_info(struct cpuInfo* cpu) { if(cpu->maxLevels < 0x16) { #ifdef _WIN32 printErr("Can't read frequency information from cpuid (needed 
level is %d, max is %d)", 0x16, cpu->maxLevels); - freq->base = UNKNOWN; - freq->max = UNKNOWN; + freq->base = UNKNOWN_FREQ; + freq->max = UNKNOWN_FREQ; #else printWarn("Can't read frequency information from cpuid (needed level is %d, max is %d). Using udev", 0x16, cpu->maxLevels); - freq->base = UNKNOWN; + freq->base = UNKNOWN_FREQ; freq->max = get_max_freq_from_file(); #endif } @@ -584,7 +605,7 @@ char* get_str_peak_performance(struct cpuInfo* cpu, struct topology* topo, int64 char* string = malloc(sizeof(char)*size); //First check we have consistent data - if(freq == UNKNOWN) { + if(freq == UNKNOWN_FREQ) { snprintf(string,strlen(STRING_UNKNOWN)+1,STRING_UNKNOWN); return string; } @@ -842,12 +863,12 @@ char* get_str_l1d(struct cache* cach) { } char* get_str_l2(struct cache* cach) { - assert(cach->L2->size != UNKNOWN); + assert(cach->L2->exists); return get_str_cache(cach->L2->size, cach->L2->num_caches); } char* get_str_l3(struct cache* cach) { - if(cach->L3->size == UNKNOWN) + if(!cach->L3->exists) return NULL; return get_str_cache(cach->L3->size, cach->L3->num_caches); } @@ -857,7 +878,7 @@ char* get_str_freq(struct frequency* freq) { uint32_t size = (4+3+1); assert(strlen(STRING_UNKNOWN)+1 <= size); char* string = malloc(sizeof(char)*size); - if(freq->max == UNKNOWN) + if(freq->max == UNKNOWN_FREQ) snprintf(string,strlen(STRING_UNKNOWN)+1,STRING_UNKNOWN); else if(freq->max >= 1000) snprintf(string,size,"%.2f"STRING_GIGAHERZ,(float)(freq->max)/1000); @@ -873,10 +894,15 @@ void print_levels(struct cpuInfo* cpu, char* cpu_name) { } void free_topo_struct(struct topology* topo) { + free(topo->apic->cache_select_mask); + free(topo->apic->cache_id_apic); + free(topo->apic); free(topo); } void free_cache_struct(struct cache* cach) { + for(int i=0; i < 4; i++) free(cach->cach_arr[i]); + free(cach->cach_arr); free(cach); } diff --git a/src/cpuid.h b/src/cpuid.h index ec29924..21ec95e 100644 --- a/src/cpuid.h +++ b/src/cpuid.h @@ -8,7 +8,7 @@ #define VENDOR_AMD 2 #define 
VENDOR_INVALID 3 -#define UNKNOWN -1 +#define UNKNOWN_FREQ -1 typedef int32_t VENDOR; @@ -42,6 +42,7 @@ struct cpuInfo { struct cach { int32_t size; uint8_t num_caches; + bool exists; // plenty of more properties to include in the future... }; diff --git a/src/main.c b/src/main.c index 82a3a8f..8d8ec6a 100644 --- a/src/main.c +++ b/src/main.c @@ -21,6 +21,7 @@ Options: \n\ * legacy \n\ --help Prints this help and exit\n\ --levels Prints CPU model and cpuid levels (debug purposes)\n\ + --verbose Prints extra information (if available) about how cpufetch tried fetching information\n\ --version Prints cpufetch version and exit\n", argv[0]); } diff --git a/src/udev.c b/src/udev.c index 92b87f5..6ea2e78 100644 --- a/src/udev.c +++ b/src/udev.c @@ -24,7 +24,7 @@ long get_freq_from_file(char* path) { if(fd == -1) { perror("open"); printBug("Could not open '%s'", path); - return UNKNOWN; + return UNKNOWN_FREQ; } //File exists, read it @@ -45,7 +45,7 @@ long get_freq_from_file(char* path) { perror("strtol"); printBug("Failed parsing '%s' file. Read data was: '%s'", path, buf); free(buf); - return UNKNOWN; + return UNKNOWN_FREQ; } // We will be getting the frequency in KHz @@ -53,7 +53,7 @@ long get_freq_from_file(char* path) { // greater than 10 GHz or less than 100 MHz if(ret > 10000 * 1000 || ret < 100 * 1000) { printBug("Invalid data was read from file '%s': %ld\n", path, ret); - return UNKNOWN; + return UNKNOWN_FREQ; } free(buf);