idf_size: Support JSON output

Pass -DOUTPUT_JSON=1 to get JSON formatted output from CMake targets
pull/7874/head
Angus Gratton 2019-02-21 15:02:29 +11:00 zatwierdzone przez Angus Gratton
rodzic 80fb837b29
commit 05be37c87c
6 zmienionych plików z 2767 dodań i 61 usunięć

Wyświetl plik

@@ -97,7 +97,7 @@ Advanced Commands
- ``idf.py app``, ``idf.py bootloader``, ``idf.py partition_table`` can be used to build only the app, bootloader, or partition table from the project as applicable.
- There are matching commands ``idf.py app-flash``, etc. to flash only that single part of the project to the ESP32.
- ``idf.py -p PORT erase_flash`` will use esptool.py to erase the ESP32's entire flash chip.
- ``idf.py size`` prints some size information about the app. ``size-components`` and ``size-files`` are similar commands which print more detailed per-component or per-source-file information, respectively.
- ``idf.py size`` prints some size information about the app. ``size-components`` and ``size-files`` are similar commands which print more detailed per-component or per-source-file information, respectively. If you define variable ``-DOUTPUT_JSON=1`` when running CMake (or ``idf.py``), the output will be formatted as JSON not as human readable text.
- ``idf.py reconfigure`` re-runs CMake_ even if it doesn't seem to need re-running. This isn't necessary during normal usage, but can be useful after adding/removing files from the source tree, or when modifying CMake cache variables. For example, ``idf.py -DNAME='VALUE' reconfigure`` can be used to set variable ``NAME`` in CMake cache to value ``VALUE``.
The order of multiple ``idf.py`` commands on the same invocation is not important, they will automatically be executed in the correct order for everything to take effect (ie building before flashing, erasing before flashing, etc.).

Wyświetl plik

@@ -384,20 +384,27 @@ macro(project project_name)
idf_build_get_property(idf_path IDF_PATH)
idf_build_get_property(python PYTHON)
set(idf_size ${python} ${idf_path}/tools/idf_size.py)
if(DEFINED OUTPUT_JSON AND OUTPUT_JSON)
list(APPEND idf_size "--json")
endif()
# Add size targets, depend on map file, run idf_size.py
add_custom_target(size
DEPENDS ${project_elf}
COMMAND ${python} ${idf_path}/tools/idf_size.py ${mapfile}
COMMAND ${idf_size} ${mapfile}
)
add_custom_target(size-files
DEPENDS ${project_elf}
COMMAND ${python} ${idf_path}/tools/idf_size.py --files ${mapfile}
COMMAND ${idf_size} --files ${mapfile}
)
add_custom_target(size-components
DEPENDS ${project_elf}
COMMAND ${python} ${idf_path}/tools/idf_size.py --archives ${mapfile}
COMMAND ${idf_size} --archives ${mapfile}
)
unset(idf_size)
idf_build_executable(${project_elf})
__project_info("${test_components}")

Wyświetl plik

@@ -22,9 +22,13 @@
#
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import argparse
import re
import collections
import json
import os.path
import re
import sys
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
@@ -38,6 +42,12 @@ CHIP_SIZES = {
}
def _json_dump(obj):
""" Pretty-print JSON object to stdout """
json.dump(obj, sys.stdout, indent=4)
print('\n')
def scan_to_header(f, header_line):
""" Scan forward in a file until you reach 'header_line', then return """
for line in f:
@@ -160,6 +170,11 @@ def main():
help="Triplet prefix to add before objdump executable",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
'--json',
help="Output results as JSON",
action="store_true")
parser.add_argument(
'map_file', help='MAP file produced by linker',
type=argparse.FileType('r'))
@@ -176,20 +191,18 @@ def main():
args = parser.parse_args()
memory_config, sections = load_map_data(args.map_file)
print_summary(memory_config, sections)
if not args.json or not (args.archives or args.files or args.archive_details):
print_summary(memory_config, sections, args.json)
if args.archives:
print("Per-archive contributions to ELF file:")
print_detailed_sizes(sections, "archive", "Archive File")
print_detailed_sizes(sections, "archive", "Archive File", args.json)
if args.files:
print("Per-file contributions to ELF file:")
print_detailed_sizes(sections, "file", "Object File")
print_detailed_sizes(sections, "file", "Object File", args.json)
if args.archive_details:
print("Symbols within the archive:", args.archive_details, "(Not all symbols may be reported)")
print_archive_symbols(sections, args.archive_details)
print_archive_symbols(sections, args.archive_details, args.json)
def print_summary(memory_config, sections):
def print_summary(memory_config, sections, as_json=False):
def get_size(section):
try:
return sections[section]["size"]
@@ -202,40 +215,53 @@ def print_summary(memory_config, sections):
used_data = get_size(".dram0.data")
used_bss = get_size(".dram0.bss")
used_dram = used_data + used_bss
try:
used_dram_ratio = used_dram / total_dram
except ZeroDivisionError:
used_dram_ratio = float('nan')
used_iram = sum(get_size(s) for s in sections if s.startswith(".iram0"))
try:
used_iram_ratio = used_iram / total_iram
except ZeroDivisionError:
used_iram_ratio = float('nan')
flash_code = get_size(".flash.text")
flash_rodata = get_size(".flash.rodata")
total_size = used_data + used_iram + flash_code + flash_rodata
print("Total sizes:")
print(" DRAM .data size: %7d bytes" % used_data)
print(" DRAM .bss size: %7d bytes" % used_bss)
print("Used static DRAM: %7d bytes (%7d available, %.1f%% used)" %
(used_dram, total_dram - used_dram,
100.0 * used_dram / total_dram))
print("Used static IRAM: %7d bytes (%7d available, %.1f%% used)" %
(used_iram, total_iram - used_iram,
100.0 * used_iram / total_iram))
print(" Flash code: %7d bytes" % flash_code)
print(" Flash rodata: %7d bytes" % flash_rodata)
print("Total image size:~%7d bytes (.bin may be padded larger)" % (total_size))
if as_json:
_json_dump(collections.OrderedDict([
("dram_data", used_data),
("dram_bss", used_bss),
("used_dram", used_dram),
("available_dram", total_dram - used_dram),
("used_dram_ratio", used_dram_ratio),
("used_iram", used_iram),
("available_iram", total_iram - used_iram),
("used_iram_ratio", used_iram_ratio),
("flash_code", flash_code),
("flash_rodata", flash_rodata),
("total_size", total_size)
]))
else:
print("Total sizes:")
print(" DRAM .data size: %7d bytes" % used_data)
print(" DRAM .bss size: %7d bytes" % used_bss)
print("Used static DRAM: %7d bytes (%7d available, %.1f%% used)" %
(used_dram, total_dram - used_dram, 100.0 * used_dram_ratio))
print("Used static IRAM: %7d bytes (%7d available, %.1f%% used)" %
(used_iram, total_iram - used_iram, 100.0 * used_iram_ratio))
print(" Flash code: %7d bytes" % flash_code)
print(" Flash rodata: %7d bytes" % flash_rodata)
print("Total image size:~%7d bytes (.bin may be padded larger)" % (total_size))
def print_detailed_sizes(sections, key, header):
def print_detailed_sizes(sections, key, header, as_json=False):
sizes = sizes_by_key(sections, key)
headings = (header,
"DRAM .data",
"& .bss",
"IRAM",
"Flash code",
"& rodata",
"Total")
print("%24s %10s %6s %6s %10s %8s %7s" % headings)
result = {}
for k in sizes:
v = sizes[k]
result[k] = {}
result[k] = collections.OrderedDict()
result[k]["data"] = v.get(".dram0.data", 0)
result[k]["bss"] = v.get(".dram0.bss", 0)
result[k]["iram"] = sum(t for (s,t) in v.items() if s.startswith(".iram0"))
@@ -250,20 +276,37 @@ def print_detailed_sizes(sections, key, header):
def return_header(elem):
return elem[0]
s = sorted(list(result.items()), key=return_header)
# do a secondary sort in order to have consistent order (for diff-ing the output)
for k,v in sorted(s, key=return_total_size, reverse=True):
if ":" in k: # print subheadings for key of format archive:file
sh,k = k.split(":")
print("%24s %10d %6d %6d %10d %8d %7d" % (k[:24],
v["data"],
v["bss"],
v["iram"],
v["flash_text"],
v["flash_rodata"],
v["total"]))
s = sorted(s, key=return_total_size, reverse=True)
if as_json:
_json_dump(collections.OrderedDict(s))
else:
print("Per-%s contributions to ELF file:" % key)
headings = (header,
"DRAM .data",
"& .bss",
"IRAM",
"Flash code",
"& rodata",
"Total")
header_format = "%24s %10d %6d %6d %10d %8d %7d"
print(header_format.replace("d", "s") % headings)
for k,v in s:
if ":" in k: # print subheadings for key of format archive:file
sh,k = k.split(":")
print(header_format % (k[:24],
v["data"],
v["bss"],
v["iram"],
v["flash_text"],
v["flash_rodata"],
v["total"]))
def print_archive_symbols(sections, archive):
def print_archive_symbols(sections, archive, as_json=False):
interested_sections = [".dram0.data", ".dram0.bss", ".iram0.text", ".iram0.vectors", ".flash.text", ".flash.rodata"]
result = {}
for t in interested_sections:
@@ -277,15 +320,26 @@ def print_archive_symbols(sections, archive):
continue
s["sym_name"] = re.sub("(.text.|.literal.|.data.|.bss.|.rodata.)", "", s["sym_name"])
result[section_name][s["sym_name"]] = result[section_name].get(s["sym_name"], 0) + s["size"]
# build a new ordered dict of each section, where each entry is an ordereddict of symbols to sizes
section_symbols = collections.OrderedDict()
for t in interested_sections:
print("\nSymbols from section:", t)
section_total = 0
s = sorted(list(result[t].items()), key=lambda k_v: k_v[0])
# do a secondary sort in order to have consistent order (for diff-ing the output)
for key,val in sorted(s, key=lambda k_v: k_v[1], reverse=True):
print(("%s(%d)" % (key.replace(t + ".", ""), val)), end=' ')
section_total += val
print("\nSection total:",section_total)
s = sorted(s, key=lambda k_v: k_v[1], reverse=True)
section_symbols[t] = collections.OrderedDict(s)
if as_json:
_json_dump(section_symbols)
else:
print("Symbols within the archive: %s (Not all symbols may be reported)" % (archive))
for t,s in section_symbols.items():
section_total = 0
print("\nSymbols from section:", t)
for key, val in s.items():
print(("%s(%d)" % (key.replace(t + ".", ""), val)), end=' ')
section_total += val
print("\nSection total:",section_total)
if __name__ == "__main__":

Wyświetl plik

@@ -2,11 +2,23 @@
{ coverage debug sys \
&& coverage erase &> output \
&& echo -e "\n***\nRunning idf_size.py..." >> output \
&& coverage run -a $IDF_PATH/tools/idf_size.py app.map &>> output \
&& echo -e "\n***\nRunning idf_size.py --archives..." >> output \
&& coverage run -a $IDF_PATH/tools/idf_size.py --archives app.map &>> output \
&& echo -e "\n***\nRunning idf_size.py --files..." >> output \
&& coverage run -a $IDF_PATH/tools/idf_size.py --files app.map &>> output \
&& echo -e "\n***\nRunning idf_size.py --archive_details..." >> output \
&& coverage run -a $IDF_PATH/tools/idf_size.py --archive_details libdriver.a app.map &>> output \
&& echo -e "\n***\nProducing JSON output..." >> output \
&& coverage run -a $IDF_PATH/tools/idf_size.py --json app.map &>> output \
&& coverage run -a $IDF_PATH/tools/idf_size.py --json --archives app.map &>> output \
&& coverage run -a $IDF_PATH/tools/idf_size.py --json --files app.map &>> output \
&& coverage run -a $IDF_PATH/tools/idf_size.py --json --archive_details libdriver.a app.map &>> output \
&& echo -e "\n***\nRunning idf_size_tests.py..." >> output \
&& coverage run -a $IDF_PATH/tools/test_idf_size/test_idf_size.py &>> output \
&& diff output expected_output \
&& diff -Z output expected_output \
&& coverage report \
; } || { echo 'The test for idf_size has failed. Please examine the artifacts.' ; exit 1; }
# Note: "diff -Z" is used because some versions of Python print trailing whitespace for JSON pretty-printing, and some don't

Wyświetl plik

@@ -24,18 +24,19 @@ except ImportError:
if __name__ == "__main__":
# Should deliver a RuntimeError as the 'test' header doesn't exist
try:
idf_size.scan_to_header([], 'test')
except RuntimeError:
pass
except RuntimeError as e:
assert "Didn't find line" in str(e)
# Should deliver a RuntimeError as there's no content under the heading
try:
idf_size.load_memory_config(["Memory Configuration"])
pass
except RuntimeError:
pass
except RuntimeError as e:
assert "End of file" in str(e)
try:
idf_size.print_summary({"iram0_0_seg": {"length":0}, "dram0_0_seg": {"length":0}}, {})
except ZeroDivisionError:
pass
# This used to crash with a division by zero error but now it just prints nan% due to
# zero lengths
idf_size.print_summary({"iram0_0_seg": {"length":0}, "dram0_0_seg": {"length":0}}, {})