Commit 5f88764a authored by Lorenzo Pagliai

[REPORT] Add exception handling of incomplete job

In case the LAVA job is incomplete or fails to execute the 'interesting'
tests on the DUT, the script handles the exception and exits.
parent 34199bdb
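
For context, the new guard follows the try/except pattern sketched below. This is a minimal, self-contained sketch, assuming the CSV rows have already been parsed into the list of dictionaries that the script calls xml_data; the helper name get_job_id is hypothetical and only illustrates the shape of the change.

import sys

def get_job_id(xml_data):
    # The first parsed row carries the LAVA job id; an incomplete job,
    # or one whose 'interesting' tests never ran on the DUT, yields no rows.
    try:
        return xml_data[0]['job']
    except IndexError:
        print("Error: No interesting job data found in the CSV file. "
              "A retrigger of the corresponding LAVA job may be necessary.")
        sys.exit(1)

Exiting with a non-zero status lets a calling CI pipeline detect the condition instead of publishing an empty report.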
@@ -53,6 +53,8 @@ def convert_urls_to_links(text):
     url_pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
     text_with_links = re.sub(url_pattern, lambda x: f'<a href="{x.group(0)}">{x.group(0).split("/")[-1]}</a>', text)
     return text_with_links
+
+# Convert LAVA report from CSV format to XHTML
 def convert_csv_report_to_xhtml(input_file, page_name, lava_url, lava_token, pipeline_url, testing_page_url, manifest_url, commit, branch, link_report, output_file):
     # Read CSV data and convert it into a list of dictionaries
     csv_file = input_file
@@ -82,8 +84,15 @@ def convert_csv_report_to_xhtml(input_file, page_name, lava_url, lava_token, pip
     # Create XML structure for the initial SW info table
     info_root = ET.Element('root')
-    # Add headings to the info table
-    job_id = xml_data[0]['job']
+    # Get job_id if available, or exit (e.g. incomplete LAVA job)
+    try:
+        job_id = xml_data[0]['job']
+    except IndexError:
+        # Handle the case where xml_data list is empty
+        print("Error: No interesting job data found in the CSV file. A retrigger of the corresponding LAVA job may be necessary.")
+        exit(1)
+    # Add heading to the page
     heading = ET.SubElement(info_root, 'h2')
     heading.text = f'Automatic LAVA Job: {job_id}'
     software_info_title = ET.SubElement(info_root, 'h3')
@@ -109,7 +118,6 @@ def convert_csv_report_to_xhtml(input_file, page_name, lava_url, lava_token, pip
     # Distro name processing
     parts = page_name.split('-')
-    print(parts)
     if "things" in parts:
         distro = "Things"
     elif "embedded" in parts:
@@ -358,7 +366,6 @@ def main():
     # Strip distro related info from page name
     parts = args.page_name.split('-')
-    print(parts)
     if "things" in parts[-1]:
         distro = parts[-1:]
         index = (args.page_name).find("-things")
...
File mode changed from 100644 to 100755