From 276f650d2f340d79abf6c33d894158be64fbb874 Mon Sep 17 00:00:00 2001
From: Mark Mayo
Date: Wed, 7 Dec 2022 16:55:58 +1300
Subject: [PATCH 1/5] code cleanup

- sorted imports
- black formatting
- whitespace cleaned
- consistent quoting
- f-strings created
---
 docs/sources/conf.py                 | 111 +++++-----
 examples/pkg1/test_mod1.py           |   9 +-
 examples/pkg1/test_mod2.py           |   1 +
 examples/pkg2/test_mod_a.py          |   3 +-
 examples/pkg3/test_mod_cl.py         |   3 +-
 examples/pkg4/test_mod_a.py          |   5 +-
 examples/pkg5/test_special_pytest.py |   6 +-
 pytest_monitor/handler.py            |  99 ++++++---
 pytest_monitor/pytest_monitor.py     | 294 ++++++++++++++++++---------
 pytest_monitor/session.py            | 149 +++++++++-----
 pytest_monitor/sys_utils.py          | 117 +++++++----
 setup.py                             |  83 ++++----
 tests/conftest.py                    |   2 +-
 tests/test_monitor.py                | 204 +++++++++++--------
 tests/test_monitor_component.py      | 105 +++++-----
 tests/test_monitor_context.py        |  78 +++----
 tests/test_monitor_in_ci.py          | 245 +++++++++++++++------
 17 files changed, 956 insertions(+), 558 deletions(-)

diff --git a/docs/sources/conf.py b/docs/sources/conf.py
index 06e5d99..31a5f88 100644
--- a/docs/sources/conf.py
+++ b/docs/sources/conf.py
@@ -12,61 +12,62 @@
 # All configuration values have a default; values that are commented out
 # serve to show the default.
 
-import sys
-import os
-import shlex
 import pathlib
 
 
 def read_version():
-    init = pathlib.Path(__file__).parent.parent.parent / 'pytest_monitor' / '__init__.py'
-    with init.open('r') as pkg_init_f:
-        version_read = [line.strip() for line in pkg_init_f if line.startswith('__version__')]
+    init = (
+        pathlib.Path(__file__).parent.parent.parent / "pytest_monitor" / "__init__.py"
+    )
+    with init.open("r") as pkg_init_f:
+        version_read = [
+            line.strip() for line in pkg_init_f if line.startswith("__version__")
+        ]
     if len(version_read) > 1:
         raise ValueError('Multiple version found in "pytest_monitor" package!')
     if not version_read:
         raise ValueError('No version found in "pytest_monitor" package!')
-    return version_read[0].split('=', 1)[1].strip('" \'')
+    return version_read[0].split("=", 1)[1].strip("\" '")
 
 
 # If extensions (or modules to document with autodoc) are in another directory,
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
-#sys.path.insert(0, os.path.abspath('.'))
+# sys.path.insert(0, os.path.abspath('.'))
 
 # -- General configuration ------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-#needs_sphinx = '1.0'
+# needs_sphinx = '1.0'
 
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = [
-    'sphinx.ext.ifconfig',
-    'sphinx.ext.todo',
-    'sphinx.ext.graphviz',
-    'releases',
+    "sphinx.ext.ifconfig",
+    "sphinx.ext.todo",
+    "sphinx.ext.graphviz",
+    "releases",
 ]
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 # source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'pytest-monitor'
-copyright = u'2019, Jean-Sébastien Dieu'
-author = u'Jean-Sébastien Dieu'
+project = "pytest-monitor"
+copyright = "2019, Jean-Sébastien Dieu"
+author = "Jean-Sébastien Dieu"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -75,7 +76,7 @@ def read_version():
 # The short X.Y version.
 version = read_version()
 # The full version, including alpha/beta/rc tags.
-release = 'pytest-monitor v{}'.format(version)
+release = f"pytest-monitor v{version}"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -86,37 +87,37 @@ def read_version():
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['_build']
+exclude_patterns = ["_build"]
 
 # The reST default role (used for this markup: `text`) to use for all
 # documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 # If true, keep warnings as "system message" paragraphs in the built documents.
-#keep_warnings = False
+# keep_warnings = False
 
 # If true, `todo` and `todoList` produce output, else they produce nothing.
 todo_include_todos = True
@@ -126,22 +127,22 @@ def read_version():
 
 # The theme to use for HTML and HTML Help pages. See the documentation for
 # a list of builtin themes.
-html_theme = 'sphinx_rtd_theme'
+html_theme = "sphinx_rtd_theme"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further. For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
-#html_theme_path = []
+# html_theme_path = []
 
 # The name for this set of Sphinx documents. If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar. Default is the same as html_title.
-#html_short_title = None
+# html_short_title = None
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-#html_favicon = None
+# html_favicon = None
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+html_static_path = ["_static"]
 
 # Add any extra paths that contain custom files (such as robots.txt or
 # .htaccess) here, relative to this directory. These files are copied
 # directly to the root of the documentation.
-#html_extra_path = []
+# html_extra_path = []
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it. The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Language to be used for generating the HTML full-text search index.
 # Sphinx supports the following languages:
 # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
 # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
-#html_search_language = 'en'
+# html_search_language = 'en'
 
 # A dictionary with options for the search language support, empty by default.
 # Now only 'ja' uses this config value
-#html_search_options = {'type': 'default'}
+# html_search_options = {'type': 'default'}
 
 # The name of a javascript file (relative to the configuration directory) that
 # implements a search results scorer. If empty, the default will be used.
-#html_search_scorer = 'scorer.js'
+# html_search_scorer = 'scorer.js'
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'pytestmonitor-doc' +htmlhelp_basename = "pytestmonitor-doc" diff --git a/examples/pkg1/test_mod1.py b/examples/pkg1/test_mod1.py index cf2aadb..79307d7 100644 --- a/examples/pkg1/test_mod1.py +++ b/examples/pkg1/test_mod1.py @@ -1,6 +1,7 @@ -import pytest import time +import pytest + def test_sleep1(): time.sleep(1) @@ -11,6 +12,8 @@ def test_sleep2(): time.sleep(2) -@pytest.mark.parametrize(('range_max', 'other'), [(10, "10"), (100, "100"), (1000, "1000"), (10000, "10000")]) +@pytest.mark.parametrize( + ("range_max", "other"), [(10, "10"), (100, "100"), (1000, "1000"), (10000, "10000")] +) def test_heavy(range_max, other): - assert len(['a' * i for i in range(range_max)]) == range_max + assert len(["a" * i for i in range(range_max)]) == range_max diff --git a/examples/pkg1/test_mod2.py b/examples/pkg1/test_mod2.py index 63288f0..8885560 100644 --- a/examples/pkg1/test_mod2.py +++ b/examples/pkg1/test_mod2.py @@ -1,4 +1,5 @@ import time + def test_sleep_400ms(): time.sleep(0.4) diff --git a/examples/pkg2/test_mod_a.py b/examples/pkg2/test_mod_a.py index 59ea67a..4a9a6b0 100644 --- a/examples/pkg2/test_mod_a.py +++ b/examples/pkg2/test_mod_a.py @@ -5,6 +5,5 @@ def test_master_sleep(): t_a = time.time() b_continue = True while b_continue: - t_delta = time.time() - t_a + t_delta = time.time() - t_a b_continue = t_delta < 5 - diff --git a/examples/pkg3/test_mod_cl.py b/examples/pkg3/test_mod_cl.py index 39aac1f..e2ab1a5 100644 --- a/examples/pkg3/test_mod_cl.py +++ b/examples/pkg3/test_mod_cl.py @@ -1,10 +1,11 @@ import time + class TestClass: def setup_method(self, test_method): self.__value = test_method.__name__ time.sleep(1) - + def test_method1(self): time.sleep(0.5) assert self.__value == "test_method1" diff --git a/examples/pkg4/test_mod_a.py b/examples/pkg4/test_mod_a.py index 3f53bdd..d5cbaf7 100644 --- a/examples/pkg4/test_mod_a.py +++ b/examples/pkg4/test_mod_a.py @@ -1,9 +1,11 @@ import time + import pytest pytestmark = pytest.mark.monitor_skip_test -pytest_monitor_component = 'test' +pytest_monitor_component = "test" + def test_not_monitored(): t_a = time.time() @@ -20,4 +22,3 @@ def test_force_monitor(): while b_continue: t_delta = time.time() - t_a b_continue = t_delta < 5 - diff --git a/examples/pkg5/test_special_pytest.py b/examples/pkg5/test_special_pytest.py index daf2afe..e81c9b1 100644 --- a/examples/pkg5/test_special_pytest.py +++ b/examples/pkg5/test_special_pytest.py @@ -1,14 +1,14 @@ import pytest -@pytest.mark.skip(reason='Some special test to skip') +@pytest.mark.skip(reason="Some special test to skip") def test_is_skipped(): assert True def test_that_one_is_skipped_too(): - pytest.skip('Test executed and instructed to be skipped from its body') + pytest.skip("Test executed and instructed to be skipped from its body") def test_import_or_skip(): - pytest.importorskip('this_module_does_not_exists') + pytest.importorskip("this_module_does_not_exists") diff --git a/pytest_monitor/handler.py b/pytest_monitor/handler.py index 4841b14..cbbdecd 100644 --- a/pytest_monitor/handler.py +++ b/pytest_monitor/handler.py @@ -14,41 +14,87 @@ def query(self, what, bind_to, many=False): def insert_session(self, h, run_date, scm_id, description): with self.__cnx: - self.__cnx.execute('insert into TEST_SESSIONS(SESSION_H, RUN_DATE, SCM_ID, RUN_DESCRIPTION)' - ' values (?,?,?,?)', - (h, run_date, scm_id, description)) + self.__cnx.execute( + "insert into TEST_SESSIONS(SESSION_H, RUN_DATE, SCM_ID, RUN_DESCRIPTION)" + " values (?,?,?,?)", + (h, run_date, scm_id, 
description), + ) - def insert_metric(self, session_id, env_id, item_start_date, item, item_path, item_variant, - item_loc, kind, component, total_time, user_time, kernel_time, cpu_usage, mem_usage): + def insert_metric( + self, + session_id, + env_id, + item_start_date, + item, + item_path, + item_variant, + item_loc, + kind, + component, + total_time, + user_time, + kernel_time, + cpu_usage, + mem_usage, + ): with self.__cnx: - self.__cnx.execute('insert into TEST_METRICS(SESSION_H,ENV_H,ITEM_START_TIME,ITEM,' - 'ITEM_PATH,ITEM_VARIANT,ITEM_FS_LOC,KIND,COMPONENT,TOTAL_TIME,' - 'USER_TIME,KERNEL_TIME,CPU_USAGE,MEM_USAGE) ' - 'values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)', - (session_id, env_id, item_start_date, item, item_path, - item_variant, item_loc, kind, component, total_time, user_time, - kernel_time, cpu_usage, mem_usage)) + self.__cnx.execute( + "insert into TEST_METRICS(SESSION_H,ENV_H,ITEM_START_TIME,ITEM," + "ITEM_PATH,ITEM_VARIANT,ITEM_FS_LOC,KIND,COMPONENT,TOTAL_TIME," + "USER_TIME,KERNEL_TIME,CPU_USAGE,MEM_USAGE) " + "values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)", + ( + session_id, + env_id, + item_start_date, + item, + item_path, + item_variant, + item_loc, + kind, + component, + total_time, + user_time, + kernel_time, + cpu_usage, + mem_usage, + ), + ) def insert_execution_context(self, exc_context): with self.__cnx: - self.__cnx.execute('insert into EXECUTION_CONTEXTS(CPU_COUNT,CPU_FREQUENCY_MHZ,CPU_TYPE,CPU_VENDOR,' - 'RAM_TOTAL_MB,MACHINE_NODE,MACHINE_TYPE,MACHINE_ARCH,SYSTEM_INFO,' - 'PYTHON_INFO,ENV_H) values (?,?,?,?,?,?,?,?,?,?,?)', - (exc_context.cpu_count, exc_context.cpu_frequency, exc_context.cpu_type, - exc_context.cpu_vendor, exc_context.ram_total, exc_context.fqdn, exc_context.machine, - exc_context.architecture, exc_context.system_info, exc_context.python_info, - exc_context.hash())) + self.__cnx.execute( + "insert into EXECUTION_CONTEXTS(CPU_COUNT,CPU_FREQUENCY_MHZ,CPU_TYPE,CPU_VENDOR," + "RAM_TOTAL_MB,MACHINE_NODE,MACHINE_TYPE,MACHINE_ARCH,SYSTEM_INFO," + "PYTHON_INFO,ENV_H) values (?,?,?,?,?,?,?,?,?,?,?)", + ( + exc_context.cpu_count, + exc_context.cpu_frequency, + exc_context.cpu_type, + exc_context.cpu_vendor, + exc_context.ram_total, + exc_context.fqdn, + exc_context.machine, + exc_context.architecture, + exc_context.system_info, + exc_context.python_info, + exc_context.hash(), + ), + ) def prepare(self): cursor = self.__cnx.cursor() - cursor.execute(''' + cursor.execute( + """ CREATE TABLE IF NOT EXISTS TEST_SESSIONS( SESSION_H varchar(64) primary key not null unique, -- Session identifier RUN_DATE varchar(64), -- Date of test run SCM_ID varchar(128), -- SCM change id RUN_DESCRIPTION json -);''') - cursor.execute(''' +);""" + ) + cursor.execute( + """ CREATE TABLE IF NOT EXISTS TEST_METRICS ( SESSION_H varchar(64), -- Session identifier ENV_H varchar(64), -- Environment description identifier @@ -66,8 +112,10 @@ def prepare(self): MEM_USAGE float, -- Max resident memory used. 
FOREIGN KEY (ENV_H) REFERENCES EXECUTION_CONTEXTS(ENV_H), FOREIGN KEY (SESSION_H) REFERENCES TEST_SESSIONS(SESSION_H) -);''') - cursor.execute(''' +);""" + ) + cursor.execute( + """ CREATE TABLE IF NOT EXISTS EXECUTION_CONTEXTS ( ENV_H varchar(64) primary key not null unique, CPU_COUNT integer, @@ -81,5 +129,6 @@ def prepare(self): SYSTEM_INFO varchar(256), PYTHON_INFO varchar(512) ); -''') +""" + ) self.__cnx.commit() diff --git a/pytest_monitor/pytest_monitor.py b/pytest_monitor/pytest_monitor.py index 7a21669..105f568 100644 --- a/pytest_monitor/pytest_monitor.py +++ b/pytest_monitor/pytest_monitor.py @@ -1,10 +1,11 @@ # -*- coding: utf-8 -*- import gc -import memory_profiler -import pytest import time import warnings +import memory_profiler +import pytest + from pytest_monitor.session import PyTestMonitorSession # These dictionaries are used to compute members set on each items. @@ -14,55 +15,116 @@ # internal marker attribute name: str # callable that set member's value # default value -PYTEST_MONITOR_VALID_MARKERS = {'monitor_skip_test': (False, 'monitor_skip_test', lambda x: True, False), - 'monitor_skip_test_if': (True, 'monitor_skip_test', lambda x: bool(x), False), - 'monitor_test': (False, 'monitor_force_test', lambda x: True, False), - 'monitor_test_if': (True, 'monitor_force_test', lambda x: bool(x), False)} +PYTEST_MONITOR_VALID_MARKERS = { + "monitor_skip_test": (False, "monitor_skip_test", lambda x: True, False), + "monitor_skip_test_if": (True, "monitor_skip_test", lambda x: bool(x), False), + "monitor_test": (False, "monitor_force_test", lambda x: True, False), + "monitor_test_if": (True, "monitor_force_test", lambda x: bool(x), False), +} PYTEST_MONITOR_DEPRECATED_MARKERS = {} -PYTEST_MONITOR_ITEM_LOC_MEMBER = '_location' if tuple(pytest.__version__.split('.')) < ('5', '3') else 'location' +PYTEST_MONITOR_ITEM_LOC_MEMBER = ( + "_location" if tuple(pytest.__version__.split(".")) < ("5", "3") else "location" +) PYTEST_MONITORING_ENABLED = True def pytest_addoption(parser): - group = parser.getgroup('monitor') - group.addoption('--restrict-scope-to', dest='mtr_scope', default='function', - help='Select the scope to monitor. By default, only function is monitored.' - 'Values are function, class, module, session. You can set one or more of these' - 'by listing them using a comma separated list') - group.addoption('--parametrization-explicit', dest='mtr_want_explicit_ids', action='store_true', - help='Set this option to distinguish parametrized tests given their values.' - ' This requires the parameters to be stringifiable.') - group.addoption('--no-monitor', action='store_true', dest='mtr_none', help='Disable all traces') - group.addoption('--remote-server', action='store', dest='mtr_remote', - help='Remote server to send the results to. 
Format is <host>:<port>')
-    group.addoption('--db', action='store', dest='mtr_db_out', default='.pymon',
-                    help='Use the given sqlite database for storing results.')
-    group.addoption('--no-db', action='store_true', dest='mtr_no_db', help='Do not store results in local db.')
-    group.addoption('--force-component', action='store', dest='mtr_force_component',
-                    help='Force the component to be set at the given value for the all tests run'
-                         ' in this session.')
-    group.addoption('--component-prefix', action='store', dest='mtr_component_prefix',
-                    help='Prefix each found components with the given value (applies to all tests'
-                         ' run in this session).')
-    group.addoption('--no-gc', action="store_true", dest="mtr_disable_gc",
-                    help='Disable garbage collection between tests (may leads to non reliable measures)')
-    group.addoption('--description', action='store', default='', dest='mtr_description',
-                    help='Use this option to provide a small summary about this run.')
-    group.addoption('--tag', action='append', dest='mtr_tags', default=[],
-                    help='Provide meaningfull flags to your run. This can help you in your analysis.')
+    group = parser.getgroup("monitor")
+    group.addoption(
+        "--restrict-scope-to",
+        dest="mtr_scope",
+        default="function",
+        help="Select the scope to monitor. By default, only function is monitored. "
+        "Values are function, class, module, session. You can set one or more of these "
+        "by listing them using a comma separated list",
+    )
+    group.addoption(
+        "--parametrization-explicit",
+        dest="mtr_want_explicit_ids",
+        action="store_true",
+        help="Set this option to distinguish parametrized tests given their values."
+        " This requires the parameters to be stringifiable.",
+    )
+    group.addoption(
+        "--no-monitor", action="store_true", dest="mtr_none", help="Disable all traces"
+    )
+    group.addoption(
+        "--remote-server",
+        action="store",
+        dest="mtr_remote",
+        help="Remote server to send the results to. Format is <host>:<port>",
+    )
+    group.addoption(
+        "--db",
+        action="store",
+        dest="mtr_db_out",
+        default=".pymon",
+        help="Use the given sqlite database for storing results.",
+    )
+    group.addoption(
+        "--no-db",
+        action="store_true",
+        dest="mtr_no_db",
+        help="Do not store results in local db.",
+    )
+    group.addoption(
+        "--force-component",
+        action="store",
+        dest="mtr_force_component",
+        help="Force the component to be set at the given value for all tests run"
+        " in this session.",
+    )
+    group.addoption(
+        "--component-prefix",
+        action="store",
+        dest="mtr_component_prefix",
+        help="Prefix each found component with the given value (applies to all tests"
+        " run in this session).",
+    )
+    group.addoption(
+        "--no-gc",
+        action="store_true",
+        dest="mtr_disable_gc",
+        help="Disable garbage collection between tests (may lead to non-reliable measures)",
+    )
+    group.addoption(
+        "--description",
+        action="store",
+        default="",
+        dest="mtr_description",
+        help="Use this option to provide a small summary about this run.",
+    )
+    group.addoption(
+        "--tag",
+        action="append",
+        dest="mtr_tags",
+        default=[],
+        help="Provide meaningful flags to your run. This can help you in your analysis.",
+    )
 
 
 def pytest_configure(config):
-    config.addinivalue_line("markers", "monitor_skip_test: mark test to be executed but not monitored.")
-    config.addinivalue_line("markers", "monitor_skip_test_if(cond): mark test to be executed but "
-                                       "not monitored if cond is verified.")
-    config.addinivalue_line("markers", "monitor_test: mark test to be monitored (default behaviour)."
- " This can turn handy to whitelist some test when you have disabled" - " monitoring on a whole module.") - config.addinivalue_line("markers", "monitor_test_if(cond): mark test to be monitored if and only if cond" - " is verified. This can help you in whitelisting tests to be monitored" - " depending on some external conditions.") + config.addinivalue_line( + "markers", "monitor_skip_test: mark test to be executed but not monitored." + ) + config.addinivalue_line( + "markers", + "monitor_skip_test_if(cond): mark test to be executed but " + "not monitored if cond is verified.", + ) + config.addinivalue_line( + "markers", + "monitor_test: mark test to be monitored (default behaviour)." + " This can turn handy to whitelist some test when you have disabled" + " monitoring on a whole module.", + ) + config.addinivalue_line( + "markers", + "monitor_test_if(cond): mark test to be monitored if and only if cond" + " is verified. This can help you in whitelisting tests to be monitored" + " depending on some external conditions.", + ) def pytest_runtest_setup(item): @@ -73,14 +135,22 @@ def pytest_runtest_setup(item): """ if not PYTEST_MONITORING_ENABLED: return - item_markers = {mark.name: mark for mark in item.iter_markers() if mark and mark.name.startswith('monitor_')} + item_markers = { + mark.name: mark + for mark in item.iter_markers() + if mark and mark.name.startswith("monitor_") + } mark_to_del = [] for set_marker in item_markers.keys(): if set_marker not in PYTEST_MONITOR_VALID_MARKERS: - warnings.warn("Nothing known about marker {}. Marker will be dropped.".format(set_marker)) + warnings.warn( + f"Nothing known about marker {set_marker}. Marker will be dropped." + ) mark_to_del.append(set_marker) if set_marker in PYTEST_MONITOR_DEPRECATED_MARKERS: - warnings.warn('Marker {} is deprecated. Consider upgrading your tests'.format(set_marker)) + warnings.warn( + f"Marker {set_marker} is deprecated. Consider upgrading your tests" + ) for marker in mark_to_del: del item_markers[marker] @@ -115,19 +185,23 @@ def pytest_runtest_makereport(item, call): outcome = yield rep = outcome.get_result() - if rep.when == 'call': - setattr(item, 'test_run_duration', call.stop - call.start) - setattr(item, 'test_effective_start_time', call.start) + if rep.when == "call": + setattr(item, "test_run_duration", call.stop - call.start) + setattr(item, "test_effective_start_time", call.start) def pytest_runtest_call(item): if not PYTEST_MONITORING_ENABLED: return - setattr(item, 'monitor_results', False) - if hasattr(item, 'module'): - setattr(item, 'monitor_component', getattr(item.module, 'pytest_monitor_component', '')) + setattr(item, "monitor_results", False) + if hasattr(item, "module"): + setattr( + item, + "monitor_component", + getattr(item.module, "pytest_monitor_component", ""), + ) else: - setattr(item, 'monitor_skip_test', True) + setattr(item, "monitor_skip_test", True) @pytest.hookimpl @@ -136,6 +210,7 @@ def pytest_pyfunc_call(pyfuncitem): Core sniffer logic. We encapsulate the test function in a sniffer function to collect memory results. """ + def wrapped_function(): try: funcargs = pyfuncitem.funcargs @@ -147,13 +222,15 @@ def wrapped_function(): return e def prof(): - m = memory_profiler.memory_usage((wrapped_function, ()), - max_iterations=1, max_usage=True, retval=True) + m = memory_profiler.memory_usage( + (wrapped_function, ()), max_iterations=1, max_usage=True, retval=True + ) if isinstance(m[1], BaseException): # Do we have any outcome? 
raise m[1] memuse = m[0][0] if type(m[0]) is list else m[0] - setattr(pyfuncitem, 'mem_usage', memuse) - setattr(pyfuncitem, 'monitor_results', True) + setattr(pyfuncitem, "mem_usage", memuse) + setattr(pyfuncitem, "monitor_results", True) + if not PYTEST_MONITORING_ENABLED: wrapped_function() else: @@ -165,7 +242,7 @@ def prof(): def pytest_make_parametrize_id(config, val, argname): if config.option.mtr_want_explicit_ids: - return '{}={}'.format(argname, val) + return f"{argname}={val}" @pytest.hookimpl(hookwrapper=True) @@ -174,29 +251,50 @@ def pytest_sessionstart(session): Instantiate a monitor session to save collected metrics. We yield at the end to let pytest pursue the execution. """ - if session.config.option.mtr_force_component and session.config.option.mtr_component_prefix: - raise pytest.UsageError('Invalid usage: --force-component and --component-prefix are incompatible options!') - if session.config.option.mtr_no_db and not session.config.option.mtr_remote and not session.config.option.mtr_none: - warnings.warn('pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring.') + if ( + session.config.option.mtr_force_component + and session.config.option.mtr_component_prefix + ): + raise pytest.UsageError( + "Invalid usage: --force-component and --component-prefix are incompatible options!" + ) + if ( + session.config.option.mtr_no_db + and not session.config.option.mtr_remote + and not session.config.option.mtr_none + ): + warnings.warn( + "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring." + ) session.config.option.mtr_none = True - component = session.config.option.mtr_force_component or session.config.option.mtr_component_prefix + component = ( + session.config.option.mtr_force_component + or session.config.option.mtr_component_prefix + ) if session.config.option.mtr_component_prefix: - component += '.{user_component}' + component += ".{user_component}" if not component: - component = '{user_component}' - db = None if (session.config.option.mtr_none or session.config.option.mtr_no_db) else session.config.option.mtr_db_out - remote = None if session.config.option.mtr_none else session.config.option.mtr_remote - session.pytest_monitor = PyTestMonitorSession(db=db, remote=remote, - component=component, - scope=session.config.option.mtr_scope) + component = "{user_component}" + db = ( + None + if (session.config.option.mtr_none or session.config.option.mtr_no_db) + else session.config.option.mtr_db_out + ) + remote = ( + None if session.config.option.mtr_none else session.config.option.mtr_remote + ) + session.pytest_monitor = PyTestMonitorSession( + db=db, remote=remote, component=component, scope=session.config.option.mtr_scope + ) global PYTEST_MONITORING_ENABLED PYTEST_MONITORING_ENABLED = not session.config.option.mtr_none - session.pytest_monitor.compute_info(session.config.option.mtr_description, - session.config.option.mtr_tags) + session.pytest_monitor.compute_info( + session.config.option.mtr_description, session.config.option.mtr_tags + ) yield -@pytest.fixture(autouse=True, scope='module') +@pytest.fixture(autouse=True, scope="module") def prf_module_tracer(request): if not PYTEST_MONITORING_ENABLED: yield @@ -206,17 +304,23 @@ def prf_module_tracer(request): yield ptimes_b = request.session.pytest_monitor.process.cpu_times() t_z = time.time() - rss = request.session.pytest_monitor.process.memory_info().rss / 1024 ** 2 - component = getattr(request.module, 'pytest_monitor_component', '') + rss = 
request.session.pytest_monitor.process.memory_info().rss / 1024**2 + component = getattr(request.module, "pytest_monitor_component", "") item = request.node.name[:-3] - pypath = request.module.__name__[:-len(item)-1] - request.session.pytest_monitor.add_test_info(item, pypath, '', - request.node._nodeid, - 'module', - component, t_a, t_z - t_a, - ptimes_b.user - ptimes_a.user, - ptimes_b.system - ptimes_a.system, - rss) + pypath = request.module.__name__[: -len(item) - 1] + request.session.pytest_monitor.add_test_info( + item, + pypath, + "", + request.node._nodeid, + "module", + component, + t_a, + t_z - t_a, + ptimes_b.user - ptimes_a.user, + ptimes_b.system - ptimes_a.system, + rss, + ) @pytest.fixture(autouse=True) @@ -227,15 +331,21 @@ def prf_tracer(request): ptimes_a = request.session.pytest_monitor.process.cpu_times() yield ptimes_b = request.session.pytest_monitor.process.cpu_times() - if not request.node.monitor_skip_test and getattr(request.node, "monitor_results", False): + if not request.node.monitor_skip_test and getattr( + request.node, "monitor_results", False + ): item_name = request.node.originalname or request.node.name item_loc = getattr(request.node, PYTEST_MONITOR_ITEM_LOC_MEMBER)[0] - request.session.pytest_monitor.add_test_info(item_name, request.module.__name__, - request.node.name, item_loc, - 'function', - request.node.monitor_component, - request.node.test_effective_start_time, - request.node.test_run_duration, - ptimes_b.user - ptimes_a.user, - ptimes_b.system - ptimes_a.system, - request.node.mem_usage) + request.session.pytest_monitor.add_test_info( + item_name, + request.module.__name__, + request.node.name, + item_loc, + "function", + request.node.monitor_component, + request.node.test_effective_start_time, + request.node.test_run_duration, + ptimes_b.user - ptimes_a.user, + ptimes_b.system - ptimes_a.system, + request.node.mem_usage, + ) diff --git a/pytest_monitor/session.py b/pytest_monitor/session.py index c838d90..0b001f8 100644 --- a/pytest_monitor/session.py +++ b/pytest_monitor/session.py @@ -1,27 +1,31 @@ -from http import HTTPStatus - import datetime import hashlib import json -import memory_profiler import os +import warnings +from http import HTTPStatus + +import memory_profiler import psutil import requests -import warnings from pytest_monitor.handler import DBHandler -from pytest_monitor.sys_utils import ExecutionContext, determine_scm_revision, collect_ci_info +from pytest_monitor.sys_utils import ( + ExecutionContext, + collect_ci_info, + determine_scm_revision, +) -class PyTestMonitorSession(object): - def __init__(self, db=None, remote=None, component='', scope=None, tracing=True): +class PyTestMonitorSession: + def __init__(self, db=None, remote=None, component="", scope=None, tracing=True): self.__db = None if db: self.__db = DBHandler(db) self.__monitor_enabled = tracing self.__remote = remote self.__component = component - self.__session = '' + self.__session = "" self.__scope = scope or [] self.__eid = (None, None) self.__mem_usage_base = None @@ -46,15 +50,17 @@ def process(self): def get_env_id(self, env): db, remote = None, None if self.__db: - row = self.__db.query('SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?', (env.hash(),)) + row = self.__db.query( + "SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?", (env.hash(),) + ) db = row[0] if row else None if self.__remote: - r = requests.get('{}/contexts/{}'.format(self.__remote, env.hash())) + r = requests.get(f"{self.__remote}/contexts/{env.hash()}") remote = None if 
r.status_code == HTTPStatus.OK:
                 remote = json.loads(r.text)
-                if remote['contexts']:
-                    remote = remote['contexts'][0]['h']
+                if remote["contexts"]:
+                    remote = remote["contexts"][0]["h"]
                 else:
                     remote = None
         return db, remote
@@ -70,14 +76,14 @@ def compute_info(self, description, tags):
         # From description + tags to JSON format
         d = collect_ci_info()
         if description:
-            d['description'] = description
+            d["description"] = description
         for tag in tags:
             if type(tag) is str:
-                _tag_info = tag.split('=', 1)
+                _tag_info = tag.split("=", 1)
                 d[_tag_info[0]] = _tag_info[1]
             else:
                 for sub_tag in tag:
-                    _tag_info = sub_tag.split('=', 1)
+                    _tag_info = sub_tag.split("=", 1)
                     d[_tag_info[0]] = _tag_info[1]
         description = json.dumps(d)
         # Now get memory usage base and create the database
@@ -86,14 +92,18 @@
         if self.__db:
             self.__db.insert_session(self.__session, run_date, scm, description)
         if self.__remote:
-            r = requests.post('{}/sessions/'.format(self.__remote),
-                              json=dict(session_h=self.__session,
-                                        run_date=run_date,
-                                        scm_ref=scm,
-                                        description=json.loads(description)))
+            r = requests.post(
+                f"{self.__remote}/sessions/",
+                json=dict(
+                    session_h=self.__session,
+                    run_date=run_date,
+                    scm_ref=scm,
+                    description=json.loads(description),
+                ),
+            )
             if r.status_code != HTTPStatus.CREATED:
-                self.__remote = ''
-                msg = "Cannot insert session in remote monitor server ({})! Deactivating...')".format(r.status_code)
+                self.__remote = ""
+                msg = f"Cannot insert session in remote monitor server ({r.status_code})! Deactivating..."
                 warnings.warn(msg)
 
     def set_environment_info(self, env):
@@ -101,56 +111,91 @@
         db_id, remote_id = self.__eid
         if self.__db and db_id is None:
             self.__db.insert_execution_context(env)
-            db_id = self.__db.query('select ENV_H from EXECUTION_CONTEXTS where ENV_H = ?', (env.hash(),))[0]
+            db_id = self.__db.query(
+                "select ENV_H from EXECUTION_CONTEXTS where ENV_H = ?", (env.hash(),)
+            )[0]
         if self.__remote and remote_id is None:
             # We must postpone that to be run at the end of the pytest session.
-            r = requests.post('{}/contexts/'.format(self.__remote), json=env.to_dict())
+            r = requests.post(f"{self.__remote}/contexts/", json=env.to_dict())
             if r.status_code != HTTPStatus.CREATED:
-                warnings.warn('Cannot insert execution context in remote server (rc={}! Deactivating...'.format(r.status_code))
-                self.__remote = ''
+                warnings.warn(
+                    f"Cannot insert execution context in remote server (rc={r.status_code})! Deactivating..."
+ ) + self.__remote = "" else: - remote_id = json.loads(r.text)['h'] + remote_id = json.loads(r.text)["h"] self.__eid = db_id, remote_id def prepare(self): def dummy(): return True - memuse = memory_profiler.memory_usage((dummy,), max_iterations=1, max_usage=True) + memuse = memory_profiler.memory_usage( + (dummy,), max_iterations=1, max_usage=True + ) self.__mem_usage_base = memuse[0] if type(memuse) is list else memuse - def add_test_info(self, item, item_path, item_variant, item_loc, kind, component, - item_start_time, total_time, user_time, kernel_time, mem_usage): + def add_test_info( + self, + item, + item_path, + item_variant, + item_loc, + kind, + component, + item_start_time, + total_time, + user_time, + kernel_time, + mem_usage, + ): if kind not in self.__scope: return mem_usage = float(mem_usage) - self.__mem_usage_base cpu_usage = (user_time + kernel_time) / total_time item_start_time = datetime.datetime.fromtimestamp(item_start_time).isoformat() final_component = self.__component.format(user_component=component) - if final_component.endswith('.'): + if final_component.endswith("."): final_component = final_component[:-1] - item_variant = item_variant.replace('-', ', ') # No choice + item_variant = item_variant.replace("-", ", ") # No choice if self.__db and self.db_env_id is not None: - self.__db.insert_metric(self.__session, self.db_env_id, item_start_time, item, - item_path, item_variant, item_loc, kind, final_component, total_time, user_time, - kernel_time, cpu_usage, mem_usage) + self.__db.insert_metric( + self.__session, + self.db_env_id, + item_start_time, + item, + item_path, + item_variant, + item_loc, + kind, + final_component, + total_time, + user_time, + kernel_time, + cpu_usage, + mem_usage, + ) if self.__remote and self.remote_env_id is not None: - r = requests.post('{}/metrics/'.format(self.__remote), - json=dict(session_h=self.__session, - context_h=self.remote_env_id, - item_start_time=item_start_time, - item_path=item_path, - item=item, - item_variant=item_variant, - item_fs_loc=item_loc, - kind=kind, - component=final_component, - total_time=total_time, - user_time=user_time, - kernel_time=kernel_time, - cpu_usage=cpu_usage, - mem_usage=mem_usage)) + r = requests.post( + f"{self.__remote}/metrics/", + json=dict( + session_h=self.__session, + context_h=self.remote_env_id, + item_start_time=item_start_time, + item_path=item_path, + item=item, + item_variant=item_variant, + item_fs_loc=item_loc, + kind=kind, + component=final_component, + total_time=total_time, + user_time=user_time, + kernel_time=kernel_time, + cpu_usage=cpu_usage, + mem_usage=mem_usage, + ), + ) if r.status_code != HTTPStatus.CREATED: - self.__remote = '' - msg = "Cannot insert values in remote monitor server ({})! Deactivating...')".format(r.status_code) + self.__remote = "" + msg = f"Cannot insert values in remote monitor server ({r.status_code})! 
Deactivating...')" warnings.warn(msg) diff --git a/pytest_monitor/sys_utils.py b/pytest_monitor/sys_utils.py index 432f7f3..a389ea7 100644 --- a/pytest_monitor/sys_utils.py +++ b/pytest_monitor/sys_utils.py @@ -2,65 +2,90 @@ import multiprocessing import os import platform -import psutil import socket import subprocess import sys import warnings +import psutil + def collect_ci_info(): d = dict() # Test for jenkins if "BUILD_NUMBER" in os.environ: if "BRANCH_NAME" in os.environ or "JOB_NAME" in os.environ: - br = os.environ["BRANCH_NAME"] if "BRANCH_NAME" in os.environ else os.environ["JOB_NAME"] - d = dict(pipeline_branch=br, pipeline_build_no=os.environ["BUILD_NUMBER"], __ci__='jenkinsci') + br = ( + os.environ["BRANCH_NAME"] + if "BRANCH_NAME" in os.environ + else os.environ["JOB_NAME"] + ) + d = dict( + pipeline_branch=br, + pipeline_build_no=os.environ["BUILD_NUMBER"], + __ci__="jenkinsci", + ) # Test for CircleCI if "CIRCLE_JOB" in os.environ and "CIRCLE_BUILD_NUM" in os.environ: - d = dict(pipeline_branch=os.environ["CIRCLE_JOB"], pipeline_build_no=os.environ["CIRCLE_BUILD_NUM"], - __ci__='circleci') + d = dict( + pipeline_branch=os.environ["CIRCLE_JOB"], + pipeline_build_no=os.environ["CIRCLE_BUILD_NUM"], + __ci__="circleci", + ) # Test for TravisCI if "TRAVIS_BUILD_NUMBER" in os.environ and "TRAVIS_BUILD_ID" in os.environ: - d = dict(pipeline_branch=os.environ["TRAVIS_BUILD_ID"], pipeline_build_no=os.environ["TRAVIS_BUILD_NUMBER"], - __ci__='travisci') + d = dict( + pipeline_branch=os.environ["TRAVIS_BUILD_ID"], + pipeline_build_no=os.environ["TRAVIS_BUILD_NUMBER"], + __ci__="travisci", + ) # Test for DroneCI if "DRONE_REPO_BRANCH" in os.environ and "DRONE_BUILD_NUMBER" in os.environ: - d = dict(pipeline_branch=os.environ["DRONE_REPO_BRANCH"], pipeline_build_no=os.environ["DRONE_BUILD_NUMBER"], - __ci__='droneci') + d = dict( + pipeline_branch=os.environ["DRONE_REPO_BRANCH"], + pipeline_build_no=os.environ["DRONE_BUILD_NUMBER"], + __ci__="droneci", + ) # Test for Gitlab CI if "CI_JOB_NAME" in os.environ and "CI_PIPELINE_ID" in os.environ: - d = dict(pipeline_branch=os.environ["CI_JOB_NAME"], pipeline_build_no=os.environ["CI_PIPELINE_ID"], - __ci__='gitlabci') + d = dict( + pipeline_branch=os.environ["CI_JOB_NAME"], + pipeline_build_no=os.environ["CI_PIPELINE_ID"], + __ci__="gitlabci", + ) return d def determine_scm_revision(): - for scm, cmd in ( - ('git', r'git rev-parse HEAD'), - ('p4', r'p4 changes -m1 \#have')): - p = subprocess.Popen(cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE) + for scm, cmd in (("git", r"git rev-parse HEAD"), ("p4", r"p4 changes -m1 \#have")): + p = subprocess.Popen( + cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE + ) p_out, _ = p.communicate() if p.returncode == 0: - scm_ref = p_out.decode(errors='ignore').split('\n')[0] - if scm == 'p4': + scm_ref = p_out.decode(errors="ignore").split("\n", maxsplit=1)[0] + if scm == "p4": scm_ref = scm_ref.split()[1] return scm_ref - return '' + return "" def _get_cpu_string(): if platform.system().lower() == "darwin": - old_path = os.environ['PATH'] - os.environ['PATH'] = old_path + ':' + '/usr/sbin' - ret = subprocess.check_output('sysctl -n machdep.cpu.brand_string', shell=True).decode().strip() - os.environ['PATH'] = old_path + old_path = os.environ["PATH"] + os.environ["PATH"] = old_path + ":" + "/usr/sbin" + ret = ( + subprocess.check_output("sysctl -n machdep.cpu.brand_string", shell=True) + .decode() + .strip() + ) + os.environ["PATH"] = old_path return ret - elif 
platform.system().lower() == 'linux': - with open('/proc/cpuinfo', 'r', encoding='utf-8') as f: - lines = [i for i in f if i.startswith('model name')] + if platform.system().lower() == "linux": + with open("/proc/cpuinfo", "r", encoding="utf-8") as f: + lines = [i for i in f if i.startswith("model name")] if lines: - return lines[0].split(':')[1].strip() + return lines[0].split(":")[1].strip() return platform.processor() @@ -68,41 +93,49 @@ class ExecutionContext: def __init__(self): self.__cpu_count = multiprocessing.cpu_count() self.__cpu_vendor = _get_cpu_string() - if int(os.environ.get('PYTEST_MONITOR_FORCE_CPU_FREQ', '0')): + if int(os.environ.get("PYTEST_MONITOR_FORCE_CPU_FREQ", "0")): self._read_cpu_freq_from_env() else: try: self.__cpu_freq_base = psutil.cpu_freq().current except (AttributeError, NotImplementedError, FileNotFoundError): - warnings.warn("Unable to fetch CPU frequency. Trying to read it from environment..") + warnings.warn( + "Unable to fetch CPU frequency. Trying to read it from environment.." + ) self._read_cpu_freq_from_env() self.__proc_typ = platform.processor() self.__tot_mem = int(psutil.virtual_memory().total / 1024**2) self.__fqdn = socket.getfqdn() self.__machine = platform.machine() self.__arch = platform.architecture()[0] - self.__system = '{} - {}'.format(platform.system(), platform.release()) + self.__system = f"{platform.system()} - {platform.release()}" self.__py_ver = sys.version def _read_cpu_freq_from_env(self): try: - self.__cpu_freq_base = float(os.environ.get('PYTEST_MONITOR_CPU_FREQ', '0.')) + self.__cpu_freq_base = float( + os.environ.get("PYTEST_MONITOR_CPU_FREQ", "0.") + ) except (ValueError, TypeError): - warnings.warn("Wrong type/value while reading cpu frequency from environment. Forcing to 0.0.") + warnings.warn( + "Wrong type/value while reading cpu frequency from environment. Forcing to 0.0." 
+ ) self.__cpu_freq_base = 0.0 def to_dict(self): - return dict(cpu_count=self.cpu_count, - cpu_frequency=self.cpu_frequency, - cpu_type=self.cpu_type, - cpu_vendor=self.cpu_vendor, - ram_total=self.ram_total, - machine_node=self.fqdn, - machine_type=self.machine, - machine_arch=self.architecture, - system_info=self.system_info, - python_info=self.python_info, - h=self.hash()) + return dict( + cpu_count=self.cpu_count, + cpu_frequency=self.cpu_frequency, + cpu_type=self.cpu_type, + cpu_vendor=self.cpu_vendor, + ram_total=self.ram_total, + machine_node=self.fqdn, + machine_type=self.machine, + machine_arch=self.architecture, + system_info=self.system_info, + python_info=self.python_info, + h=self.hash(), + ) @property def cpu_count(self): diff --git a/setup.py b/setup.py index 5ea8935..29158bd 100644 --- a/setup.py +++ b/setup.py @@ -3,64 +3,73 @@ import pathlib import re -from setuptools import setup, find_packages + +from setuptools import find_packages, setup def read_version(): p = pathlib.Path(__file__) - p = p.parent / 'pytest_monitor' / '__init__.py' - with p.open('r') as f: + p = p.parent / "pytest_monitor" / "__init__.py" + with p.open("r") as f: for line in f: - if line.startswith('__version__'): - line = line.split('=')[1].strip() + if line.startswith("__version__"): + line = line.split("=")[1].strip() match = re.match(r"^['\"](\d+\.\d+\.\d+\w*)['\"]", line) if match: return match.group(1) - raise ValueError('Unable to compute version') + raise ValueError("Unable to compute version") def read(fname): file_path = pathlib.Path(__file__).parent / fname - with file_path.open('r', encoding='utf-8') as f: + with file_path.open("r", encoding="utf-8") as f: return f.read() setup( - name='pytest-monitor', + name="pytest-monitor", version=read_version(), - author='Jean-Sébastien Dieu', - author_email='jean-sebastien.dieu@cfm.fr', - maintainer='Jean-Sébastien Dieu', - maintainer_email='jean-sebastien.dieu@cfm.fr', - license='MIT', - project_urls=dict(Source='https://github.com/CFMTech/pytest-monitor', - Tracker='https://github.com/CFMTech/pytest-monitor/issues'), - url='https://pytest-monitor.readthedocs.io/', - description='Pytest plugin for analyzing resource usage.', - long_description=read('README.rst'), - packages=find_packages('.', exclude=('tests', 'example', 'docs')), - python_requires='>=3.5', - install_requires=['pytest', 'requests', 'psutil>=5.1.0', 'memory_profiler>=0.58', 'wheel'], + author="Jean-Sébastien Dieu", + author_email="jean-sebastien.dieu@cfm.fr", + maintainer="Jean-Sébastien Dieu", + maintainer_email="jean-sebastien.dieu@cfm.fr", + license="MIT", + project_urls=dict( + Source="https://github.com/CFMTech/pytest-monitor", + Tracker="https://github.com/CFMTech/pytest-monitor/issues", + ), + url="https://pytest-monitor.readthedocs.io/", + description="Pytest plugin for analyzing resource usage.", + long_description=read("README.rst"), + packages=find_packages(".", exclude=("tests", "example", "docs")), + python_requires=">=3.5", + install_requires=[ + "pytest", + "requests", + "psutil>=5.1.0", + "memory_profiler>=0.58", + "wheel", + ], options={"bdist_wheel": {"universal": False}}, classifiers=[ - 'Development Status :: 5 - Production/Stable', - 'Framework :: Pytest', - 'Intended Audience :: Developers', - 'Topic :: Software Development :: Testing', - 'Programming Language :: Python', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming 
Language :: Python :: 3.8', - 'Programming Language :: Python :: Implementation :: CPython', - 'Programming Language :: Python :: Implementation :: PyPy', - 'Operating System :: OS Independent', - 'License :: OSI Approved :: MIT License', + "Development Status :: 5 - Production/Stable", + "Framework :: Pytest", + "Intended Audience :: Developers", + "Topic :: Software Development :: Testing", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", + "Operating System :: OS Independent", + "License :: OSI Approved :: MIT License", ], entry_points={ - 'pytest11': [ - 'monitor = pytest_monitor.pytest_monitor', + "pytest11": [ + "monitor = pytest_monitor.pytest_monitor", ], }, ) diff --git a/tests/conftest.py b/tests/conftest.py index a60b5be..c6481d5 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1 +1 @@ -pytest_plugins = ['pytester'] +pytest_plugins = ["pytester"] diff --git a/tests/test_monitor.py b/tests/test_monitor.py index 4b59635..f5ff765 100644 --- a/tests/test_monitor.py +++ b/tests/test_monitor.py @@ -1,14 +1,16 @@ # -*- coding: utf-8 -*- import json import pathlib -import pytest import sqlite3 +import pytest + def test_monitor_basic_test(testdir): """Make sure that pytest-monitor does the job without impacting user tests.""" # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import time @@ -17,15 +19,16 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""") +""" + ) # run pytest with the following cmd args - result = testdir.runpytest('-vv', '--tag', 'version=12.3.5') + result = testdir.runpytest("-vv", "--tag", "version=12.3.5") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_ok PASSED*']) + result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the test suite @@ -33,19 +36,22 @@ def test_ok(): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert 1 == len(cursor.fetchall()) # current test cursor = db.cursor() - tags = json.loads(cursor.execute('SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;').fetchone()[0]) - assert 'description' not in tags - assert 'version' in tags - assert tags['version'] == "12.3.5" + tags = json.loads( + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0] + ) + assert "description" not in tags + assert "version" in tags + assert tags["version"] == "12.3.5" def test_monitor_basic_test_description(testdir): """Make sure that pytest-monitor does the job without impacting user tests.""" # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import time @@ -54,15 +60,18 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""") +""" + ) # run pytest with the following cmd args - result = testdir.runpytest('-vv', '--description', '"Test"', '--tag', 'version=12.3.5') + result = testdir.runpytest( + "-vv", "--description", '"Test"', "--tag", "version=12.3.5" + ) # 
fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_ok PASSED*']) + result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the test suite @@ -70,21 +79,23 @@ def test_ok(): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert 1 == len(cursor.fetchall()) # current test cursor = db.cursor() - tags = json.loads(cursor.execute('SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;').fetchone()[0]) - assert 'description' in tags - assert tags['description'] == '"Test"' - assert 'version' in tags - assert tags['version'] == "12.3.5" + tags = json.loads( + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0] + ) + assert "description" in tags + assert tags["description"] == '"Test"' + assert "version" in tags + assert tags["version"] == "12.3.5" def test_monitor_pytest_skip_marker(testdir): """Make sure that pytest-monitor does the job without impacting user tests.""" - # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest import time @@ -92,15 +103,16 @@ def test_monitor_pytest_skip_marker(testdir): def test_skipped(): assert True -""") +""" + ) # run pytest with the following cmd args - result = testdir.runpytest('-v') + result = testdir.runpytest("-v") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_skipped SKIPPED*']) + result.stdout.fnmatch_lines(["*::test_skipped SKIPPED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -108,15 +120,15 @@ def test_skipped(): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert not len(cursor.fetchall()) def test_monitor_pytest_skip_marker_on_fixture(testdir): """Make sure that pytest-monitor does the job without impacting user tests.""" - # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest import time @@ -127,15 +139,16 @@ def a_fixture(): def test_skipped(a_fixture): assert True -""") +""" + ) # run pytest with the following cmd args - result = testdir.runpytest('-v') + result = testdir.runpytest("-v") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_skipped SKIPPED*']) + result.stdout.fnmatch_lines(["*::test_skipped SKIPPED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -143,14 +156,15 @@ def test_skipped(a_fixture): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert not len(cursor.fetchall()) def test_bad_markers(testdir): """Make sure that pytest-monitor warns about unknown markers.""" # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest import time @@ -161,16 +175,18 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 - """) + """ + ) # run 
pytest with the following cmd args - result = testdir.runpytest('-v') + result = testdir.runpytest("-v") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_ok PASSED*', - '*Nothing known about marker monitor_bad_marker*']) + result.stdout.fnmatch_lines( + ["*::test_ok PASSED*", "*Nothing known about marker monitor_bad_marker*"] + ) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -178,14 +194,15 @@ def test_ok(): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert 1 == len(cursor.fetchall()) # current test def test_monitor_skip_module(testdir): """Make sure that pytest-monitor correctly understand the monitor_skip_test marker.""" # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest import time @@ -198,16 +215,21 @@ def test_ok_not_monitored(): def test_another_function_ok_not_monitored(): assert True -""") +""" + ) # run pytest with the following cmd args - result = testdir.runpytest('-v') + result = testdir.runpytest("-v") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_ok_not_monitored PASSED*', - '*::test_another_function_ok_not_monitored PASSED*']) - - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + result.stdout.fnmatch_lines( + [ + "*::test_ok_not_monitored PASSED*", + "*::test_another_function_ok_not_monitored PASSED*", + ] + ) + + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -215,14 +237,15 @@ def test_another_function_ok_not_monitored(): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert not len(cursor.fetchall()) # Nothing ran def test_monitor_skip_test(testdir): """Make sure that pytest-monitor correctly understand the monitor_skip_test marker.""" # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest import time @@ -233,15 +256,16 @@ def test_not_monitored(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""") +""" + ) # run pytest with the following cmd args - result = testdir.runpytest('-v') + result = testdir.runpytest("-v") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_not_monitored PASSED*']) + result.stdout.fnmatch_lines(["*::test_not_monitored PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -249,15 +273,15 @@ def test_not_monitored(): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert not len(cursor.fetchall()) # nothing monitored def test_monitor_skip_test_if(testdir): """Make sure that pytest-monitor correctly understand the monitor_skip_test_if marker.""" - # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest import time @@ -268,23 +292,25 @@ def test_not_monitored(): x = ['a' * i for i in range(100)] assert len(x) == 100 - + 
@pytest.mark.monitor_skip_test_if(False) def test_monitored(): time.sleep(0.1) x = ['a' *i for i in range(100)] assert len(x) == 100 -""") +""" + ) # run pytest with the following cmd args - result = testdir.runpytest('-v') + result = testdir.runpytest("-v") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_not_monitored PASSED*', - '*::test_monitored PASSED*']) + result.stdout.fnmatch_lines( + ["*::test_not_monitored PASSED*", "*::test_monitored PASSED*"] + ) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -292,15 +318,15 @@ def test_monitored(): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert 1 == len(cursor.fetchall()) def test_monitor_no_db(testdir): """Make sure that pytest-monitor correctly understand the monitor_skip_test_if marker.""" - # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import pytest import time @@ -316,18 +342,18 @@ def test_that(): x = ['a' *i for i in range(100)] assert len(x) == 100 -""") +""" + ) - wrn = 'pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring.' + wrn = "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring." with pytest.warns(UserWarning, match=wrn): # run pytest with the following cmd args - result = testdir.runpytest('--no-db', '-v') + result = testdir.runpytest("--no-db", "-v") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_it PASSED*', - '*::test_that PASSED*']) + result.stdout.fnmatch_lines(["*::test_it PASSED*", "*::test_that PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert not pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -336,21 +362,22 @@ def test_that(): def test_monitor_basic_output(testdir): """Make sure that pytest-monitor does not repeat captured output (issue #26).""" - # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ def test_it(): print('Hello World') - """) + """ + ) - wrn = 'pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring.' + wrn = "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring." 
with pytest.warns(UserWarning, match=wrn): # run pytest with the following cmd args - result = testdir.runpytest('--no-db', '-s', '-vv') + result = testdir.runpytest("--no-db", "-s", "-vv") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_it Hello World*']) - assert "Hello World" != result.stdout.get_lines_after('*Hello World')[0] + result.stdout.fnmatch_lines(["*::test_it Hello World*"]) + assert "Hello World" != result.stdout.get_lines_after("*Hello World")[0] # make sure that that we get a '0' exit code for the testsuite result.assert_outcomes(passed=1) @@ -358,32 +385,33 @@ def test_it(): def test_monitor_with_doctest(testdir): """Make sure that pytest-monitor does not fail to run doctest.""" - # create a temporary pytest test module - testdir.makepyfile(''' + testdir.makepyfile( + ''' def run(a, b): """ >>> run(3, 30) 33 """ return a + b - ''') + ''' + ) # run pytest with the following cmd args - result = testdir.runpytest('--doctest-modules', '-vv') + result = testdir.runpytest("--doctest-modules", "-vv") # make sure that that we get a '0' exit code for the testsuite result.assert_outcomes(passed=1) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert not len(cursor.fetchall()) pymon_path.unlink() - result = testdir.runpytest('--doctest-modules', '--no-monitor', '-vv') + result = testdir.runpytest("--doctest-modules", "--no-monitor", "-vv") # make sure that that we get a '0' exit code for the testsuite result.assert_outcomes(passed=1) diff --git a/tests/test_monitor_component.py b/tests/test_monitor_component.py index bc3aa64..e176109 100644 --- a/tests/test_monitor_component.py +++ b/tests/test_monitor_component.py @@ -1,31 +1,31 @@ # -*- coding: utf-8 -*- -import os import pathlib import sqlite3 def test_monitor_no_component(testdir): """Make sure that pytest-monitor has an empty component by default""" - # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import time - - + + def test_ok(): time.sleep(0.5) x = ['a' * i for i in range(100)] assert len(x) == 100 -""") +""" + ) # run pytest with the following cmd args - result = testdir.runpytest('-v') + result = testdir.runpytest("-v") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_ok PASSED*']) + result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -33,34 +33,37 @@ def test_ok(): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert 1 == len(cursor.fetchall()) - cursor.execute("SELECT ITEM FROM TEST_METRICS WHERE COMPONENT != '' AND ITEM LIKE '%test_ok';") + cursor.execute( + "SELECT ITEM FROM TEST_METRICS WHERE COMPONENT != '' AND ITEM LIKE '%test_ok';" + ) assert not len(cursor.fetchall()) def test_monitor_force_component(testdir): """Make sure that pytest-monitor forces the component name if required""" - # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import time - - + + def test_force_ok(): time.sleep(0.5) x = ['a' * i 
for i in range(100)] assert len(x) == 100 -""") +""" + ) # run pytest with the following cmd args - result = testdir.runpytest('--force-component', 'my_component', '-v') + result = testdir.runpytest("--force-component", "my_component", "-v") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_force_ok PASSED*']) + result.stdout.fnmatch_lines(["*::test_force_ok PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -68,36 +71,39 @@ def test_force_ok(): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert 1 == len(cursor.fetchall()) - cursor.execute("SELECT ITEM FROM TEST_METRICS" - " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_force_ok%';") + cursor.execute( + "SELECT ITEM FROM TEST_METRICS" + " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_force_ok%';" + ) assert 1 == len(cursor.fetchall()) def test_monitor_prefix_component(testdir): - """Make sure that pytest-monitor has a prefixed component """ - + """Make sure that pytest-monitor has a prefixed component""" # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import time - + pytest_monitor_component = 'internal' - + def test_prefix_ok(): time.sleep(0.5) x = ['a' * i for i in range(100)] assert len(x) == 100 -""") +""" + ) # run pytest with the following cmd args - result = testdir.runpytest('--component-prefix', 'my_component', '-v') + result = testdir.runpytest("--component-prefix", "my_component", "-v") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_prefix_ok PASSED*']) + result.stdout.fnmatch_lines(["*::test_prefix_ok PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -105,21 +111,25 @@ def test_prefix_ok(): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert 1 == len(cursor.fetchall()) - cursor.execute("SELECT ITEM FROM TEST_METRICS" - " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';") + cursor.execute( + "SELECT ITEM FROM TEST_METRICS" + " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';" + ) assert not len(cursor.fetchall()) - cursor.execute("SELECT ITEM FROM TEST_METRICS" - " WHERE COMPONENT == 'my_component.internal' AND ITEM LIKE '%test_prefix_ok%';") + cursor.execute( + "SELECT ITEM FROM TEST_METRICS" + " WHERE COMPONENT == 'my_component.internal' AND ITEM LIKE '%test_prefix_ok%';" + ) assert 1 == len(cursor.fetchall()) def test_monitor_prefix_without_component(testdir): - """Make sure that pytest-monitor has a prefixed component """ - + """Make sure that pytest-monitor has a prefixed component""" # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import time @@ -128,15 +138,16 @@ def test_prefix_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""") +""" + ) # run pytest with the following cmd args - result = testdir.runpytest('--component-prefix', 'my_component', '-v') + result = testdir.runpytest("--component-prefix", "my_component", "-v") # fnmatch_lines does an 
assertion internally - result.stdout.fnmatch_lines(['*::test_prefix_ok PASSED*']) + result.stdout.fnmatch_lines(["*::test_prefix_ok PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -144,8 +155,10 @@ def test_prefix_ok(): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert 1 == len(cursor.fetchall()) - cursor.execute("SELECT ITEM FROM TEST_METRICS" - " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';") + cursor.execute( + "SELECT ITEM FROM TEST_METRICS" + " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';" + ) assert 1 == len(cursor.fetchall()) diff --git a/tests/test_monitor_context.py b/tests/test_monitor_context.py index 60a3d49..943d289 100644 --- a/tests/test_monitor_context.py +++ b/tests/test_monitor_context.py @@ -1,11 +1,11 @@ -import mock import os import pathlib -import pytest import sqlite3 +import mock +import pytest -CPU_FREQ_PATH = 'pytest_monitor.sys_utils.psutil.cpu_freq' +CPU_FREQ_PATH = "pytest_monitor.sys_utils.psutil.cpu_freq" TEST_CONTENT = """ import time @@ -17,14 +17,15 @@ def test_ok(): assert len(x) == 100 """ + def get_nb_metrics_with_cpu_freq(path): - pymon_path = pathlib.Path(str(path)) / '.pymon' + pymon_path = pathlib.Path(str(path)) / ".pymon" db = sqlite3.connect(path.as_posix()) cursor = db.cursor() - cursor.execute('SELECT ITEM FROM TEST_METRICS;') + cursor.execute("SELECT ITEM FROM TEST_METRICS;") nb_metrics = len(cursor.fetchall()) cursor = db.cursor() - cursor.execute('SELECT CPU_FREQUENCY_MHZ FROM EXECUTION_CONTEXTS;') + cursor.execute("SELECT CPU_FREQUENCY_MHZ FROM EXECUTION_CONTEXTS;") rows = cursor.fetchall() assert 1 == len(rows) cpu_freq = rows[0][0] @@ -33,21 +34,20 @@ def get_nb_metrics_with_cpu_freq(path): def test_force_cpu_freq_set_0_use_psutil(testdir): """Test that when force mode is set, we do not call psutil to fetch CPU's frequency""" - # create a temporary pytest test module testdir.makepyfile(TEST_CONTENT) with mock.patch(CPU_FREQ_PATH, return_value=1500) as cpu_freq_mock: - os.environ['PYTEST_MONITOR_FORCE_CPU_FREQ'] = '0' - os.environ['PYTEST_MONITOR_CPU_FREQ'] = '3000' + os.environ["PYTEST_MONITOR_FORCE_CPU_FREQ"] = "0" + os.environ["PYTEST_MONITOR_CPU_FREQ"] = "3000" # run pytest with the following cmd args - result = testdir.runpytest('-vv') - del os.environ['PYTEST_MONITOR_FORCE_CPU_FREQ'] - del os.environ['PYTEST_MONITOR_CPU_FREQ'] + result = testdir.runpytest("-vv") + del os.environ["PYTEST_MONITOR_FORCE_CPU_FREQ"] + del os.environ["PYTEST_MONITOR_CPU_FREQ"] cpu_freq_mock.assert_called() # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_ok PASSED*']) + result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) # make sure that that we get a '0' exit code for the test suite result.assert_outcomes(passed=1) @@ -56,80 +56,84 @@ def test_force_cpu_freq_set_0_use_psutil(testdir): def test_force_cpu_freq(testdir): """Test that when force mode is set, we do not call psutil to fetch CPU's frequency""" - # create a temporary pytest test module testdir.makepyfile(TEST_CONTENT) with mock.patch(CPU_FREQ_PATH, return_value=1500) as cpu_freq_mock: - os.environ['PYTEST_MONITOR_FORCE_CPU_FREQ'] = '1' - os.environ['PYTEST_MONITOR_CPU_FREQ'] = '3000' + os.environ["PYTEST_MONITOR_FORCE_CPU_FREQ"] = "1" + 
os.environ["PYTEST_MONITOR_CPU_FREQ"] = "3000" # run pytest with the following cmd args - result = testdir.runpytest('-vv') - del os.environ['PYTEST_MONITOR_FORCE_CPU_FREQ'] - del os.environ['PYTEST_MONITOR_CPU_FREQ'] + result = testdir.runpytest("-vv") + del os.environ["PYTEST_MONITOR_FORCE_CPU_FREQ"] + del os.environ["PYTEST_MONITOR_CPU_FREQ"] cpu_freq_mock.assert_not_called() # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_ok PASSED*']) + result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) # make sure that that we get a '0' exit code for the test suite result.assert_outcomes(passed=1) assert 1, 3000 == get_nb_metrics_with_cpu_freq(testdir) - -@pytest.mark.parametrize('effect', [AttributeError, NotImplementedError, FileNotFoundError]) -def test_when_cpu_freq_cannot_fetch_frequency_set_freq_by_using_fallback(effect, testdir): + +@pytest.mark.parametrize( + "effect", [AttributeError, NotImplementedError, FileNotFoundError] +) +def test_when_cpu_freq_cannot_fetch_frequency_set_freq_by_using_fallback( + effect, testdir +): """Make sure that pytest-monitor fallback takes value of CPU FREQ from special env var""" # create a temporary pytest test module testdir.makepyfile(TEST_CONTENT) with mock.patch(CPU_FREQ_PATH, side_effect=effect) as cpu_freq_mock: - os.environ['PYTEST_MONITOR_CPU_FREQ'] = '3000' + os.environ["PYTEST_MONITOR_CPU_FREQ"] = "3000" # run pytest with the following cmd args - result = testdir.runpytest('-vv') - del os.environ['PYTEST_MONITOR_CPU_FREQ'] + result = testdir.runpytest("-vv") + del os.environ["PYTEST_MONITOR_CPU_FREQ"] cpu_freq_mock.assert_called() # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_ok PASSED*']) + result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) # make sure that that we get a '0' exit code for the test suite result.assert_outcomes(passed=1) assert 1, 3000 == get_nb_metrics_with_cpu_freq(testdir) - -@pytest.mark.parametrize('effect', [AttributeError, NotImplementedError, FileNotFoundError]) + +@pytest.mark.parametrize( + "effect", [AttributeError, NotImplementedError, FileNotFoundError] +) def test_when_cpu_freq_cannot_fetch_frequency_set_freq_to_0(effect, testdir): - """Make sure that pytest-monitor's fallback mechanism is efficient enough. 
""" + """Make sure that pytest-monitor's fallback mechanism is efficient enough.""" # create a temporary pytest test module testdir.makepyfile(TEST_CONTENT) with mock.patch(CPU_FREQ_PATH, side_effect=effect) as cpu_freq_mock: # run pytest with the following cmd args - result = testdir.runpytest('-vv') + result = testdir.runpytest("-vv") cpu_freq_mock.assert_called() # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_ok PASSED*']) + result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) # make sure that that we get a '0' exit code for the test suite result.assert_outcomes(passed=1) assert 1, 0 == get_nb_metrics_with_cpu_freq(testdir) - -@mock.patch('pytest_monitor.sys_utils.psutil.cpu_freq', return_value=None) + +@mock.patch("pytest_monitor.sys_utils.psutil.cpu_freq", return_value=None) def test_when_cpu_freq_cannot_fetch_frequency(cpu_freq_mock, testdir): """Make sure that pytest-monitor does the job when we have issue in collecing context resources""" # create a temporary pytest test module testdir.makepyfile(TEST_CONTENT) # run pytest with the following cmd args - result = testdir.runpytest('-vv') + result = testdir.runpytest("-vv") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_ok PASSED*']) + result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) # make sure that that we get a '0' exit code for the test suite result.assert_outcomes(passed=1) assert 1, 0 == get_nb_metrics_with_cpu_freq(testdir) - diff --git a/tests/test_monitor_in_ci.py b/tests/test_monitor_in_ci.py index 4de3c3e..f3fe9e8 100644 --- a/tests/test_monitor_in_ci.py +++ b/tests/test_monitor_in_ci.py @@ -7,7 +7,8 @@ def test_monitor_no_ci(testdir): """Make sure that pytest-monitor does not insert CI information.""" # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import time @@ -16,22 +17,34 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""") +""" + ) envs = dict() - for k in ["CIRCLE_BUILD_NUM", "CIRCLE_JOB", "DRONE_REPO_BRANCH", "DRONE_BUILD_NUMBER", "BUILD_NUMBER", "JOB_NUMBER", - "JOB_NAME", "TRAVIS_BUILD_ID", "TRAVIS_BUILD_NUMBER", "CI_PIPELINE_ID", "CI_JOB_NAME"]: + for k in [ + "CIRCLE_BUILD_NUM", + "CIRCLE_JOB", + "DRONE_REPO_BRANCH", + "DRONE_BUILD_NUMBER", + "BUILD_NUMBER", + "JOB_NUMBER", + "JOB_NAME", + "TRAVIS_BUILD_ID", + "TRAVIS_BUILD_NUMBER", + "CI_PIPELINE_ID", + "CI_JOB_NAME", + ]: if k in os.environ: envs[k] = os.environ[k] del os.environ[k] # run pytest with the following cmd args - result = testdir.runpytest('-v') + result = testdir.runpytest("-v") # fnmatch_lines does an assertion internally - result.stdout.fnmatch_lines(['*::test_ok PASSED*']) + result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -39,10 +52,10 @@ def test_ok(): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;') + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;") desc = cursor.fetchall() assert 1 == len(desc) # current test - assert desc[0][0] == '{}' + assert desc[0][0] == "{}" for k in envs.keys(): os.environ[k] = envs[k] @@ -50,7 +63,8 @@ def test_ok(): def test_monitor_jenkins_ci(testdir): """Make sure that pytest-monitor correctly handle Jenkins CI information.""" # create a temporary pytest test module - 
testdir.makepyfile(""" + testdir.makepyfile( + """ import time @@ -59,13 +73,14 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""") +""" + ) def check_that(the_result, match): # fnmatch_lines does an assertion internally - the_result.stdout.fnmatch_lines(['*::test_ok PASSED*']) + the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -73,25 +88,43 @@ def check_that(the_result, match): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;') + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;") desc = cursor.fetchall() assert 1 == len(desc) # current test assert desc[0][0] == match pymon_path.unlink() - run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "jenkinsci"}' - + run_description = ( + '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "jenkinsci"}' + ) + envs = dict() - for k in ["CIRCLE_BUILD_NUM", "CIRCLE_JOB", "DRONE_REPO_BRANCH", "DRONE_BUILD_NUMBER", "BUILD_NUMBER", "JOB_NUMBER", - "JOB_NAME", "TRAVIS_BUILD_ID", "TRAVIS_BUILD_NUMBER", "CI_PIPELINE_ID", "CI_JOB_NAME"]: + for k in [ + "CIRCLE_BUILD_NUM", + "CIRCLE_JOB", + "DRONE_REPO_BRANCH", + "DRONE_BUILD_NUMBER", + "BUILD_NUMBER", + "JOB_NUMBER", + "JOB_NAME", + "TRAVIS_BUILD_ID", + "TRAVIS_BUILD_NUMBER", + "CI_PIPELINE_ID", + "CI_JOB_NAME", + ]: if k in os.environ: envs[k] = os.environ[k] del os.environ[k] - - for env, exp in [(dict(BUILD_NUMBER="123"), '{}'), - (dict(BUILD_NUMBER="123", JOB_NAME="test"), run_description), - (dict(BUILD_NUMBER="123", BRANCH_NAME="test"), run_description), - (dict(BUILD_NUMBER="123", JOB_NAME="test-123", BRANCH_NAME="test"), run_description)]: + + for env, exp in [ + (dict(BUILD_NUMBER="123"), "{}"), + (dict(BUILD_NUMBER="123", JOB_NAME="test"), run_description), + (dict(BUILD_NUMBER="123", BRANCH_NAME="test"), run_description), + ( + dict(BUILD_NUMBER="123", JOB_NAME="test-123", BRANCH_NAME="test"), + run_description, + ), + ]: if "BUILD_NUMBER" in os.environ: del os.environ["BUILD_NUMBER"] if "JOB_NUMBER" in os.environ: @@ -102,7 +135,7 @@ def check_that(the_result, match): for k, v in env.items(): os.environ[k] = v - result = testdir.runpytest('-v') + result = testdir.runpytest("-v") check_that(result, match=exp) if "BUILD_NUMBER" in os.environ: @@ -116,7 +149,8 @@ def check_that(the_result, match): def test_monitor_gitlab_ci(testdir): """Make sure that pytest-monitor correctly handle Gitlab CI information.""" # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import time @@ -125,13 +159,14 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""") +""" + ) def check_that(the_result, match): # fnmatch_lines does an assertion internally - the_result.stdout.fnmatch_lines(['*::test_ok PASSED*']) + the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -139,23 +174,38 @@ def check_that(the_result, match): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;') + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;") desc = 
cursor.fetchall() assert 1 == len(desc) # current test assert desc[0][0] == match pymon_path.unlink() - run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "gitlabci"}' + run_description = ( + '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "gitlabci"}' + ) envs = dict() - for k in ["CIRCLE_BUILD_NUM", "CIRCLE_JOB", "DRONE_REPO_BRANCH", "DRONE_BUILD_NUMBER", "BUILD_NUMBER", "JOB_NUMBER", - "JOB_NAME", "TRAVIS_BUILD_ID", "TRAVIS_BUILD_NUMBER", "CI_PIPELINE_ID", "CI_JOB_NAME"]: + for k in [ + "CIRCLE_BUILD_NUM", + "CIRCLE_JOB", + "DRONE_REPO_BRANCH", + "DRONE_BUILD_NUMBER", + "BUILD_NUMBER", + "JOB_NUMBER", + "JOB_NAME", + "TRAVIS_BUILD_ID", + "TRAVIS_BUILD_NUMBER", + "CI_PIPELINE_ID", + "CI_JOB_NAME", + ]: if k in os.environ: envs[k] = os.environ[k] del os.environ[k] - for env, exp in [(dict(CI_PIPELINE_ID="123"), '{}'), - (dict(CI_PIPELINE_ID="123", CI_JOB_NAME="test"), run_description), - (dict(CI_JOB_NAME="123"), '{}')]: + for env, exp in [ + (dict(CI_PIPELINE_ID="123"), "{}"), + (dict(CI_PIPELINE_ID="123", CI_JOB_NAME="test"), run_description), + (dict(CI_JOB_NAME="123"), "{}"), + ]: if "CI_PIPELINE_ID" in os.environ: del os.environ["CI_PIPELINE_ID"] if "CI_JOB_NAME" in os.environ: @@ -164,7 +214,7 @@ def check_that(the_result, match): for k, v in env.items(): os.environ[k] = v - result = testdir.runpytest('-v') + result = testdir.runpytest("-v") check_that(result, match=exp) if "CI_PIPELINE_ID" in os.environ: @@ -176,7 +226,8 @@ def check_that(the_result, match): def test_monitor_travis_ci(testdir): """Make sure that pytest-monitor correctly handle Travis CI information.""" # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import time @@ -185,13 +236,14 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""") +""" + ) def check_that(the_result, match): # fnmatch_lines does an assertion internally - the_result.stdout.fnmatch_lines(['*::test_ok PASSED*']) + the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -199,23 +251,38 @@ def check_that(the_result, match): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;') + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;") desc = cursor.fetchall() assert 1 == len(desc) # current test assert desc[0][0] == match pymon_path.unlink() - run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "travisci"}' + run_description = ( + '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "travisci"}' + ) envs = dict() - for k in ["CIRCLE_BUILD_NUM", "CIRCLE_JOB", "DRONE_REPO_BRANCH", "DRONE_BUILD_NUMBER", "BUILD_NUMBER", "JOB_NUMBER", - "JOB_NAME", "TRAVIS_BUILD_ID", "TRAVIS_BUILD_NUMBER", "CI_PIPELINE_ID", "CI_JOB_NAME"]: + for k in [ + "CIRCLE_BUILD_NUM", + "CIRCLE_JOB", + "DRONE_REPO_BRANCH", + "DRONE_BUILD_NUMBER", + "BUILD_NUMBER", + "JOB_NUMBER", + "JOB_NAME", + "TRAVIS_BUILD_ID", + "TRAVIS_BUILD_NUMBER", + "CI_PIPELINE_ID", + "CI_JOB_NAME", + ]: if k in os.environ: envs[k] = os.environ[k] del os.environ[k] - for env, exp in [(dict(TRAVIS_BUILD_NUMBER="123"), '{}'), - (dict(TRAVIS_BUILD_NUMBER="123", TRAVIS_BUILD_ID="test"), run_description), - (dict(TRAVIS_BUILD_ID="test-123"), '{}')]: + for env, exp in [ + 
(dict(TRAVIS_BUILD_NUMBER="123"), "{}"), + (dict(TRAVIS_BUILD_NUMBER="123", TRAVIS_BUILD_ID="test"), run_description), + (dict(TRAVIS_BUILD_ID="test-123"), "{}"), + ]: if "TRAVIS_BUILD_NUMBER" in os.environ: del os.environ["TRAVIS_BUILD_NUMBER"] if "TRAVIS_BUILD_ID" in os.environ: @@ -224,7 +291,7 @@ def check_that(the_result, match): for k, v in env.items(): os.environ[k] = v - result = testdir.runpytest('-v') + result = testdir.runpytest("-v") check_that(result, match=exp) if "TRAVIS_BUILD_NUMBER" in os.environ: @@ -236,7 +303,8 @@ def check_that(the_result, match): def test_monitor_circle_ci(testdir): """Make sure that pytest-monitor correctly handle Circle CI information.""" # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import time @@ -245,13 +313,14 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""") +""" + ) def check_that(the_result, match): # fnmatch_lines does an assertion internally - the_result.stdout.fnmatch_lines(['*::test_ok PASSED*']) + the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -259,23 +328,38 @@ def check_that(the_result, match): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;') + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;") desc = cursor.fetchall() assert 1 == len(desc) # current test assert desc[0][0] == match pymon_path.unlink() - run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "circleci"}' + run_description = ( + '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "circleci"}' + ) envs = dict() - for k in ["CIRCLE_BUILD_NUM", "CIRCLE_JOB", "DRONE_REPO_BRANCH", "DRONE_BUILD_NUMBER", "BUILD_NUMBER", "JOB_NUMBER", - "JOB_NAME", "TRAVIS_BUILD_ID", "TRAVIS_BUILD_NUMBER", "CI_PIPELINE_ID", "CI_JOB_NAME"]: + for k in [ + "CIRCLE_BUILD_NUM", + "CIRCLE_JOB", + "DRONE_REPO_BRANCH", + "DRONE_BUILD_NUMBER", + "BUILD_NUMBER", + "JOB_NUMBER", + "JOB_NAME", + "TRAVIS_BUILD_ID", + "TRAVIS_BUILD_NUMBER", + "CI_PIPELINE_ID", + "CI_JOB_NAME", + ]: if k in os.environ: envs[k] = os.environ[k] del os.environ[k] - for env, exp in [(dict(CIRCLE_BUILD_NUM="123"), '{}'), - (dict(CIRCLE_BUILD_NUM="123", CIRCLE_JOB="test"), run_description), - (dict(CIRCLE_JOB="test"), '{}')]: + for env, exp in [ + (dict(CIRCLE_BUILD_NUM="123"), "{}"), + (dict(CIRCLE_BUILD_NUM="123", CIRCLE_JOB="test"), run_description), + (dict(CIRCLE_JOB="test"), "{}"), + ]: if "CIRCLE_BUILD_NUM" in os.environ: del os.environ["CIRCLE_BUILD_NUM"] if "CIRCLE_JOB" in os.environ: @@ -284,7 +368,7 @@ def check_that(the_result, match): for k, v in env.items(): os.environ[k] = v - result = testdir.runpytest('-v') + result = testdir.runpytest("-v") check_that(result, match=exp) if "CIRCLE_BUILD_NUM" in os.environ: @@ -296,7 +380,8 @@ def check_that(the_result, match): def test_monitor_drone_ci(testdir): """Make sure that pytest-monitor correctly handle Jenkins CI information.""" # create a temporary pytest test module - testdir.makepyfile(""" + testdir.makepyfile( + """ import time @@ -305,13 +390,14 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""") +""" + ) def check_that(the_result, match): # fnmatch_lines does an assertion internally - the_result.stdout.fnmatch_lines(['*::test_ok 
PASSED*']) + the_result.stdout.fnmatch_lines(["*::test_ok PASSED*"]) - pymon_path = pathlib.Path(str(testdir)) / '.pymon' + pymon_path = pathlib.Path(str(testdir)) / ".pymon" assert pymon_path.exists() # make sure that that we get a '0' exit code for the testsuite @@ -319,23 +405,38 @@ def check_that(the_result, match): db = sqlite3.connect(str(pymon_path)) cursor = db.cursor() - cursor.execute('SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;') + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;") desc = cursor.fetchall() assert 1 == len(desc) # current test assert desc[0][0] == match pymon_path.unlink() - run_description = '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "droneci"}' + run_description = ( + '{"pipeline_branch": "test", "pipeline_build_no": "123", "__ci__": "droneci"}' + ) envs = dict() - for k in ["CIRCLE_BUILD_NUM", "CIRCLE_JOB", "DRONE_REPO_BRANCH", "DRONE_BUILD_NUMBER", "BUILD_NUMBER", "JOB_NUMBER", - "JOB_NAME", "TRAVIS_BUILD_ID", "TRAVIS_BUILD_NUMBER", "CI_PIPELINE_ID", "CI_JOB_NAME"]: + for k in [ + "CIRCLE_BUILD_NUM", + "CIRCLE_JOB", + "DRONE_REPO_BRANCH", + "DRONE_BUILD_NUMBER", + "BUILD_NUMBER", + "JOB_NUMBER", + "JOB_NAME", + "TRAVIS_BUILD_ID", + "TRAVIS_BUILD_NUMBER", + "CI_PIPELINE_ID", + "CI_JOB_NAME", + ]: if k in os.environ: envs[k] = os.environ[k] del os.environ[k] - for env, exp in [(dict(DRONE_BUILD_NUMBER="123"), '{}'), - (dict(DRONE_BUILD_NUMBER="123", DRONE_REPO_BRANCH="test"), run_description), - (dict(DRONE_REPO_BRANCH="test"), "{}")]: + for env, exp in [ + (dict(DRONE_BUILD_NUMBER="123"), "{}"), + (dict(DRONE_BUILD_NUMBER="123", DRONE_REPO_BRANCH="test"), run_description), + (dict(DRONE_REPO_BRANCH="test"), "{}"), + ]: if "DRONE_REPO_BRANCH" in os.environ: del os.environ["DRONE_REPO_BRANCH"] if "DRONE_BUILD_NUMBER" in os.environ: @@ -344,7 +445,7 @@ def check_that(the_result, match): for k, v in env.items(): os.environ[k] = v - result = testdir.runpytest('-v') + result = testdir.runpytest("-v") check_that(result, match=exp) if "DRONE_REPO_BRANCH" in os.environ: From 17a6b0cc300030d854dc534fa5f0042e635ca263 Mon Sep 17 00:00:00 2001 From: Mark Mayo Date: Tue, 20 Dec 2022 11:33:24 +1100 Subject: [PATCH 2/5] undid fstring to allow for 3.5 code backdated fstring to format strings --- pytest_monitor/pytest_monitor.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pytest_monitor/pytest_monitor.py b/pytest_monitor/pytest_monitor.py index 105f568..9243333 100644 --- a/pytest_monitor/pytest_monitor.py +++ b/pytest_monitor/pytest_monitor.py @@ -143,9 +143,7 @@ def pytest_runtest_setup(item): mark_to_del = [] for set_marker in item_markers.keys(): if set_marker not in PYTEST_MONITOR_VALID_MARKERS: - warnings.warn( - f"Nothing known about marker {set_marker}. Marker will be dropped." - ) + warnings.warn("Nothing known about marker {}. 
Marker will be dropped.".format(set_marker)) mark_to_del.append(set_marker) if set_marker in PYTEST_MONITOR_DEPRECATED_MARKERS: warnings.warn( From 5a194914f61df2aad5113c0821730e185a40e235 Mon Sep 17 00:00:00 2001 From: Mark Mayo Date: Thu, 6 Jul 2023 15:08:20 +1200 Subject: [PATCH 3/5] flake8 C812 C812 missing trailing comma --- examples/pkg1/test_mod1.py | 2 +- pytest_monitor/handler.py | 6 +++--- pytest_monitor/pytest_monitor.py | 18 ++++++++--------- pytest_monitor/session.py | 8 ++++---- pytest_monitor/sys_utils.py | 8 ++++---- tests/test_monitor.py | 34 ++++++++++++++++---------------- tests/test_monitor_component.py | 18 ++++++++--------- tests/test_monitor_context.py | 6 +++--- tests/test_monitor_in_ci.py | 12 +++++------ 9 files changed, 56 insertions(+), 56 deletions(-) diff --git a/examples/pkg1/test_mod1.py b/examples/pkg1/test_mod1.py index 79307d7..ed3de7d 100644 --- a/examples/pkg1/test_mod1.py +++ b/examples/pkg1/test_mod1.py @@ -13,7 +13,7 @@ def test_sleep2(): @pytest.mark.parametrize( - ("range_max", "other"), [(10, "10"), (100, "100"), (1000, "1000"), (10000, "10000")] + ("range_max", "other"), [(10, "10"), (100, "100"), (1000, "1000"), (10000, "10000")], ) def test_heavy(range_max, other): assert len(["a" * i for i in range(range_max)]) == range_max diff --git a/pytest_monitor/handler.py b/pytest_monitor/handler.py index cbbdecd..e9c1bbb 100644 --- a/pytest_monitor/handler.py +++ b/pytest_monitor/handler.py @@ -91,7 +91,7 @@ def prepare(self): RUN_DATE varchar(64), -- Date of test run SCM_ID varchar(128), -- SCM change id RUN_DESCRIPTION json -);""" +);""", ) cursor.execute( """ @@ -112,7 +112,7 @@ def prepare(self): MEM_USAGE float, -- Max resident memory used. FOREIGN KEY (ENV_H) REFERENCES EXECUTION_CONTEXTS(ENV_H), FOREIGN KEY (SESSION_H) REFERENCES TEST_SESSIONS(SESSION_H) -);""" +);""", ) cursor.execute( """ @@ -129,6 +129,6 @@ def prepare(self): SYSTEM_INFO varchar(256), PYTHON_INFO varchar(512) ); -""" +""", ) self.__cnx.commit() diff --git a/pytest_monitor/pytest_monitor.py b/pytest_monitor/pytest_monitor.py index 9243333..1e4d923 100644 --- a/pytest_monitor/pytest_monitor.py +++ b/pytest_monitor/pytest_monitor.py @@ -47,7 +47,7 @@ def pytest_addoption(parser): " This requires the parameters to be stringifiable.", ) group.addoption( - "--no-monitor", action="store_true", dest="mtr_none", help="Disable all traces" + "--no-monitor", action="store_true", dest="mtr_none", help="Disable all traces", ) group.addoption( "--remote-server", @@ -106,7 +106,7 @@ def pytest_addoption(parser): def pytest_configure(config): config.addinivalue_line( - "markers", "monitor_skip_test: mark test to be executed but not monitored." + "markers", "monitor_skip_test: mark test to be executed but not monitored.", ) config.addinivalue_line( "markers", @@ -147,7 +147,7 @@ def pytest_runtest_setup(item): mark_to_del.append(set_marker) if set_marker in PYTEST_MONITOR_DEPRECATED_MARKERS: warnings.warn( - f"Marker {set_marker} is deprecated. Consider upgrading your tests" + f"Marker {set_marker} is deprecated. Consider upgrading your tests", ) for marker in mark_to_del: @@ -221,7 +221,7 @@ def wrapped_function(): def prof(): m = memory_profiler.memory_usage( - (wrapped_function, ()), max_iterations=1, max_usage=True, retval=True + (wrapped_function, ()), max_iterations=1, max_usage=True, retval=True, ) if isinstance(m[1], BaseException): # Do we have any outcome? 
raise m[1] @@ -254,7 +254,7 @@ def pytest_sessionstart(session): and session.config.option.mtr_component_prefix ): raise pytest.UsageError( - "Invalid usage: --force-component and --component-prefix are incompatible options!" + "Invalid usage: --force-component and --component-prefix are incompatible options!", ) if ( session.config.option.mtr_no_db @@ -262,7 +262,7 @@ def pytest_sessionstart(session): and not session.config.option.mtr_none ): warnings.warn( - "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring." + "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring.", ) session.config.option.mtr_none = True component = ( @@ -282,12 +282,12 @@ def pytest_sessionstart(session): None if session.config.option.mtr_none else session.config.option.mtr_remote ) session.pytest_monitor = PyTestMonitorSession( - db=db, remote=remote, component=component, scope=session.config.option.mtr_scope + db=db, remote=remote, component=component, scope=session.config.option.mtr_scope, ) global PYTEST_MONITORING_ENABLED PYTEST_MONITORING_ENABLED = not session.config.option.mtr_none session.pytest_monitor.compute_info( - session.config.option.mtr_description, session.config.option.mtr_tags + session.config.option.mtr_description, session.config.option.mtr_tags, ) yield @@ -330,7 +330,7 @@ def prf_tracer(request): yield ptimes_b = request.session.pytest_monitor.process.cpu_times() if not request.node.monitor_skip_test and getattr( - request.node, "monitor_results", False + request.node, "monitor_results", False, ): item_name = request.node.originalname or request.node.name item_loc = getattr(request.node, PYTEST_MONITOR_ITEM_LOC_MEMBER)[0] diff --git a/pytest_monitor/session.py b/pytest_monitor/session.py index 0b001f8..3958d49 100644 --- a/pytest_monitor/session.py +++ b/pytest_monitor/session.py @@ -51,7 +51,7 @@ def get_env_id(self, env): db, remote = None, None if self.__db: row = self.__db.query( - "SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?", (env.hash(),) + "SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?", (env.hash(),), ) db = row[0] if row else None if self.__remote: @@ -112,14 +112,14 @@ def set_environment_info(self, env): if self.__db and db_id is None: self.__db.insert_execution_context(env) db_id = self.__db.query( - "select ENV_H from EXECUTION_CONTEXTS where ENV_H = ?", (env.hash(),) + "select ENV_H from EXECUTION_CONTEXTS where ENV_H = ?", (env.hash(),), )[0] if self.__remote and remote_id is None: # We must postpone that to be run at the end of the pytest session. r = requests.post(f"{self.__remote}/contexts/", json=env.to_dict()) if r.status_code != HTTPStatus.CREATED: warnings.warn( - f"Cannot insert execution context in remote server (rc={r.status_code}! Deactivating..." + f"Cannot insert execution context in remote server (rc={r.status_code}! 
Deactivating...", ) self.__remote = "" else: @@ -131,7 +131,7 @@ def dummy(): return True memuse = memory_profiler.memory_usage( - (dummy,), max_iterations=1, max_usage=True + (dummy,), max_iterations=1, max_usage=True, ) self.__mem_usage_base = memuse[0] if type(memuse) is list else memuse diff --git a/pytest_monitor/sys_utils.py b/pytest_monitor/sys_utils.py index a389ea7..a0463ac 100644 --- a/pytest_monitor/sys_utils.py +++ b/pytest_monitor/sys_utils.py @@ -59,7 +59,7 @@ def collect_ci_info(): def determine_scm_revision(): for scm, cmd in (("git", r"git rev-parse HEAD"), ("p4", r"p4 changes -m1 \#have")): p = subprocess.Popen( - cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE + cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, ) p_out, _ = p.communicate() if p.returncode == 0: @@ -100,7 +100,7 @@ def __init__(self): self.__cpu_freq_base = psutil.cpu_freq().current except (AttributeError, NotImplementedError, FileNotFoundError): warnings.warn( - "Unable to fetch CPU frequency. Trying to read it from environment.." + "Unable to fetch CPU frequency. Trying to read it from environment..", ) self._read_cpu_freq_from_env() self.__proc_typ = platform.processor() @@ -114,11 +114,11 @@ def __init__(self): def _read_cpu_freq_from_env(self): try: self.__cpu_freq_base = float( - os.environ.get("PYTEST_MONITOR_CPU_FREQ", "0.") + os.environ.get("PYTEST_MONITOR_CPU_FREQ", "0."), ) except (ValueError, TypeError): warnings.warn( - "Wrong type/value while reading cpu frequency from environment. Forcing to 0.0." + "Wrong type/value while reading cpu frequency from environment. Forcing to 0.0.", ) self.__cpu_freq_base = 0.0 diff --git a/tests/test_monitor.py b/tests/test_monitor.py index f5ff765..8ca11d9 100644 --- a/tests/test_monitor.py +++ b/tests/test_monitor.py @@ -19,7 +19,7 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args @@ -40,7 +40,7 @@ def test_ok(): assert 1 == len(cursor.fetchall()) # current test cursor = db.cursor() tags = json.loads( - cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0] + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0], ) assert "description" not in tags assert "version" in tags @@ -60,12 +60,12 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args result = testdir.runpytest( - "-vv", "--description", '"Test"', "--tag", "version=12.3.5" + "-vv", "--description", '"Test"', "--tag", "version=12.3.5", ) # fnmatch_lines does an assertion internally @@ -83,7 +83,7 @@ def test_ok(): assert 1 == len(cursor.fetchall()) # current test cursor = db.cursor() tags = json.loads( - cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0] + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0], ) assert "description" in tags assert tags["description"] == '"Test"' @@ -103,7 +103,7 @@ def test_monitor_pytest_skip_marker(testdir): def test_skipped(): assert True -""" +""", ) # run pytest with the following cmd args @@ -139,7 +139,7 @@ def a_fixture(): def test_skipped(a_fixture): assert True -""" +""", ) # run pytest with the following cmd args @@ -175,7 +175,7 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 - """ + """, ) # run pytest with the following cmd args @@ -183,7 +183,7 @@ def test_ok(): # fnmatch_lines does an assertion internally result.stdout.fnmatch_lines( - ["*::test_ok PASSED*", 
"*Nothing known about marker monitor_bad_marker*"] + ["*::test_ok PASSED*", "*Nothing known about marker monitor_bad_marker*"], ) pymon_path = pathlib.Path(str(testdir)) / ".pymon" @@ -215,7 +215,7 @@ def test_ok_not_monitored(): def test_another_function_ok_not_monitored(): assert True -""" +""", ) # run pytest with the following cmd args @@ -226,7 +226,7 @@ def test_another_function_ok_not_monitored(): [ "*::test_ok_not_monitored PASSED*", "*::test_another_function_ok_not_monitored PASSED*", - ] + ], ) pymon_path = pathlib.Path(str(testdir)) / ".pymon" @@ -256,7 +256,7 @@ def test_not_monitored(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args @@ -299,7 +299,7 @@ def test_monitored(): x = ['a' *i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args @@ -307,7 +307,7 @@ def test_monitored(): # fnmatch_lines does an assertion internally result.stdout.fnmatch_lines( - ["*::test_not_monitored PASSED*", "*::test_monitored PASSED*"] + ["*::test_not_monitored PASSED*", "*::test_monitored PASSED*"], ) pymon_path = pathlib.Path(str(testdir)) / ".pymon" @@ -342,7 +342,7 @@ def test_that(): x = ['a' *i for i in range(100)] assert len(x) == 100 -""" +""", ) wrn = "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring." @@ -367,7 +367,7 @@ def test_monitor_basic_output(testdir): """ def test_it(): print('Hello World') - """ + """, ) wrn = "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring." @@ -394,7 +394,7 @@ def run(a, b): 33 """ return a + b - ''' + ''', ) # run pytest with the following cmd args diff --git a/tests/test_monitor_component.py b/tests/test_monitor_component.py index e176109..c872a35 100644 --- a/tests/test_monitor_component.py +++ b/tests/test_monitor_component.py @@ -16,7 +16,7 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args @@ -36,7 +36,7 @@ def test_ok(): cursor.execute("SELECT ITEM FROM TEST_METRICS;") assert 1 == len(cursor.fetchall()) cursor.execute( - "SELECT ITEM FROM TEST_METRICS WHERE COMPONENT != '' AND ITEM LIKE '%test_ok';" + "SELECT ITEM FROM TEST_METRICS WHERE COMPONENT != '' AND ITEM LIKE '%test_ok';", ) assert not len(cursor.fetchall()) @@ -54,7 +54,7 @@ def test_force_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args @@ -75,7 +75,7 @@ def test_force_ok(): assert 1 == len(cursor.fetchall()) cursor.execute( "SELECT ITEM FROM TEST_METRICS" - " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_force_ok%';" + " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_force_ok%';", ) assert 1 == len(cursor.fetchall()) @@ -94,7 +94,7 @@ def test_prefix_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args @@ -115,12 +115,12 @@ def test_prefix_ok(): assert 1 == len(cursor.fetchall()) cursor.execute( "SELECT ITEM FROM TEST_METRICS" - " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';" + " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';", ) assert not len(cursor.fetchall()) cursor.execute( "SELECT ITEM FROM TEST_METRICS" - " WHERE COMPONENT == 'my_component.internal' AND ITEM LIKE '%test_prefix_ok%';" + " WHERE COMPONENT == 'my_component.internal' AND ITEM LIKE '%test_prefix_ok%';", ) assert 1 == len(cursor.fetchall()) @@ -138,7 +138,7 @@ def test_prefix_ok(): x 
= ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args @@ -159,6 +159,6 @@ def test_prefix_ok(): assert 1 == len(cursor.fetchall()) cursor.execute( "SELECT ITEM FROM TEST_METRICS" - " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';" + " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';", ) assert 1 == len(cursor.fetchall()) diff --git a/tests/test_monitor_context.py b/tests/test_monitor_context.py index 943d289..c1bcd68 100644 --- a/tests/test_monitor_context.py +++ b/tests/test_monitor_context.py @@ -77,10 +77,10 @@ def test_force_cpu_freq(testdir): @pytest.mark.parametrize( - "effect", [AttributeError, NotImplementedError, FileNotFoundError] + "effect", [AttributeError, NotImplementedError, FileNotFoundError], ) def test_when_cpu_freq_cannot_fetch_frequency_set_freq_by_using_fallback( - effect, testdir + effect, testdir, ): """Make sure that pytest-monitor fallback takes value of CPU FREQ from special env var""" # create a temporary pytest test module @@ -102,7 +102,7 @@ def test_when_cpu_freq_cannot_fetch_frequency_set_freq_by_using_fallback( @pytest.mark.parametrize( - "effect", [AttributeError, NotImplementedError, FileNotFoundError] + "effect", [AttributeError, NotImplementedError, FileNotFoundError], ) def test_when_cpu_freq_cannot_fetch_frequency_set_freq_to_0(effect, testdir): """Make sure that pytest-monitor's fallback mechanism is efficient enough.""" diff --git a/tests/test_monitor_in_ci.py b/tests/test_monitor_in_ci.py index f3fe9e8..cb04652 100644 --- a/tests/test_monitor_in_ci.py +++ b/tests/test_monitor_in_ci.py @@ -17,7 +17,7 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) envs = dict() @@ -73,7 +73,7 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) def check_that(the_result, match): @@ -159,7 +159,7 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) def check_that(the_result, match): @@ -236,7 +236,7 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) def check_that(the_result, match): @@ -313,7 +313,7 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) def check_that(the_result, match): @@ -390,7 +390,7 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) def check_that(the_result, match): From 4c688ef1814983e8532d9ea85c19a25bf42313b6 Mon Sep 17 00:00:00 2001 From: Mark Mayo Date: Thu, 6 Jul 2023 15:12:54 +1200 Subject: [PATCH 4/5] f-strings and if statements merged --- pytest_monitor/pytest_monitor.py | 2 +- pytest_monitor/sys_utils.py | 25 ++++++++++++------------- 2 files changed, 13 insertions(+), 14 deletions(-) diff --git a/pytest_monitor/pytest_monitor.py b/pytest_monitor/pytest_monitor.py index 1e4d923..53e01b6 100644 --- a/pytest_monitor/pytest_monitor.py +++ b/pytest_monitor/pytest_monitor.py @@ -143,7 +143,7 @@ def pytest_runtest_setup(item): mark_to_del = [] for set_marker in item_markers.keys(): if set_marker not in PYTEST_MONITOR_VALID_MARKERS: - warnings.warn("Nothing known about marker {}. Marker will be dropped.".format(set_marker)) + warnings.warn(f"Nothing known about marker {set_marker}. 
Marker will be dropped.") mark_to_del.append(set_marker) if set_marker in PYTEST_MONITOR_DEPRECATED_MARKERS: warnings.warn( diff --git a/pytest_monitor/sys_utils.py b/pytest_monitor/sys_utils.py index a0463ac..cc7c8fc 100644 --- a/pytest_monitor/sys_utils.py +++ b/pytest_monitor/sys_utils.py @@ -13,18 +13,17 @@ def collect_ci_info(): d = dict() # Test for jenkins - if "BUILD_NUMBER" in os.environ: - if "BRANCH_NAME" in os.environ or "JOB_NAME" in os.environ: - br = ( - os.environ["BRANCH_NAME"] - if "BRANCH_NAME" in os.environ - else os.environ["JOB_NAME"] - ) - d = dict( - pipeline_branch=br, - pipeline_build_no=os.environ["BUILD_NUMBER"], - __ci__="jenkinsci", - ) + if "BUILD_NUMBER" in os.environ and ("BRANCH_NAME" in os.environ or "JOB_NAME" in os.environ): + br = ( + os.environ["BRANCH_NAME"] + if "BRANCH_NAME" in os.environ + else os.environ["JOB_NAME"] + ) + d = dict( + pipeline_branch=br, + pipeline_build_no=os.environ["BUILD_NUMBER"], + __ci__="jenkinsci", + ) # Test for CircleCI if "CIRCLE_JOB" in os.environ and "CIRCLE_BUILD_NUM" in os.environ: d = dict( @@ -73,7 +72,7 @@ def determine_scm_revision(): def _get_cpu_string(): if platform.system().lower() == "darwin": old_path = os.environ["PATH"] - os.environ["PATH"] = old_path + ":" + "/usr/sbin" + os.environ["PATH"] = f"{old_path}:/usr/sbin" ret = ( subprocess.check_output("sysctl -n machdep.cpu.brand_string", shell=True) .decode() From 265f2b9b5ea92f118ed9f0221a193573f99f4da3 Mon Sep 17 00:00:00 2001 From: Mark Mayo Date: Thu, 6 Jul 2023 15:23:26 +1200 Subject: [PATCH 5/5] updated trailing commas after merge --- examples/pkg1/test_mod1.py | 2 +- pytest_monitor/handler.py | 6 +++--- pytest_monitor/pytest_monitor.py | 18 ++++++++--------- pytest_monitor/session.py | 8 ++++---- pytest_monitor/sys_utils.py | 33 +++++++++++++++---------------- tests/test_monitor.py | 34 ++++++++++++++++---------------- tests/test_monitor_component.py | 18 ++++++++--------- tests/test_monitor_context.py | 6 +++--- tests/test_monitor_in_ci.py | 12 +++++------ 9 files changed, 68 insertions(+), 69 deletions(-) diff --git a/examples/pkg1/test_mod1.py b/examples/pkg1/test_mod1.py index 79307d7..ed3de7d 100644 --- a/examples/pkg1/test_mod1.py +++ b/examples/pkg1/test_mod1.py @@ -13,7 +13,7 @@ def test_sleep2(): @pytest.mark.parametrize( - ("range_max", "other"), [(10, "10"), (100, "100"), (1000, "1000"), (10000, "10000")] + ("range_max", "other"), [(10, "10"), (100, "100"), (1000, "1000"), (10000, "10000")], ) def test_heavy(range_max, other): assert len(["a" * i for i in range(range_max)]) == range_max diff --git a/pytest_monitor/handler.py b/pytest_monitor/handler.py index cbbdecd..e9c1bbb 100644 --- a/pytest_monitor/handler.py +++ b/pytest_monitor/handler.py @@ -91,7 +91,7 @@ def prepare(self): RUN_DATE varchar(64), -- Date of test run SCM_ID varchar(128), -- SCM change id RUN_DESCRIPTION json -);""" +);""", ) cursor.execute( """ @@ -112,7 +112,7 @@ def prepare(self): MEM_USAGE float, -- Max resident memory used. 
FOREIGN KEY (ENV_H) REFERENCES EXECUTION_CONTEXTS(ENV_H), FOREIGN KEY (SESSION_H) REFERENCES TEST_SESSIONS(SESSION_H) -);""" +);""", ) cursor.execute( """ @@ -129,6 +129,6 @@ def prepare(self): SYSTEM_INFO varchar(256), PYTHON_INFO varchar(512) ); -""" +""", ) self.__cnx.commit() diff --git a/pytest_monitor/pytest_monitor.py b/pytest_monitor/pytest_monitor.py index 9243333..1e4d923 100644 --- a/pytest_monitor/pytest_monitor.py +++ b/pytest_monitor/pytest_monitor.py @@ -47,7 +47,7 @@ def pytest_addoption(parser): " This requires the parameters to be stringifiable.", ) group.addoption( - "--no-monitor", action="store_true", dest="mtr_none", help="Disable all traces" + "--no-monitor", action="store_true", dest="mtr_none", help="Disable all traces", ) group.addoption( "--remote-server", @@ -106,7 +106,7 @@ def pytest_addoption(parser): def pytest_configure(config): config.addinivalue_line( - "markers", "monitor_skip_test: mark test to be executed but not monitored." + "markers", "monitor_skip_test: mark test to be executed but not monitored.", ) config.addinivalue_line( "markers", @@ -147,7 +147,7 @@ def pytest_runtest_setup(item): mark_to_del.append(set_marker) if set_marker in PYTEST_MONITOR_DEPRECATED_MARKERS: warnings.warn( - f"Marker {set_marker} is deprecated. Consider upgrading your tests" + f"Marker {set_marker} is deprecated. Consider upgrading your tests", ) for marker in mark_to_del: @@ -221,7 +221,7 @@ def wrapped_function(): def prof(): m = memory_profiler.memory_usage( - (wrapped_function, ()), max_iterations=1, max_usage=True, retval=True + (wrapped_function, ()), max_iterations=1, max_usage=True, retval=True, ) if isinstance(m[1], BaseException): # Do we have any outcome? raise m[1] @@ -254,7 +254,7 @@ def pytest_sessionstart(session): and session.config.option.mtr_component_prefix ): raise pytest.UsageError( - "Invalid usage: --force-component and --component-prefix are incompatible options!" + "Invalid usage: --force-component and --component-prefix are incompatible options!", ) if ( session.config.option.mtr_no_db @@ -262,7 +262,7 @@ def pytest_sessionstart(session): and not session.config.option.mtr_none ): warnings.warn( - "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring." + "pytest-monitor: No storage specified but monitoring is requested. 
Disabling monitoring.", ) session.config.option.mtr_none = True component = ( @@ -282,12 +282,12 @@ def pytest_sessionstart(session): None if session.config.option.mtr_none else session.config.option.mtr_remote ) session.pytest_monitor = PyTestMonitorSession( - db=db, remote=remote, component=component, scope=session.config.option.mtr_scope + db=db, remote=remote, component=component, scope=session.config.option.mtr_scope, ) global PYTEST_MONITORING_ENABLED PYTEST_MONITORING_ENABLED = not session.config.option.mtr_none session.pytest_monitor.compute_info( - session.config.option.mtr_description, session.config.option.mtr_tags + session.config.option.mtr_description, session.config.option.mtr_tags, ) yield @@ -330,7 +330,7 @@ def prf_tracer(request): yield ptimes_b = request.session.pytest_monitor.process.cpu_times() if not request.node.monitor_skip_test and getattr( - request.node, "monitor_results", False + request.node, "monitor_results", False, ): item_name = request.node.originalname or request.node.name item_loc = getattr(request.node, PYTEST_MONITOR_ITEM_LOC_MEMBER)[0] diff --git a/pytest_monitor/session.py b/pytest_monitor/session.py index 0b001f8..3958d49 100644 --- a/pytest_monitor/session.py +++ b/pytest_monitor/session.py @@ -51,7 +51,7 @@ def get_env_id(self, env): db, remote = None, None if self.__db: row = self.__db.query( - "SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?", (env.hash(),) + "SELECT ENV_H FROM EXECUTION_CONTEXTS WHERE ENV_H= ?", (env.hash(),), ) db = row[0] if row else None if self.__remote: @@ -112,14 +112,14 @@ def set_environment_info(self, env): if self.__db and db_id is None: self.__db.insert_execution_context(env) db_id = self.__db.query( - "select ENV_H from EXECUTION_CONTEXTS where ENV_H = ?", (env.hash(),) + "select ENV_H from EXECUTION_CONTEXTS where ENV_H = ?", (env.hash(),), )[0] if self.__remote and remote_id is None: # We must postpone that to be run at the end of the pytest session. r = requests.post(f"{self.__remote}/contexts/", json=env.to_dict()) if r.status_code != HTTPStatus.CREATED: warnings.warn( - f"Cannot insert execution context in remote server (rc={r.status_code}! Deactivating..." + f"Cannot insert execution context in remote server (rc={r.status_code}! 
Deactivating...", ) self.__remote = "" else: @@ -131,7 +131,7 @@ def dummy(): return True memuse = memory_profiler.memory_usage( - (dummy,), max_iterations=1, max_usage=True + (dummy,), max_iterations=1, max_usage=True, ) self.__mem_usage_base = memuse[0] if type(memuse) is list else memuse diff --git a/pytest_monitor/sys_utils.py b/pytest_monitor/sys_utils.py index a389ea7..cc7c8fc 100644 --- a/pytest_monitor/sys_utils.py +++ b/pytest_monitor/sys_utils.py @@ -13,18 +13,17 @@ def collect_ci_info(): d = dict() # Test for jenkins - if "BUILD_NUMBER" in os.environ: - if "BRANCH_NAME" in os.environ or "JOB_NAME" in os.environ: - br = ( - os.environ["BRANCH_NAME"] - if "BRANCH_NAME" in os.environ - else os.environ["JOB_NAME"] - ) - d = dict( - pipeline_branch=br, - pipeline_build_no=os.environ["BUILD_NUMBER"], - __ci__="jenkinsci", - ) + if "BUILD_NUMBER" in os.environ and ("BRANCH_NAME" in os.environ or "JOB_NAME" in os.environ): + br = ( + os.environ["BRANCH_NAME"] + if "BRANCH_NAME" in os.environ + else os.environ["JOB_NAME"] + ) + d = dict( + pipeline_branch=br, + pipeline_build_no=os.environ["BUILD_NUMBER"], + __ci__="jenkinsci", + ) # Test for CircleCI if "CIRCLE_JOB" in os.environ and "CIRCLE_BUILD_NUM" in os.environ: d = dict( @@ -59,7 +58,7 @@ def collect_ci_info(): def determine_scm_revision(): for scm, cmd in (("git", r"git rev-parse HEAD"), ("p4", r"p4 changes -m1 \#have")): p = subprocess.Popen( - cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE + cmd, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE, ) p_out, _ = p.communicate() if p.returncode == 0: @@ -73,7 +72,7 @@ def determine_scm_revision(): def _get_cpu_string(): if platform.system().lower() == "darwin": old_path = os.environ["PATH"] - os.environ["PATH"] = old_path + ":" + "/usr/sbin" + os.environ["PATH"] = f"{old_path}:/usr/sbin" ret = ( subprocess.check_output("sysctl -n machdep.cpu.brand_string", shell=True) .decode() @@ -100,7 +99,7 @@ def __init__(self): self.__cpu_freq_base = psutil.cpu_freq().current except (AttributeError, NotImplementedError, FileNotFoundError): warnings.warn( - "Unable to fetch CPU frequency. Trying to read it from environment.." + "Unable to fetch CPU frequency. Trying to read it from environment..", ) self._read_cpu_freq_from_env() self.__proc_typ = platform.processor() @@ -114,11 +113,11 @@ def __init__(self): def _read_cpu_freq_from_env(self): try: self.__cpu_freq_base = float( - os.environ.get("PYTEST_MONITOR_CPU_FREQ", "0.") + os.environ.get("PYTEST_MONITOR_CPU_FREQ", "0."), ) except (ValueError, TypeError): warnings.warn( - "Wrong type/value while reading cpu frequency from environment. Forcing to 0.0." + "Wrong type/value while reading cpu frequency from environment. 
Forcing to 0.0.", ) self.__cpu_freq_base = 0.0 diff --git a/tests/test_monitor.py b/tests/test_monitor.py index f5ff765..8ca11d9 100644 --- a/tests/test_monitor.py +++ b/tests/test_monitor.py @@ -19,7 +19,7 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args @@ -40,7 +40,7 @@ def test_ok(): assert 1 == len(cursor.fetchall()) # current test cursor = db.cursor() tags = json.loads( - cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0] + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0], ) assert "description" not in tags assert "version" in tags @@ -60,12 +60,12 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args result = testdir.runpytest( - "-vv", "--description", '"Test"', "--tag", "version=12.3.5" + "-vv", "--description", '"Test"', "--tag", "version=12.3.5", ) # fnmatch_lines does an assertion internally @@ -83,7 +83,7 @@ def test_ok(): assert 1 == len(cursor.fetchall()) # current test cursor = db.cursor() tags = json.loads( - cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0] + cursor.execute("SELECT RUN_DESCRIPTION FROM TEST_SESSIONS;").fetchone()[0], ) assert "description" in tags assert tags["description"] == '"Test"' @@ -103,7 +103,7 @@ def test_monitor_pytest_skip_marker(testdir): def test_skipped(): assert True -""" +""", ) # run pytest with the following cmd args @@ -139,7 +139,7 @@ def a_fixture(): def test_skipped(a_fixture): assert True -""" +""", ) # run pytest with the following cmd args @@ -175,7 +175,7 @@ def test_ok(): x = ['a' * i for i in range(100)] assert len(x) == 100 - """ + """, ) # run pytest with the following cmd args @@ -183,7 +183,7 @@ def test_ok(): # fnmatch_lines does an assertion internally result.stdout.fnmatch_lines( - ["*::test_ok PASSED*", "*Nothing known about marker monitor_bad_marker*"] + ["*::test_ok PASSED*", "*Nothing known about marker monitor_bad_marker*"], ) pymon_path = pathlib.Path(str(testdir)) / ".pymon" @@ -215,7 +215,7 @@ def test_ok_not_monitored(): def test_another_function_ok_not_monitored(): assert True -""" +""", ) # run pytest with the following cmd args @@ -226,7 +226,7 @@ def test_another_function_ok_not_monitored(): [ "*::test_ok_not_monitored PASSED*", "*::test_another_function_ok_not_monitored PASSED*", - ] + ], ) pymon_path = pathlib.Path(str(testdir)) / ".pymon" @@ -256,7 +256,7 @@ def test_not_monitored(): x = ['a' * i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args @@ -299,7 +299,7 @@ def test_monitored(): x = ['a' *i for i in range(100)] assert len(x) == 100 -""" +""", ) # run pytest with the following cmd args @@ -307,7 +307,7 @@ def test_monitored(): # fnmatch_lines does an assertion internally result.stdout.fnmatch_lines( - ["*::test_not_monitored PASSED*", "*::test_monitored PASSED*"] + ["*::test_not_monitored PASSED*", "*::test_monitored PASSED*"], ) pymon_path = pathlib.Path(str(testdir)) / ".pymon" @@ -342,7 +342,7 @@ def test_that(): x = ['a' *i for i in range(100)] assert len(x) == 100 -""" +""", ) wrn = "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring." @@ -367,7 +367,7 @@ def test_monitor_basic_output(testdir): """ def test_it(): print('Hello World') - """ + """, ) wrn = "pytest-monitor: No storage specified but monitoring is requested. Disabling monitoring." 
@@ -394,7 +394,7 @@ def run(a, b):
     33
     """
     return a + b
-'''
+''',
     )
     # run pytest with the following cmd args
diff --git a/tests/test_monitor_component.py b/tests/test_monitor_component.py
index e176109..c872a35 100644
--- a/tests/test_monitor_component.py
+++ b/tests/test_monitor_component.py
@@ -16,7 +16,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
-"""
+""",
     )
     # run pytest with the following cmd args
@@ -36,7 +36,7 @@ def test_ok():
     cursor.execute("SELECT ITEM FROM TEST_METRICS;")
     assert 1 == len(cursor.fetchall())
     cursor.execute(
-        "SELECT ITEM FROM TEST_METRICS WHERE COMPONENT != '' AND ITEM LIKE '%test_ok';"
+        "SELECT ITEM FROM TEST_METRICS WHERE COMPONENT != '' AND ITEM LIKE '%test_ok';",
     )
     assert not len(cursor.fetchall())
@@ -54,7 +54,7 @@ def test_force_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
-"""
+""",
     )
     # run pytest with the following cmd args
@@ -75,7 +75,7 @@ def test_force_ok():
     assert 1 == len(cursor.fetchall())
     cursor.execute(
         "SELECT ITEM FROM TEST_METRICS"
-        " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_force_ok%';"
+        " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_force_ok%';",
     )
     assert 1 == len(cursor.fetchall())
@@ -94,7 +94,7 @@ def test_prefix_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
-"""
+""",
     )
     # run pytest with the following cmd args
@@ -115,12 +115,12 @@ def test_prefix_ok():
     assert 1 == len(cursor.fetchall())
     cursor.execute(
         "SELECT ITEM FROM TEST_METRICS"
-        " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';"
+        " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';",
     )
     assert not len(cursor.fetchall())
     cursor.execute(
         "SELECT ITEM FROM TEST_METRICS"
-        " WHERE COMPONENT == 'my_component.internal' AND ITEM LIKE '%test_prefix_ok%';"
+        " WHERE COMPONENT == 'my_component.internal' AND ITEM LIKE '%test_prefix_ok%';",
     )
     assert 1 == len(cursor.fetchall())
@@ -138,7 +138,7 @@ def test_prefix_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
-"""
+""",
     )
     # run pytest with the following cmd args
@@ -159,6 +159,6 @@ def test_prefix_ok():
     assert 1 == len(cursor.fetchall())
     cursor.execute(
         "SELECT ITEM FROM TEST_METRICS"
-        " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';"
+        " WHERE COMPONENT == 'my_component' AND ITEM LIKE '%test_prefix_ok%';",
     )
     assert 1 == len(cursor.fetchall())
diff --git a/tests/test_monitor_context.py b/tests/test_monitor_context.py
index 943d289..c1bcd68 100644
--- a/tests/test_monitor_context.py
+++ b/tests/test_monitor_context.py
@@ -77,10 +77,10 @@ def test_force_cpu_freq(testdir):
 @pytest.mark.parametrize(
-    "effect", [AttributeError, NotImplementedError, FileNotFoundError]
+    "effect", [AttributeError, NotImplementedError, FileNotFoundError],
 )
 def test_when_cpu_freq_cannot_fetch_frequency_set_freq_by_using_fallback(
-    effect, testdir
+    effect, testdir,
 ):
     """Make sure that pytest-monitor fallback takes value of CPU FREQ from special env var"""
     # create a temporary pytest test module
@@ -102,7 +102,7 @@ def test_when_cpu_freq_cannot_fetch_frequency_set_freq_by_using_fallback(
 @pytest.mark.parametrize(
-    "effect", [AttributeError, NotImplementedError, FileNotFoundError]
+    "effect", [AttributeError, NotImplementedError, FileNotFoundError],
 )
 def test_when_cpu_freq_cannot_fetch_frequency_set_freq_to_0(effect, testdir):
     """Make sure that pytest-monitor's fallback mechanism is efficient enough."""
diff --git a/tests/test_monitor_in_ci.py b/tests/test_monitor_in_ci.py
index f3fe9e8..cb04652 100644
--- a/tests/test_monitor_in_ci.py
+++ b/tests/test_monitor_in_ci.py
@@ -17,7 +17,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
-"""
+""",
     )
     envs = dict()
@@ -73,7 +73,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
-"""
+""",
     )
     def check_that(the_result, match):
@@ -159,7 +159,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
-"""
+""",
     )
     def check_that(the_result, match):
@@ -236,7 +236,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
-"""
+""",
     )
     def check_that(the_result, match):
@@ -313,7 +313,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
-"""
+""",
     )
     def check_that(the_result, match):
@@ -390,7 +390,7 @@ def test_ok():
     x = ['a' * i for i in range(100)]
     assert len(x) == 100
-"""
+""",
     )
     def check_that(the_result, match):