diff --git a/.github/workflows/dependabot.yml b/.github/workflows/dependabot.yml index 302a58e3..ffea3e36 100644 --- a/.github/workflows/dependabot.yml +++ b/.github/workflows/dependabot.yml @@ -1,6 +1,6 @@ version: 2 updates: - - package-ecosystem: "npm" + - package-ecosystem: "bun" directory: "/" schedule: interval: "daily" diff --git a/.github/workflows/deploy.yml b/.github/workflows/deploy.yml index 09f5944f..9ab5d52d 100644 --- a/.github/workflows/deploy.yml +++ b/.github/workflows/deploy.yml @@ -16,26 +16,24 @@ jobs: # runs-on: ubuntu-latest # steps: # - uses: actions/checkout@v4 -# - name: Setup node -# uses: actions/setup-node@v4 +# - name: Setup Bun +# uses: oven-sh/setup-bun@v1 # with: -# node-version: 20.x -# - uses: pnpm/action-setup@v4 +# bun-version: latest +# - name: Install bunosh +# run: bun add -g bunosh +# - run: bun install +# - run: bunosh docs +# - run: bun run build +# env: +# PUBLIC_MEILISEARCH_KEY: ${{ secrets.PUBLIC_MEILISEARCH_KEY }} +# - name: Publish to Cloudflare Pages +# uses: cloudflare/pages-action@v1 # with: -# version: 10.12.4 -# - run: pnpm install -# - run: cd src/scripts && pnpm install -# - run: cd src/scripts && node runok.js docs -# - run: pnpm run build - # env: - # PUBLIC_MEILISEARCH_KEY: ${{ secrets.PUBLIC_MEILISEARCH_KEY }} - # - name: Publish to Cloudflare Pages - # uses: cloudflare/pages-action@v1 - # with: - # apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} - # accountId: 60b76f40370d8320885e92e3daa114b1 - # projectName: docs - # directory: dist +# apiToken: ${{ secrets.CLOUDFLARE_API_TOKEN }} +# accountId: 60b76f40370d8320885e92e3daa114b1 +# projectName: docs +# directory: dist run-scraper: name: Run Search Scraper runs-on: ubuntu-latest diff --git a/.github/workflows/test-build.yml b/.github/workflows/test-build.yml index 6aa45f42..7af9411d 100644 --- a/.github/workflows/test-build.yml +++ b/.github/workflows/test-build.yml @@ -7,18 +7,16 @@ on: jobs: deploy: - name: Test - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - name: Setup node - uses: actions/setup-node@v4 - with: - node-version: 20.x - - uses: pnpm/action-setup@v4 - with: - version: 10.14.0 - - run: pnpm install - - run: cd src/scripts && pnpm install - - run: cd src/scripts && node runok.js docs - - run: pnpm run build \ No newline at end of file + name: Test + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - name: Setup Bun + uses: oven-sh/setup-bun@v1 + with: + bun-version: latest + - name: Install bunosh + run: bun add -g bunosh + - run: bun install + - run: bunosh docs + - run: bun run build \ No newline at end of file diff --git a/Bunoshfile.js b/Bunoshfile.js new file mode 100644 index 00000000..307254d8 --- /dev/null +++ b/Bunoshfile.js @@ -0,0 +1,349 @@ +// Import Bunosh functions from global object +const { exec, fetch, writeToFile, say, yell } = global.bunosh; +const fs = require('fs'); +const path = require('path'); +const { globSync } = require('glob'); + +require('dotenv').config(); + +const token = process.env.GH_PAT; + +const FETCH_ISSUES_REQUEST = 'GET /repos/{owner}/{repo}/issues?labels={label}&sort=comments&direction=desc' + +const humanize = (s) => s.replace(/-([a-z])/g, (_, c) => ' ' + c.toUpperCase()).replace(/^([a-z])/, (_, c) => c.toUpperCase()); +const slugify = require('slugify') +const dasherize = (str) => slugify(str.toLowerCase()); + +// Frontmatter templates for documentation files +const JS_FRONTMATTER = `--- +title: Import JavaScript Tests +description: Import JavaScript tests into Testomat.io using the CLI tool 
with support for various frameworks like Cypress, TestCafe, and Protractor. This guide covers commands for importing, synchronizing, and managing test IDs, as well as options for handling parametrized tests, disabling detached tests, and importing into specific suites or branches.
+type: article
+url: https://docs.testomat.io/project/import-export/import-js
+head:
+  - tag: meta
+    attrs:
+      name: og:image
+      content: https://user-images.githubusercontent.com/24666922/78563263-505d1280-7838-11ea-8fbc-18e942d48485.png
+
+  - tag: meta
+    attrs:
+      name: keywords
+      content: Testomat.io, JavaScript test import, automated tests, test management, CLI tool, test synchronization, parametrized tests, Test IDs, Cypress, TestCafe, Protractor, QA tools
+---
+
+
+
+Testomat.io can import automated tests into a project.
+We provide CLI tools for different frameworks so you get visibility of your tests in seconds.
+On this page we collect references to these tools. Learn how to install and configure a test importer for your project.
+
+## JavaScript
+
+> 📑 This documentation is taken from open-source project [testomatio/check-tests](https://github.com/testomatio/check-tests)`;
+
+const PHP_FRONTMATTER = `---
+title: Import PHP Tests
+description: Import PHP tests into Testomat.io using the list-tests CLI utility, which supports PHPUnit and Codeception. This guide explains how to install the tool, print and export test lists in markdown format, and import tests directly into Testomat.io by passing the API key for your project. Easily manage PHP tests and maintain live documentation with this integration.
+type: article
+url: https://docs.testomat.io/project/import-export/import-php
+head:
+  - tag: meta
+    attrs:
+      name: og:image
+      content: https://docs.testomat.io/_astro/test-reporting-heat-map.CoE-TwPN_Z20qVi.webp
+
+  - tag: meta
+    attrs:
+      name: keywords
+      content: Testomat.io, PHP test import, PHPUnit, Codeception, CLI tool, test management, live documentation, markdown export, test synchronization, API key, QA tools
+---
+
+
+
+> 📑 This documentation is taken from open-source project [testomatio/php-list-tests](https://github.com/testomatio/php-list-tests)`;
+
+const BDD_FRONTMATTER = `---
+title: Import Cucumber BDD Tests
+description: Import Cucumber BDD tests into Testomat.io using the check-cucumber CLI tool. This guide covers commands to synchronize tests, assign test IDs, manage detached tests, and clean or import tests into specific suites or branches, or keep the source code structure intact. It also supports both manual and automated BDD test imports, allowing efficient test management and version control.
+type: article +url: https://docs.testomat.io/project/import-export/import-bdd +head: + - tag: meta + attrs: + name: og:image + content: https://user-images.githubusercontent.com/24666922/78559548-2dc7fb00-7832-11ea-8c69-0722222a82fe.png + + - tag: meta + attrs: + name: keywords + content: Testomat.io, Cucumber BDD, CLI tool, test import, test IDs, test synchronization, automated tests, manual tests, test management, QA tools +--- + + + +> ๐Ÿ“‘ This documentation is taken from open-source project [testomatio/check-cucumber](https://github.com/testomatio/check-cucumber)`; + +/** + * Main docs task that imports and processes documentation + */ +export async function docs() { + await docsImporter(); + await docsReporter(); +} + +/** + * Process images in documentation files + */ +export async function docsImages() { + const files = globSync("src/content/docs/**/*.md"); + + for (const file of files) { + say(`Processing ${file}`); + const content = fs.readFileSync(file).toString(); + + const destinationFolder = path.join(path.dirname(file), 'images'); + + if (!fs.existsSync(destinationFolder)) { + fs.mkdirSync(destinationFolder, { recursive: true }); + } + + const imageUrls = content.match(/!\[.*?\]\((.*?)\)/g)?.map(match => match.match(/!\[.*?\]\((.*?)\)/)[1]) || []; + console.log(imageUrls); + + let updatedContent = content; + + if (!imageUrls.length) continue; + + for (const imageUrl of imageUrls) { + if (imageUrl.startsWith('http')) { + try { + await downloadImage(imageUrl, destinationFolder); + updatedContent = updatedContent.replace(imageUrl, `./images/${path.basename(imageUrl)}`); + } catch (err) {} + continue; + } + // try { + // fs.renameSync(path.join('docs/.vuepress/public', imageUrl), path.join(destinationFolder, path.basename(imageUrl)), { overwrite: true }); + // } catch (err) {} + updatedContent = updatedContent.replace(imageUrl, `./images/${path.basename(imageUrl)}`); + } + + // updatedContent = content.replace(/!\[.*?\]\((\/assets\/.*?)\)/g, "![$&](images$1)"); + + // console.log(updatedContent); + fs.writeFileSync(file, updatedContent); + } +} + +/** + * Generate and process reporter documentation + */ +export async function docsReporter() { + await exec`rm -rf tmp/reporter`; + await exec`rm -rf tmp/php-reporter`; + await exec`rm -rf tmp/pytest-reporter`; + + const destinationFolder = path.resolve(path.join(__dirname, 'src/content/docs/test-reporting')); + + say(`Destination folder: ${destinationFolder}`); + + if (!fs.existsSync(destinationFolder)) { + fs.mkdirSync(destinationFolder, { recursive: true }); + } + + // saving headers of files + const files = globSync(`${destinationFolder}/**/*.md`); + + let fileHeaders = {} + for (const file of files) { + const content = fs.readFileSync(file, 'utf8'); + const match = content.match(/^---[\s\S]+?^---/m); + if (match) { + fileHeaders[path.basename(file, '.md')] = match[0]; + } + } + + await exec`git clone https://github.com/testomatio/reporter.git tmp/reporter --depth=1`; + + // Copy docs files using Node.js fs instead of cp command + const docsSrc = path.join(__dirname, 'tmp/reporter/docs'); + const docsFiles = fs.readdirSync(docsSrc); + for (const file of docsFiles) { + const srcPath = path.join(docsSrc, file); + const destPath = path.join(destinationFolder, file); + if (fs.statSync(srcPath).isFile()) { + fs.copyFileSync(srcPath, destPath); + } + } + + const phpReporterUrl = 'https://github.com/testomatio/php-reporter' + await exec`git clone ${phpReporterUrl}.git tmp/php-reporter --depth=1`; + const phpReadme = 
'tmp/php-reporter/README.md'; + + const pytestReporterUrl = 'https://github.com/testomatio/pytestomatio' + await exec`git clone ${pytestReporterUrl}.git tmp/pytest-reporter --depth=1`; + const pytestReadme = 'tmp/pytest-reporter/README.md'; + + const capitalize = s => s && s[0].toUpperCase() + s.slice(1) + + const filesToDelete = ['pipes', 'debugging', 'stacktrace'] + for (const file of filesToDelete) { + say(`Deleting pipes file: ${file}`); + try { + fs.unlinkSync(path.join(destinationFolder, file + '.md')); + } catch (error) { + // console.error(`Error deleting file ${file}: ${error.message}`); + } + } + + const updatedFiles = globSync(`${destinationFolder}/**/*.md`); + for (const file of updatedFiles) { + if (['index', 'php', 'python'].includes(path.basename(file, '.md'))) continue; + let title = humanize(path.basename(file, '.md')).trim(); + if (title.length > 0) { + title = title.charAt(0).toUpperCase() + title.slice(1); + } + const titleId = title.toUpperCase(); + + if (titleId === 'FRAMEWORKS') title = "NodeJS Test Frameworks"; + if (titleId === 'TESTOMATIO') title = "Advanced Options" + if (titleId === 'JUNIT') title = "JUnit Reporter" + let contents; + try { + contents = fs.readFileSync(file).toString() + } catch (error) { + continue; + } + + // Check if content already has the correct frontmatter + const hasCorrectFrontmatter = contents.startsWith('---\ntitle: '); + + // Skip processing if file already has correct frontmatter + if (hasCorrectFrontmatter && fileHeaders[path.basename(file, '.md')]) { + continue; + } + + // Extract existing frontmatter if it exists + let existingFrontmatter = ''; + const frontmatterMatch = contents.match(/^---[\s\S]*?^---\n/m); + if (frontmatterMatch) { + existingFrontmatter = frontmatterMatch[0]; + } + + // Remove existing frontmatter from content + contents = contents.replace(/^---[\s\S]*?^---\n/m, ''); + + contents = contents.replace(/^#\s.+/gm, ''); + // fix links + // contents = transformLinks(contents) + + // Use existing frontmatter if available, otherwise use stored or generate new + const frontmatter = existingFrontmatter || + fileHeaders[path.basename(file, '.md')] || + `---\ntitle: ${title}\n---`; + + contents = `${frontmatter}\n${contents}\n`; + + fs.writeFileSync(file, contents) + } + + let phpContents = fs.readFileSync(phpReadme).toString().replace(/^#\s.+/gm, ''); + phpContents = phpContents.replace(/^---[\s\S]+?^---/m, ''); + phpContents = phpContents.replace(/^#\s.+/gm, ''); + phpContents = `\n\n:::note\n Taken from [PHP Reporter Readme](${phpReporterUrl})\n:::\n ${phpContents}\n` + + fs.writeFileSync(path.join(destinationFolder, '/php.md'), fileHeaders.php + phpContents) + + let pytestContents = fs.readFileSync(pytestReadme).toString().split('## Change')[0]; + pytestContents = pytestContents.replace(/^---[\s\S]+?^---/m, ''); + pytestContents = `\n\n:::note\n Taken from [Pytestomatio Reporter Readme](${pytestReporterUrl})\n:::\n\n${pytestContents}\n` + + fs.writeFileSync(path.join(destinationFolder, '/python.md'), fileHeaders.python + pytestContents) + // writeToFile(destinationFolder + '/python.md', cfg => { + // cfg.line(fileHeaders.python || '---\nPython Reporter\n---'); + // cfg.line(pytestContents); + // }); +} + +/** + * Import documentation from various sources + */ +export async function docsImporter() { + const destinationFolder = 'src/content/docs/project/import-export'; + + if (!fs.existsSync(destinationFolder)) { + fs.mkdirSync(destinationFolder, { recursive: true }); + } + + const response = await 
fetch('https://raw.githubusercontent.com/testomatio/check-tests/master/README.md');
+  let content = response.output;
+  content = content.split('\n');
+  content = content.slice(content.indexOf('## CLI') + 2).join('\n').replace(/#\s/g, '## ')
+
+  writeToFile('src/content/docs/project/import-export/import/import-js.md', (line) => {
+    line(JS_FRONTMATTER);
+    line('');
+    line(content);
+  });
+
+  const response2 = await fetch('https://raw.githubusercontent.com/testomatio/php-list-tests/0.2.x/README.md');
+  let content2 = response2.output;
+  content2 = content2.split('\n').slice(3).join('\n').replace(/#\s/g, '## ')
+
+
+  writeToFile('src/content/docs/project/import-export/import/import-php.md', (line) => {
+    line(PHP_FRONTMATTER);
+    line('');
+    line(content2);
+  });
+
+  const response3 = await fetch('https://raw.githubusercontent.com/testomatio/check-cucumber/master/README.md');
+  let content3 = response3.output.split('\n');
+  content3 = content3.slice(content3.indexOf('## Cli') + 2).join('\n')
+
+
+  writeToFile('src/content/docs/project/import-export/import/import-bdd.md', (line) => {
+    line(BDD_FRONTMATTER);
+    line('');
+    line(content3);
+  });
+}
+
+async function downloadImage(imageUrl, destinationFolder) {
+  try {
+    const img = imageUrl.split('/')[imageUrl.split('/').length - 1];
+
+    if (fs.existsSync(path.join(destinationFolder, img))) {
+      say(`Image already exists: ${imageUrl}`);
+      return path.join(destinationFolder, img);
+    }
+
+    const response = await fetch(imageUrl);
+    // Bunosh fetch returns TaskResult with output containing the binary data
+    const imageData = response.output;
+
+    // Extract content type from response if available, otherwise use fallback
+    let contentType = 'image/jpeg'; // fallback
+    if (response.headers && response.headers['content-type']) {
+      contentType = response.headers['content-type'];
+    }
+
+    const extension = contentType.split('/')[1];
+
+    if (!fs.existsSync(destinationFolder)) {
+      fs.mkdirSync(destinationFolder, { recursive: true });
+    }
+
+    const filename = path.join(destinationFolder, img);
+    fs.writeFileSync(filename, Buffer.from(imageData));
+
+    say(`Downloaded image: ${imageUrl} => ${filename}`);
+    return filename;
+  } catch (error) {
+    console.error(`Failed to download image: ${imageUrl} ${error}`);
+    // process.exit(1);
+    throw error;
+    // return null;
+  }
+}
diff --git a/README.md b/README.md
index 3098a0fe..3921f69b 100644
--- a/README.md
+++ b/README.md
@@ -12,18 +12,53 @@
 
 Documentation is saved as static markdown files under `src/content/docs`.
 
 This static website is built with Astro Starlight.
 
-To start the website locally
+### Prerequisites
 
-* clone this repo
-* install dependencies with npm
-* fetch all documenation pages
+Before starting the website locally, you need to install:
 
-```
-./runok.js docs
-```
-* start a dev server
+1. **Bun** - A fast JavaScript runtime and package manager
+   ```bash
+   # Install Bun
+   curl -fsSL https://bun.sh/install | bash
+   # Or using npm: npm install -g bun
+   ```
 
-```
-npm run dev
-```
+2. **Bunosh** - A modern task runner for JavaScript
+   ```bash
+   bun add -g bunosh
+   ```
+
+### Setup and Development
+
+To start the website locally:
+
+1. **Clone this repo**
+   ```bash
+   git clone <repository-url>
+   cd <repository-folder>
+   ```
+
+2. **Install dependencies**
+   ```bash
+   bun install
+   ```
+
+3. **Fetch all documentation pages**
+   ```bash
+   bunosh docs
+   ```
+
+4. **Start the development server**
+   ```bash
+   bun run dev
+   ```
+
+### Available Commands
+
+- `bunosh docs` - Fetch and generate all documentation
+- `bunosh docs:importer` - Import external documentation from GitHub
+- `bunosh docs:reporter` - Generate reporter documentation
+- `bunosh docs:images` - Process documentation images
+- `bun run dev` - Start development server
+- `bun run build` - Build for production
diff --git a/astro.config.mjs b/astro.config.mjs
index e72f3c20..a3cf4b49 100644
--- a/astro.config.mjs
+++ b/astro.config.mjs
@@ -157,7 +157,7 @@ export default defineConfig({
         { label: 'Overview', link: '/project/import-export/import' },
         { label: 'Import from Source Code', link: '/project/import-export/import/import-tests-from-source-code' },
         { label: 'Import from Cucumber', link: '/project/import-export/import/import-tests-from-cucumber' },
-        { label: 'Import from TMS', link: '/project/import-export/import/import-tests-from-csvxls' },
+        { label: 'Import from CSV/XLSX', link: '/project/import-export/import/import-tests-from-csv-xlsx' },
         { label: 'Import Tests From TestRail', link: '/project/import-export/import/import-tests-from-testrail' },
 
@@ -229,6 +229,13 @@
       items: [
         { label: 'Overview', link: '/advanced' },
         {
+          label: 'Artifacts',
+          collapsed: true,
+          items: [
+            { label: 'Artifacts', link: '/advanced/test-artifacts/artifacts' }
+          ],
+        },
+        {
+          label: 'Tags, Custom fields',
-          label: 'Tags & Labels',
           collapsed: true,
           items: [
diff --git a/package.json b/package.json
index 78310212..11ea5704 100644
--- a/package.json
+++ b/package.json
@@ -1,7 +1,7 @@
 {
   "name": "@testomatio/docs",
   "version": "0.0.1",
-  "packageManager": "pnpm@10.14.0",
+  "packageManager": "pnpm@10.17.1",
   "description": "Testomatio Documentation",
   "main": "index.js",
   "authors": {
@@ -20,30 +20,30 @@
   },
   "license": "MIT",
   "devDependencies": {
-    "axios": "^1.11.0",
+    "axios": "^1.12.2",
     "dasherize": "^2.0.0",
     "glob": "^10.4.2",
     "runok": "^0.9.3",
     "slugify": "^1.6.6"
   },
   "dependencies": {
-    "@astrojs/markdown-remark": "^6.3.6",
-    "@astrojs/starlight": "^0.35.2",
+    "@astrojs/markdown-remark": "^6.3.7",
+    "@astrojs/starlight": "^0.36.0",
     "@octokit/core": "^5.1.0",
-    "astro": "^5.13.2",
+    "astro": "^5.14.1",
     "astro-breadcrumbs": "^3.3.1",
     "astro-og-canvas": "^0.7.0",
     "astro-rehype-relative-markdown-links": "0.18.1",
-    "astro-vtbot": "^2.1.7",
-    "bunosh": "^0.1.5",
+    "astro-vtbot": "^2.1.8",
+    "bunosh": "^0.4.1",
     "canvaskit-wasm": "^0.40.0",
-    "dotenv": "^16.5.0",
+    "dotenv": "^17.2.3",
     "gray-matter": "^4.0.3",
     "meilisearch-docsearch": "^0.8.0",
     "schema-dts": "^1.1.5",
-    "sharp": "^0.34.3",
+    "sharp": "^0.34.4",
     "starlight-image-zoom": "^0.13.0",
-    "starlight-links-validator": "^0.17.1"
+    "starlight-links-validator": "^0.18.0"
   },
   "type": "module"
 }
diff --git a/redirects.js b/redirects.js
index c274ce42..1bb72adb 100644
--- a/redirects.js
+++ b/redirects.js
@@ -3,6 +3,7 @@
 const redirects = {
   "/getting-started/import-tests-from-cucumber": "/project/import-export/import/import-tests-from-cucumber",
   "/getting-started/import-tests-from-source-code": "/project/import-export/import/import-tests-from-source-code",
   "/getting-started/import-tests-from-csvxls": "/project/import-export/import/import-tests-from-csvxls",
+  "/project/import-export/import/import-tests-from-csvxls": "/project/import-export/import/import-tests-from-csv-xlsx/",
   "/getting-started/test-design": "/project/tests",
   "/getting-started/running-tests-manually": "/project/runs/running-tests-manually",
"/getting-started/running-automated-tests": "/project/runs/running-automated-tests", diff --git a/src/content/docs/advanced/ai-powered-features/ai-powered-features.md b/src/content/docs/advanced/ai-powered-features/ai-powered-features.md index c6872450..c7246e6a 100644 --- a/src/content/docs/advanced/ai-powered-features/ai-powered-features.md +++ b/src/content/docs/advanced/ai-powered-features/ai-powered-features.md @@ -34,7 +34,7 @@ You can use **'Chat with Tests'** feature on Project or Folder level. ### Use 'Chat with Tests' Feature at the Project Level 1. Go to 'Tests' page. -2. Click on **'Chat with tests'** AI icon displayed in the header. +2. Click **'Chat with tests'** AI icon displayed in the header. ![Testomat.io - AI-powered](./images/AI_Chat_1.png) @@ -50,7 +50,7 @@ OR Create you own AI-promt. -4. Click on **'Ask'** button. +4. Click **'Ask'** button. ![Testomat.io - AI-powered](./images/AI_Chat_2.gif) @@ -60,7 +60,7 @@ You can also use **'Chat with Tests'** on folder level to analyze and summarize 1. Go to 'Tests' page. 2. Select the Folder. -3. Click on **'Chat with Tests'** button. +3. Click **'Chat with Tests'** button. ![Testomat.io - AI-powered](./images/AI_Chat_3.gif) @@ -70,7 +70,7 @@ You can automatically generate a suite description by analyzing the test cases w 1. Go to 'Tests'. 2. Select Suite with test cases. -3. Click on **'Summarize'** button. +3. Click **'Summarize'** button. ![Testomat.io - AI-powered](./images/AI_1.png) @@ -100,7 +100,7 @@ You can also use AI to enhance your test coverage by creating additional test ca This feature makes it easier to create comprehensive test suites. 1. Open Test Suite that already contains Test Cases. -2. Click on 'Extra menu' button. +2. Click **'Extra menu'** button. 3. Select **'Suggest Tests'** option. ![Testomat.io - AI-powered](./images/AI_8.gif) @@ -131,7 +131,7 @@ This feature accelerates test creation, enhances coverage by identifying overloo This feature allows you to create test case description based just on its name or improve description that you previously added to your test case. 1. Open Test Case. -2. Click on **'Suggest Description'** button. +2. Click **'Suggest Description'** button. ![Testomat.io - AI-powered](./images/AI_9.gif) @@ -141,7 +141,7 @@ Use AI to analyze your test code and produce detailed test descriptions. Bridges 1. Go to 'Tests'. 2. Select Test Case with code. -3. Click on **'Write Description from Code'** button. +3. Click **'Write Description from Code'** button. ![Testomat.io - AI-powered](./images/AI_3.png) @@ -188,7 +188,7 @@ Shows only for finished, automated runs with 5+ failures. 1. Go to 'Runs' page. 2. Open finished automated run. -3. Click on **'Clusterize Errors'** button. +3. Click **'Clusterize Errors'** button. ![Testomat.io - AI-powered](./images/AI_5.png) @@ -205,7 +205,7 @@ The same as in the previous case, it also available only for finished, automated 1. Go to 'Runs' page. 2. Open finished automated run. 3. Click on Failed Test Case. -4. Click on **'Explain Failure'** button. +4. Click **'Explain Failure'** button. ![Testomat.io - AI-powered](./images/AI_7.gif) @@ -215,7 +215,7 @@ Testomat.io allows you to use AI-powered feature to analyze and summarize your f 1. Go to 'Runs' page. 2. Select finished test run for statistics snalysis. -3. Click on **'Run Summary'** button. +3. Click **'Run Summary'** button. 
![Testomat.io - AI-powered](./images/AI_run_summary_1.png) @@ -227,6 +227,29 @@ Testomat.io allows you to use AI-powered feature to analyze and summarize your f ::: +## Analyze Suite + +**Analyze Suite** tool brings AI-powered analytics directly to individual suites, helping you assess both **functional coverage** and **suite stability** without navigating the entire project view. + +**Whatโ€™s included:** + +- **Functional area coverage mapping** โ€“ analyzes tests within a suite to determine which parts of your product it covers. +- **Suite Stability Report** โ€“ evaluates recent test execution results to highlight flakiness, instability, or recurring issues. +- **Focused insight** โ€“ ideal for monitoring the health of specific product modules or critical flows. + +**To access this feature:** + +1. Go to 'Tests'. +2. Select the Suite that you want to analyze. +3. Click **'Extra menu'** button on 'Summarize' button. +4. Select **'Analyze Suite'** option from the dropdown menu. + +![Testomat.io - AI-powered](./images/AI_Analyze_Suite_1.png) + +By providing actionable insights at the suite level, teams can quickly identify improvement areas, address instability, and maintain high-quality standards in critical parts of their projects. + +![Testomat.io - AI-powered](./images/AI_Analyze_Suite_2.gif) + ## Project Runs Status Report **AI-Powered Project Runs Status Report** feature automatically generates a **high-level status report** based on the latest project's test runs information โ€” powered by AI. @@ -241,13 +264,55 @@ The **Runs Status Report** gives you a quick overview of test stability, critica - **Execution Time Trends** โ€“ How test durations are behaving over time. - **Top Errors** โ€“ Most frequent failure messages to help speed up debugging. - **Systematic Failures** โ€“ Pinpointed test cases that failed consistently and may block critical flows. -- **Note** - Hightlights the test runs that were analyzed in the Run Status Report by AI. +- **Note** - Hightlights the test runs that were analyzed in the Runs Status Report by AI. **To access this feature:** 1. Go to 'Runs' page. -2. Click on **'Run Status Report'** button. + +2. Click **'Run Status Report'** button. ![Testomat.io - AI-powered Project Runs Status Report](./images/AI_14.gif) This report is available automatically based on recent test run history, giving your team instant visibility into the health of your project. + +## RunGroup Statistic Report + +The **'RunGroup Statistic Report'** โ€” a new way to analyze the health and progress of test runs grouped together. + +**This report includes:** + +- **Run Execution Summary** โ€“ a quick breakdown of passed, failed, and skipped tests across all runs in the group. +- **Detailed Analytics by Run Status** โ€“ view trends, patterns, and key metrics within each run. +- **TOP Failed Tests** - view tests that failed the most in total. +- **AI-Powered Recommendations** โ€“ suggested actions to improve stability and address recurring issues +Perfect for teams managing large-scale test executions across multiple environments or test types. + +**To access this feature:** + +1. Go to 'Runs' page. +2. Select RunGroup you want to analyze. +3. Click **'RunGroup Statistic Report'** button. + +![Testomat.io - AI-powered Project Runs Status Report](./images/AI_RunGroup_Report_1.gif) + +This report is perfect for teams managing large-scale test executions across multiple environments or test types. 
+
+
+## Frequently Asked Questions (FAQ)
+
+**Q: What are the available AI provider options in Testomat.io, and what is their approach to data usage and model training?**
+
+A: Testomat.io offers flexible options for AI providers to accommodate different company needs and policies. You can choose from the following:
+- [Groq Inc.](https://groq.com/): This is a US-based company that uses open-source models and does not train its own models on user data, so your input data will not be consumed to train new models; Groq only provides hosting for them. Testomat.io can provide access to Groq as part of its service.
+- Other Providers: If your company has a specific policy or preferred vendor, you can use an alternative provider like OpenAI, Azure, etc. These can be configured at a global level for the entire organization.
+
+**Q: How is user data handled and secured when using Testomat.io's AI features?**
+
+A: Testomat.io's AI features are designed with data privacy and user control in mind. Here's how it works:
+- **User-Initiated Actions:** No data is sent to the AI provider in the background. A user must manually select a specific test or run and click an AI button to send the data for analysis.
+- **Context-Based Prompts:** The AI prompts are run on specific contexts, including tests, suites, runs, and run results. This ensures that only the relevant, selected data is sent for analysis.
+
+**Q: What is the approximate AI usage in terms of tokens or API calls?**
+
+A: AI consumption depends on the size of your project — including test cases, suites, run messages, stack traces, and requirements. In short, the more tests and requirements you have, the larger the prompts will be.
diff --git a/src/content/docs/advanced/ai-powered-features/ai-requirements.md b/src/content/docs/advanced/ai-powered-features/ai-requirements.md
index af7067e8..ea9edcf9 100644
--- a/src/content/docs/advanced/ai-powered-features/ai-requirements.md
+++ b/src/content/docs/advanced/ai-powered-features/ai-requirements.md
@@ -35,12 +35,34 @@ You can enable AI at any time on Company Settings page by following the instruc
 
 :::
 
+## Add Requirements to an Empty Project
+
+You can start a new project by first adding your requirements.
+
+Once a project is created (BDD or Classical), set up the integration for your **Requirement Source** (see detailed instructions for [JIRA](https://docs.testomat.io/integrations/issues-management/jira/#connecting-to-jira-project) and [Confluence](https://docs.testomat.io/integrations/issues-management/confluence)). Then, proceed with adding requirements:
+
+1. Click **'Extra menu'** button.
+2. Select **'Requirements'** option from the dropdown list.
+
+![Testomat.io - Empty Project](./images/Empty_Project_1.png)
+
+3. Click **'+ Add'** button.
+4. Select your requirement source (Jira in our case).
+5. Enter **'Jira Issue ID'**.
+6. Click **'Save'** button.
+
+![Testomat.io - Empty Project](./images/Empty_Project_2.png)
+
+After the requirement is linked to Testomat.io, you can use AI Assistant to analyze requirements for edge cases and potential solutions. You can also create suites and test cases based on these requirements.
+
 ## Jira as a Requirement Source
 
+Testomat.io not only allows you to start a new project with requirements but also to link requirements to an ongoing project at any time.
+
 **To add Jira as a Requirement Source:**
 
 1. Open your Project in Testomat.io.
-2. Click on **'Extra menu'** button.
+2. Click **'Extra menu'** button.
 3. Select **'Requirements'** option from the dropdown list.
 4. Click **'+ Add'** button.
 5. Select **'Jira'** as your Requirement Source.
 
@@ -49,11 +71,11 @@
 
 ![Testomat.io - AI-powered](./images/AI_Jira_1.gif)
 
-After the requirement is linked to Testomat.io you can use AI Assistant to analyze requirements for edge cases and potential solutions. You can also create suites and test cases based on these requirements.
+Same as for a new project, after the requirement is linked to your project, you can start using AI features to analyze it and generate suites, test cases, and edge cases to cover your requirements.
 
 :::note
 
-To use AI-Requirements feature, first connect Testomat.io to your Jira project. See detailed instructions in the [Connecting to JIRA project section](https://docs.testomat.io/integrations/issues-management/jira/#connecting-to-jira-project).
+To use AI-Requirements feature, first connect Testomat.io to your Jira project. See detailed instructions in the [Connecting to JIRA project](https://docs.testomat.io/integrations/issues-management/jira/#connecting-to-jira-project) section.
 
 :::
 
@@ -67,7 +89,7 @@ This integration bridges the gap between documentation and test planning, enabli
 
 **To add Confluence as a Requirement into your project:**
 
-1. Click on **'Extra menu'** button.
+1. Click **'Extra menu'** button.
 2. Select **'Requirements'** option from the dropdown list.
 3. Click **'+ Add'** button.
 4. Select **'Confluence'** as your Requirement Source.
 
@@ -114,26 +136,26 @@
 
 After adding the Suite Case to your project, you can begin creating your test cases or use AI to generate them for you.
 
-## Generate Test Cases from Requirements
+## Generate Test Cases from Requirements Page
 
 1. Open added Requirement.
 2. Click **'Analyze Requirement'** button.
 
 ![Testomat.io - Jira](./images/AI_Jira_2.png)
 
-3. Click on **'Add tests to {Suite_name} Suite'** option.
+3. Click **'Add tests to {Suite_name} Suite'** option.
 
 ![Testomat.io - Jira](./images/AI_Jira_8.png)
 
-4. Check suggested test cases and add the relevant ones by clicking on **'Add'** button.
+4. Check suggested test cases and add the relevant ones by clicking **'Add'** button.
 
 ![Testomat.io - Jira](./images/AI_Jira_9.png)
 
-5. Click on **'Write Description'** button to add description to the selected test case.
+5. Click **'Write Description'** button to add description to the selected test case.
 
 ![Testomat.io - Jira](./images/AI_Jira_10.png)
 
-6. Click on **'Update Test Description'** button to add genearted test description to the test case.
+6. Click **'Update Test Description'** button to add generated test description to the test case.
 
 ![Testomat.io - Jira](./images/AI_Jira_11.png)
 
 :::note
 
 You always need to explicitly select which test cases to add and update their descriptions.
 
 :::
 
+## Suggest Test Cases Based on Requirements from Suite Level
+
+You can also generate test cases directly from a Suite if it has a linked requirement.
+
+1. Open Suite Case with the linked requirement.
+2. Click **'Extra menu'** button on the 'Summarize' button.
+3. Select **'Suggest Tests'** option from the dropdown list.
+
+![Testomat.io - Suggest Tests](./images/AI_Suggest_tests_1.png)
+
+Similar to the previous case, review the suggested test cases, add the relevant ones, and write descriptions for the selected test cases directly from the AI-assistant window.
+
+![Testomat.io - Suggest Tests](./images/AI_Suggest_tests_2.gif)
+
+All added test cases will be displayed in your Suite after the AI-assistant window is closed.
+
+![Testomat.io - Suggest Tests](./images/AI_Suggest_tests_3.png)
+
 ## Link Requirement to an Existing Suite Case
 
 Testomat.io allows you to link a requirement to an existing suite case directly from the suite case page. To do that, follow these steps:
 
 1. Open your Suite Case.
-2. Click on **'Extra menu'** button.
+2. Click **'Extra menu'** button.
 3. Select **'Add Requirements'** option from the dropdown list.
 4. Select the requirement from the list by clicking on **'Attach'** button.
diff --git a/src/content/docs/advanced/ai-powered-features/images/AI_Analyze_Suite_1.png b/src/content/docs/advanced/ai-powered-features/images/AI_Analyze_Suite_1.png
new file mode 100644
index 00000000..69967821
Binary files /dev/null and b/src/content/docs/advanced/ai-powered-features/images/AI_Analyze_Suite_1.png differ
diff --git a/src/content/docs/advanced/ai-powered-features/images/AI_Analyze_Suite_2.gif b/src/content/docs/advanced/ai-powered-features/images/AI_Analyze_Suite_2.gif
new file mode 100644
index 00000000..51f08aa7
Binary files /dev/null and b/src/content/docs/advanced/ai-powered-features/images/AI_Analyze_Suite_2.gif differ
diff --git a/src/content/docs/advanced/ai-powered-features/images/AI_RunGroup_Report_1.gif b/src/content/docs/advanced/ai-powered-features/images/AI_RunGroup_Report_1.gif
new file mode 100644
index 00000000..d2efb6c9
Binary files /dev/null and b/src/content/docs/advanced/ai-powered-features/images/AI_RunGroup_Report_1.gif differ
diff --git a/src/content/docs/advanced/ai-powered-features/images/AI_Suggest_tests_1.png b/src/content/docs/advanced/ai-powered-features/images/AI_Suggest_tests_1.png
new file mode 100644
index 00000000..b3328120
Binary files /dev/null and b/src/content/docs/advanced/ai-powered-features/images/AI_Suggest_tests_1.png differ
diff --git a/src/content/docs/advanced/ai-powered-features/images/AI_Suggest_tests_2.gif b/src/content/docs/advanced/ai-powered-features/images/AI_Suggest_tests_2.gif
new file mode 100644
index 00000000..12981b2a
Binary files /dev/null and b/src/content/docs/advanced/ai-powered-features/images/AI_Suggest_tests_2.gif differ
diff --git a/src/content/docs/advanced/ai-powered-features/images/AI_Suggest_tests_3.png b/src/content/docs/advanced/ai-powered-features/images/AI_Suggest_tests_3.png
new file mode 100644
index 00000000..a2ecc622
Binary files /dev/null and b/src/content/docs/advanced/ai-powered-features/images/AI_Suggest_tests_3.png differ
diff --git a/src/content/docs/advanced/ai-powered-features/images/Empty_Project_1.png b/src/content/docs/advanced/ai-powered-features/images/Empty_Project_1.png
new file mode 100644
index 00000000..a0f57c38
Binary files /dev/null and b/src/content/docs/advanced/ai-powered-features/images/Empty_Project_1.png differ
diff --git a/src/content/docs/advanced/ai-powered-features/images/Empty_Project_2.png b/src/content/docs/advanced/ai-powered-features/images/Empty_Project_2.png
new file mode 100644
index 00000000..ad25171b
Binary files /dev/null and b/src/content/docs/advanced/ai-powered-features/images/Empty_Project_2.png differ
diff --git a/src/content/docs/advanced/test-artifacts/artifacts.md b/src/content/docs/advanced/test-artifacts/artifacts.md
new file mode 100644
index 00000000..d59b95e5
--- /dev/null
+++ b/src/content/docs/advanced/test-artifacts/artifacts.md
@@ -0,0 +1,123 @@
+---
+title: Artifacts
+description: Learn how Testomat.io handles test artifacts and attachments, such as screenshots, videos, and logs, to enhance test management and reporting. This guide explains how to add and manage attachments during test case creation, manual testing, and automated test execution with S3 cloud storage support. It also covers features like in-platform artifact preview and integrations with various frameworks and tools to streamline debugging and project documentation.
+type: article
+url: https://docs.testomat.io/advanced/test-artifacts/artifacts
+head:
+  - tag: meta
+    attrs:
+      name: keywords
+      content: test artifacts, test attachments, screenshots, videos, test management, test reporting, debugging, manual testing, automation testing, S3 storage, AWS, DigitalOcean, Azure, Google Cloud, CI/CD, preview attachments, suite-level attachments, folder-level attachments, readme section, Jira Integration, Video Capturing, Automated Tests Analytics
+---
+
+Testomat.io offers various ways to handle test artifacts and attachments, such as screenshots, videos, logs, documents, etc. These features enhance test management, reporting, and debugging by providing visual evidence and supporting documentation.
+
+You can add test artifacts and attachments during **test case creation**, **manual testing**, or **automation testing**.
+
+## Add Attachments to Test Cases
+
+Testomat.io allows you to add attachments, including screenshots, files, and videos, to your test cases. This helps visualize tests, makes them clearer, and offers additional information for QA managers, developers, other QAs, or stakeholders.
+
+You can add attachments during test case creation or editing by directly **dragging & dropping** them, using the hotkeys **CTRL+C/CTRL+V** to copy/paste, or by using the **browse a file** option.
+
+![Testomat.io - Add attachment](./images/Add_attachment_1.gif)
+
+For more details on adding and managing attachments, refer to the [Add Attachments to Test](https://docs.testomat.io/project/tests/test-case-creation-and-editing/#add-attachments-to-test) section.
+
+You can also manage attachments across Testomat.io by adding them to **suites**, **folders**, and the **readme section**. This streamlines workflows by keeping all relevant files and documentation in one place. Whether you're sharing important notes, reference materials, or test data, you can now attach them directly to the relevant test structures for easy access.
+
+**Use cases:**
+
+- **Suite-level attachments:** Attach detailed test execution reports or configuration files to specific test suites for easy reference by team members.
+- **Folder-level attachments:** Add relevant project documentation or setup instructions to test folders, ensuring that all files related to a specific testing area are easily accessible.
+- **Readme section attachments:** Include critical resources or additional context in the readme section, such as diagrams, code samples, or links to external resources, improving overall clarity for team members.
+
+## Delete Attachments
+
+If you no longer need your attachments, you can delete them.
+
+Go to the [How to Delete Attachment from Test Case](https://docs.testomat.io/project/tests/test-case-creation-and-editing/#how-to-delete-attachment-from-test-case) section to read more about this feature.
+
+:::note
+
+You can delete attachments from Suite, Folder and Readme section in the same way you would from a Test Case.
+
+:::
+
+## Add Test Artifacts During Manual Testing
+
+Attaching a short video or screenshots is highly useful for manual testing, especially for failing tests. These artifacts provide comprehensive information, making it easier for the QA tester to understand what went wrong.
+
+To add test artifacts during manual test execution:
+
+1. Launch a run.
+2. In the manual run window, select the test case.
+3. **Drag & drop** the test evidence into the **'Attachments'** section (or use the **browse a file** or **copy/paste** option).
+
+![Testomat.io - Add attachment](./images/Add_attachment_2.gif)
+
+## Preview Attachments
+
+Attachments are available for viewing without downloading. To preview an attachment, select it in the Test Case Attachment section to view it in the open dialog window.
+
+![Testomat.io - Preview attachment](./images/Preview_attachment_2.gif)
+
+Testomat.io also allows you to watch attached videos or screenshots directly from the **Report** page.
+
+![Testomat.io - Preview attachment](./images/Preview_attachment_1.gif)
+
+Additionally, you can switch between list and tile views to make reviewing test artifacts easier.
+
+![Testomat.io - Preview attachment](./images/Preview_attachment_3.gif)
+
+## Add Test Artifacts During Automation Testing
+
+Testomat.io also allows you to add test artifacts during automation testing.
+Test artifacts, which include screenshots and video files, provide a complete picture of your test results as soon as autotests are completed, helping you understand the root cause of a problem.
+
+### Support for Storing Artifacts in S3
+
+This kind of implementation of **Video recording** or **Screenshots** artifacts is possible due to our integration with many **S3 Storages**: AWS, DigitalOcean, Azure, and Google Cloud. After a test run is completed, video and other artifacts are uploaded to the chosen storage, and from there, they automatically go to Testomat.io.
+
+Testomat.io lets you store unlimited artifact data by choosing one of the available S3 Storages, depending on your company's priorities or the location of your preferred CI/CD service.
+
+:::note
+
+All cloud providers charge for their services. Be sure to check their rates in detail before starting.
+
+:::
+
+Thanks to **S3 Cloud Services** integration, you can easily find any test artifacts stored in the test project structure on your local PC or in the cloud when running tests on GitHub, Jenkins, or any other CI/CD service.
+
+### How Artifact Recording Works
+
+1. **Get test artifacts when you run autotests:** To enable this, you need to set the right parameters – specify the configuration of your cloud storage. As a result, all screenshots or video files from S3 Storage will be placed in the test report, helping you prioritize issues. You can configure artifacts to be collected from failed, passed, or all tests, whatever status was assigned to them after the end of the run. This parameter is typically configured within your testing framework (e.g., Cypress, Playwright), not in Testomat.io; a configuration sketch is shown at the end of this page.
+
+:::note
+
+Read how to **Set up S3 Bucket** on the relevant [Test Artifacts](https://docs.testomat.io/test-reporting/artifacts/) page.
+
+:::
+
+2. **Upload public or private test artifacts:** These processes are identical; the artifact travels the same path in both cases. But if a private format is selected, additional settings and data encryption are used. This feature is used by companies that pay special attention to data privacy and security of their projects, or when company policy forbids public access to their projects. The necessary settings are set in the **'Artifacts'** tab and apply to all tests of a particular project. For more details, visit the [Privacy](https://docs.testomat.io/test-reporting/artifacts/#privacy) section in the Docs.
+
+3. **Preview or download test artifact:** You can view videos or screenshots directly on the platform or download them to your device. The latter is relevant if you need to share the test results with stakeholders.
+
+:::note
+
+If you share a private artifact, your recipient will receive a message that they do not have permission to view the file.
+
+:::
+
+### Associated Test Screenshot-Recording Features
+
+- **Reporters by Popular Testing Frameworks** – real-time reports supporting popular programming languages give you a complete picture of the software product's quality level. With Advanced Reporters by popular testing frameworks, you can see the results of end-to-end, integration, unit testing, and API testing. Thanks to the integration with many S3 Storages, our SaaS test management solution retrieves test artifacts (including screenshots taken during test runs) and uploads them to the report in a convenient format.
+
+- **Automated Tests Analytics** – all test results of test executions are used for in-depth analytics. Determining the percentage of test automation, the total number of tests, the ratio of tests to uncompleted tests, failed tests, [flaky tests](https://docs.testomat.io/project/analytics/#flaky-tests), and never-run tests helps prioritize daily tasks and makes the process transparent to all Agile team members.
+
+- **Jira Integration** – after installing the [Jira plugin](https://docs.testomat.io/advanced/jira-plugin/), you have the ability to run test cases from Atlassian Jira and work on projects directly from either Jira or Testomat.io; switching between tools is no longer necessary. You can also navigate from Jira to the Run Report to view a specific artifact.
+
+- **Video Capturing Artifacts** – in addition to saving screenshots, you can also save videos from tests (passed, failed, or all completed, regardless of the result). From Test Runner and Reporter, video files, like screenshots, are uploaded to S3 Storage and then displayed in the test management system in a user-friendly format, where they can be viewed or downloaded.
+
+- **Artifact S3 Support** – you can store [Test Artifacts](https://docs.testomat.io/test-reporting/artifacts/) in the cloud and choose the provider your company or CI/CD tool is comfortable working with: AWS, DigitalOcean, Azure, and Google Cloud.
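+
+As an illustration, the cloud storage configuration mentioned in the recording steps above is typically supplied to your reporter through environment variables. The following is a minimal sketch assuming an AWS-style S3 bucket; the variable names are illustrative, so check the [Test Artifacts](https://docs.testomat.io/test-reporting/artifacts/) page for the exact settings your reporter expects:
+
+```bash
+# Sketch: S3 connection settings the test reporter reads at run time (names are illustrative)
+S3_REGION=us-east-1              # region where your artifacts bucket lives
+S3_BUCKET=my-test-artifacts      # bucket that receives screenshots and videos
+S3_ACCESS_KEY_ID=<access-key>    # credentials of a user allowed to write to the bucket
+S3_SECRET_ACCESS_KEY=<secret-key>
+```
+
+With these variables in place, artifacts collected during a run are uploaded to your bucket and from there linked into the Testomat.io report automatically.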
+ diff --git a/src/content/docs/advanced/test-artifacts/images/Add_attachment_1.gif b/src/content/docs/advanced/test-artifacts/images/Add_attachment_1.gif new file mode 100644 index 00000000..27a87243 Binary files /dev/null and b/src/content/docs/advanced/test-artifacts/images/Add_attachment_1.gif differ diff --git a/src/content/docs/advanced/test-artifacts/images/Add_attachment_2.gif b/src/content/docs/advanced/test-artifacts/images/Add_attachment_2.gif new file mode 100644 index 00000000..3b3bf333 Binary files /dev/null and b/src/content/docs/advanced/test-artifacts/images/Add_attachment_2.gif differ diff --git a/src/content/docs/advanced/test-artifacts/images/Preview_attachment_1.gif b/src/content/docs/advanced/test-artifacts/images/Preview_attachment_1.gif new file mode 100644 index 00000000..91eaeb30 Binary files /dev/null and b/src/content/docs/advanced/test-artifacts/images/Preview_attachment_1.gif differ diff --git a/src/content/docs/advanced/test-artifacts/images/Preview_attachment_2.gif b/src/content/docs/advanced/test-artifacts/images/Preview_attachment_2.gif new file mode 100644 index 00000000..94c99dec Binary files /dev/null and b/src/content/docs/advanced/test-artifacts/images/Preview_attachment_2.gif differ diff --git a/src/content/docs/advanced/test-artifacts/images/Preview_attachment_3.gif b/src/content/docs/advanced/test-artifacts/images/Preview_attachment_3.gif new file mode 100644 index 00000000..b82d9c1a Binary files /dev/null and b/src/content/docs/advanced/test-artifacts/images/Preview_attachment_3.gif differ diff --git a/src/content/docs/integrations/issues-management/images/att1_youtrack.png b/src/content/docs/integrations/issues-management/images/att1_youtrack.png new file mode 100644 index 00000000..fe318f25 Binary files /dev/null and b/src/content/docs/integrations/issues-management/images/att1_youtrack.png differ diff --git a/src/content/docs/integrations/issues-management/youtrack.md b/src/content/docs/integrations/issues-management/youtrack.md index 6607fe4b..cf79e899 100644 --- a/src/content/docs/integrations/issues-management/youtrack.md +++ b/src/content/docs/integrations/issues-management/youtrack.md @@ -1,6 +1,6 @@ --- title: YouTrack Configuration -description: Learn how to integrate YouTrack with Testomat.io for streamlined issue management. This guide covers retrieving your YouTrack Workspace Name, Personal Access Token, and Project ID, along with detailed steps to set up and configure the integration within Testomat.io. +description: Learn how to integrate YouTrack with Testomat.io for streamlined issue management. This guide explains how to retrieve your YouTrack Workspace Name, Personal Access Token, and Project ID, and then configure the integration within Testomat.io. type: article url: https://docs.testomat.io/integrations/issues-management/youtrack head: @@ -8,33 +8,32 @@ head: attrs: name: og:image content: https://docs.testomat.io/_astro/New_UTbONHqM_2024-10-31.DUKqHBp2_YEDnG.webp - + - tag: meta attrs: name: keywords content: YouTrack integration, Testomat.io, issue tracking, personal access token, workspace configuration, project management, test management tools, software testing, bug tracking --- -If you already have a workspace and project configured in **YouTrack**, you're ready to integrate it with Testomat.io. To get started, youโ€™ll need your **Workspace Name**, **Personal Access Token**, and **Project ID**. Weโ€™ll walk you through each step to locate this information and connect it with Testomat.io. 
+If you already have a workspace and project in YouTrack, you're ready to integrate it with Testomat.io. To do so, you'll need three items:
 
-![Testomat.io - YouTrack Project](./images/New_US8feteR_2024-10-31.png)
+- Workspace Name
+- Personal Access Token
+- Project ID
 
-You can find your **Workspace Name** in the browser **URL** when you're logged into YouTrack. For example: `[my-workspace].youtrack.cloud`
-
-![Testomat.io - YouTrack Workspace Name](./images/New_kltLXnVA_2024-10-31.png)
+This guide walks you through where to find each.
 
-To locate the **Project ID**, follow these steps:
+![Testomat.io - YouTrack Project](./images/New_US8feteR_2024-10-31.png)
 
-1. Go to **Projects** in the header
-2. Click on your project
+## How to Find a Workspace Name
 
-![Testomat.io - YouTrack Project Info](./images/New_Yp7Na4z3_2024-10-31.png)
+You can find your **Workspace Name** in the browser **URL** when you're logged into YouTrack. For example: `[my-workspace].youtrack.cloud`
 
-3. Find your **Project ID** in the browser **URL**; For example: `0-1`
+![Testomat.io - YouTrack Workspace Name](./images/New_kltLXnVA_2024-10-31.png)
 
-![Testomat.io - YouTrack Project ID](./images/New_0t9JUaTD_2024-10-31.png)
+## How to Create a Personal Access Token
 
-Finally, to create the **Personal Access Token**, follow these steps:
+To create the **Personal Access Token**, follow these steps:
 
 1. Click on the profile avatar
 2. Go to **Profile**
@@ -44,21 +43,47 @@
 
 ![Testomat.io - YouTrack Create Token](./images/New_pCUPQyzz_2024-10-31.png)
 
 5. Enter a **Token** name
-6. Select services (YouTrack, YouTrack Administration)
+6. Select services (**YouTrack, YouTrack Administration**)
 7. Click on **Create** button
 
 ![Testomat.io - YouTrack Personal Access Token](./images/New_WzTEWxkc_2024-10-31.png)
 
 Once the token has been created, copy it. Keep your Personal Access Token secure, as you'll need it for the integration with Testomat.io.
 
-After collecting all necessary data, we can move on to Testomat.io.
+## How to Get a Project ID
+
+Finally, to get the **Project ID**, follow these steps:
+
+1. Open YouTrack [API Documentation](https://www.postman.com/youtrack-dev/youtrack/documentation/sd7pq8x/youtrack-rest-api) -> Administration -> Projects -> Read a List of Projects
+2. Send a GET request:
+
+```
+GET https://<your-workspace>.youtrack.cloud/api/admin/projects?fields=id,key,name
+Authorization: Bearer <your-personal-access-token>
+```
+
+3. Paste your Personal Access Token
+4. Click **Send** button and get your Project ID in the response body (e.g. `"id": "0-1"`)
+
+![Get Project ID](./images/att1_youtrack.png)
+
+:::note
+
+The ID in the browser URL isn't the same as the project ID in the database.
+
+:::
+
+After collecting all necessary data, we can move on to Testomat.io.
+
+## How to Connect YouTrack in Testomat.io
 
 1. Select YouTrack from the list of available Issue Management Systems.
 
 ![Testomat.io - Connect to YouTrack](./images/New_F8MYluVy_2024-10-31.png)
 
 2. Enter a **Profile Name**
-3. Paste YouTrack **Workspace name**
+3. Paste YouTrack **Workspace Name**
 4. Paste YouTrack **Personal Access Token**
 5. Paste YouTrack **Project ID**
 6. Click on **Save** button
 
 If everything was done correctly, you will receive a confirmation message indicating that the YouTrack profile was successfully created.
-![Testomat.io - YouTrack Profile](./images/New_K1fdAe5k_2024-10-31.png) \ No newline at end of file +![Testomat.io - YouTrack Profile](./images/New_K1fdAe5k_2024-10-31.png) diff --git a/src/content/docs/legal/security/jira.md b/src/content/docs/legal/security/jira.md index 67b8414d..6174880f 100644 --- a/src/content/docs/legal/security/jira.md +++ b/src/content/docs/legal/security/jira.md @@ -122,7 +122,7 @@ Namely, you can: - Use tracebility matrix and reports to check test coverage in sprints and project. - Manage project branches. -For details on how to use Testomat.io JIRA Plugin, please refer to [JIRA guide](../../advanced/jira-plugin/index.md) in our documentation. +For details on how to use Testomat.io JIRA Plugin, please refer to [JIRA guide](https://docs.testomat.io/advanced/jira-plugin/) in our documentation. ## FAQ diff --git a/src/content/docs/management/company/administration.md b/src/content/docs/management/company/administration.md index 9691ce90..3f4339f9 100644 --- a/src/content/docs/management/company/administration.md +++ b/src/content/docs/management/company/administration.md @@ -131,10 +131,11 @@ Testomat.io uses Groq as the main AI provider. Groq uses only open-source models How to enable built-in AI? -1. Click **'Companies'** in the header -2. Click the **'Settings'** button -3. Click the **'AI'** option -4. Enable the **'AI Features'** option +1. Create a Company (see how to do this [here](https://docs.testomat.io/management/company/administration/#how-to-create-a-company)) +2. Click **'Companies'** in the header +3. Click the **'Settings'** button +4. Click the **'AI'** option +5. Enable the **'AI Features'** option ![AI Features](images/att12_263.png) diff --git a/src/content/docs/management/company/subscriptions.md b/src/content/docs/management/company/subscriptions.md index 63cc6ede..323e1e28 100644 --- a/src/content/docs/management/company/subscriptions.md +++ b/src/content/docs/management/company/subscriptions.md @@ -27,7 +27,18 @@ Testomat.io offers flexible plans tailored for different team sizes and needs: - **Professional** โ€” ideal for small and medium-sized QA teams - **Enterprise** โ€” includes full feature set and On-Premise options for large organizations -Each plan provides different access to features like Jira integration and branch management. For a detailed comparison of plans, see: [Compare all plans.](https://testomat.io/pricing/) +:::note + +Subscription fees are calculated **per user, per month** or **per user, per year**. + +::: + +Let's take a look at an example for the **Professional Plan**: + +- **Monthly billing:** Charged **per user, per month** (e.g., $30 ร— 10 users = $300/month). +- **Yearly billing:** Charged **per user, per year**, at a **10% discounted rate** compared to monthly billing (e.g., $324 ร— 10 users = $3,240/year). + +Each plan provides different access to features like Jira integration and branch management. For a detailed comparison of plans, see: [Compare all plans](https://testomat.io/pricing/). 
## How to Use the Free Plan diff --git a/src/content/docs/management/company/users-and-permissions/images/att1_5718.png b/src/content/docs/management/company/users-and-permissions/images/att1_5718.png new file mode 100644 index 00000000..b44ab69a Binary files /dev/null and b/src/content/docs/management/company/users-and-permissions/images/att1_5718.png differ diff --git a/src/content/docs/management/company/users-and-permissions/images/att2_5718.png b/src/content/docs/management/company/users-and-permissions/images/att2_5718.png new file mode 100644 index 00000000..c2e68200 Binary files /dev/null and b/src/content/docs/management/company/users-and-permissions/images/att2_5718.png differ diff --git a/src/content/docs/management/company/users-and-permissions/images/att3_5718.png b/src/content/docs/management/company/users-and-permissions/images/att3_5718.png new file mode 100644 index 00000000..a5ca714a Binary files /dev/null and b/src/content/docs/management/company/users-and-permissions/images/att3_5718.png differ diff --git a/src/content/docs/management/company/users-and-permissions/index.mdx b/src/content/docs/management/company/users-and-permissions/index.mdx index 08c2f035..4590bd8b 100644 --- a/src/content/docs/management/company/users-and-permissions/index.mdx +++ b/src/content/docs/management/company/users-and-permissions/index.mdx @@ -69,22 +69,47 @@ As well, the same user within a Company may have different roles on different pr Follow these steps to invite users to your company: -1. Go to the 'Companies' page. -2. Click 'Invite' button. +1. Go to the **Companies** page +2. Click the **Invite** button next to the company you want to invite users to -![Testomatio - Invite_users](./images/Invite_users_n.png) +![Testomatio - Invite_users](./images/att1_5718.png) -3. Enter user's mail (to invite multiple users, use a comma as a separator and enter the users' emails that you need). +Once the **Invite Users** page opens: -![Testomatio - Invite_users](./images/Invite_users_2n.png) +3. Enter the user's email. To invite multiple users, separate addresses with commas +4. Select a specific role: -4. Select a role 'Accountant User' or 'Read Only Mode', if needed. +- **QA** (Paid, project-level seat. Plans, designs, executes, and reports tests; creates and manages projects; files bugs, verifies fixes, and uses every feature needed for quality-assurance work within the project.) +- **Manager** (Paid, project-level seat. Includes all QA capabilities plus the ability to invite teammates to projects and purchase or modify project subscriptions.) -5. Click 'Invite' button. +:::note + +Managers can only be invited one at a time. If multiple emails are entered, the **Manager** role option will be disabled. + +::: + +- **Accountant** (Free seat. Makes payments, views invoices and payment methods, and updates billing or tax information; no access to any project content or settings.) +- **Read-only** (Free seat. Available on paid pricing plans. Views test cases, runs, and reports across projects but cannot edit or create anything.) + +:::note + +Users invited with **Read-only** access are assigned the QA role by default, but their permissions remain limited to read-only. They do not have full QA rights and cannot edit or create content. + +::: + +5. 
Click the **Invite** button + +![Testomatio - Invite_button](./images/att2_5718.png) + +--- -![Testomatio - Invite_users](./images/Invite_users_3n.png) +- You will be redirected to the Company page +- A confirmation message will appear, confirming that users have been successfully added +- If the paid seat limit is reached, only eligible users will be added +- If an invited email is invalid (wrong format or already exists), an orange warning message will appear +- After users are added to the Company, you can assign them to Projects -After the users are added to a Company you can add them to your Project. +![Testomatio - Confirmation message](./images/att3_5718.png) ## How to Add a User to a Project diff --git a/src/content/docs/project/analytics/images/Analytics_Failures.gif b/src/content/docs/project/analytics/images/Analytics_Failures.gif new file mode 100644 index 00000000..79f13099 Binary files /dev/null and b/src/content/docs/project/analytics/images/Analytics_Failures.gif differ diff --git a/src/content/docs/project/analytics/images/New_QIy3IApm_2024-08-18.gif b/src/content/docs/project/analytics/images/New_QIy3IApm_2024-08-18.gif deleted file mode 100644 index c664e852..00000000 Binary files a/src/content/docs/project/analytics/images/New_QIy3IApm_2024-08-18.gif and /dev/null differ diff --git a/src/content/docs/project/analytics/images/att10_357.png b/src/content/docs/project/analytics/images/att10_357.png new file mode 100644 index 00000000..e9f7dcb8 Binary files /dev/null and b/src/content/docs/project/analytics/images/att10_357.png differ diff --git a/src/content/docs/project/analytics/images/att11_357.png b/src/content/docs/project/analytics/images/att11_357.png new file mode 100644 index 00000000..cb87f515 Binary files /dev/null and b/src/content/docs/project/analytics/images/att11_357.png differ diff --git a/src/content/docs/project/analytics/images/att1_357.png b/src/content/docs/project/analytics/images/att1_357.png new file mode 100644 index 00000000..e96a7b2f Binary files /dev/null and b/src/content/docs/project/analytics/images/att1_357.png differ diff --git a/src/content/docs/project/analytics/images/att2_357.png b/src/content/docs/project/analytics/images/att2_357.png new file mode 100644 index 00000000..80cde94e Binary files /dev/null and b/src/content/docs/project/analytics/images/att2_357.png differ diff --git a/src/content/docs/project/analytics/images/att3_357.png b/src/content/docs/project/analytics/images/att3_357.png new file mode 100644 index 00000000..20074618 Binary files /dev/null and b/src/content/docs/project/analytics/images/att3_357.png differ diff --git a/src/content/docs/project/analytics/images/att4_357.png b/src/content/docs/project/analytics/images/att4_357.png new file mode 100644 index 00000000..bfc58bfd Binary files /dev/null and b/src/content/docs/project/analytics/images/att4_357.png differ diff --git a/src/content/docs/project/analytics/images/att5_357.png b/src/content/docs/project/analytics/images/att5_357.png new file mode 100644 index 00000000..778a8c76 Binary files /dev/null and b/src/content/docs/project/analytics/images/att5_357.png differ diff --git a/src/content/docs/project/analytics/images/att6_357.png b/src/content/docs/project/analytics/images/att6_357.png new file mode 100644 index 00000000..ddc70b71 Binary files /dev/null and b/src/content/docs/project/analytics/images/att6_357.png differ diff --git a/src/content/docs/project/analytics/images/att7_357.png b/src/content/docs/project/analytics/images/att7_357.png new file mode 100644 
index 00000000..e007f2f6 Binary files /dev/null and b/src/content/docs/project/analytics/images/att7_357.png differ diff --git a/src/content/docs/project/analytics/images/att8_357.png b/src/content/docs/project/analytics/images/att8_357.png new file mode 100644 index 00000000..8cb4b5ef Binary files /dev/null and b/src/content/docs/project/analytics/images/att8_357.png differ diff --git a/src/content/docs/project/analytics/images/att9_357.png b/src/content/docs/project/analytics/images/att9_357.png new file mode 100644 index 00000000..82c28e7a Binary files /dev/null and b/src/content/docs/project/analytics/images/att9_357.png differ diff --git a/src/content/docs/project/analytics/images/gif1_357.gif b/src/content/docs/project/analytics/images/gif1_357.gif new file mode 100644 index 00000000..9f04d182 Binary files /dev/null and b/src/content/docs/project/analytics/images/gif1_357.gif differ diff --git a/src/content/docs/project/analytics/images/gif2_357.gif b/src/content/docs/project/analytics/images/gif2_357.gif new file mode 100644 index 00000000..b39a01d1 Binary files /dev/null and b/src/content/docs/project/analytics/images/gif2_357.gif differ diff --git a/src/content/docs/project/analytics/images/gif3_357.gif b/src/content/docs/project/analytics/images/gif3_357.gif new file mode 100644 index 00000000..3bc2b5dd Binary files /dev/null and b/src/content/docs/project/analytics/images/gif3_357.gif differ diff --git a/src/content/docs/project/analytics/images/gif4_357.gif b/src/content/docs/project/analytics/images/gif4_357.gif new file mode 100644 index 00000000..89d2d231 Binary files /dev/null and b/src/content/docs/project/analytics/images/gif4_357.gif differ diff --git a/src/content/docs/project/analytics/images/gif5_357.gif b/src/content/docs/project/analytics/images/gif5_357.gif new file mode 100644 index 00000000..5f5bb279 Binary files /dev/null and b/src/content/docs/project/analytics/images/gif5_357.gif differ diff --git a/src/content/docs/project/analytics/images/gif6_357.gif b/src/content/docs/project/analytics/images/gif6_357.gif new file mode 100644 index 00000000..c0fe823a Binary files /dev/null and b/src/content/docs/project/analytics/images/gif6_357.gif differ diff --git a/src/content/docs/project/analytics/index.md b/src/content/docs/project/analytics/index.md index b7d3c7d8..e2af5b75 100644 --- a/src/content/docs/project/analytics/index.md +++ b/src/content/docs/project/analytics/index.md @@ -17,9 +17,9 @@ head: As a test management system, Testomat.io Team intends to provide our users with as much valuable testing data as possible. For this purpose, we developed Analytics Feature. -![Testomat.io - Analytics](./images/Analytics.png) +![Analytics dashboard](./images/att10_357.png) -## How Does It Work? +## How Does It Work Testomat.io tracks your automated and manual tests, aggregates their status history, analyzes it, assigns tests to Analytics categories, and shows them to you. You can configure these metrics to get a more precise view and prevent bugs. This also means that Analytics widgets are updated and supplemented with each completed Test Run. @@ -57,73 +57,139 @@ Analytics data loads for the last 4 weeks by default. But you have the option to As a management system for automated tests, the Testomat.io team created an Automation Coverage Board where you can track the progress of automation coverage on the project. You can sort your tests by Suite and Automation indicators. 
-![Testomat.io - Automation-Coverage](./images/147570053-cb2bf5d9-e98c-4778-9df2-74ed88b96c49.gif) +![Automation Coverage](./images/gif2_357.gif) ## Custom Charts -Custom charts are a powerful addition to analytics. They allow you to customise the display of data that is important to you — not only for tests, but now also for **Test Runs**. +Custom charts are a powerful addition to analytics. They allow you to customize the display of the data most relevant to you — not only for **Tests**, but also for **Test Runs**. -You can now build custom charts based on **Test Run** data using search queries. This enables a more comprehensive analysis of your test execution process, improving visibility into trends, completion metrics, and performance of your testing pipeline. +You can build custom charts using search queries to get a comprehensive view of your testing process, providing visibility into trends, completion metrics, and overall testing performance. -**Use Cases:** +Custom charts help teams to visualize key testing metrics at a glance: -**Label-Based Metrics:** Track the number of test runs with a specific label, e.g., os:windows to monitor test execution on the Windows platform. +- **Environment-Based Metrics**: Track the number of test runs executed on specific platforms or environments, helping you monitor execution across systems. +- **Label-Based Metrics**: Track tests or test runs associated with specific labels, such as a build version or milestone, to gain insights into particular testing contexts. +- **Run Duration Analysis**: Visualize average or total duration of Test Runs over time. +- **Trends Over Time**: With timeline settings, track metrics across a selected period. +- **Widgets**: Any chart can be added as a widget to your dashboard, giving a separate view alongside other analytics. -**Run Duration Analysis:** Visualize average or total duration of Test Runs over time. +### How to Create a Custom Chart -**Custom Querying:** Filter and chart test runs using attributes such as title, created, updated, or any defined label/tag. +The flow for creating a custom chart is the same for both **Tests and Test Runs**. The only difference is which **Data Source** you select. -### How to Use Custom Charts with Test Runs -In the **Edit Chart** mode, create a new query by selecting **"Test Run"** as the target entity. +1. Navigate to the **Analytics** tab in the left sidebar +2. Click **Custom Charts** on the dashboard -Then configure the chart view as before, with options for: +![Custom Charts](./images/att1_357.png) -**Label View:** Customize the label display. -**Color Coding:** Apply custom colours for each query. -**Data Display:** Show raw numbers, percentages, or both. +3. Click the **+** button to open a new chart -Once configured, save the chart and it will appear on your dashboard. +![Plus button](./images/att2_357.png) + +4. Enter **Title** (required) +5. Toggle **As widget** (optional) + +:::note + +By toggling **As widget**, your custom chart will appear as a separate widget on the Analytics dashboard. This allows you to monitor key metrics continuously alongside other analytics without navigating back to the Custom Charts page. + +::: + +6. Enter **Description** (optional) +7. Select **Data Source** (required): + +- **Tests:** for Test data +- **Runs:** for Test Run data + +![Data Source](./images/att3_357.png) + +After selecting the Data Source, additional configuration options appear: + +8. 
**Chart Type** (required) – choose how data will be visualized (bar, donut, pie, etc.) +9. **Labels** (required) – customize the display of labels (short query, titles, numbers, title and %, etc.) + +**Timeline settings:** + +10. **Period** (optional) – enable Timeline to track data changes over a selected period +11. **Extra Line** (optional) – add an additional line to compare metrics within the Timeline +12. **Chart type** (optional) – select the visualization style specifically for the timeline chart +13. Click the **Add Query** button to open the Query Editor: + +Configure queries according to your metrics using supported query variables: + +- [Tests Variables](https://docs.testomat.io/advanced/tql/#tests-variables) +- [Runs Variables](https://docs.testomat.io/advanced/tql/#runs-variables) + +![Add Query](./images/att4_357.png) + +14. Click the **Save** button – the chart will appear on your dashboard, or as a widget if selected. This extended chart functionality enhances your ability to make data-driven decisions by offering insight into both tests and test runs. -![Testomat.io - Custom Charts](./images/New_3EOUAwBN_2024-08-04.png) +![Created Custom Chart](./images/att5_357.png) + +### How To Customize The Chart View + +Custom charts can be tailored to match your reporting needs. In this section, you can adjust how chart information is displayed – by modifying labels, colors, and other visual settings. These options help make your data easier to read and interpret. + +- **Labels** + +During chart creation or in the **Edit** mode, select the **Labels** dropdown to customize the information on the chart to your preference: + +For example, if the query `state == 'manual' and status == 'failed'` has been selected: + +| **Label** | **View** | +| -------------------------- | -------------------------------------------------------- | +| Query | `state == 'manual' and status == 'failed': 9 tests` | +| Short query | `state == 'manual' and status ...: 9 tests` | +| Titles | `state == 'manual' and status == 'failed'` | +| Numbers | `9` | +| Title and items amount | `state == 'manual' and status == 'failed': 9 tests` | +| Title and % | `state == 'manual' and status == 'failed': 1% ` | +| Title, items amounts and % | `state == 'manual' and status == 'failed': 9 tests, 1% ` | + +![Labels view](./images/att7_357.png) -### How To Customise The Chart View? -In the **Edit** mode, select the **Labels** field to customise the information on the chart to your preference: +- **Color Customization** -| Label | View | -| -------------------------- | --------------------------------------------------- | -| Query | `state == 'manual' and status == 'failed': 9 tests` | -| Titles | `state == 'manual' and status ...: 9 tests` | -| Short query | `Manual - Failed` | -| Numbers | `9` | -| Title and tests amount | `Manual - Failed: 9 tests` | -| Title and % | `Manual - Failed: 7%` | -| Title, tests amounts and % | `Manual - Failed: 9 tests, 7%` | +In addition to customizing labels, you can also personalize the colors of your queries for better data visualization. To do this, follow these steps: -![Testomat.io - Custom Charts Labels](./images/New_Sey1I0ug_2024-08-07.png) +1. Open a Custom Chart you want to customize +2. Scroll down to the list of queries displayed under the chart -In addition to customising labels, you can also personalise the colours of your queries in **'Edit Chart'** mode for better data visualisation. To do this, follow these steps: -1. Scroll down to the list of queries displayed under the chart +![Queries list](./images/att1_4449.png) -![Queries list](images/att1_4449.png) +3. 
Click the color box next to a query to change its color +4. After a color picker appears, select a new color -2. Click on the colour box next to the query to change colour -3. After a colour picker appears, select a new colour - - Use the colour palette to choose your preferred shade - - Alternatively, enter RGB, HSL, or HEX values manually for precise colour selection +- Use the color palette to choose your preferred shade +- Alternatively, enter RGB, HSL, or HEX values manually for precise color selection -![Set up colour](images/att2_4449.png) +![Set up color](./images/att2_4449.png) -Once selected, the chart will automatically reflect the applied colour. +Once selected, the chart will automatically reflect the applied color. -4. Click **'Save'** button to apply changes +5. Click the **Save** button to apply changes -![Save button](images/att3_4449.png) +![Save button](./images/att3_4449.png) -This feature gives you greater control over the visual presentation of your custom charts by allowing you to select colours for each query. +This feature gives you greater control over the visual presentation of your custom charts by allowing you to select colors for each query. + +- **Additional Features:** + + - **Duplicate Chart**: Quickly create a copy of an existing chart and modify it without starting from scratch + +![Duplicate Chart](./images/att9_357.png) + + - **Download Chart**: Download your chart as **SVG, PNG, or CSV** for reporting or sharing + +![Export Chart](./images/att8_357.png) + + - **Sort Queries by Tests/Default**: In Edit mode, set the order of queries in the chart + +These options give you full control over both the appearance and functionality of your custom charts, making it easier to create professional and insightful visualizations. + +![Sort Queries by Tests/Default](./images/gif1_357.gif) ### Timeline @@ -141,16 +207,17 @@ Each timeline is associated with a unique URL, which can be copied and shared wi ![Testomat.io - Unique URL for timelines](./images/New_E5IK2JjS_2024-09-24.png) -:::note +:::note Timeline feature is also available for other widgets, such as: - -- Automation Coverage. -- Failures. -- Issues. -- Defects. -- Ever Failing Tests. -- Flaky Tests. -- Jira. + +- Automation Coverage +- Failures +- Issues +- Defects +- Ever Failing Tests +- Flaky Tests +- Jira ::: @@ -158,9 +225,9 @@ Timeline feature is also available for other widgets, such as: ## Failures Board -Your team may require visualizing your current status on the project. This is implemented with Failures Board. There you can see failures from the latest test runs, navigate to the suite, latest test run, and the failed test itself. For the Failures widget, we added the ability to group and sort failures found on the project. +To make it easier for your team to track what's going on in the project, we've added a Failures Board. It shows failures from the latest test runs and lets you quickly jump to the suite, the test run, or the failed test itself. You can now also group and sort failures in the Failures widget to help make sense of the data faster. On top of that, there's a new Defect column, so you can see any linked issues right away through IMS links like GitHub, Azure DevOps, or Jira. 
-![Testomat.io - Failures Board](./images/New_QIy3IApm_2024-08-18.gif) +![Testomat.io - Failures Board](./images/Analytics_Failures.gif) ## Issues Board @@ -196,19 +263,19 @@ If a test has been run 14 times and succeeded 7 times, the success rate is calcu It is well known that automated tests need maintenance and refactoring. The Slowest Tests widget will help you identify such automated tests and visualize them. You can sort them by execution duration and passed/failed status to prioritize your work effectively. -![Testomat.io - Slowest Tests](./images/147572823-b5a3917f-55f3-4fc6-88bf-a69b1ec9bfca.gif) +![Slowest Tests](./images/gif6_357.gif) ## Never Run Tests There may be tests that were never executed on your project because they simply got lost or forgotten. To avoid such situations we added Never Run Tests, which will show you those tests. -![Testomat.io - Never-Run-Tests](./images/Never-Run-Tests.gif) +![Never Run Tests](./images/gif5_357.gif) ## Ever Failing Tests Ever Failing Tests is another useful Analytics widget that will show you automated tests that never passed. This feature will help you to pay attention to potential risks in your application. -![Testomat.io - Ever Failing Tests](./images/147574334-3a076e17-9a8e-437b-8a8c-9d1833a29c28.gif) +![Ever Failing Tests](./images/gif4_357.gif) ## Labels Statistics @@ -243,8 +310,8 @@ We empowered Testomat.io Run Reports with Overview chart, Flaky and Slowests tes Overview chart visualizes aggregated test statuses by suites: -![Testomat.io - image](./images/147571210-e1277094-b480-4b3a-ad5b-b79248203c9e.png) +![Analytics In Run Reports](./images/gif3_357.gif) Flaky and Slowest tests widgets show the latest 5 tests and navigate to dedicated Analytics pages: -![Testomat.io - image](./images/147570746-6d5a24be-689a-4209-b246-24aaf8afeda0.png) +![Flaky and Slowest widgets](./images/att11_357.png) diff --git a/src/content/docs/project/import-export/import/images/att1_436.png b/src/content/docs/project/import-export/import/images/att1_436.png new file mode 100644 index 00000000..b68edcde Binary files /dev/null and b/src/content/docs/project/import-export/import/images/att1_436.png differ diff --git a/src/content/docs/project/import-export/import/images/att2_436.png b/src/content/docs/project/import-export/import/images/att2_436.png new file mode 100644 index 00000000..a10765e2 Binary files /dev/null and b/src/content/docs/project/import-export/import/images/att2_436.png differ diff --git a/src/content/docs/project/import-export/import/images/att3_436.png b/src/content/docs/project/import-export/import/images/att3_436.png new file mode 100644 index 00000000..2322f1d9 Binary files /dev/null and b/src/content/docs/project/import-export/import/images/att3_436.png differ diff --git a/src/content/docs/project/import-export/import/import-bdd.md b/src/content/docs/project/import-export/import/import-bdd.md index 6c3bbea7..455e2776 100644 --- a/src/content/docs/project/import-export/import/import-bdd.md +++ b/src/content/docs/project/import-export/import/import-bdd.md @@ -15,144 +15,113 @@ head: content: Testomat.io, Cucumber BDD, CLI tool, test import, test IDs, test synchronization, automated tests, manual tests, test management, QA tools --- - + > 📑 This documentation is taken from open-source project [testomatio/check-cucumber](https://github.com/testomatio/check-cucumber) -To import tests into Testomatio run `check-cucumber` via npx: - ``` TESTOMATIO=API_KEY npx check-cucumber -d example/cucumber ``` - **Note: replace 
API_KEY with the key from Testomatio** - ### CLI Options: - - `-d, --dir` - Directory of the project - `-c, --codeceptjs` - If it is a CodeceptJS project, use this option - **Note:** Running this will create Suites with folder and file names as sub-suites. - To change the host for receiving data, if you use Testomat.io from a domain other than `app.testomat.io`, set the `TESTOMATIO_URL` environment variable: - ``` TESTOMATIO_URL=https://other-instance-of.testomat.io ``` - ### Sample Output - ![check-cucumber-output](https://user-images.githubusercontent.com/24666922/78559548-2dc7fb00-7832-11ea-8c69-0722222a82fe.png) - ### Assign IDs - To set Testomatio IDs for scenarios and features in files, run this command with the `--update-ids` option. - ``` TESTOMATIO=API_KEY npx check-cucumber -d example/cucumber --update-ids ``` - Scenarios should already be imported into Testomatio - > If you prefer to add Test IDs into scenario titles instead of tags, enable the `TESTOMATIO_TITLE_IDS=1` environment variable - ### Disable Detached Tests - If a test from a previous import is not found on the next import, it is marked as "detached". This ensures that tests deleted from the codebase do not stay in Testomatio. - To disable this behavior and not mark anything as detached on import, use the `--no-detached` option - ``` TESTOMATIO=API_KEY npx check-cucumber -d example/cucumber --no-detached ``` - This option could also be set via the environment variable `TESTOMATIO_NO_DETACHED=1`. If you don't want to pass it each time, create a .env file in the root dir of your project with this variable set. - - ## Synchronous Import - By default `check-cucumber` doesn't wait for all tests to be processed. It sends a request to Testomatio and exits. To wait for processing to finish, use the `--sync` option. - ``` TESTOMATIO=API_KEY npx check-cucumber -d example/cucumber --sync ``` - Please note that this will take a long time on a large codebase. - ## Clean Test IDs - If you want to import the synced project as a new project, you have to clean the test IDs. To clean up test IDs without connecting to a Testomatio project, use the `--purge` option: - ``` npx check-cucumber -d example/cucumber --purge ``` - This method may be unsafe, as it cleans all `@S*` and `@T*` tags from tests and suites. So if you have a tag like `@Test1234`, it may also be removed. If you use this option, make sure all the test titles are correct before committing the tests to Git. - > **Note:** An alias of the `--purge` option is `--unsafe-clean-ids`. - To clean only test IDs set from a specific project, use the `--clean-ids` option instead: - ``` TESTOMATIO=API_KEY npx check-cucumber -d example/cucumber --clean-ids ``` - TESTOMATIO is the API key of the old project. - +### Check IDs +To check whether all scenarios and features have Testomatio IDs, run this command with the `--check-ids` option. +``` +TESTOMATIO=API_KEY npx check-cucumber -d example/cucumber --check-ids +``` +If there is a feature or scenario without a Testomatio ID, the command exits with a non-zero status code. +If all features and scenarios have Testomatio IDs, the command imports them into Testomatio. ### Import Into a Branch - Tests can be imported into a specific branch if the `TESTOMATIO_BRANCH` parameter is used. Branch is matched by its ID. If the branch is not found, it will be created. 
- ``` TESTOMATIO_BRANCH="dev" TESTOMATIO=1111111 npx check-cucumber -d example/cucumber ``` - ### Keep Structure of Source Code - When tests in source code have IDs assigned and those tests are imported, Testomat.io uses the current structure in a project to put the tests in. If folders in the source code don't match folders in the Testomat.io project, the existing structure in the source code will be ignored. To force using the structure from the source code, use the `--keep-structure` flag on import: - ``` TESTOMATIO=1111111 npx check-cucumber -d example/cucumber --keep-structure ``` - > This may be helpful when you want to align the current project with the source code and use the source code as the source of truth for tests. - - ### Delete Empty Suites - If tests were marked with IDs and imported to already created suites in Testomat.io, newly imported suites may become empty. Use the `--no-empty` option to clean them up after import. - ``` TESTOMATIO=1111111 npx check-cucumber -d example/cucumber --no-empty ``` - > This prevents usage of the `--keep-structure` option. - ### Import Into a Specific Suite - -To put all imported tests into a specific suite (folder) pass in `TESTOMATIO_PREPEND_DIR` environment variable: - +You can import tests into a specific suite using one of these options: +#### Option 1: Import to existing suite by ID +Use `TESTOMATIO_SUITE` to import tests into an existing suite by its ID: +``` +TESTOMATIO_SUITE=@Sa1b2c3d4 TESTOMATIO=API_KEY npx check-cucumber -d example/cucumber +``` +#### Option 2: Create new suite folder +Use `TESTOMATIO_PREPEND_DIR` to create a new folder and import all tests into it: ``` TESTOMATIO_PREPEND_DIR="MyTESTS" TESTOMATIO=API_KEY npx check-cucumber -d example/cucumber ``` - +This creates a new suite folder named "MyTESTS" and imports all tests under it, preserving the original file structure within that folder. +### Apply Labels to Tests +You can apply labels to all imported tests using the `TESTOMATIO_LABELS` environment variable: +```bash +# Apply labels to all tests +TESTOMATIO_LABELS="smoke,regression" TESTOMATIO=API_KEY npx check-cucumber -d example/cucumber +# Apply labels with values using label:value format +TESTOMATIO_LABELS="severity:high,feature:auth" TESTOMATIO=API_KEY npx check-cucumber -d example/cucumber +``` ### Import Manual BDD Tests From Source Code - If you have manual tests in the repository and want to import them into Testomatio, you can use the `.manual.feature` extension for the feature file. Such tests will be imported alongside automated tests and will be marked as `manual` in Testomatio. For instance: - ``` mark-as-completed.manual.feature ``` - --- - +All available options are listed [here](./cli.md) +--- License MIT. - Part of [Testomat.io](https://testomat.io/) - - diff --git a/src/content/docs/project/import-export/import/import-js.md b/src/content/docs/project/import-export/import/import-js.md index aef8cd0d..2f10fc46 100644 --- a/src/content/docs/project/import-export/import/import-js.md +++ b/src/content/docs/project/import-export/import/import-js.md @@ -25,330 +25,352 @@ On this page we collect the reference to them. Learn how to install and configur > 📑 This documentation is taken from open-source project [testomatio/check-tests](https://github.com/testomatio/check-tests) -Use this checker as CLI tool with any Continuous Integration service. 
- Run `check-tests` via npx: - ```sh npx check-tests <framework> "<pattern>" --no-skipped ``` - #### Development - To change the endpoint host for receiving data and set it to something other than app.testomat.io, use the TESTOMATIO_URL environment variable: - TESTOMATIO_URL=http://local.testomat.io - > This checker will fail a build if exclusive tests (with `.only`, `fit` or `fdescribe`) are found - #### Arguments: - - test framework - glob pattern to match tests in a project, example: `tests/**_test.js`. **It is important to include the glob pattern in double quotes `"` so the wildcard can be used correctly.** - #### CLI Options: - - `--no-skipped` - fail when skipped tests found - `--typescript` - enable typescript support - `-g, --generate-file <fileName>` - Export test details to a document - `-u, --url <url>` - GitHub URL to get file link (URL/tree/master) - #### Example - -Check tests for CodeceptJS - -``` +#### Framework Examples +##### CodeceptJS +```bash +## JavaScript npx check-tests codeceptjs "tests/**_test.js" +## TypeScript +npx check-tests codeceptjs "tests/**_test.ts" --typescript ``` - -Check tests for Protractor - +##### Cypress.io +```bash +## JavaScript +npx check-tests cypress "cypress/integration/**.js" +npx check-tests cypress.io "cypress/e2e/**.js" +## TypeScript +npx check-tests cypress "cypress/integration/**.ts" --typescript +npx check-tests cypress.io "cypress/e2e/**.spec.ts" --typescript ``` -npx check-tests protractor "spec/**.spec.js" +##### Jasmine +```bash +## JavaScript +npx check-tests jasmine "spec/**/*.spec.js" +## TypeScript +npx check-tests jasmine "spec/**/*.spec.ts" --typescript ``` - -Check tests for Protractor with TypeScript - +##### Jest +```bash +## JavaScript +npx check-tests jest "tests/**/*.test.js" +npx check-tests jest "__tests__/**/*.js" +## TypeScript +npx check-tests jest "tests/**/*.test.ts" --typescript +npx check-tests jest "src/**/*.spec.ts" --typescript +``` +##### Mocha +```bash +## JavaScript +npx check-tests mocha "test/**/*_test.js" +npx check-tests mocha "tests/**/*.spec.js" +## TypeScript +npx check-tests mocha "test/**/*.test.ts" --typescript +``` +##### Newman (Postman Collections) +```bash +## Single collection +npx check-tests newman "api-tests.postman_collection.json" +## Multiple collections +npx check-tests newman "collections/*.json" +``` +##### Nightwatch +```bash +## JavaScript +npx check-tests nightwatch "tests/**/*.js" +## TypeScript +npx check-tests nightwatch "tests/**/*.ts" --typescript ``` +##### Playwright +```bash +## JavaScript +npx check-tests playwright "tests/**/*.spec.js" +npx check-tests playwright "e2e/**/*.test.js" +## TypeScript +npx check-tests playwright "tests/**/*.spec.ts" --typescript +npx check-tests playwright "e2e/**/*.test.ts" --typescript +``` +##### Protractor +```bash +## JavaScript +npx check-tests protractor "spec/**.spec.js" +npx check-tests protractor "e2e/**/*_spec.js" +## TypeScript npx check-tests protractor "spec/**.spec.ts" --typescript +npx check-tests protractor "e2e/**/*.spec.ts" --typescript ``` - -Check tests for Cypress.io - -``` -npx check-tests cypress "cypress/integration/**.js" -``` - -Check tests for Testcafe - +##### QUnit +```bash +## JavaScript +npx check-tests qunit "tests/**/*.js" +## TypeScript +npx check-tests qunit "tests/**/*.ts" --typescript ``` +##### TestCafe +```bash +## JavaScript npx check-tests testcafe "tests/**.js" +npx check-tests testcafe "fixtures/**/*.test.js" +## TypeScript +npx check-tests testcafe "tests/**.ts" --typescript +npx check-tests testcafe "fixtures/**/*.test.ts" --typescript +``` +##### 
Vitest +```bash +## JavaScript +npx check-tests vitest "tests/**/*.test.js" +npx check-tests vitest "src/**/*.spec.js" +## TypeScript +npx check-tests vitest "tests/**/*.test.ts" --typescript +npx check-tests vitest "src/**/*.spec.ts" --typescript +``` +##### Gauge +```bash +## Gauge specification files +npx check-tests gauge "specs/**/*.spec" +npx check-tests gauge "tests/**/*.spec" +``` +##### Manual Tests (Markdown) +```bash +## Markdown-based manual test documentation +npx check-tests manual "docs/tests/**/*.md" +npx check-tests manual "manual-tests/*.md" ``` - #### Sample Output - List CodeceptJS tests - ![](https://user-images.githubusercontent.com/24666922/78563263-505d1280-7838-11ea-8fbc-18e942d48485.png) - When a `.only` test is found: - ``` ✗ npx check-tests mocha "test/**/**_test.js" - [[ Tests checker by testomat.io ]] Error: Exclusive tests detected. `.only` call found in test/checkout/important_test.js:290 Remove `.only` to restore test checks - ``` - ### Using in Testomatio - This library is used by [Testomatio](https://testomat.io) to import tests. - ### Importing Into Project - Use the `TESTOMATIO` environment variable with a valid API key to import data into a project. An API key can be obtained on the project settings page or on the "Import From Source" page. - For example: - ``` TESTOMATIO=11111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" - ``` - #### Test code - By default, check-tests sends the code of the test hooks (before, beforeEach and after) along with the test. In the "Codes" section of Testomat.io you can see all the additional context of the test. - To exclude hook code from the imported test, use the `--no-hooks` option - ``` TESTOMATIO=11111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" --no-hooks ``` - #### Add line numbers to the test code - To include line numbers in the imported test code, use the `--line-numbers` option. _(By default the Code section excludes line numbers.)_ - ``` TESTOMATIO=11111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" --line-numbers ``` - ### Import Parametrized Tests - It is possible to import parametrized tests if they use template literals with variables in their names: - ```js ['one', 'two', 'three'].forEach((parameter) => { it(`this is test number ${parameter}`); }); ``` - This test will be imported with its original name including a placeholder: - ``` this is test number ${parameter} ``` - When executed, the test will be reported with 3 results matched to the same test, and parameter values will be added to the report. - ### Disable Detached Tests - If a test from a previous import is not found on the next import, it is marked as "detached". This ensures that tests deleted from the codebase do not stay in Testomatio. - To disable this behavior and not mark anything as detached on import, use the `--no-detached` option - ``` TESTOMATIO=11111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" --no-detached ``` - This option could also be set via the environment variable `TESTOMATIO_NO_DETACHED=1`. If you don't want to pass it each time, create a .env file in the root dir of your project with this variable set. - ### Synchronous Import - By default `check-tests` doesn't wait for all tests to be processed. It sends a request to Testomatio and exits. To wait for processing to finish, use the `--sync` option. - ``` TESTOMATIO=11111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" --sync ``` - Please note that this will take a long time on a large codebase. 
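+In CI, a minimal sketch of an import step (assumptions: Node.js is available on the runner and the project API key is exposed as a `TESTOMATIO_API_KEY` secret; adapt both names to your CI provider): + +```bash +# Import tests and wait for processing to finish (--sync), so a failed import fails the job; +# --no-skipped additionally fails the build when skipped tests are found +TESTOMATIO="$TESTOMATIO_API_KEY" npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" --sync --no-skipped +```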
- ### Auto-assign Test IDs in Source Code - To disable guess matching for tests, it is recommended to use Testomatio IDs to map a test in source code to a test in Testomatio. Testomatio IDs can be put automatically into the test names in the source code when the `--update-ids` option is used: - ``` TESTOMATIO=11111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" --update-ids ``` - Tests imported with `--update-ids` will be processed in synchronous mode, so the script will finish after all tests are processed. - ### Keep Test IDs Between Projects - To import tests with Test IDs set in source code into another project, use the `--create` option. In this case, a new project will be populated with the same Test IDs. - ``` TESTOMATIO=11111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" --create ``` - Without `--create`, the import will fail with a message that an ID was not found. - ### Clean Test IDs - If you want to import the synced project as a new project, you have to clean the test IDs. To clean up test IDs without connecting to a Testomatio project, use the `--purge` option: - ``` npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" --purge ``` - This method may be unsafe, as it cleans all `@S*` and `@T*` tags from tests and suites. So if you have a tag like `@Test1234`, it may also be removed. If you use this option, make sure all the test titles are correct before committing the tests to Git. - > **Note:** `--purge` is an alias of the `--unsafe-clean-ids` option. - To clean only test IDs set from a specific project, use the `--clean-ids` option instead: - ``` TESTOMATIO=11111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" --clean-ids ``` - TESTOMATIO is the API key of a project with existing test IDs. - ### Import Into a Branch - Tests can be imported into a specific branch if the `TESTOMATIO_BRANCH` parameter is used. Branch is matched by its ID. If the branch is not found, it will be created. - ``` TESTOMATIO_BRANCH=dev TESTOMATIO=1111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" ``` - ### Keep Structure of Source Code - When tests in source code have IDs assigned and those tests are imported, Testomat.io uses the current structure in a project to put the tests in. If folders in the source code don't match folders in the Testomat.io project, the existing structure in the source code will be ignored. To force using the structure from the source code, use the `--keep-structure` flag on import: - ``` TESTOMATIO=1111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" --keep-structure ``` - ### Delete Empty Suites - If tests were marked with IDs and imported to already created suites in Testomat.io, newly imported suites may become empty. Use the `--no-empty` option to clean them up after import. - ``` TESTOMATIO=1111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" --no-empty ``` - > This prevents usage of the `--keep-structure` option. - -### Import Into a Specific Suite - -To put all imported tests into a specific suite (folder) pass in `TESTOMATIO_PREPEND_DIR` environment variable: - +#### Import Into a Specific Suite +To put all imported tests into a specific suite (folder), pass in the `TESTOMATIO_PREPEND_DIR` environment variable. Avoid using special characters in the directory name; this helps prevent potential errors across different operating systems and command-line environments. +**Recommendations:** +Use only letters `(A-Z, a-z)`, numbers `(0-9)`, hyphens `(-)`, and underscores `(_)`. +Avoid characters like `/, \, :, *, ?, ", <, >, |, &, $, #, %, @,` and the apostrophe `(')`. 
+Examples of recommended naming: `MyTests` or `project_tests`. ``` TESTOMATIO_PREPEND_DIR="MyTESTS" TESTOMATIO=1111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" ``` - This will use the "MyTESTS" folder in the root of a project, or create it if it doesn't exist. - It is also possible to specify a suite by its SID: - ``` TESTOMATIO_SUITE="1111111" TESTOMATIO=1111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" ``` - or use the SID with a prefix: - ``` TESTOMATIO_SUITE="S1111111" TESTOMATIO=1111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" TESTOMATIO_SUITE="@S1111111" TESTOMATIO=1111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" ``` - +### Apply Labels to Tests +Use `TESTOMATIO_LABELS` to tag all imported tests with labels: +```bash +## Apply single label +TESTOMATIO_LABELS="smoke" TESTOMATIO=1111111 npx check-tests jest "tests/**/*.test.js" +## Apply multiple labels (comma-separated) +TESTOMATIO_LABELS="smoke,regression,api" TESTOMATIO=1111111 npx check-tests playwright "tests/**/*.spec.ts" +## Apply labels with values using label:value format +TESTOMATIO_LABELS="severity:high,feature:user_account,team:backend" TESTOMATIO=1111111 npx check-tests jest "tests/**/*.test.js" +## Mix simple labels and label:value pairs +TESTOMATIO_LABELS="smoke,severity:critical,feature:auth,regression" TESTOMATIO=1111111 npx check-tests playwright "tests/**/*.spec.ts" +## Use alias for Python SDK compatibility +TESTOMATIO_SYNC_LABELS="integration,e2e" TESTOMATIO=1111111 npx check-tests cypress "cypress/integration/**/*.js" +``` +### Remove Path Prefixes +Use `TESTOMATIO_WORKDIR` to avoid redundant folder nesting: +```bash +## Problem: src/tests/API/ creates nested paths in Testomat.io +## Solution: Set working directory to remove src/tests prefix +TESTOMATIO_WORKDIR=src/tests TESTOMATIO=1111111 npx check-tests playwright "**/*.spec.ts" +## Monorepo: Import without parent paths +TESTOMATIO_WORKDIR=apps/frontend TESTOMATIO=1111111 npx check-tests jest "**/*.test.js" +``` +### Group Tests by Category +Use `TESTOMATIO_PREPEND_DIR` to organize tests: +```bash +## Group API tests under "API Tests" folder +TESTOMATIO_PREPEND_DIR="API Tests" TESTOMATIO=1111111 npx check-tests jest "src/api/**/*.test.js" +## Separate by team +TESTOMATIO_PREPEND_DIR="Frontend Team" TESTOMATIO=1111111 npx check-tests playwright "tests/ui/**/*.spec.ts" +``` +### Import to Specific Suite +Use `TESTOMATIO_SUITE` to target existing suites: +```bash +## Import to existing suite by SID +TESTOMATIO_SUITE=S1234567 TESTOMATIO=1111111 npx check-tests jest "features/**/*.test.js" +``` ### TypeScript - For TypeScript projects, the `@babel/core` and `@babel/plugin-transform-typescript` packages are used. The GitHub Action already contains those modules, while the CLI version of this tool tries to automatically install them on first run. 
- If you face issues parsing TypeScript files mentioning `@babel/core` or `@babel/plugin-transform-typescript`, try to install them manually: - ``` npm i @babel/core @babel/plugin-transform-typescript --save-dev ``` - Now TypeScript tests can be imported with the `--typescript` option: - ``` TESTOMATIO=11111111 npx check-tests CodeceptJS "**/*{.,_}{test,spec}.js" --typescript ``` - +#### ES2023 Support +Starting from version 0.13.3, the tool supports ES2023 Explicit Resource Management (ERM) syntax including: +- `using` declarations for automatic resource disposal +- `[Symbol.dispose]` method definitions +This allows parsing of modern TypeScript/JavaScript files that use resource management patterns: +```typescript +const getResource = () => ({ + [Symbol.dispose]: () => { /* cleanup code */ }, +}); +test('resource management', () => { + using resource = getResource(); + // resource will be automatically disposed at the end of the scope +}); +``` ### Test aliases - -Test aliases are used to map tests in source code to tests in Testomat.io. By default `test` and `it` are parsed. But if you rename them or use another function to define tests (e.g. created/extended test object in Playwright), you can add alias (or multiple aliases, separated by comma) via `--alias` option: - +Test aliases are used to map tests in source code to tests in Testomat.io. By default `test` and `it` are parsed. But if you rename them or use another function to define tests (e.g. a created/extended test object in Playwright), you can add an alias (or multiple aliases, separated by commas) via the `--test-alias` option: ``` -TESTOMATIO=11111111 npx check-tests Playwright "**/*{.,_}{test,spec}.ts" --alias myTest,myCustomFunction +TESTOMATIO=11111111 npx check-tests Playwright "**/*{.,_}{test,spec}.ts" --test-alias myTest,myCustomFunction ``` - ### Programmatic API - Import Analyzer from module: - ```js const { Analyzer } = require('check-tests'); - const framework = 'jest'; const pathToTests = './tests'; const pattern = '**/*[._-]{test,spec}.{ts,js}'; - const analyzer = new Analyzer(framework, pathToTests); - // to enable typescript... analyzer.withTypeScript(); - // to enable babel plugins analyzer.addPlugin('@babel/plugin-syntax-jsx'); analyzer.addPlugin('@babel/plugin-syntax-flow'); - // to enable babel presets analyzer.addPreset('@babel/preset-react'); analyzer.addPreset('@babel/preset-flow'); - analyzer.analyze(pattern); - // stats on processed files const stats = analyzer.stats; - // full info on parsed tests const data = analyzer.rawTests; ``` - ### Debugging - Run import with the `DEBUG="testomatio:*"` environment variable to get additional logs which may help you understand the cause of an issue. Usually issues happen because of a specific file that couldn't be parsed: - ``` DEBUG="testomatio:*" npx check-tests@latest .... 
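+# A sketch under one assumption: these debug logs go to stderr (as with the standard `debug` package), +# so they can be captured to a file and attached when reporting an issue: +DEBUG="testomatio:*" npx check-tests@latest .... 2> import-debug.log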
``` - ### API Definition - API Endpoint to import test data into Testomat.io: - [Import API Reference](https://testomatio.github.io/check-tests/) - ### Limitations - - Can't analyze included tests from external files - Can't analyze dynamically created tests - ### License MIT - Part of [Testomat.io](https://testomat.io) - diff --git a/src/content/docs/project/import-export/import/import-php.md b/src/content/docs/project/import-export/import/import-php.md index 8e830324..1d19f8bc 100644 --- a/src/content/docs/project/import-export/import/import-php.md +++ b/src/content/docs/project/import-export/import/import-php.md @@ -4,6 +4,11 @@ description: Import PHP tests into Testomat.io using the list-tests CLI utility, type: article url: https://docs.testomat.io/project/import-export/import-php head: + - tag: meta + attrs: + name: og:image + content: https://docs.testomat.io/_astro/test-reporting-heat-map.CoE-TwPN_Z20qVi.webp + - tag: meta attrs: name: keywords @@ -14,58 +19,35 @@ head: > 📑 This documentation is taken from open-source project [testomatio/php-list-tests](https://github.com/testomatio/php-list-tests) - -> List tests currently support PHP 8+, for PHP 7 look into 0.1.x branch - This CLI utility prints all tests in a PHP project **without executing them**. Easily check the number of tests and get live documentation for your tests in markdown format! - > Supports PHPUnit & Codeception. - This CLI script is also used to import tests into the Testomat.io application. - ### Installation - ``` composer require testomatio/list-tests --dev ``` - ### Usage - To print all tests, execute the `list-tests` script pointing to the directory with tests: - ``` php vendor/bin/list-tests test/ ``` - This will print output similar to this: - ![](https://pbs.twimg.com/media/Ee5PXDOWkAEdiyz?format=jpg&name=large) - To print tests into a file, execute the command with the `--markdown` option, providing a filename to save the info into: - ``` php vendor/bin/list-tests tests --markdown test-docs.md ``` - This command will save the test list into the "test-docs.md" file. - To show the test list with links to corresponding files on GitHub/GitLab/BitBucket, use the `PREPEND_URL` environment variable and set a base URL for test files. - ``` PREPEND_URL=https://github.com/testomatio/php-list-tests/tests php vendor/bin/list-tests tests --markdown tests.md ``` - This will generate a file similar to [test-docs.md](https://github.com/testomatio/php-list-tests/blob/0.2.x/test-docs.md) in this repository. - ### Import into [Testomat.io](https://testomat.io) - This script can also import tests from a PHP project into the testomat.io project. Pass in the TESTOMATIO API key of your project to import tests: - ``` TESTOMATIO={api-key} php vendor/bin/list-tests tests -``` - - +``` diff --git a/src/content/docs/project/import-export/import/import-tests-from-csv-xlsx.md b/src/content/docs/project/import-export/import/import-tests-from-csv-xlsx.md new file mode 100644 index 00000000..5b818720 --- /dev/null +++ b/src/content/docs/project/import-export/import/import-tests-from-csv-xlsx.md @@ -0,0 +1,115 @@ +--- +title: Import from CSV/XLSX +description: Learn how to import tests from CSV or XLSX files into Testomat.io. This guide covers the process of importing test cases from systems like TestRail, Zephyr, Qase, and more. It also introduces an experimental feature to convert CSV/XLSX data into BDD scenarios, and provides instructions for creating custom XLS files for importing. 
+type: article +url: https://docs.testomat.io/project/import-export/import-tests-from-csv-xlsx +head: + - tag: meta + attrs: + name: keywords + content: Testomat.io, CSV, XLS, XLSX, TMS, test import, BDD scenarios, TestRail, Zephyr, Qase, QTest, Testmo, XRay manual tests, automated tests, test management, custom XLS +--- + +Testomat.io allows you to import tests from **CSV** or **XLSX** files. This is useful if you are migrating from another Test Management System (TMS) or if you already have test cases documented in spreadsheets. + +## Supported Test Management Systems + +Testomat.io supports importing test cases from many popular TMS tools. There are two ways to import: + +- **Direct Import** — via built-in integration +- **CSV/XLSX Import** — supported for selected TMS formats + +### Supported For CSV/XLSX Import + +- Testomatio +- TestRail +- Testmo +- Zephyr +- QTest +- Qase + +### Direct Import Guides + +Click any tool below to see step-by-step instructions: + +- [Qase](https://docs.testomat.io/project/import-export/import/import-tests-from-qase/) +- [QTest](https://docs.testomat.io/project/import-export/import/import-tests-from-qtest/) +- [QMetry](https://docs.testomat.io/project/import-export/import/import-tests-from-qmetry/) +- [TestCaseLabs](https://docs.testomat.io/project/import-export/import/import-tests-from-testcaselabs/) +- [Testmo](https://docs.testomat.io/project/import-export/import/import-tests-from-testmo/) +- [Testomat.io](https://docs.testomat.io/project/import-export/import/import-tests-from-csvxlsx/) +- [TestRail](https://docs.testomat.io/project/import-export/import/import-tests-from-testrail/) +- [XRay](https://docs.testomat.io/project/import-export/import/import-tests-from-xray/) +- [Zephyr](https://docs.testomat.io/project/import-export/import/import-tests-from-zephyr/) + +## How to Import Tests from CSV/XLSX in Classic Projects + +You can import tests into your project via: + +- **Imports** page — ideal when existing data is present. +- **Tests** page — best for new projects without data. + +In a **new project**, simply click **Import from Spreadsheet** on the **Tests** page and continue from **Step 3** below. + +![Import from scratch](./images/att3_436.png) + +### Steps to Import + +1. Click the **Imports** button in the sidebar +2. Click the **Import from CSV** button + +![Import from CSV/XLSX button](./images/att1_436.png) + +When the sidebar opens: + +3. Select the format from which your CSV/XLSX was exported (e.g. Qase) +4. Click **Choose file** and select your CSV/XLSX file +5. Click the **Create** button to start the import + +![Upload CSV/XLSX](./images/att2_436.png) + +Your file will be processed, and the test cases will appear in your project. + +:::note + +Currently, the CSV/XLSX import is an experimental feature. Some data might not be imported correctly, depending on the TMS format. Please review your imported test cases after migration. + +::: + +## How to Import Tests from CSV/XLSX in BDD Projects + +If your project type is **BDD**, the import steps are **exactly the same** as described above for [Classic projects](https://docs.testomat.io/project/import-export/import/import-tests-from-csv-xlsx/#how-to-import-tests-from-csvxlsx-in-classic-projects). + +The only difference is the appearance of a new checkbox: + +- **Import as BDD** – available **only** for imports from **TestRail** and **QTest**. +- When checked, all rows from the CSV/XLSX file are converted into **BDD scenarios**. 
+ - Mapping: + - Precondition → **Given** + - Step → **When** + - Expected Result → **Then** + - All imported tests are saved as **feature files** in your project. + +:::note + +Currently, the feature works for TestRail and QTest. If you need support for other systems, please [submit a request](https://testomat.nolt.io/). + +::: + +## How to Create Custom XLS for Testomat.io + +You can also create your own XLS file to import tests into Testomat.io. Follow these rules when preparing a custom XLS file: + +| Column name | Content | +| ----------- | --------------------------------------------------------------------------------------------------------------- | +| ID | leave it empty | +| Title | put the title of your test here, one title per row | +| Status | goes for test type: manual or automated, can be blank | +| Folder | enter the suite name here, and use `/suite name/sub-suite name` format to create suites nesting | +| Emoji | can be blank | +| Priority | you can set priority to your test: normal, important, high, critical or low, can be blank | +| Tags | place here any tags you need, can be blank | +| Owner | name of test owner, can be blank | +| Description | put here the description of your test, [Markdown format](https://www.markdownguide.org/basic-syntax/) supported | + +You can download the custom Testomat.io example file [here](https://testomatio-artifacts.ams3.digitaloceanspaces.com/documentation/testomatio.xlsx). diff --git a/src/content/docs/project/import-export/import/import-tests-from-csvxls.md b/src/content/docs/project/import-export/import/import-tests-from-csvxls.md deleted file mode 100644 index 168ffd05..00000000 --- a/src/content/docs/project/import-export/import/import-tests-from-csvxls.md +++ /dev/null @@ -1,64 +0,0 @@ ---- -title: Import Tests From TMS -description: Learn how to import tests from CSV or XLS files into Testomat.io. This guide covers the process of importing test cases from systems like TestRail, Zephyr, Qase, and more. It also introduces an experimental feature to convert CSV/XLS data into BDD scenarios, and provides instructions for creating custom XLS files for importing. -type: article -url: https://docs.testomat.io/project/import-export/import-tests-from-csvxls -head: - - tag: meta - attrs: - name: keywords - content: Testomat.io, CSV, XLS, TMS, test import, BDD scenarios, TestRail, Zephyr, Qase, QTest, Testmo, XRay manual tests, automated tests, test management, custom XLS ---- - -> If you have tests in CSV/XLS format - follow this guide to learn how to import your tests to Testomat.io - -## Supported Test Management Systems - -Testomat.io supports importing test cases from a wide range of popular test management systems. 
-**Click any tool name below to view a step-by-step guide for importing from that specific system**: - -- [Qase](https://docs.testomat.io/project/import-export/import/import-tests-from-qase/) -- [QTest](https://docs.testomat.io/project/import-export/import/import-tests-from-qtest/) -- [QMetry](https://docs.testomat.io/project/import-export/import/import-tests-from-qmetry/) -- [TestCaseLabs](https://docs.testomat.io/project/import-export/import/import-tests-from-testcaselabs/) -- [Testmo](https://docs.testomat.io/project/import-export/import/import-tests-from-testmo/) -- [Testomat.io](https://docs.testomat.io/project/import-export/import/import-tests-from-csvxls/) -- [TestRail](https://docs.testomat.io/project/import-export/import/import-tests-from-testrail/) -- [XRay](https://docs.testomat.io/project/import-export/import/import-tests-from-xray/) -- [Zephyr](https://docs.testomat.io/project/import-export/import/import-tests-from-zephyr/) - - -## Import CSV/XLS As BDD - -> Please note that this is experimental feature and some data can be imported wrongly. - -Testomat.io allows converting CSV/XLS to BDD Scenarios, where your Precondition equals **Given**, Step = **When**, and Expected Result = **Then**. - - -![image](./images/149758030-698bfebd-6045-4db9-976c-5c1f95ded5e0.png) - -For this moment this feature is implemented for TestRail and QTest. But you can request it for other systems [here](https://testomat.nolt.io/). - -## Create Custom XLS For Testomat.io - -You can create your own XLS to import your tests to Testomat.io. There are such rules to create custom XLS files for Testomat.io import -| Column name | Content | -|---|---| -|ID|leave it empty| -|Title |put the title of your test here, one title per row| -|Status | goes for test type manual or automated, can be blank| -|Folder| place here the name of the suite, and use `/suite name/sub-suite name` format to create suites nesting| -|Emoji |can be blank| -|Priority| you can set priority to your test normal, important, high, critical or low, can be blank| -|Tags| place here any tags you need, can be blank| -|Owner |name of test owner, can be blank| -|Description |put here the description of your test, [Markdown format ](https://www.markdownguide.org/basic-syntax/)supported| - -You can download the custom Testomat.io example file [here](https://testomatio-artifacts.ams3.digitaloceanspaces.com/documentation/testomatio.xlsx). - - - - - - - diff --git a/src/content/docs/project/runs/archive-runs-and-groups.md b/src/content/docs/project/runs/archive-runs-and-groups.md index 65f9c759..31bc912f 100644 --- a/src/content/docs/project/runs/archive-runs-and-groups.md +++ b/src/content/docs/project/runs/archive-runs-and-groups.md @@ -61,7 +61,7 @@ Archived Runs or Groups are removed from the main Runs board and got the **archi ![Testomat.io - Run overview](./images/Archive_Run_1.png) -OR +OR - Click the **'Extra menu'** button at the top right corner and select the specific archive section that you need. @@ -120,7 +120,6 @@ As well, by clicking on archived RunGroup, you can see the RunGroup Report. ![Testomat.io - RunGroup Report](./images/Archive_Run_16.png) - ## How to Unarchive Runs & RunGroups - Archived runs and groups can be unarchived. This allows you to display them on the main panel. @@ -160,4 +159,4 @@ Use **'Multi-select'** to unarchive a few Runs simultaneously. 3. Click the **'Confirm'** button. 
-![Testomat.io - Unarchive Groups Archive](./images/Archive_Run_20.png) \ No newline at end of file +![Testomat.io - Unarchive Groups Archive](./images/Archive_Run_20.png) diff --git a/src/content/docs/project/runs/images/Advanced_Relaunch_1.png b/src/content/docs/project/runs/images/Advanced_Relaunch_1.png index a903e202..620a6e36 100644 Binary files a/src/content/docs/project/runs/images/Advanced_Relaunch_1.png and b/src/content/docs/project/runs/images/Advanced_Relaunch_1.png differ diff --git a/src/content/docs/project/runs/images/Advanced_Relaunch_2.png b/src/content/docs/project/runs/images/Advanced_Relaunch_2.png index a589da55..7d0efe26 100644 Binary files a/src/content/docs/project/runs/images/Advanced_Relaunch_2.png and b/src/content/docs/project/runs/images/Advanced_Relaunch_2.png differ diff --git a/src/content/docs/project/runs/images/Advanced_Relaunch_3.gif b/src/content/docs/project/runs/images/Advanced_Relaunch_3.gif new file mode 100644 index 00000000..ec983942 Binary files /dev/null and b/src/content/docs/project/runs/images/Advanced_Relaunch_3.gif differ diff --git a/src/content/docs/project/runs/images/Advanced_Relaunch_4.gif b/src/content/docs/project/runs/images/Advanced_Relaunch_4.gif new file mode 100644 index 00000000..ec1b5f78 Binary files /dev/null and b/src/content/docs/project/runs/images/Advanced_Relaunch_4.gif differ diff --git a/src/content/docs/project/runs/images/Combined_report.png b/src/content/docs/project/runs/images/Combined_report.png index c77da39c..ae5b6dba 100644 Binary files a/src/content/docs/project/runs/images/Combined_report.png and b/src/content/docs/project/runs/images/Combined_report.png differ diff --git a/src/content/docs/project/runs/images/Combined_report_1.png b/src/content/docs/project/runs/images/Combined_report_1.png index 8d9627b7..493bf99c 100644 Binary files a/src/content/docs/project/runs/images/Combined_report_1.png and b/src/content/docs/project/runs/images/Combined_report_1.png differ diff --git a/src/content/docs/project/runs/images/Combined_report_2.gif b/src/content/docs/project/runs/images/Combined_report_2.gif new file mode 100644 index 00000000..faeaf8d6 Binary files /dev/null and b/src/content/docs/project/runs/images/Combined_report_2.gif differ diff --git a/src/content/docs/project/runs/images/RunGroup_4.png b/src/content/docs/project/runs/images/RunGroup_4.png index 86f1907d..dc54c33b 100644 Binary files a/src/content/docs/project/runs/images/RunGroup_4.png and b/src/content/docs/project/runs/images/RunGroup_4.png differ diff --git a/src/content/docs/project/runs/images/RunGroup_5.png b/src/content/docs/project/runs/images/RunGroup_5.png index 1ef1ac23..1bfac49a 100644 Binary files a/src/content/docs/project/runs/images/RunGroup_5.png and b/src/content/docs/project/runs/images/RunGroup_5.png differ diff --git a/src/content/docs/project/runs/images/RunGroup_6.png b/src/content/docs/project/runs/images/RunGroup_6.png new file mode 100644 index 00000000..f322c4f4 Binary files /dev/null and b/src/content/docs/project/runs/images/RunGroup_6.png differ diff --git a/src/content/docs/project/runs/images/RunGroup_7.png b/src/content/docs/project/runs/images/RunGroup_7.png new file mode 100644 index 00000000..1aac3173 Binary files /dev/null and b/src/content/docs/project/runs/images/RunGroup_7.png differ diff --git a/src/content/docs/project/runs/images/RunGroup_Report_1.png b/src/content/docs/project/runs/images/RunGroup_Report_1.png new file mode 100644 index 00000000..d6a4505a Binary files /dev/null and 
b/src/content/docs/project/runs/images/RunGroup_Report_1.png differ
diff --git a/src/content/docs/project/runs/images/RunGroup_Report_2.gif b/src/content/docs/project/runs/images/RunGroup_Report_2.gif
new file mode 100644
index 00000000..25f1eaac
Binary files /dev/null and b/src/content/docs/project/runs/images/RunGroup_Report_2.gif differ
diff --git a/src/content/docs/project/runs/images/att1_444.png b/src/content/docs/project/runs/images/att1_444.png
new file mode 100644
index 00000000..ec12a38a
Binary files /dev/null and b/src/content/docs/project/runs/images/att1_444.png differ
diff --git a/src/content/docs/project/runs/images/att2_444.png b/src/content/docs/project/runs/images/att2_444.png
new file mode 100644
index 00000000..7dce811f
Binary files /dev/null and b/src/content/docs/project/runs/images/att2_444.png differ
diff --git a/src/content/docs/project/runs/images/att3_444.png b/src/content/docs/project/runs/images/att3_444.png
new file mode 100644
index 00000000..3a7d2493
Binary files /dev/null and b/src/content/docs/project/runs/images/att3_444.png differ
diff --git a/src/content/docs/project/runs/managing-runs.md b/src/content/docs/project/runs/managing-runs.md
index b9f02f68..7a04e431 100644
--- a/src/content/docs/project/runs/managing-runs.md
+++ b/src/content/docs/project/runs/managing-runs.md
@@ -196,13 +196,29 @@ In the opened sidebar window, configure your relaunch (optional):
 1. Enter a custom run title.
 2. Enable **'Create new run'** option (if needed).
 3. Enable **'Keep values'** option (available only if **'Create new run'** is enabled; this shows previous statuses for test cases).
-4. Select the tests that you want to include in your relaunched run (use filtering, sorting, or search options).
+4. Select the tests that you want to include in your relaunched run one by one (use filtering, sorting, or search options).
 5. Click the **'Relaunch'** button.
 
 ![Testomat.io - relaunched Run](./images/Advanced_Relaunch_2.png)
 
+:::note
+
+Only selected test cases will be relaunched for manual re-check or automatically re-run on CI.
+
+:::
+
+If you have many test cases and need to select all of them or only failed ones, you don't have to do it manually. You can use the **'Select'** feature.
+
+**There are two options for selecting all tests, depending on your needs:**
+
+1. Use the **'Checkbox Select'** option by simply clicking on the checkbox. This is useful if you want to select a group of tests (like all filtered failed tests) and then manually unselect or add more. With this option, you can easily edit your selection.
+
+![Testomat.io - relaunched Run](./images/Advanced_Relaunch_3.gif)
+
+2. Use the **'Select All'** option from the **'Select'** dropdown. This is a quick way to select all currently displayed test cases. With this option, you cannot manually add or remove tests from the selection. If you change the filtering, the selection will automatically update to include only the currently displayed tests. Use the **'None'** option to unselect all tests.
+
+![Testomat.io - relaunched Run](./images/Advanced_Relaunch_4.gif)
+
 **Key benefits of Advanced Relaunch:**
 
 - Customize relaunches to better fit your team's workflow.
@@ -248,7 +264,7 @@ Multiselection allows the bulk application options to many runs with one click.
 3. **'Labels'** - apply/remove [labels](https://docs.testomat.io/usage/labels-and-custom-fields/#how-to-setup-a-label) from runs.
 4. **'Compare'** - open Compare runs view.
 5. **'Extra menu'** - access additional options.
-6. 
**'Delete'** - delete selected runs.
+6. **'Purge icon'** - purge selected runs ([see more details here](https://docs.testomat.io/project/runs/managing-runs/#purge-runs)).
 
 ![Testomatio.io - Multiselection Options](./images/Multi-select_1.png)
 
@@ -284,3 +300,51 @@ This feature allows you to pass **Runs** and **Runs Archive** filter parameters
 Specify the necessary filtering parameters on the **Runs** or **Runs Archive** page, then copy the URL and share it with interested members.
 
 ![Testomatio.io - Share Runs Filter](./images/Share_Filter.png)
+
+## Purge Runs
+
+Starting from September 2025, Testomat.io introduces a new approach to managing old and unused Runs.
+
+The **Delete** option for Runs is now replaced with a **Purge** option.
+
+![Purge button](./images/att1_444.png)
+
+This change provides a safer and more flexible way to manage Run data by introducing a two-step deletion strategy instead of immediate and irreversible removal.
+
+### Compress and Move to Archive (Purge)
+
+When a Run is purged (either manually or automatically), it is first compressed and moved to the [Archive](https://docs.testomat.io/project/runs/archive-runs-and-groups/).
+
+![Move to Archive](./images/att2_444.png)
+
+Runs that are moved to the Archive receive a specific badge:
+
+- **Purged** - when a run is purged (manually or automatically)
+- **Archived** - when a run is archived manually
+
+During this step:
+
+- Stack traces are removed to reduce storage size.
+- Essential data is preserved, including:
+  - Test results
+  - Artifacts (attachments, logs, screenshots)
+  - Custom statuses
+- Archived Runs remain available for as long as needed and can be restored at any time.
+
+### Permanent Deletion from Archive
+
+:::note
+
+If you remove a Run from the Archive, it is permanently deleted. This step is irreversible: once **deleted**, the Run cannot be restored.
+
+:::
+
+![Delete runs](./images/att3_444.png)
+
+**Automatic Purge Applies the Same Logic**
+
+The same two-step logic applies to Runs purged automatically via Project Settings → [Purge Old Runs](https://docs.testomat.io/management/project/settings/#purge-old-runs). Instead of being deleted right away, these Runs are first compressed and archived, then eventually fully deleted.
+
+**Summary**
+
+By introducing the Purge option, Testomat.io ensures that Run data is handled with more safety and flexibility. You can keep your workspace clean and optimize storage, while still maintaining the ability to restore important Runs from the Archive when needed.
diff --git a/src/content/docs/project/runs/reports.md b/src/content/docs/project/runs/reports.md
index 8cc85831..1d27d49e 100644
--- a/src/content/docs/project/runs/reports.md
+++ b/src/content/docs/project/runs/reports.md
@@ -14,15 +14,15 @@ When you run tests with Testomat.io, the system generates a **Run Report** displ
 
 ## Run Report Basic View
 
-Clicking on the Test Run will toggle RunReport screen in basic view.
+Clicking on the Test Run will toggle the Run Report screen in basic view.
 
 ![Testomat.io - RunReport](./images/Run_Report_1.png)
 
-**On this basic report you can:**
+**In this basic Run Report, you can:**
 
 1. View a list of test cases.
-2. Check their status (Passed, Failed, Skipped, Pending, Custom statuses).
-3. Reiew general information on the Run Result.
+2. Check their statuses (Passed, Failed, Skipped, Pending, Custom statuses).
+3. Review general information on the Run Result.
![Testomat.io - RunReport](./images/Run_Report_2.png)
 
@@ -55,7 +55,7 @@ You can also check overall **Statistics** by suites/tags/labels/assignees/priori
 
 Testomat.io allows you to customize the display of test cases within a Run Report.
 
-**This geature helps you:**
+**This feature helps you:**
 
 - **Show or hide columns** such as test status, duration, labels, tags, priority, etc.
 - **Focus only on the data** most relevant to you or your team.
@@ -128,6 +128,17 @@ Similar to the basic run report view, click on a test case to see its details or
 
 ![Testomat.io - RunReport](./images/Run_Report_15.gif)
 
+## RunGroup Report
+
+As with Runs, you can also view RunGroup Reports.
+To open the **RunGroup Report basic view**, simply click on its name - the RunGroup Report window will be displayed with general information and a runs summary.
+
+![Testomat.io - Test Runs in a created RunGroup](./images/RunGroup_6.png)
+
+For more information, go to the [RunGroup Report Basic View](https://docs.testomat.io/project/runs/rungroups#rungroup-report-basic-view) section on the **'RunGroups'** page.
+
+From the RunGroup Report Basic View you can open the **Combined Report for RunGroups** by clicking the **'Combined Report'** button. This report is designed to help you aggregate and analyze the results of multiple test runs within a single view. You can find more information about this report in the [Combined Report for RunGroups](https://docs.testomat.io/project/runs/rungroups/#combined-report-for-rungroups) section on the **'RunGroups'** page.
+
 ## How to Download Run Report as Spreadsheet
 
 Testomat.io allows you to export Run Reports as a spreadsheet (XLSX file) to obtain your test data.
@@ -294,19 +305,4 @@ To compare Runs:
 
 ![Testomat.io - Compare Test Runs](./images/compare-runs.gif)
 
-This feature is also available for RunGroups, allowing you to compare test results from different runs within a single RunGroup.
-
-1. Open RunGroup.
-2. Click the **'Combined Report'** button.
-
-![Testomat.io - Combined Report](./images/Combined_report.png)
-
-3. Click the **'Compare to'** button to select the run you want to compare with others.
-
-![Testomat.io - Combined Report](./images/Combined_report_1.png)
-
-:::note
-
-The **'Combined Report'** feature for RunGroups does **not** combine test results of all runs inside one RunGroup into a single report. It only allows you **to compare** test results of all runs within this RunGroup.
-
-::: \ No newline at end of file
+This feature is also available for RunGroups, allowing you to compare test results from different runs within a single RunGroup. You can find more information about it in the [Combined Report for RunGroups](https://docs.testomat.io/project/runs/rungroups/#combined-report-for-rungroups) section on the **'RunGroups'** page. \ No newline at end of file
diff --git a/src/content/docs/project/runs/rungroups.md b/src/content/docs/project/runs/rungroups.md
index 49326d90..cdf0c7a8 100644
--- a/src/content/docs/project/runs/rungroups.md
+++ b/src/content/docs/project/runs/rungroups.md
@@ -39,16 +39,34 @@ The new RunGroup will appear on Runs page and will open automatically after crea
 
 After RunGroup is added, you can create new Manual/Automated/Mixed Runs inside it.
 
+## How to Create a Run in RunGroup
+
 - To create a new **Manual Run**, open the RunGroup and click on **'Manual Run'** button.
![Testomat.io - put Manual Run](./images/RunGroup_4.png)
 
+After this, in the displayed window, select a test plan from the list or create a new one, and fill in any other data you need for your run (such as assignee, title, environment, description).
+
+:::note
+
+When you create a manual run from a RunGroup, the RunGroup will be automatically pre-populated in the **'New Manual Run'** window, but you can change it if needed.
+
+:::
+
+![Testomat.io - put Manual Run](./images/RunGroup_7.png)
+
 - To add a new **Automated Run** to a RunGroup, execute your tests with Testomat.io, providing TESTOMATIO_RUNGROUP_TITLE="Build ${BUILD_ID}".
 
-Now you can view Test Runs within your created RunGroup.
+Now, by clicking the expand arrow, you can view Test Runs and their results within the created RunGroup.
 
 ![Testomat.io - Test Runs in a created RunGroup](./images/RunGroup_5.png)
 
+Clicking on the RunGroup name will toggle the RunGroup Report screen in basic view. Here you can view more detailed information about the runs inside the selected RunGroup and the total results.
+
+![Testomat.io - Test Runs in a created RunGroup](./images/RunGroup_6.png)
+
+For more details, go to the relevant section - [RunGroup Report in Basic View](https://docs.testomat.io/project/runs/reports/#rungroup-report-in-basic-view) on the **'Reports'** page.
+
 ## How to Move a Run to a RunGroup
 
 You may need to move a Run to a specific RunGroup (e.g., to associate it with a particular release or build). Use the **'Move'** functionality for this purpose:
@@ -65,6 +83,28 @@ You may need to move a Run to a specific RunGroup (e.g., to associate it with a
 
 ![Testomat.io - Destination RunGroup selection dialog](./images/Move_Run_2.png)
 
+## RunGroup Report Basic View
+
+You can view the RunGroup Report by clicking on its name.
+
+![Testomat.io - Test Runs in a created RunGroup](./images/RunGroup_6.png)
+
+**In this basic RunGroup Report, you can:**
+
+1. View the [RunGroup Chart](https://docs.testomat.io/project/runs/rungroups/#rungroup-chart).
+2. Review the **'RunGroup summary section'** with general information based on the selected [Merge Strategy](https://docs.testomat.io/project/runs/merge-strategies/).
+3. View a list of test runs and their statuses, with the number of Passed, Failed, and Skipped tests, along with the run assignee and execution time.
+4. Customize your RunGroup Report view (read more in the [How to Customize Runs List View](https://docs.testomat.io/project/runs/rungroups/#how-to-customize-runs-list-view) section below).
+5. Check the **'Combined Report'** (read more in the [Combined Report for RunGroups](https://docs.testomat.io/project/runs/rungroups/#combined-report-for-rungroups) section below).
+6. Add a new Manual Run to the selected RunGroup.
+7. Generate a [RunGroup Statistic Report](https://docs.testomat.io/advanced/ai-powered-features/ai-powered-features/#rungroup-statistic-report) (the AI feature should be enabled on the **'Company Settings'** page).
+
+![Testomat.io - RunGroup Report](./images/RunGroup_Report_1.png)
+
+The **'RunGroup summary section'** displays a counter with the number of included and analyzed runs in the RunGroup. Archived runs are excluded from this counter and do not affect the total RunGroup result. You can see how many runs were archived, and by clicking on the archived counter, you will be redirected to the **'Groups Archive'** page, where runs are already filtered by the selected RunGroup.
+
+![Testomat.io - RunGroup Report](./images/RunGroup_Report_2.gif)
+
 ## How to Customize Runs List View
 
 When working with test runs inside RunGroup or Runs Dashboard Flow, you can adjust the table layout to fit your needs. Instead of using the default view, you can customize the runs table layout within **RunGroup** page or directly from the main **Runs Dashboard**.
@@ -126,6 +166,42 @@ The chart displays up to 50 of the latest test runs belonging to the group. If y
 
 ![Testomat.io - RunGroup Chart](./images/RunGroup_Chart.png)
 
+## Combined Report for RunGroups
+
+Testomat.io provides a **'Combined Report'** feature for RunGroups. This report is designed to help you aggregate and analyze the results of all test runs within the RunGroup in a single view.
+
+You can open the **'Combined Report'** for a RunGroup by clicking the **'Combined Report'** button in the RunGroup Report Basic View window.
+
+![Testomat.io - Combined Report](./images/Combined_report.png)
+
+The comparison of runs is based on the first (main) launch. The status counters on the left side are calculated based on the main run.
+
+To change the main run, click the **'Compare To'** button. Note that the counter of loaded tests is displayed above them. The final summary on the right side is calculated based on the loaded tests.
+
+![Testomat.io - Combined Report](./images/Combined_report_1.png)
+
+:::note
+
+The **'Combined Report'** feature for RunGroups does **not** combine test results of all runs inside one RunGroup into a single report. It only allows you **to compare** test results of all runs within this RunGroup.
+
+:::
+
+Inside the **'Combined Report'**, you can get a quick overview of the pass/fail rates for all tests within the Group. You can also see the total number of tests present in all compared runs, as well as the total number of flaky (unstable tests that passed or failed in different runs), revived (previously failing tests that passed in subsequent runs), and degraded (previously passing tests that failed in subsequent runs) tests.
+
+Additionally, you can use filters and search features to view the data most relevant to your needs.
+
+![Testomat.io - Combined Report](./images/Combined_report_2.gif)
+
+**Key benefits:**
+
+- **Analyze Trends:** By seeing the results from multiple runs in one place, you can identify trends in test performance and stability over time.
+
+- **Consolidate Data:** The combined report is especially useful for "mixed runs" (runs that include both manual and automated tests), as it consolidates all results into a single, cohesive report.
+
+- **Compare Runs:** Testomat.io also has a separate **'Compare Test Runs'** feature that can be used within a RunGroup. This allows you to compare the results of different runs side by side to pinpoint changes, regressions, or improvements. (Go to the [Compare Test Runs](https://docs.testomat.io/project/runs/reports/#compare-test-runs) section on the **'Run reports'** page to read more about this feature.)
+
+This feature is a powerful tool for project managers, QA leads, and stakeholders who need to make informed decisions based on a holistic view of testing activities, without having to manually compile data from individual reports.
+
 ## How to Copy RunGroup
 
 You can easily create a new RunGroup, completely independent of any previous runs, by copying all relevant tests exactly as they are.
You can configure what data should be copied, namely:
@@ -136,7 +212,7 @@ You can easily create a new RunGroup, completely independent of any previous run
 - **Environments**: Control the duplication of environment settings based on your requirements.
 - **Nested Structure**: Preserve or exclude the nested structure of your test groups as you duplicate them.
 
-Follow these steps:
+**Follow these steps:**
 
 1. Open the RunGroup.
 2. Click the **'Extra menu'** button.
diff --git a/src/content/docs/project/tests/bdd-test-case-editor.md b/src/content/docs/project/tests/bdd-test-case-editor.md
index 90d3ea91..016d42fe 100644
--- a/src/content/docs/project/tests/bdd-test-case-editor.md
+++ b/src/content/docs/project/tests/bdd-test-case-editor.md
@@ -62,6 +62,27 @@ Beyond Feature Files lies the individual tests. Here, the BDD Editor grants you
 11. **Go Back** – Return to the previous screen.
 12. **Close** – Exit the editor.
 
+
+**Saving changes in the feature file as a draft**
+
+There is an option to save unfinished changes in the feature file as a draft. This is useful when the scenario isn't finalized yet and may contain syntax errors.
+
+Drafts can be applied at both the suite and test levels - in other words, to the Feature and Scenario descriptions.
+When editing a test, if the editor prevents you from saving changes (e.g., due to syntax errors), the relevant error message will appear at the bottom of the screen. If you plan to fix the scenario later but don't want to lose your changes, you can use the **Save to Draft and View Test** option:
+
+![Save To Draft and View Test option](./images/Save_To_Draft_and_View_Test.png)
+
+The next time you open the test in edit mode, you will see two additional buttons:
+
+1. **Apply Draft to Description** – Applies the last saved draft to the scenario text section.
+
+2. **Delete Draft** – Deletes the existing draft.
+
+![Apply Draft to Description and Delete Draft buttons](./images/apply_draft_or_delete_buttons.png)
+
+> **Note:** Only one draft can be saved at a time.
+
+
 ## Cross-Linking Tests, Suites and Folders
 
 Another useful feature that allows you to cross-link test cases, suites, and folders by embedding their unique IDs directly into the description of another test or suite. This functionality provides you with clickable links to other related items within your project, and clicking on it displays a dynamic preview of the linked test, suite or folder in an additional window.
@@ -73,4 +94,5 @@ This feature is available for Classical and BDD projects but have a difference i
 
 In the projects that use BDD format, you need to follow certain rules to maintain your test structure. If you want to add clickable references to a test or suite in a BDD project, use **#** followed by their IDs. Clicking the link will open the test or suite in detail view, making navigation and traceability more seamless.
-![Testomat.io - Use ID In Tests](./images/link_bdd.gif) \ No newline at end of file
+![Testomat.io - Use ID In Tests](./images/link_bdd.gif)
+
diff --git a/src/content/docs/project/tests/images/Confirm_deleting_1.png b/src/content/docs/project/tests/images/Confirm_deleting_1.png
new file mode 100644
index 00000000..4f318576
Binary files /dev/null and b/src/content/docs/project/tests/images/Confirm_deleting_1.png differ
diff --git a/src/content/docs/project/tests/images/Delete_attachment_1.png b/src/content/docs/project/tests/images/Delete_attachment_1.png
new file mode 100644
index 00000000..45e0df68
Binary files /dev/null and b/src/content/docs/project/tests/images/Delete_attachment_1.png differ
diff --git a/src/content/docs/project/tests/images/Delete_attachment_2.png b/src/content/docs/project/tests/images/Delete_attachment_2.png
new file mode 100644
index 00000000..206f4b61
Binary files /dev/null and b/src/content/docs/project/tests/images/Delete_attachment_2.png differ
diff --git a/src/content/docs/project/tests/images/Delete_attachment_3.png b/src/content/docs/project/tests/images/Delete_attachment_3.png
new file mode 100644
index 00000000..7d05a0bc
Binary files /dev/null and b/src/content/docs/project/tests/images/Delete_attachment_3.png differ
diff --git a/src/content/docs/project/tests/images/Save_To_Draft_and_View_Test.png b/src/content/docs/project/tests/images/Save_To_Draft_and_View_Test.png
new file mode 100644
index 00000000..eae3b561
Binary files /dev/null and b/src/content/docs/project/tests/images/Save_To_Draft_and_View_Test.png differ
diff --git a/src/content/docs/project/tests/images/apply_draft_or_delete_buttons.png b/src/content/docs/project/tests/images/apply_draft_or_delete_buttons.png
new file mode 100644
index 00000000..79ca4da4
Binary files /dev/null and b/src/content/docs/project/tests/images/apply_draft_or_delete_buttons.png differ
diff --git a/src/content/docs/project/tests/test-case-creation-and-editing.md b/src/content/docs/project/tests/test-case-creation-and-editing.md
index 135e40df..41554cf5 100644
--- a/src/content/docs/project/tests/test-case-creation-and-editing.md
+++ b/src/content/docs/project/tests/test-case-creation-and-editing.md
@@ -164,6 +164,34 @@ After saving the changes in the edit mode, you will see a preview of the drawing
 
 ![Testomatio.io - Draw Preview](./images/New_1hpi4Xy4_2024-10-07.png)
 
+## How to Delete Attachment from Test Case
+
+Save time by quickly deleting unnecessary attachments, ensuring your test cases remain relevant and clutter-free. To delete an attachment:
+
+1. On the 'Tests' page, open the Test Case.
+2. Click the **'Attachments'** tab.
+3. Click the **'Delete'** icon on the attachment you want to delete.
+
+![Testomat.io - Delete attachment](./images/Delete_attachment_1.png)
+
+4. Confirm the action.
+
+![Testomat.io - Delete attachment](./images/Confirm_deleting_1.png)
+
+OR
+
+1. On the 'Tests' page, open the Test Case.
+2. Click the **'Edit'** button.
+
+![Testomat.io - Delete attachment](./images/Delete_attachment_2.png)
+
+3. Click the **'Attachments'** tab inside edit mode.
+4. Click the **'Delete'** icon on the attachment you want to delete.
+
+![Testomat.io - Delete attachment](./images/Delete_attachment_3.png)
+
+5. Confirm the action and save changes.
+
 ## How to add a label/tag to a test
 
 This option is the easiest one! You simply need to add the label's name (preceded by the @ char) in the name field of the test. The drop-down list of tags already used in projects appears when you type the @ symbol.
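For example, typing a title like the one below into the name field would attach two tags to the test (the title and tag names here are illustrative):

```
Verify login with valid credentials @smoke @critical
```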
diff --git a/src/content/docs/support/index.md b/src/content/docs/support/index.md
index e28f7eb3..30f2b7e7 100644
--- a/src/content/docs/support/index.md
+++ b/src/content/docs/support/index.md
@@ -42,6 +42,7 @@ Here you can find useful contacts and links
 ## Invite a Support User to Your Project
 
 Before adding a support user, please make sure you have contacted the Testomat.io team and asked for help.
+Inviting a Support User requires the Manager or Owner role.
 
 To invite a support user to your project, follow these steps:
diff --git a/src/content/docs/test-reporting/api.md b/src/content/docs/test-reporting/api.md
new file mode 100644
index 00000000..fc858e1c
--- /dev/null
+++ b/src/content/docs/test-reporting/api.md
@@ -0,0 +1,655 @@
---
title: API
---

This guide explains how to report test results directly to Testomat.io using the API.

Also, please refer to the [Reporting API reference](https://testomatio.github.io/reporter/)

## Prerequisites

- A Testomat.io account
- Project API key (can be found on the Settings > Project page; starts with `tstmt_`)

## API Overview

Testomat.io's API allows you to:

1. Create a run
2. Send test results to a run
3. Match test results to existing tests
4. Finish a run

The base URL for all API requests is `https://app.testomat.io`.

## Authentication

All requests require your Testomat.io API key. You can include it as a query parameter or in the request body:

```
api_key=tstmt_your_api_key
```

## Step 1: Create a Run

First, create a new test run to report results to:

**CURL Example:**

```bash
curl -X POST "https://app.testomat.io/api/reporter?api_key=tstmt_your_api_key" \
  -H "Content-Type: application/json" \
  -d '{
    "title": "My Test Run",
    "env": "production"
  }'
```

**HTTP Request:**

```
POST https://app.testomat.io/api/reporter?api_key=tstmt_your_api_key
Content-Type: application/json

{
  "title": "My Test Run",
  "env": "production"
}
```

**Request Body Options:**

All params are **optional**:

- `title` (string): Name of your test run
- `env` (string): The environment tests ran in (e.g., "staging", "production")
- `group_title` (string): Put this run into a RunGroup found by its title

**Response:**

```json
{
  "uid": "a0b1c2d3",
  "url": "https://app.testomat.io/projects//runs/a0b1c2d3"
}
```

Save the `uid` value - you'll need it to report test results.

## Step 2: Report Test Results

> By default, Testomat.io won't create tests automatically. Include `"create": true` in your request to create tests automatically. See details in the next section.

You can report test results individually or in batches:

### Individual Test Reporting

**CURL Example:**

```bash
curl -X POST "https://app.testomat.io/api/reporter/a0b1c2d3/testrun?api_key=tstmt_your_api_key" \
  -H "Content-Type: application/json" \
  -d '{
    "title": "Should login successfully",
    "status": "passed",
    "suite_title": "Authentication Tests",
    "test_id": "d8b9c0e1",
    "run_time": 0.5,
    "stack": "Error: .... (complete exception trace)"
  }'
```

**HTTP Request:**

```
POST https://app.testomat.io/api/reporter/a0b1c2d3/testrun?api_key=tstmt_your_api_key
Content-Type: application/json

{
  "title": "Should login successfully",
  "status": "passed",
  "suite_title": "Authentication Tests",
  "test_id": "@Ta0b0c0d0",
  "run_time": 0.5,
  "stack": "Error: ....
(complete exception trace)"
}
```

### Batch Test Reporting

**CURL Example:**

```bash
curl -X POST "https://app.testomat.io/api/reporter/a0b1c2d3/testrun?api_key=tstmt_your_api_key" \
  -H "Content-Type: application/json" \
  -d '{
    "api_key": "tstmt_your_api_key",
    "tests": [
      {
        "title": "Should login successfully",
        "status": "passed",
        "suite_title": "Authentication Tests",
        "test_id": "@Ta0b0c0d0",
        "rid": "windows-login-test",
        "run_time": 0.5
      },
      {
        "title": "Should login successfully",
        "status": "passed",
        "suite_title": "Authentication Tests",
        "test_id": "@Ta0b0c0d0",
        "rid": "linux-login-test",
        "run_time": 0.6
      },
      {
        "title": "Should show error for invalid credentials",
        "status": "failed",
        "suite_title": "Authentication Tests",
        "test_id": "T2",
        "rid": "windows-error-test",
        "run_time": 0.3,
        "message": "Expected error message not shown",
        "stack": "Error: Expected error message not shown...)"
      }
    ]
  }'
```

**HTTP Request:**

```
POST https://app.testomat.io/api/reporter/a0b1c2d3/testrun?api_key=tstmt_your_api_key
Content-Type: application/json

{
  "api_key": "tstmt_your_api_key",
  "tests": [
    {
      "title": "Should login successfully",
      "status": "passed",
      "suite_title": "Authentication Tests",
      "test_id": "@Ta0b0c0d0",
      "rid": "windows-login-test",
      "run_time": 0.5
    },
    {
      "title": "Should login successfully",
      "status": "passed",
      "suite_title": "Authentication Tests",
      "test_id": "@Ta0b0c0d0",
      "rid": "linux-login-test",
      "run_time": 0.6
    },
    {
      "title": "Should show error for invalid credentials",
      "status": "failed",
      "suite_title": "Authentication Tests",
      "test_id": "T2",
      "rid": "windows-error-test",
      "run_time": 0.3,
      "message": "Expected error message not shown",
      "stack": "Error: Expected error message not shown...)"
    }
  ],
  "batch_index": 1
}
```

**Key Test Properties:**

- `title` (string): Test name
- `status` (string): Must be "passed", "failed", or "skipped"
- `test_id` (string): ID of the test in Testomat.io (optional)
- `suite_title` (string): Test suite name (optional)
- `run_time` (number): Test duration in seconds
- `message` (string): Error message for failed tests
- `stack` (string): Stack trace for failed tests
- `steps` (array): Test steps (optional)
- `artifacts` (array): URLs to test artifacts like screenshots (optional)
- `rid` (string): Report ID to uniquely identify test executions (optional)

> The `rid` parameter is used to identify the same test executed in multiple environments. For example, if we run one test on both Windows and Linux and want it reported twice, we can use different `rid` values with the same `test_id`.

### Using Report ID (rid) for Cross-Platform Testing

The `rid` parameter allows you to report the same test multiple times in a single run. This is especially useful for **cross-platform testing**.
When you run the same test in different environments, such as different operating systems or browsers, each execution can be reported separately.

**Example Scenario:**

In this example, we run the same login test (with ID "@Ta0b0c0d0") on both Windows and Linux:

```
POST https://app.testomat.io/api/reporter/a0b1c2d3/testrun?api_key=tstmt_your_api_key
Content-Type: application/json

{
  "tests": [
    {
      "title": "Should login successfully",
      "status": "passed",
      "test_id": "@Ta0b0c0d0",
      "rid": "chrome-login:windows",
      "run_time": 0.5,
      "suite_title": "Login Tests"
    },
    {
      "title": "Should login successfully",
      "status": "failed",
      "test_id": "@Ta0b0c0d0",
      "rid": "chrome-login:linux",
      "run_time": 0.6,
      "suite_title": "Login Tests",
      "message": "Login failed on Linux",
      "stack": "Error: Login failed on Linux\n    at Object.login (/tests/linux/auth.test.js:25:7)"
    }
  ],
  "batch_index": 1
}
```

By using different `rid` values, both test executions will be reported separately in Testomat.io, even though they have the same `test_id`. The platform will recognize them as different executions of the same test.

## Step 3: Finish the Test Run

When all tests are reported, finish the test run:

**CURL Example:**

```bash
curl -X PUT "https://app.testomat.io/api/reporter/a0b1c2d3?api_key=tstmt_your_api_key" \
  -H "Content-Type: application/json" \
  -d '{
    "status_event": "finish",
    "duration": 25.5
  }'
```

**HTTP Request:**

```
PUT https://app.testomat.io/api/reporter/a0b1c2d3?api_key=tstmt_your_api_key
Content-Type: application/json

{
  "status_event": "finish",
  "duration": 25.5
}
```

**Finishing Options:**

- `status_event`: Use "finish" to calculate status from test results, or explicitly set "pass" or "fail"
- `duration`: Total run duration in seconds

## Creating Tests Automatically

If you want to automatically create tests in Testomat.io that don't exist yet, include `"create": true` in your request:

**CURL Example:**

```bash
curl -X POST "https://app.testomat.io/api/reporter/a0b1c2d3/testrun?api_key=tstmt_your_api_key" \
  -H "Content-Type: application/json" \
  -d '{
    "title": "New Test Case",
    "status": "passed",
    "create": true
  }'
```

**HTTP Request:**

```
POST https://app.testomat.io/api/reporter/a0b1c2d3/testrun?api_key=tstmt_your_api_key
Content-Type: application/json

{
  "title": "New Test Case",
  "status": "passed",
  "create": true
}
```

This API enables seamless integration of your testing tools with Testomat.io, allowing you to report test results from any CI/CD pipeline or custom testing solution.

## Reporting Test Steps to Testomat.io

When reporting tests to Testomat.io, you can include detailed steps for better test analysis and debugging. Steps provide a hierarchical representation of test execution that makes it easier to understand what happened during the test.
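Since a step payload is plain JSON, it can be posted with any HTTP client. Below is a minimal sketch (not taken from the reference) of sending a test with nested steps from Node.js 18+; it assumes the `/api/reporter/{uid}/testrun` endpoint shown above, with the run `uid` from Step 1 and the API key provided via environment variables. The exact step fields are specified in the sections that follow.

```js
// Minimal sketch: post one test result with nested steps (Node.js 18+, global fetch).
// RUN_UID (from Step 1) and TESTOMATIO (the tstmt_ API key) are assumed env vars;
// all titles and durations are illustrative.
const url = `https://app.testomat.io/api/reporter/${process.env.RUN_UID}/testrun?api_key=${process.env.TESTOMATIO}`;

const body = {
  title: 'Login Test',
  status: 'passed',
  run_time: 1.2,
  steps: [
    { category: 'user', title: 'Navigate to login page', duration: 232 },
    {
      category: 'user',
      title: 'Enter credentials',
      duration: 421,
      // nested sub-steps, following the hierarchical structure described below
      steps: [{ category: 'framework', title: 'Click submit button', duration: 126 }],
    },
  ],
};

fetch(url, {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify(body),
}).then((res) => {
  if (!res.ok) throw new Error(`Reporting failed with HTTP ${res.status}`);
});
```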
+ +### Step Structure + +Steps in Testomat.io follow a hierarchical structure and can include: + +``` +POST https://app.testomat.io/api/reporter/a0b1c2d3/testrun?api_key=tstmt_your_api_key +Content-Type: application/json + +{ + "title": "Login Test", + "status": "passed", + "test_id": "T1", + "steps": [ + { + "category": "user", + "title": "Navigate to login page", + "duration": 232 + }, + { + "category": "user", + "title": "Enter credentials", + "duration": 421, + "steps": [ + { + "category": "framework", + "title": "Fill username input", + "duration": 150 + }, + { + "category": "framework", + "title": "Fill password input", + "duration": 145 + }, + { + "category": "framework", + "title": "Click submit button", + "duration": 126 + } + ] + }, + { + "category": "user", + "title": "Verify user is logged in", + "duration": 510 + } + ] +} +``` + +### Step Properties + +Each step can include the following properties: + +- `category` (string): Classifies the step type. Common values include: + + - `user`: High-level user action steps + - `framework`: Internal framework operations + - `hook`: Test hooks like beforeEach, afterEach + +- `title` (string): Description of the step + +- `duration` (number): Time taken to execute the step in milliseconds + +- `steps` (array): Nested steps (sub-steps) + +- `error` (object): Error information if the step failed + +### Example: Complex Test with Steps and Error + +``` +POST https://app.testomat.io/api/reporter/a0b1c2d3/testrun?api_key=tstmt_your_api_key +Content-Type: application/json + +{ + "title": "User checkout process", + "status": "failed", + "test_id": "T45", + "steps": [ + { + "category": "user", + "title": "Login as test user", + "duration": 845, + "steps": [ + { + "category": "framework", + "title": "Navigate to login page", + "duration": 312 + }, + { + "category": "framework", + "title": "Fill credentials", + "duration": 421 + }, + { + "category": "framework", + "title": "Submit form", + "duration": 112 + } + ] + }, + { + "category": "user", + "title": "Add product to cart", + "duration": 653, + "steps": [ + { + "category": "framework", + "title": "Navigate to product page", + "duration": 245 + }, + { + "category": "framework", + "title": "Click add to cart button", + "duration": 408 + } + ] + }, + { + "category": "user", + "title": "Complete checkout", + "duration": 1205, + "steps": [ + { + "category": "framework", + "title": "Navigate to checkout", + "duration": 320 + }, + { + "category": "framework", + "title": "Fill shipping information", + "duration": 545 + }, + { + "category": "framework", + "title": "Submit payment", + "duration": 340, + "error": { + "message": "Payment gateway timeout", + "stack": "Error: Payment gateway timeout\n ..." + } + } + ] + } + ], + "message": "Test failed during payment submission", + "stack": "Error: Payment gateway timeout\n ..." +} +``` + +By providing detailed step information, you can quickly identify which part of the test failed and under what circumstances, making debugging and test maintenance much easier. + +## Uploading Artifacts to Testomat.io + +Test artifacts such as screenshots, videos, logs, and other files provide crucial evidence of test execution. Testomat.io allows you to upload and associate these artifacts with your test runs. + +### Artifact Upload Methods + +There are two ways to upload artifacts to Testomat.io: + +1. **Direct URL Reference**: Link to files already uploaded to publicly accessible locations +2. 
**S3 Storage**: Upload files to Amazon S3 storage configured with Testomat.io

### Reporting Tests with Artifacts

To associate artifacts with a test, include the `artifacts` property in your test data:

```
POST https://app.testomat.io/api/reporter/a0b1c2d3/testrun?api_key=tstmt_your_api_key
Content-Type: application/json

{
  "title": "Login Test",
  "status": "failed",
  "test_id": "@Ta0b0c0d0",
  "artifacts": [
    "https://your-s3-bucket.s3.amazonaws.com/screenshots/login-failure.png",
    "https://your-s3-bucket.s3.amazonaws.com/videos/login-test.mp4"
  ],
  "message": "Failed to log in with valid credentials"
}
```

To make Testomat.io display artifacts, configure access in the Settings > Artifacts section by setting all S3 bucket credentials.

Instead of AWS S3, you can use any other S3-compatible provider, such as:

- Google Cloud Storage
- Digital Ocean Spaces
- MinIO
- Cloudflare R2
- etc.

## Reporting Parameterized Tests

Parameterized tests (also known as data-driven tests) run the same test logic with different input data. Testomat.io provides a way to report these tests clearly, showing both the test structure and the specific data used for each test run.

### Using the `example` Parameter

When reporting parameterized tests, use the `example` parameter to identify the specific data set used in each test execution:

```
POST https://app.testomat.io/api/reporter/run-id-12345/testrun?api_key=tstmt_your_api_key
Content-Type: application/json

{
  "title": "User login with different roles",
  "status": "passed",
  "test_id": "T1",
  "rid": "login-admin-role",
  "example": {
    "username": "admin",
    "role": "administrator",
    "expectedPermissions": ["read", "write", "delete"]
  }
}
```

In this example, the `example` parameter contains the specific data used for this test run.
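To illustrate how `example` combines with `rid` in practice, here is a hedged Node.js sketch that reports one execution per data row. The endpoint and fields follow the API described in this guide, while the data table, the `RUN_UID` variable, and the `rid` naming scheme are illustrative assumptions:

```js
// Sketch: report one result per data row of a parameterized test.
// The rows, run id, and rid naming are illustrative; endpoint and field
// names follow the batch reporting format documented above.
const rows = [
  { username: 'admin', role: 'administrator' },
  { username: 'editor', role: 'content_editor' },
];

const tests = rows.map((example) => ({
  title: 'User login with different roles',
  status: 'passed',
  test_id: 'T1',
  rid: `login-${example.role}`, // unique per execution
  example, // the data set shown next to this execution in the report
}));

fetch(
  `https://app.testomat.io/api/reporter/${process.env.RUN_UID}/testrun?api_key=${process.env.TESTOMATIO}`,
  {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ tests }),
  },
);
```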
### Reporting Multiple Parameterized Test Runs

For tests that run with multiple data sets, report each execution with the same test ID but different RIDs and examples:

```
POST https://app.testomat.io/api/reporter/run-id-12345/testrun?api_key=tstmt_your_api_key
Content-Type: application/json

{
  "tests": [
    {
      "title": "User login with different roles",
      "status": "passed",
      "test_id": "T1",
      "rid": "login-admin-role",
      "example": {
        "username": "admin",
        "role": "administrator",
        "expectedPermissions": ["read", "write", "delete"]
      }
    },
    {
      "title": "User login with different roles",
      "status": "passed",
      "test_id": "T1",
      "rid": "login-editor-role",
      "example": {
        "username": "editor",
        "role": "content_editor",
        "expectedPermissions": ["read", "write"]
      }
    },
    {
      "title": "User login with different roles",
      "status": "failed",
      "test_id": "T1",
      "rid": "login-viewer-role",
      "example": {
        "username": "viewer",
        "role": "readonly",
        "expectedPermissions": ["read"]
      },
      "message": "User was granted write permission when they should only have read"
    }
  ]
}
```

### Example Structure

The `example` parameter can contain any JSON structure that represents your test data:

- **Simple parameters**: `{"username": "admin", "password": "secret"}`
- **Complex objects**: `{"user": {"id": 1, "role": "admin"}, "settings": {"theme": "dark"}}`

A string can also be passed as `example`; in this case, it will be reported as:

```json
{ "example": "your string" }
```

### Complete Example: Table-Driven Test

Here's a complete example of reporting a table-driven test that verifies email validation with different inputs:

```
POST https://app.testomat.io/api/reporter/run-id-12345/testrun?api_key=tstmt_your_api_key
Content-Type: application/json

{
  "tests": [
    {
      "title": "Email validation",
      "status": "passed",
      "test_id": "@T01010101",
      "example": {
        "email": "user@example.com",
        "expectedValid": true
      }
    },
    {
      "title": "Email validation",
      "status": "passed",
      "test_id": "@T01010101",
      "example": {
        "email": "user@subdomain.example.com",
        "expectedValid": true
      }
    },
    {
      "title": "Email validation",
      "status": "passed",
      "test_id": "@T01010101",
      "example": {
        "email": "userexample.com",
        "expectedValid": false
      }
    },
    {
      "title": "Email validation",
      "status": "failed",
      "test_id": "@T01010101",
      "example": {
        "email": "user+code@example.com",
        "expectedValid": true
      },
      "message": "Valid email with + character was rejected",
      "stack": "Error: Expected validation to pass but got false\n    at validateEmail (/tests/validation.js:45:7)"
    }
  ],
  "batch_index": 1
}
```

By properly structuring your parameterized test reports with examples, you'll be able to quickly identify which specific data sets are causing test failures and understand the context of each test execution.

diff --git a/src/content/docs/test-reporting/artifacts.md b/src/content/docs/test-reporting/artifacts.md
index a1332ee6..bf1da085 100644
--- a/src/content/docs/test-reporting/artifacts.md
+++ b/src/content/docs/test-reporting/artifacts.md
@@ -21,11 +21,9 @@ By using external storage Testomat.io allows getting full control over how the s
 
 To have test artifacts uploaded you need to create S3 Object Storage bucket on AWS, DigitalOcean, or Google Cloud Storage in interoperability mode.
-
+
+> **⚠️ Important**
+>
+> You need to obtain the following credentials: `ACCESS_KEY_ID`, `SECRET_ACCESS_KEY`, `BUCKET`, `REGION`, `ENDPOINT` (not required for AWS) to access the S3 bucket. Then go to the [Configuration](#configuration) section to enable S3 access.
 
 ## Overview
 
@@ -61,7 +59,7 @@ Then provide the same S3 credentials in "Settings > Artifacts" section of a proj
 
 Links to files will be pre-signed and expire automatically in 10 minutes.
 
-![Testomatio - Private Access](./images/Test_artifacts.png)
+![Testomatio - Private Access](./images/test-artifacts.png)
 
 > Test Artifacts settings are available only to managers and owners of a project!
 
@@ -82,7 +80,7 @@ Recommended way is to set S3 bucket credentials as environment variables:
 
 If you use Testomat.io Application, you can set those variables inside **Settings > Artifacts** page: enable "Share credentials" toggle to pass credentials into reporter and fill in S3 credentials into the displayed form.
 
-![Testomatio - Configuration](./images/Configuration.png)
+![Testomatio - Configuration](./images/configuration.png)
 
 In this case Testomat.io Reporter will obtain S3 credentials for server and use them to save artifacts.
 
@@ -91,7 +89,7 @@ Alternatively, you can configure reporter by using environment variables. If `S3
 
 We recommend storing S3 configuration in `.env` files when running tests locally and using job configuration when running on the Continuous Integration server. Please keep `S3_SECRET_ACCESS_KEY` in secret.
 
-```
+```bash
 TESTOMATIO_PRIVATE_ARTIFACTS=1
@@ -124,6 +122,8 @@ S3_BUCKET=artifacts
 S3_REGION=us-west-1
 ```
 
+> Use the `S3_FORCE_PATH_STYLE` option to enable or disable force path style. Testomat.io expects the artifact URL to be in the format: `https://<bucket>.s3.<region>.amazonaws.com`
+
 To allow Testomat.io access stored files it is recommended to apply this policy to the bucket:
 
 ```json
@@ -154,7 +154,7 @@ To allow Testomat.io access stored files it is recommended to apply this policy
 
 If you use **Playwright** and you want to enable trace viewing ensure that CORS policy is enabled for the bucket:
 
-```
+```bash
 aws s3api put-bucket-cors \
   --bucket YOUR_BUCKET_NAME \
   --cors-configuration '{
@@ -208,7 +208,7 @@ S3_FORCE_PATH_STYLE=true
 
 Google Cloud Storage can work via S3 protocol if **Interoperability mode** is enabled.
 
-![Testomatio - Google Cloud Storage](./images/Google_Cloud.png)
+![Testomatio - Google Cloud Storage](./images/google-cloud.png)
 
 Open Cloud Storage. Create a bucket for artifacts, then inside Settings select "Interoperability". Create `ACCESS_KEY` and `SECRET_ACCESS_KEY` for the current user.
 
@@ -222,9 +222,11 @@ S3_REGION=us-east1
 
 Please note, that you need to enable [Use Private URLs for Test Artifacts](https://docs.testomat.io/project/runs/reporter/artifacts/#privacy) in Testomat.io Project Settings if you configure bucket credentials on Testomat.io side.
 
+### Cloudflare R2
+
 #### Cloudflare R2 and integration with Testomatio
 
-##### 1. Creating a Bucket in Cloudflare R2
+**1. Creating a Bucket in Cloudflare R2**
 
 - **Cloudflare Dashboard:** [https://dash.cloudflare.com/](https://dash.cloudflare.com/)
 - **Steps:**
   - Navigate to the **R2** section and select the option to create a new bucket.
   - Choose a unique name and a region for the bucket.
- ![Create a new bucket](./images/cloudr2-01.png)
+![Testomatio - Create a new bucket](./images/create-bucket-1.png)
 
- ![Set bucket name](./images/cloudr2-02.png)
+![Testomatio - Set bucket name](./images/create-bucket-2.png)
 
-## 2. Creating API Keys for the Bucket
+**2. Creating API Keys for the Bucket**
 
 - **Purpose:** To ensure secure access to the bucket.
 - **Steps:**
   - Generate a new API key with read/write permissions.
   - Save and verify the generated key.
 
- ![Generate API keys for bucket](./images/cloudr2-03.png)
+![Testomatio - Generate API keys for bucket](./images/api-key-1.png)
+
+![Testomatio - Select Api variant](./images/api-key-2.png)
+
+![Testomatio - Create Api key](./images/api-key-3.png)
+
+![Testomatio - Set permissions for Api key](./images/api-key-4.png)
+
+> [!WARNING]
+> If you set the bucket permission to "Object...", you need to set up the CORS policy manually; for "Admins" it is not required.
 
- ![Select Api variant](./images/cloudr2-04.png)
+**Policy Settings**:
 
- ![Create Api key](./images/cloudr2-05.png)
+![Testomatio - policy settings 2](./images/policy.png)
 
- ![Set permissions for Api key](./images/cloudr2-06.png)
+Example of policy settings for Playwright traces:
 
- ![Copy Api key](./images/cloudr2-07.png)
+```json
+[
+  {
+    "AllowedOrigins": ["https://app.testomat.io"],
+    "AllowedMethods": ["GET"],
+    "AllowedHeaders": ["*"],
+    "ExposeHeaders": ["Access-Control-Allow-Origin"],
+    "MaxAgeSeconds": 3000
+  },
+  {
+    "AllowedOrigins": ["https://trace.playwright.dev"],
+    "AllowedMethods": ["GET"],
+    "AllowedHeaders": ["*"],
+    "ExposeHeaders": ["Access-Control-Allow-Origin"],
+    "MaxAgeSeconds": 3000
+  }
+]
+```
+
+![Testomatio - Copy Api key](./images/api-key-5.png)
 
-## 3. Connecting the Bucket to Testomatio
+**3. Connecting the Bucket to Testomatio**
 
 - **Overview:** Integration with R2 streamlines testing and data management.
 - **Configuration:**
   - In the Testomatio panel, enter the API keys and bucket address.
   - Configure the paths and access parameters as required.
 
- ![Set bucket credentials](./images/cloudr2-08.png)
+![Testomatio - Set bucket credentials](./images/artifacts-testomat.png)
 
 ## Adding Artifacts
 
@@ -285,7 +315,7 @@ Testomat.io Reporter has built-in support and automatically uploads saved artifa
 
 - Playwright
 - CodeceptJS
 - Cypress
-- WebdriverIO
+- WebdriverIO ([details](./frameworks.md#webdriverio))
 
 If a screenshot, a video, or a trace was saved and attached to test, Testomat.io Reporter will automatically upload any of these as artifacts.
 
@@ -321,7 +351,7 @@ To attach a file to a test as an artifact print the file name into console with
 
 If S3 credentials are set, files will be uploaded to bucket and attached to test in a report.
 
-##### Java Example:
+#### Java Example
 
 Attaching a screenshot to the Java test
 
```

 System.out.println("file://" + pathToScreenshot);
```
 
-##### C# Example:
+#### C# Example
 
 Attaching a screenshot to the C# test
 
@@ -360,15 +390,15 @@ puts "file://" + path_to_screenshot
 
 ## Troubleshooting
 
-#### I don't have a S3 Bucket
+### I don't have a S3 Bucket
 
 Well then, just get it. Even if your company doesn't provide one, you can purchase a S3 storage by yourself.
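If you would rather experiment locally before provisioning cloud storage, one option (a sketch, not taken from these docs) is to run MinIO, which is listed among the supported S3-compatible providers above. The image, ports, and credentials below are illustrative defaults:

```bash
# Run a local S3-compatible MinIO server in Docker (illustrative credentials).
docker run -d -p 9000:9000 -p 9001:9001 \
  -e MINIO_ROOT_USER=local-access-key \
  -e MINIO_ROOT_PASSWORD=local-secret-key \
  minio/minio server /data --console-address ":9001"

# Then point the reporter at it using the variables from the Configuration
# section above (S3_ENDPOINT is assumed to accept a custom endpoint URL):
# S3_ENDPOINT=http://localhost:9000
# S3_ACCESS_KEY_ID=local-access-key
# S3_SECRET_ACCESS_KEY=local-secret-key
# S3_BUCKET=artifacts
# S3_FORCE_PATH_STYLE=true
```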
-#### Publishing Artifacts from Docker Container
+### Publishing Artifacts from Docker Container
 
 If your tests are running within Docker container pass all environment variables explicitly
 
-```
+```bash
 docker run -e TESTOMATIO_PRIVATE_ARTIFACTS=1 \
   -e S3_ACCESS_KEY_ID=11111111111111111111 \
   -e S3_SECRET_ACCESS_KEY=2222222222222222222222222222222222222222222 \
@@ -377,12 +407,26 @@ docker run -e TESTOMATIO_PRIVATE_ARTIFACTS=1 \
 run-tests
 ```
 
-#### Environment variables for S3 are not working
+### Environment variables for S3 are not working
 
 This can be caused by various reasons. As an alternative approach, you can try to set S3 credentials inside Testomat.io Application and enable shared credentials.
 
-#### How to cleanup old artifacts?
+### How to cleanup old artifacts?
 
 At this moment we don't provide any tools for the cleanup of files. It is recommended to write a custom cleanup tool on a S3 bucket.
 
+### Private artifacts uploaded to AWS S3 but not displayed
+
+If an artifact URL is formatted as `https://s3.<region>.amazonaws.com/<bucket>/`, there might be issues displaying it.
+
+Please make sure that you don't use `S3_FORCE_PATH_STYLE=true` in the `.env` file.
+It is also recommended to explicitly set this value to false:
+
+```
+S3_FORCE_PATH_STYLE=false
+```
+
+Please also make sure your bucket doesn't use dots in its name.
+The expected bucket URL format is `https://<bucket>.s3.<region>.amazonaws.com`. If force path style is enabled, the file might not be available via a presigned link.
+
diff --git a/src/content/docs/test-reporting/cli.md b/src/content/docs/test-reporting/cli.md
index 7d4e325b..b6e615f7 100644
--- a/src/content/docs/test-reporting/cli.md
+++ b/src/content/docs/test-reporting/cli.md
@@ -10,6 +10,7 @@ head:
       content: Testomat.io Reporter, CLI, command-line tool, test runs, managing test runs, CI/CD pipelines, test reporting, parse XML reports, upload artifacts, @testomatio/reporter, API key, environment variables, JUnit, NUnit, xUnit, TRX, S3 artifacts configuration, test runner, test automation, software testing
 ---
 
+
 The Testomat.io Reporter CLI is a powerful tool for managing test runs, parsing XML reports, and uploading artifacts.
 
 CLI can be used to start and finish test runs, run tests, parse XML reports, and upload artifacts. It can be used in CI/CD pipelines or locally. Reporter is designed to work with [Testomat.io](https://testomat.io) service but not exclusively
@@ -194,6 +195,56 @@ TESTOMATIO=tstmt_* npx @testomatio/reporter upload-artifacts
 
 However, `upload-artifacts` command will upload all files after the run, without blocking the final result.
 
+### 6. replay
+
+The `replay` command allows you to re-send test data from debug files to Testomat.io. This is useful when your original test run failed to upload results properly.
+
+**Important:** To make replay work, tests should be executed with the `DEBUG=1` variable set, so that they run in debug mode and save their data into a file.
+
+**Usage:**
+```bash
+npx @testomatio/reporter replay [debug-file] [options]
+```
+
+**Arguments:**
+- `debug-file` (optional) - Path to debug file. Defaults to latest created debug file, i.e.
`/tmp/testomatio.debug.latest.json`
+
+**Options:**
+- `--dry-run` - Preview the data without sending to Testomat.io
+- `--env-file <file>` - Load environment variables from env file
+
+**Examples:**
+
+```bash
+# run tests in debug mode first, so a debug file is written
+TESTOMATIO=<api_key> DEBUG=1 npx playwright test
+
+# replay the latest debug file
+TESTOMATIO=<api_key> npx @testomatio/reporter replay
+
+# replay a specific debug file
+TESTOMATIO=<api_key> npx @testomatio/reporter replay /path/to/debug.json
+
+# preview the data without sending it
+TESTOMATIO=<api_key> npx @testomatio/reporter replay --dry-run
+
+# load environment variables from a custom env file
+npx @testomatio/reporter replay --env-file .env.staging
+```
+
+**How it works:**
+
+The replay command uses the `ReplayService` class (located in `src/replay.js`) to:
+
+1. Parse the debug file line by line
+2. Extract environment variables, run parameters, test data, and finish parameters
+3. Restore environment variables (without overriding existing ones)
+4. Create a new test run using the TestomatClient
+5. Send each test result individually
+6. Update the run status when complete
+
+For more details about debug files, see the [Debug Pipe documentation](pipes/debug.md).
+
 ## Environment Variables
 
 Many commands rely on environment variables. You can set these in a command line, in a `.env` file, or use the `--env-file` option to specify a custom env file. Important variables include:
diff --git a/src/content/docs/test-reporting/configuration.md b/src/content/docs/test-reporting/configuration.md
index f8b8433b..2a0292c7 100644
--- a/src/content/docs/test-reporting/configuration.md
+++ b/src/content/docs/test-reporting/configuration.md
@@ -27,12 +27,30 @@ Your Project API key for reporting to Testomat.io.
 
 #### `TESTOMATIO_CREATE`
 
-Create test IDs
+Create tests which do not yet exist in a project
 
 ```
 TESTOMATIO={API_KEY} TESTOMATIO_CREATE=1
 ```
 
+#### `TESTOMATIO_WORKDIR`
+
+Specify a custom working directory for relative file paths in test reports. When tests are created with `TESTOMATIO_CREATE=1`, file paths will be relative to this directory instead of the current working directory.
+
+```
+TESTOMATIO={API_KEY} TESTOMATIO_CREATE=1 TESTOMATIO_WORKDIR=/path/to/project
+```
+
+#### `TESTOMATIO_SUITE`
+
+Place newly created tests into a specific suite. Can be used on XML import or combined with `TESTOMATIO_CREATE=1`. The suite should be specified by its ID:
+
+```
+TESTOMATIO={API_KEY} TESTOMATIO_CREATE=1 TESTOMATIO_SUITE=@S1235678
+
+TESTOMATIO={API_KEY} TESTOMATIO_SUITE=@S1235678 npx @testomatio/reporter xml
+```
+
 #### `TESTOMATIO_DISABLE_BATCH_UPLOAD`
 
 Disables batch uploading (multiple test results in one request) and uploads each test result one by one.
@@ -79,6 +97,16 @@ Example:
 TESTOMATIO_EXCLUDE_SKIPPED=1
 ```
 
+#### `TESTOMATIO_NO_TIMESTAMP`
+
+Disable automatic timestamp generation for test results. By default, the reporter automatically adds timestamps to test data. Use this option if you run tests in parallel on different machines where time is not synchronized.
+
+Example:
+
+```
+TESTOMATIO_NO_TIMESTAMP=1
+```
+
 #### `TESTOMATIO_INTERCEPT_CONSOLE_LOGS`
 
 Intercept console logs and add them to your report.
@@ -131,6 +159,14 @@ Example:
 TESTOMATIO={API_KEY} TESTOMATIO_PROCEED=1
 ```
 
+#### `TESTOMATIO_PUBLISH`
+
+Publish the run after reporting and provide a public URL:
+
+```
+TESTOMATIO_PUBLISH=1 TESTOMATIO={API_KEY}
+```
+
 #### `TESTOMATIO_RUN`
 
 Add a report to the run by ID.
@@ -151,7 +187,6 @@ Use `/` separator to create a nested rungroup:
 TESTOMATIO={API_KEY} TESTOMATIO_RUNGROUP_TITLE="Builds/${BUILD_ID}"
 ```
 
-
 #### `TESTOMATIO_SHARED_RUN`
 
 Report parallel execution to the same run matching it by title.
**If the run was created more than 20 minutes ago, a new run will be created instead.** To change the timeout, use the `TESTOMATIO_SHARED_RUN_TIMEOUT` variable.

diff --git a/src/content/docs/test-reporting/debug-file-format.md b/src/content/docs/test-reporting/debug-file-format.md
new file mode 100644
index 00000000..406d4548
--- /dev/null
+++ b/src/content/docs/test-reporting/debug-file-format.md
@@ -0,0 +1,385 @@
+---
+title: Debug File Format
+---
+
+
+The Testomatio Reporter debug file format is used to capture and replay test execution data. This specification defines the structure, storage location, and content of debug files.

## File Format

Debug files use **JSONL (JSON Lines)** format, not standard JSON. Each line contains a single JSON object representing a specific action or data entry. This format allows for streaming processing and incremental writing.

```
{"t":"+0ms","datetime":"2024-01-15T10:30:00.000Z","timestamp":1705315800000}
{"t":"+5ms","data":"variables","testomatioEnvVars":{"TESTOMATIO":"abc123"}}
{"t":"+10ms","action":"createRun","params":{"title":"Test Run"}}
```

## File Storage

### File Location
Debug files are stored in the system's temporary directory:

**Primary File (timestamped):**
```
{os.tmpdir()}/testomatio.debug.{timestamp}.json
```
Example: `/tmp/testomatio.debug.1705315800000.json`

**Symlink (latest):**
```
{os.tmpdir()}/testomatio.debug.latest.json
```

The symlink always points to the most recently created debug file, providing a consistent access point for tools like the replay command.

### File Creation
- Files are created when the `TESTOMATIO_DEBUG` or `DEBUG` environment variable is set
- A new timestamped file is created for each test session
- The symlink is updated to point to the latest file
- If symlink creation fails, it's logged but doesn't prevent file creation

## Entry Structure

Every entry in the debug file follows this base structure:

```json
{
  "t": "+{time}ms", // Time elapsed since last action (required)
  // ... action-specific fields
}
```

### Time Field (`t`)
- Format: a "+"-prefixed duration, e.g. `"+150ms"`; longer durations may be rendered in seconds, e.g. `"+2.5s"`
- Represents time passed since the previous action
- Generated using `prettyMs()` for human readability

## Entry Types

### 1. Session Initialization

**Timestamp Entry:**
```json
{
  "t": "+0ms",
  "datetime": "2024-01-15T10:30:00.000Z",
  "timestamp": 1705315800000
}
```

**Environment Variables:**
```json
{
  "t": "+5ms",
  "data": "variables",
  "testomatioEnvVars": {
    "TESTOMATIO": "api_key_here",
    "TESTOMATIO_TITLE": "Test Run Title",
    "TESTOMATIO_ENV": "staging"
  }
}
```

**Store Data:**
```json
{
  "t": "+10ms",
  "data": "store",
  "store": {}
}
```

### 2. Test Run Management Actions

**Prepare Run:**
```json
{
  "t": "+15ms",
  "action": "prepareRun",
  "data": {
    "pipe": "testomatio",
    "pipeOptions": "tag-name=smoke"
  }
}
```

**Create Run:**
```json
{
  "t": "+20ms",
  "action": "createRun",
  "params": {
    "title": "Test Run Title",
    "env": "staging",
    "parallel": true,
    "isBatchEnabled": true
  }
}
```

**Finish Run:**
```json
{
  "t": "+5000ms",
  "actions": "finishRun",
  "params": {
    "status": "finished",
    "parallel": true
  }
}
```

### 3. Test Execution Actions
**Add Single Test:**
```json
{
  "t": "+100ms",
  "action": "addTest",
  "runId": "run-id-uuid",
  "testId": {
    "id": "test-unique-id",
    "title": "Test case title",
    "status": "passed",
    "time": 1500,
    "rid": "request-id",
    "suite": "Suite Name",
    "file": "path/to/test.js",
    "error": "Error message if failed",
    "steps": [
      {
        "title": "Step description",
        "status": "passed",
        "time": 500
      }
    ],
    "artifacts": [
      {
        "name": "screenshot.png",
        "type": "image/png",
        "path": "/path/to/screenshot.png"
      }
    ],
    "files": [
      "/path/to/attachment1.txt",
      "/path/to/attachment2.log"
    ]
  }
}
```

**Add Tests Batch:**
```json
{
  "t": "+200ms",
  "action": "addTestsBatch",
  "runId": "run-id-uuid",
  "tests": [
    {
      "id": "test-1",
      "title": "First test",
      "status": "passed",
      "time": 800
    },
    {
      "id": "test-2",
      "title": "Second test",
      "status": "failed",
      "time": 1200,
      "error": "Assertion failed"
    }
  ]
}
```

## Field Definitions

### Common Fields

| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `t` | string | Yes | Time elapsed since last action (e.g., "+150ms") |
| `action` | string | Conditional | Action type identifier (most actions) |
| `actions` | string | Conditional | Action type identifier (for finishRun only) |
| `data` | string | Conditional | Data type identifier for non-action entries |

### Action-Specific Fields

#### `createRun` / `finishRun`
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `params` | object | Yes | Parameters for run creation/completion |
| `params.title` | string | No | Test run title |
| `params.env` | string | No | Environment name |
| `params.parallel` | boolean | No | Whether run supports parallel execution |
| `params.status` | string | No | Run status (for finishRun) |
| `params.isBatchEnabled` | boolean | No | Whether batch upload is enabled |

#### `addTest` / `addTestsBatch`
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `runId` | string | No | UUID of the test run |
| `testId` | object | Yes* | Single test data (for addTest) |
| `tests` | array | Yes* | Array of test objects (for addTestsBatch) |

*Either `testId` or `tests` is required depending on action type.
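As a quick illustration of these shapes, an `addTest` entry can be appended as a single JSONL line. This is a sketch only; the `runId` and test values below are placeholders:

```javascript
import fs from 'fs';
import os from 'os';
import path from 'path';

const debugFile = path.join(os.tmpdir(), 'testomatio.debug.latest.json');

const entry = {
  t: '+120ms',          // time elapsed since the previous action
  action: 'addTest',
  runId: 'run-id-uuid', // placeholder
  testId: {
    id: 'test-unique-id',
    title: 'User can log in',
    status: 'passed',
    time: 1500,
  },
};

// JSONL: one JSON object per line, appended as the run progresses
fs.appendFileSync(debugFile, JSON.stringify(entry) + '\n');
```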
### Test Object Fields
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `id` | string | Yes | Unique test identifier |
| `title` | string | Yes | Test case title |
| `status` | string | Yes | Test status: "passed", "failed", "skipped", "pending" |
| `time` | number | No | Execution time in milliseconds |
| `rid` | string | No | Request ID for deduplication |
| `suite` | string | No | Test suite name |
| `file` | string | No | Test file path |
| `error` | string | No | Error message for failed tests |
| `stack` | string | No | Stack trace for failed tests |
| `code` | string | No | Test source code |
| `steps` | array | No | Array of test step objects |
| `artifacts` | array | No | Array of artifact objects |
| `files` | array | No | Array of file paths |

### Step Object Fields
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `title` | string | Yes | Step description |
| `status` | string | Yes | Step status |
| `time` | number | No | Step execution time |
| `error` | string | No | Error message for failed steps |

### Artifact Object Fields
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `name` | string | Yes | Artifact file name |
| `type` | string | No | MIME type |
| `path` | string | No | File system path |
| `url` | string | No | Remote URL |
| `size` | number | No | File size in bytes |

### Environment Variables Entry
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `data` | string | Yes | Must be "variables" |
| `testomatioEnvVars` | object | Yes | All TESTOMATIO_* environment variables |

### Store Entry
| Field | Type | Required | Description |
|-------|------|----------|-------------|
| `data` | string | Yes | Must be "store" |
| `store` | object | Yes | Internal store data |

## Status Values

### Test Status
- `"passed"` - Test executed successfully
- `"failed"` - Test failed with assertion or runtime error
- `"skipped"` - Test was skipped intentionally
- `"pending"` - Test is pending implementation
- `"retried"` - Test was retried

### Run Status
- `"started"` - Run has been initiated
- `"finished"` - Run completed normally
- `"failed"` - Run failed due to system error
- `"interrupted"` - Run was interrupted

## File Processing Rules

### Line Processing
1. Each line must be valid JSON
2. Empty lines and whitespace-only lines are ignored
3. Parse errors are logged but don't stop processing
4. A maximum of 3 parse errors are shown, then summarized

### Test Deduplication
- Tests with `rid` (request ID) are deduplicated
- Multiple entries with the same `rid` are merged
- Later entries can override earlier fields
- Arrays (`files`, `artifacts`) are merged, not replaced
- Tests without `rid` are never deduplicated

### Data Merging
When merging duplicate tests:
1. Non-null/non-undefined values override null/undefined
2. Non-empty arrays override empty arrays
3. Files and artifacts arrays are concatenated
4. Other fields use a "last value wins" strategy (see the sketch below)
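A compact sketch of these merge rules (illustrative only, not the reporter's actual implementation):

```javascript
// Merge a later duplicate entry (same `rid`) into the earlier one
function mergeTests(earlier, later) {
  const merged = { ...earlier };
  for (const [key, value] of Object.entries(later)) {
    if (value === null || value === undefined) continue;      // rule 1: defined values win
    if (Array.isArray(value) && value.length === 0) continue; // rule 2: ignore empty arrays
    if (key === 'files' || key === 'artifacts') {
      merged[key] = [...(earlier[key] ?? []), ...value];      // rule 3: concatenate
    } else {
      merged[key] = value;                                    // rule 4: last value wins
    }
  }
  return merged;
}
```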
## Usage Examples

### Enabling Debug Output
```bash
# enable the debug pipe
export TESTOMATIO_DEBUG=1
# or
export DEBUG=1

# run tests as usual
npm test
```

### Replay from Debug File
```bash
# replay the latest debug file
npx @testomatio/reporter replay

# replay a specific debug file
npx @testomatio/reporter replay /tmp/testomatio.debug.1705315800000.json
```

### Reading Debug Files
```javascript
import fs from 'fs';

// Read debug file line by line
const content = fs.readFileSync('/tmp/testomatio.debug.latest.json', 'utf-8');
const lines = content.trim().split('\n').filter(line => line.trim());

for (const line of lines) {
  try {
    const entry = JSON.parse(line);
    console.log(`[${entry.t}] ${entry.action || entry.data}`);
  } catch (err) {
    console.error('Parse error:', err.message);
  }
}
```

## Implementation Notes

### File Creation
- Debug files are created in the `DebugPipe` constructor when enabled
- Each test session gets a unique timestamped filename
- A symlink provides consistent access to the latest file

### Batch Processing
- Tests can be logged individually or in batches
- The batch interval is 5 seconds by default
- Batch upload is triggered on intervals and during finishRun

### Error Handling
- Parse errors don't stop file processing
- Missing required fields may cause replay failures
- Malformed entries are skipped with warnings

### Performance Considerations
- Files are written synchronously for data integrity
- Large test suites may produce substantial debug files
- Consider file cleanup policies for long-running systems

## Version Compatibility

This specification is compatible with:
- Testomatio Reporter v2.0.0+
- All major test frameworks (Jest, Mocha, Playwright, etc.)
- Node.js 14+ environments

## Security Notes

- Debug files may contain sensitive data from environment variables
- Store debug files securely and clean them up regularly
- Consider filtering sensitive variables before logging
- Debug files are world-readable by default in temp directories

diff --git a/src/content/docs/test-reporting/frameworks.md b/src/content/docs/test-reporting/frameworks.md
index f4dd6732..2d40d43c 100644
--- a/src/content/docs/test-reporting/frameworks.md
+++ b/src/content/docs/test-reporting/frameworks.md
@@ -21,6 +21,7 @@ head:
- [JUnit](#junit)
- [Mocha](#mocha)
- [Newman/Postman](#newman)
+- [Nightwatch](#nightwatch)
- [Playwright](#playwright)
- [Protractor](#protractor)
- [Selenide](#selenide)
@@ -40,7 +41,10 @@ Add plugin to [codecept conf](https://github.com/testomatio/reporter/blob/master

plugins: {
  testomatio: {
    enabled: true,
+    // new way
+    require: '@testomatio/reporter/codecept',
+    // old way
    require: '@testomatio/reporter/lib/adapter/codecept',
  }
}
```
@@ -81,6 +85,9 @@ Add a reporter to Playwright config:

reporter: [
  ['list'],
  [
+    // new way
+    '@testomatio/reporter/playwright',
+    // old way
    '@testomatio/reporter/lib/adapter/playwright.js',
    {
      apiKey: process.env.TESTOMATIO,
@@ -108,6 +115,9 @@ TESTOMATIO={API_KEY} npx playwright test

Register `cypress-plugin` in `cypress/plugins/index.js`:

```javascript
+// new way
+const testomatioReporter = require('@testomatio/reporter/cypress');
+// old way
const testomatioReporter = require('@testomatio/reporter/lib/adapter/cypress-plugin');

/**
@@ -129,6 +139,9 @@ For Cypress >= `10.0.0` use `setupNodeEvents` in `cypress.config.js(ts)`

```javascript
setupNodeEvents(on, config) {
+  // new way
+  return require('@testomatio/reporter/cypress')(on, config)
+  // old way
  return require('@testomatio/reporter/lib/adapter/cypress-plugin')(on, config)
}
```
@@ -150,6 +163,9 @@ TESTOMATIO={API_KEY} npx cypress run

Run the following command from your project folder:

```bash
+# new way
+mocha --reporter @testomatio/reporter/mocha --reporter-options apiKey={API_KEY}
+# old way
mocha --reporter ./node_modules/@testomatio/reporter/lib/adapter/mocha.js --reporter-options apiKey={API_KEY}
```

@@ -162,6 +178,9 @@ mocha --reporter ./node_modules/@testomatio/reporter/lib/adapter/mocha.js --repo

Add the following line to [jest.config.js](https://github.com/testomatio/reporter/blob/master/example/jest/jest.config.js#L100):

```javascript
+// new way
+reporters: ['default', ['@testomatio/reporter/jest', { apiKey: process.env.TESTOMATIO }]],
+// old way
reporters: ['default', ['@testomatio/reporter/lib/adapter/jest.js', { apiKey: process.env.TESTOMATIO }]],
```

@@ -176,6 +195,8 @@ TESTOMATIO={API_KEY} npx jest

> 📑 [Example Project](https://github.com/testomatio/examples/tree/master/jest)

+> 📑 [Example Project with ESM syntax](https://github.com/testomatio/examples/tree/master/jest-esm)
+
> 📺 [Video](https://www.youtube.com/watch?v=RKfIfnEuGys)

### Vitest

@@ -192,6 +213,9 @@ npm install @testomatio/reporter --save-dev

```typescript
// import reporter
+// new way
+import TestomatioReporter from '@testomatio/reporter/vitest';
+// old way
import TestomatioReporter from '@testomatio/reporter/lib/adapter/vitest';

export default defineConfig({
@@ -230,6 +254,11 @@ Vitest reporter has some limitations:

Add the following lines to [wdio.conf.js](https://webdriver.io/docs/configurationfile/):

```javascript
+// new way
+const testomatio = require('@testomatio/reporter/webdriver');
+// or
+const testomatio = require('@testomatio/reporter/wdio');
+// old way
const testomatio = require('@testomatio/reporter/lib/adapter/webdriver');

exports.config = {
@@ -247,11 +276,9 @@ exports.config = {

To take screenshots of failed tests, add the following hook to `wdio.conf.js`:

```js
- afterTest: function (test, context, { error, result, duration, passed, retries }) {
-   if (error) {
-     browser.takeScreenshot()
-   }
- },
+afterTest: function (test, context, { error }) {
+  if (error) browser.takeScreenshot()
+}
```

Run the following command from your project folder:

@@ -271,6 +298,9 @@ TESTOMATIO={API_KEY} npx start-test-run -c 'npx wdio wdio.conf.js'

Run the following command from your project folder:

```bash
+# new way
+TESTOMATIO={API_KEY} npx cucumber-js --format @testomatio/reporter/cucumber
+# old way
TESTOMATIO={API_KEY} npx cucumber-js --format ./node_modules/@testomatio/reporter/lib/adapter/cucumber.js
```

@@ -310,6 +340,18 @@ TESTOMATIO={API_KEY} npx newman run {collection_name.json} -r testomatio

> 📑 [Example Project](https://github.com/testomatio/examples/tree/master/newman)

+### Nightwatch
+
+> 📝 When used with [Testomat.io Application](https://app.testomat.io) it is recommended to import automated tests first via [check-tests](https://github.com/testomatio/check-tests#cli). To create items on the fly, set the `TESTOMATIO_CREATE=1` env variable.
+
+1. Install the Testomatio reporter:
+
+`npm install @testomatio/reporter --save-dev`
+
+2. Add the Testomatio reporter to your test run command:
+
+`TESTOMATIO={API_KEY} npx nightwatch --reporter @testomatio/reporter/nightwatch`
+
### Detox

> 📝 When used with [Testomat.io Application](https://app.testomat.io) it is recommended to import automated tests first via [check-tests](https://github.com/testomatio/check-tests#cli). To create items on the fly, set the `TESTOMATIO_CREATE=1` env variable.
@@ -330,6 +372,9 @@ TESTOMATIO={API_KEY} npx detox test -c {configuration_name}

Add the following lines to [conf.js](https://github.com/angular/protractor/blob/5.4.1/example/conf.js):

```javascript
+// new way
+const JasmineReporter = require('@testomatio/reporter/jasmine');
+// old way
const JasmineReporter = require('@testomatio/reporter/lib/adapter/jasmine');

exports.config = {
diff --git a/src/content/docs/test-reporting/functions.md b/src/content/docs/test-reporting/functions.md
index 2d9a273f..0a66807c 100644
--- a/src/content/docs/test-reporting/functions.md
+++ b/src/content/docs/test-reporting/functions.md
@@ -11,20 +11,20 @@ head:
---

Testomat functions give you more flexibility in reporting and make your reports more powerful.

-### Usage example
+Functions can be used as ESM or CommonJS modules. Use `require` instead of `import` in CommonJS.

-```javascript
-import testomat from '@testomatio/reporter'; // or const testomat = require('@testomatio/reporter');
+```js
+// inside TypeScript or JavaScript ESM
+import { artifact, log, meta } from '@testomatio/reporter';

-test('my test', async () => {
-  testomat.artifact('path/to/file');
-});
+// inside CommonJS
+const { artifact, log, meta } = require('@testomatio/reporter');
```

-Or import only required functions:
+### Usage example

```javascript
-import { artifact, log, meta } from '@testomatio/reporter'; // or const { artifact, log } = require('@testomatio/reporter');
+import { artifact, log, meta } from '@testomatio/reporter';

test('my test', async () => {
  meta('ISSUE', 'MY-123');
@@ -43,6 +43,7 @@ After you import and invoke `testomat`, autocompletion will help you to find the
- [log](#log)
- [step](#step)
- [meta (key:value)](#meta)
+- [label](#label)

### Artifact

@@ -66,16 +67,22 @@ Similar to [step](#step) function, intended to log any additional info to the te

##### Usage examples:

```javascript
-testomat.log('your message');
-testomat.log(`your message ${variable}`);
-testomat.log('your message', variable1, variable2);
+import { log } from '@testomatio/reporter';
+
+test('my test', async () => {
+  log('your message');
+  log(`your message ${variable}`);
+  log('your message', variable1, variable2);
+});
```

```javascript
-const testomat = require('@testomatio/reporter');
+import { log } from '@testomatio/reporter';
+
test('Your test @T12345678', async () => {
  await page.login(user);
-  testomat.log`I was logged in with user ${user}`;
+
+  log`I was logged in with user ${user}`;
  assert(loggedIn);
  log`I was logged in with user ${user}`; // < shorter syntax
@@ -87,11 +94,12 @@

Adds step to the test report. Step is a human-readable description of the test action. It is used to describe the test flow. This function is similar to the [log](#log) function, but looks different in the report.

```javascript
-const testomat = require('@testomatio/reporter');
+import { step } from '@testomatio/reporter';
+
describe('Your suite @S12345678', () => {
  test('Your test @T12345678', async () => {
    await page.login();
-    testomat.step`Login successful`;
+    step`Login successful`;
    assert(something);
  });
});
@@ -113,7 +121,7 @@ test('my test', () => {
    browser: 'chrome',
    server: 'staging',
  });
-})
+});
```

Or in CommonJS style:

@@ -131,6 +139,23 @@ test('Your test @T12345678', async () => {
});
```

+### Label
+
+Adds a label to the reported test. Unlike `meta`, a label is persisted to the test case itself, not just to the reported run. If the label does not exist in Testomat.io, it will be created automatically and linked to the test case during the test run, or you can use existing labels in Testomat.io. You can also pass a label value if the label was created as a custom field.
+
+```javascript
+import { label } from '@testomatio/reporter';
+
+describe('Your suite', () => {
+  test('I can login', async () => {
+    label('Area', 'Auth');
+    label('Severity', 'High');
+    label('Browser');
+    await page.login();
+  });
+});
+```
+
---

Supported frameworks:

@@ -139,6 +164,6 @@
- 🟢 Cucumber
- 🟢 Jest
- 🟢 Mocha
-- 🟢 Playwright
+- 🟢 Playwright (optional, use native functionality)
- 🟢 WDIO (everything, except artifacts)

diff --git a/src/content/docs/test-reporting/junit.md b/src/content/docs/test-reporting/junit.md
index b700682d..9d4e015c 100644
--- a/src/content/docs/test-reporting/junit.md
+++ b/src/content/docs/test-reporting/junit.md
@@ -58,6 +58,69 @@ npx report-xml "{pattern}" --lang={lang}

- `--env-file <file>` option to load environment variables from a `.env` file. Inside the env file, set TESTOMATIO credentials such as the `TESTOMATIO` API key or the [bucket config for artifacts](./artifacts.md).
- `--timelimit