2 changes: 1 addition & 1 deletion .github/workflows/publish.yml
@@ -53,7 +53,7 @@ jobs:

      - name: Generate HISTORY.md
        run: |
-         git-changelog > HISTORY.md
+         git-changelog -c angular > HISTORY.md
          cat HISTORY.md

- name: Commit and Push
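Context for the one-line workflow change above: git-changelog's `-c angular` flag selects the Angular commit-message convention (feat:, fix:, chore:, ...), which is why the generated HISTORY.md entries below group `fix:` commits under a Fixed heading. A minimal sketch of what this step runs, written in Python for illustration and assuming the git-changelog CLI is installed on the runner:

```python
# Sketch of the updated "Generate HISTORY.md" step, assuming the
# git-changelog CLI (PyPI: git-changelog) is available on PATH.
import subprocess

def generate_history(path: str = "HISTORY.md") -> None:
    # -c angular groups commits by Angular-convention types (feat, fix, ...).
    out = subprocess.run(
        ["git-changelog", "-c", "angular"],
        capture_output=True, text=True, check=True,
    ).stdout
    with open(path, "w") as f:
        f.write(out)

if __name__ == "__main__":
    generate_history()
```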
11 changes: 11 additions & 0 deletions HISTORY.md
@@ -6,6 +6,17 @@
The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).

<!-- insertion marker -->
## [1.3.0](https://github.com/cortexapps/cli/releases/tag/1.3.0) - 2025-11-05

<small>[Compare with 1.2.0](https://github.com/cortexapps/cli/compare/1.2.0...1.3.0)</small>

### Fixed

- fix: add retry logic for scorecard create to handle active evaluations ([cc40b55](https://github.com/cortexapps/cli/commit/cc40b55ed9ef5af4146360b5a879afc6dc67fe06) by Jeff Schnitter).
- fix: use json.dump instead of Rich print for file writing ([c66c2fe](https://github.com/cortexapps/cli/commit/c66c2fe438cc95f8343fbd4ba3cecae605c435ea) by Jeff Schnitter).
- fix: ensure export/import output is in alphabetical order ([9055f78](https://github.com/cortexapps/cli/commit/9055f78cc4e1136da20e4e42883ff3c0f248825b) by Jeff Schnitter).
- fix: ensure CORTEX_BASE_URL is available in publish workflow ([743579d](https://github.com/cortexapps/cli/commit/743579d760e900da693696df2841e7b710b08d39) by Jeff Schnitter).

## [1.2.0](https://github.com/cortexapps/cli/releases/tag/1.2.0) - 2025-11-04

<small>[Compare with 1.1.0](https://github.com/cortexapps/cli/compare/1.1.0...1.2.0)</small>
2 changes: 2 additions & 0 deletions cortexapps_cli/cli.py
@@ -36,6 +36,7 @@
import cortexapps_cli.commands.rest as rest
import cortexapps_cli.commands.scim as scim
import cortexapps_cli.commands.scorecards as scorecards
import cortexapps_cli.commands.secrets as secrets
import cortexapps_cli.commands.teams as teams
import cortexapps_cli.commands.workflows as workflows

@@ -70,6 +71,7 @@
app.add_typer(rest.app, name="rest")
app.add_typer(scim.app, name="scim")
app.add_typer(scorecards.app, name="scorecards")
app.add_typer(secrets.app, name="secrets")
app.add_typer(teams.app, name="teams")
app.add_typer(workflows.app, name="workflows")

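The two added lines follow the Typer sub-application pattern used throughout cli.py: the secrets module exposes its own `typer.Typer()` app, and `add_typer` mounts it as the `secrets` command group on the root CLI. A self-contained sketch of that pattern (command body hypothetical):

```python
# Minimal sketch of the add_typer wiring used by cli.py (body hypothetical).
import typer

app = typer.Typer()
secrets_app = typer.Typer(help="Secrets commands", no_args_is_help=True)

@secrets_app.command("list")
def list_secrets():
    # Placeholder: the real command calls the Cortex API client.
    typer.echo("listing secrets")

app.add_typer(secrets_app, name="secrets")

if __name__ == "__main__":
    app()  # e.g. `python demo.py secrets list`
```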
105 changes: 105 additions & 0 deletions cortexapps_cli/commands/secrets.py
@@ -0,0 +1,105 @@
import typer
import json
from typing_extensions import Annotated
from cortexapps_cli.utils import print_output_with_context
from cortexapps_cli.command_options import ListCommandOptions

app = typer.Typer(
    help="Secrets commands",
    no_args_is_help=True
)

@app.command()
def list(
    ctx: typer.Context,
    page: ListCommandOptions.page = None,
    page_size: ListCommandOptions.page_size = 250,
    table_output: ListCommandOptions.table_output = False,
    csv_output: ListCommandOptions.csv_output = False,
    columns: ListCommandOptions.columns = [],
    no_headers: ListCommandOptions.no_headers = False,
    filters: ListCommandOptions.filters = [],
    sort: ListCommandOptions.sort = [],
):
    """
    List secrets
    """
    client = ctx.obj["client"]

    params = {
        "page": page,
        "pageSize": page_size
    }

    if (table_output or csv_output) and not ctx.params.get('columns'):
        ctx.params['columns'] = [
            "ID=id",
            "Name=name",
            "Tag=tag",
        ]

    # remove any params that are None
    params = {k: v for k, v in params.items() if v is not None}

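    # Pagination note: with no explicit --page, client.fetch is used, which
    # appears to aggregate all pages; with a --page value, client.get fetches
    # just that page (hedged reading of the shared client API).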
    if page is None:
        r = client.fetch("api/v1/secrets", params=params)
    else:
        r = client.get("api/v1/secrets", params=params)
    print_output_with_context(ctx, r)

@app.command()
def get(
    ctx: typer.Context,
    tag_or_id: str = typer.Option(..., "--tag-or-id", "-t", help="Secret tag or ID"),
):
    """
    Get a secret by tag or ID
    """
    client = ctx.obj["client"]
    r = client.get(f"api/v1/secrets/{tag_or_id}")
    print_output_with_context(ctx, r)

@app.command()
def create(
    ctx: typer.Context,
    file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing secret definition (name, secret, tag); can be passed as stdin with -, example: -f-")] = ...,
):
    """
    Create a secret

    Provide a JSON file with the secret definition including required fields:
    - name: human-readable label for the secret
    - secret: the actual secret value
    - tag: unique identifier for the secret
    """
    client = ctx.obj["client"]
    data = json.loads("".join([line for line in file_input]))
    r = client.post("api/v1/secrets", data=data)
    print_output_with_context(ctx, r)

@app.command()
def update(
    ctx: typer.Context,
    tag_or_id: str = typer.Option(..., "--tag-or-id", "-t", help="Secret tag or ID"),
    file_input: Annotated[typer.FileText, typer.Option("--file", "-f", help="File containing fields to update (name, secret); can be passed as stdin with -, example: -f-")] = ...,
):
    """
    Update a secret

    Provide a JSON file with the fields to update (name and/or secret are optional).
    """
    client = ctx.obj["client"]
    data = json.loads("".join([line for line in file_input]))
    r = client.put(f"api/v1/secrets/{tag_or_id}", data=data)
    print_output_with_context(ctx, r)

@app.command()
def delete(
    ctx: typer.Context,
    tag_or_id: str = typer.Option(..., "--tag-or-id", "-t", help="Secret tag or ID"),
):
    """
    Delete a secret
    """
    client = ctx.obj["client"]
    client.delete(f"api/v1/secrets/{tag_or_id}")
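Each command above wraps a single Secrets endpoint. As a hedged illustration of the underlying HTTP lifecycle, here are the same calls made directly with `requests`; the base URL and bearer-token header are assumptions based on typical Cortex API usage, not something this diff establishes:

```python
# Hedged sketch of the HTTP lifecycle the secrets commands wrap.
# Base URL and bearer auth are assumptions, not taken from this diff.
import os
import requests

base = os.environ.get("CORTEX_BASE_URL", "https://api.getcortexapp.com")
headers = {"Authorization": f"Bearer {os.environ['CORTEX_API_KEY']}"}

# Create (POST api/v1/secrets), mirroring data/run-time/secret-create.json
payload = {"tag": "cli_test_secret", "name": "CLI Test Secret", "secret": "s3cr3t"}
requests.post(f"{base}/api/v1/secrets", json=payload, headers=headers).raise_for_status()

# Get by tag or ID (GET api/v1/secrets/{tag_or_id})
print(requests.get(f"{base}/api/v1/secrets/cli_test_secret", headers=headers).json())

# Update (PUT api/v1/secrets/{tag_or_id})
requests.put(f"{base}/api/v1/secrets/cli_test_secret",
             json={"name": "Updated CLI Test Secret"}, headers=headers)

# Delete (DELETE api/v1/secrets/{tag_or_id})
requests.delete(f"{base}/api/v1/secrets/cli_test_secret", headers=headers)
```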
21 changes: 21 additions & 0 deletions data/import/scorecards/cli-test-evaluation-scorecard.yaml
@@ -0,0 +1,21 @@
tag: cli-test-evaluation-scorecard
name: CLI Test Evaluation Scorecard
description: Used to test Cortex CLI trigger-evaluation command
draft: false
ladder:
  name: Default Ladder
  levels:
    - name: You Made It
      rank: 1
      description: "My boring description"
      color: 7cf376
rules:
  - title: Has Custom Data
    expression: custom("testField") != null
    weight: 1
    level: You Made It
    filter:
      category: SERVICE
filter:
  query: 'entity.tag() == "cli-test-service"'
  category: SERVICE
5 changes: 5 additions & 0 deletions data/run-time/secret-create.json
@@ -0,0 +1,5 @@
{
"tag": "cli_test_secret",
"name": "CLI Test Secret",
"secret": "test-secret-value-12345"
}
4 changes: 4 additions & 0 deletions data/run-time/secret-update.json
@@ -0,0 +1,4 @@
{
"name": "Updated CLI Test Secret",
"secret": "updated-secret-value-67890"
}
File renamed without changes.
53 changes: 21 additions & 32 deletions tests/test_scorecards.py
@@ -4,25 +4,13 @@

# Get rule id to be used in exemption tests.
# TODO: check for and revoke any PENDING exemptions.
-@mock.patch.dict(os.environ, {"CORTEX_API_KEY": os.environ['CORTEX_API_KEY']})
def _get_rule(title):
    response = cli(["scorecards", "get", "-s", "cli-test-scorecard"])
    rule_id = [rule['identifier'] for rule in response['scorecard']['rules'] if rule['title'] == title]
    return rule_id[0]

def test_scorecards():
-    # Retry scorecard create in case there's an active evaluation
-    # (can happen if test_import.py just triggered an evaluation)
-    max_retries = 3
-    for attempt in range(max_retries):
-        try:
-            cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-scorecard.yaml"])
-            break
-        except Exception as e:
-            if "500" in str(e) and attempt < max_retries - 1:
-                time.sleep(2 ** attempt)  # Exponential backoff: 1s, 2s
-                continue
-            raise
+    cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-scorecard.yaml"])

    response = cli(["scorecards", "list"])
    assert any(scorecard['tag'] == 'cli-test-scorecard' for scorecard in response['scorecards']), "Should find scorecard with tag cli-test-scorecard"
@@ -39,33 +27,30 @@ def test_scorecards():
    # cannot rely on a scorecard evaluation being complete, so not performing any validation
    cli(["scorecards", "next-steps", "-s", "cli-test-scorecard", "-t", "cli-test-service"])

-    # Test trigger-evaluation command (accepts both success and 409 Already evaluating)
-    response = cli(["scorecards", "trigger-evaluation", "-s", "cli-test-scorecard", "-e", "cli-test-service"], return_type=ReturnType.STDOUT)
-    assert ("Scorecard evaluation triggered successfully" in response or "Already evaluating scorecard" in response), \
-        "Should receive success message or 409 Already evaluating error"

    # cannot rely on a scorecard evaluation being complete, so not performing any validation
    #response = cli(["scorecards", "scores", "-s", "cli-test-scorecard", "-t", "cli-test-service"])
    #assert response['scorecardTag'] == "cli-test-scorecard", "Should get valid response that include cli-test-scorecard"

    # # Not sure if we can run this cli right away. Newly-created Scorecard might not be evaluated yet.
    # # 2024-05-06, additionally now blocked by CET-8882
    # # cli(["scorecards", "scores", "-t", "cli-test-scorecard", "-e", "cli-test-service"])
    #
    # cli(["scorecards", "scores", "-t", "cli-test-scorecard"])


+def test_scorecard_trigger_evaluation():
+    # Create a dedicated scorecard for trigger-evaluation testing to avoid conflicts with import
+    cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-evaluation-scorecard.yaml"])
+
+    # Test trigger-evaluation command (accepts both success and 409 Already evaluating)
+    response = cli(["scorecards", "trigger-evaluation", "-s", "cli-test-evaluation-scorecard", "-e", "cli-test-service"], return_type=ReturnType.STDOUT)
+    assert ("Scorecard evaluation triggered successfully" in response or "Already evaluating scorecard" in response), \
+        "Should receive success message or 409 Already evaluating error"
+
+    # Clean up
+    cli(["scorecards", "delete", "-s", "cli-test-evaluation-scorecard"])

def test_scorecards_drafts():
-    # Retry scorecard create in case there's an active evaluation
-    max_retries = 3
-    for attempt in range(max_retries):
-        try:
-            cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-draft-scorecard.yaml"])
-            break
-        except Exception as e:
-            if "500" in str(e) and attempt < max_retries - 1:
-                time.sleep(2 ** attempt)  # Exponential backoff: 1s, 2s
-                continue
-            raise
+    cli(["scorecards", "create", "-f", "data/import/scorecards/cli-test-draft-scorecard.yaml"])

    response = cli(["scorecards", "list", "-s"])
    assert any(scorecard['tag'] == 'cli-test-draft-scorecard' for scorecard in response['scorecards'])
@@ -80,7 +65,10 @@ def test_scorecards_drafts():
# testing assumes no tenanted data, so this condition needs to be created as part of the test
#
# - there is no public API to force evaluation of a scorecard; can look into possibility of using
-#   an internal endpoint for this
+#   an internal endpoint for this
+#
+# Nov 2025 - there is a public API to force evaluation of a scorecard for an entity, but there is
+# not a way to determine when the evaluation completes.
#
# - could create a scorecard as part of the test and wait for it to complete, but completion time for
#   evaluating a scorecard is non-deterministic and, as experienced with query API tests, completion
@@ -96,6 +84,7 @@
# So this is how we'll roll for now . . .
# - Automated tests currently run in known tenants that have the 'cli-test-scorecard' in an evaluated state.
# - So we can semi-reliably count on an evaluated scorecard to exist.
+# - However, we should be cleaning up test data after tests run, which would invalidate these assumptions.

@pytest.fixture(scope='session')
@mock.patch.dict(os.environ, {"CORTEX_API_KEY": os.environ['CORTEX_API_KEY_VIEWER']})
42 changes: 42 additions & 0 deletions tests/test_secrets.py
@@ -0,0 +1,42 @@
from tests.helpers.utils import *
import pytest

def test():
    # Skip test if API key doesn't have secrets permissions
    # The Secrets API may require special permissions or may not be available in all environments
    try:
        # Try to list secrets first to check if we have permission
        response = cli(["secrets", "list"], return_type=ReturnType.RAW)
        if response.exit_code != 0 and "403" in response.stdout:
            pytest.skip("API key does not have permission to access Secrets API")
    except Exception as e:
        if "403" in str(e) or "Forbidden" in str(e):
            pytest.skip("API key does not have permission to access Secrets API")

    # Create a secret
    response = cli(["secrets", "create", "-f", "data/run-time/secret-create.json"])
    assert response['tag'] == 'cli_test_secret', "Should create secret with tag cli_test_secret"
    assert response['name'] == 'CLI Test Secret', "Should have correct name"

    # List secrets and verify it exists
    response = cli(["secrets", "list"])
    assert any(secret['tag'] == 'cli_test_secret' for secret in response['secrets']), "Should find secret with tag cli_test_secret"

    # Get the secret
    response = cli(["secrets", "get", "-t", "cli_test_secret"])
    assert response['tag'] == 'cli_test_secret', "Should get secret with correct tag"
    assert response['name'] == 'CLI Test Secret', "Should have correct name"

    # Update the secret
    cli(["secrets", "update", "-t", "cli_test_secret", "-f", "data/run-time/secret-update.json"])

    # Verify the update
    response = cli(["secrets", "get", "-t", "cli_test_secret"])
    assert response['name'] == 'Updated CLI Test Secret', "Should have updated name"

    # Delete the secret
    cli(["secrets", "delete", "-t", "cli_test_secret"])

    # Verify deletion by checking list
    response = cli(["secrets", "list"])
    assert not any(secret['tag'] == 'cli_test_secret' for secret in response['secrets']), "Should not find deleted secret"