31 changes: 24 additions & 7 deletions .github/actions/test_ya/action.yml
@@ -252,13 +252,18 @@ runs:
echo "::debug::get version"
./ya --version

export YA_MAKE_COMMAND="./ya make ${params[@]}"
if [ "${{ inputs.increment }}" = "true" ]; then
GRAPH_COMPARE_OUTPUT="$PUBLIC_DIR/graph_compare_log.txt"
GRAPH_COMPARE_OUTPUT_URL="$PUBLIC_DIR_URL/graph_compare_log.txt"

set +e
./.github/scripts/graph_compare.py $ORIGINAL_HEAD~1 $ORIGINAL_HEAD |& tee $GRAPH_COMPARE_OUTPUT
YA_MAKE_COMMAND="./ya make ${params[@]}"
if [ "${{ inputs.run_tests }}" = "true" ]; then
YA_MAKE_COMMAND="$YA_MAKE_COMMAND -A"
fi
GRAPH_PATH=$(realpath graph.json)
CONTEXT_PATH=$(realpath context.json)
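# graph_compare.py builds the graph for HEAD~1 and HEAD and writes only the affected nodes (and the matching context) to GRAPH_PATH / CONTEXT_PATH.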
./.github/scripts/graph_compare.py --ya-make-command="$YA_MAKE_COMMAND" --result-graph-path=$GRAPH_PATH --result-context-path=$CONTEXT_PATH $ORIGINAL_HEAD~1 $ORIGINAL_HEAD |& tee $GRAPH_COMPARE_OUTPUT
RC=${PIPESTATUS[0]}
set -e

@@ -271,7 +276,11 @@ runs:
fi

git checkout $ORIGINAL_HEAD
YA_MAKE_TARGET=.
params+=(
--build-custom-json=$GRAPH_PATH
--custom-context=$CONTEXT_PATH
)
YA_MAKE_TARGET="ydb"
else
YA_MAKE_TARGET=""
for TARGET in ${{ inputs.build_target }}; do
@@ -297,8 +306,6 @@ runs:
params+=(--retest)
fi

export YA_MAKE_COMMAND="./ya make ${params[@]}"

YA_MAKE_OUT_DIR=$TMP_DIR/out

YA_MAKE_OUTPUT="$PUBLIC_DIR/ya_make_output.txt"
@@ -347,13 +354,16 @@ runs:
RERUN_FAILED_OPT=""
else
CURRENT_MESSAGE="$CURRENT_MESSAGE (failed tests rerun, try $RETRY)"
RERUN_FAILED_OPT="-X"
if [ -z "$GRAPH_PATH" ]; then
RERUN_FAILED_OPT="-X"
fi
fi

echo $CURRENT_MESSAGE | GITHUB_TOKEN="${{ github.token }}" .github/scripts/tests/comment-pr.py

CURRENT_JUNIT_XML_PATH=$CURRENT_PUBLIC_DIR/junit.xml
CURRENT_REPORT=$CURRENT_PUBLIC_DIR/report.json
CURRENT_JSONL_REPORT=$CURRENT_PUBLIC_DIR/report.jsonl

monitor_memory() {
set +x
@@ -369,9 +379,10 @@ runs:
MONITOR_PID=$!

set +e
($YA_MAKE_COMMAND $YA_MAKE_TARGET \
(./ya make ${params[@]} $YA_MAKE_TARGET \
$RERUN_FAILED_OPT --log-file "$PUBLIC_DIR/ya_log.txt" \
--evlog-file "$CURRENT_PUBLIC_DIR/ya_evlog.jsonl" \
--jsonl-report "$CURRENT_JSONL_REPORT" \
--junit "$CURRENT_JUNIT_XML_PATH" --build-results-report "$CURRENT_REPORT" --output "$YA_MAKE_OUT_DIR"; echo $? > exit_code) |& cat >> $YA_MAKE_OUTPUT
set -e
RC=`cat exit_code`
@@ -489,6 +500,12 @@ runs:
if [ $IS_LAST_RETRY = 1 ]; then
break
fi
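# Between retries in incremental mode, prune the graph and context down to the tests that failed in this run (excluding muted ones).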
if [ -n "$GRAPH_PATH" ] && [ -n "$CONTEXT_PATH" ]; then
.github/scripts/graph_patch.py \
--in-graph="$GRAPH_PATH" --in-context="$CONTEXT_PATH" \
--out-graph="$GRAPH_PATH" --out-context="$CONTEXT_PATH" \
--report="$CURRENT_JSONL_REPORT" --muted=.github/config/muted_ya.txt
fi
done;

if [ $BUILD_FAILED = 0 ]; then
76 changes: 19 additions & 57 deletions .github/scripts/graph_compare.py
@@ -6,8 +6,7 @@

import os
import tempfile
import sys
import json
import argparse


def exec(command: str):
@@ -22,77 +21,40 @@ def log(msg: str):
print(msg)


def do_compare():
if len(sys.argv) < 3:
print('base or head commit not set')
exit(1)
base_commit = sys.argv[1]
head_commit = sys.argv[2]

ya_make_command = os.getenv('YA_MAKE_COMMAND')
if not ya_make_command:
print('YA_MAKE_COMMAND not set')
def do_compare(opts):
if not opts.ya_make_command:
print('--ya-make-command not set')
exit(1)
ya = opts.ya_make_command.split(' ')[0]

workdir = os.getenv('workdir')
if not workdir:
workdir = tempfile.mkdtemp()

log(f'Workdir: {workdir}')
log('Checkout base commit...')
exec(f'git checkout {base_commit}')
exec(f'git checkout {opts.base_commit}')
log('Build graph for base commit...')
exec(f'{ya_make_command} ydb -k -A --cache-tests -Gj0 > {workdir}/graph_base.json')
exec(f'{opts.ya_make_command} ydb -k --cache-tests --save-graph-to {workdir}/graph_base.json --save-context-to {workdir}/context_base.json')

log('Checkout head commit...')
exec(f'git checkout {head_commit}')
exec(f'git checkout {opts.head_commit}')
log('Build graph for head commit...')
exec(f'{ya_make_command} ydb -k -A --cache-tests -Gj0 > {workdir}/graph_head.json')
exec(f'{opts.ya_make_command} ydb -k --cache-tests --save-graph-to {workdir}/graph_head.json --save-context-to {workdir}/context_head.json')

log('Generate diff graph...')
exec(f'./ya tool ygdiff --old {workdir}/graph_base.json --new {workdir}/graph_head.json --cut {workdir}/graph_diff.json --dump-uids-for-affected-nodes {workdir}/affected_uids.json')

log('Read diff graph...')
with open(f'{workdir}/graph_diff.json', 'r') as f:
diff_graph = json.load(f)

with open(f'{workdir}/affected_uids.json', 'r') as f:
uids = set(json.load(f))

tests = set()
modules = set()

log('Scan diff graph...')
for target in diff_graph.get('graph', []):
if target.get('uid') not in uids:
continue
if target.get('node-type') == 'test':
path = target.get('kv', {}).get('path')
if path is not None:
tests.add(os.path.dirname(path))
tp = target.get('target_properties')
if (
tp is not None
and tp.get('module_type') is not None
and tp.get('module_dir', '').startswith('ydb')
and tp.get('module_tag', '').find('proto') < 0
):
modules.add(tp.get('module_dir'))

log('Create ya.make')
exec(f'{ya} tool ygdiff --old {workdir}/graph_base.json --new {workdir}/graph_head.json --cut {opts.graph_path} --dump-uids {workdir}/uids.json')

with open('ya.make', 'w') as ya_make:
ya_make.write('RECURSE_FOR_TESTS(\n')
for test in sorted(tests):
ya_make.write(f' {test}\n')
ya_make.write(')\n\nRECURSE (\n')
for module in sorted(modules):
ya_make.write(f' {module}\n')
ya_make.write(')\n')
log('ya.make content:')
exec('cat ya.make')
log('Generate diff context...')
exec(f'{ya} tool context_difference {workdir}/context_base.json {workdir}/context_head.json {opts.context_path} {workdir}/uids.json {opts.graph_path}')
exit(0)


if __name__ == '__main__':
do_compare()
parser = argparse.ArgumentParser()
parser.add_argument('--result-graph-path', '-g', type=str, help='Path to result graph', dest='graph_path', required=True)
parser.add_argument('--result-context-path', '-c', type=str, help='Path to result context', dest='context_path', required=True)
parser.add_argument('--ya-make-command', '-y', type=str, help='Ya make command', dest='ya_make_command', required=True)
parser.add_argument(dest='base_commit', help='Base commit')
parser.add_argument(dest='head_commit', help='Head commit')
do_compare(parser.parse_args())
160 changes: 160 additions & 0 deletions .github/scripts/graph_patch.py
@@ -0,0 +1,160 @@
#! /usr/bin/python3 -u

from __future__ import annotations
import argparse
import json
import re
import pickle
import six


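# Compiles the wildcard patterns from the muted-tests file into regexes; calling an instance with a test's full name reports whether it is muted.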
class MuteTestCheck:
def __pattern_to_re(self, pattern):
res = []
for c in pattern:
if c == '*':
res.append('.*')
else:
res.append(re.escape(c))

return f"(?:^{''.join(res)}$)"

def __init__(self, fn):
self.regexps = []

with open(fn, 'r') as fp:
for line in fp:
line = line.strip()
pattern = self.__pattern_to_re(line)

try:
self.regexps.append(re.compile(pattern))
except re.error:
print(f"Unable to compile regex {pattern!r}")
raise

def __call__(self, fullname):
for r in self.regexps:
if r.match(fullname):
return True
return False

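# Hook for rewriting uids of rerun nodes; currently an identity function, the '-I' suffix variant is left commented out.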
def _patch_uid(uid: str) -> str:
return uid # f'{uid}-I'

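# Collects uids of tests that did not pass in the jsonl report, skipping suite-level records and, when a muted list is given, muted tests.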
def get_failed_uids(opts) -> set[str]:
print('Load failed uids..')
result = set()
mute_check = MuteTestCheck(opts.muted) if opts.muted else None
with open(opts.report) as report_file:
for line in report_file.readlines():
record = json.loads(line).get('data', {})
if record.get('status', 'OK') == 'OK' or record.get('suite', False):
continue
if mute_check is not None:
test_name = f'{record.get("path", "")} {record.get("name", "")}.{record.get("subtest_name", "")}'
if mute_check(test_name):
continue
uid = record.get('uid')
if uid:
result.add(uid)
print(f'{len(result)} uids loaded')
return result


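# Keeps only the result uids present in uids_filter, drops graph nodes unreachable from them, and de-duplicates conf resources.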
def _strip_graph(graph: dict, uids_filter: set[str]) -> dict:
result = {uid for uid in graph['result'] if uid in uids_filter}
nodes = _strip_unused_nodes(graph['graph'], result)

conf = graph.get('conf', {}).copy()
conf['resources'] = _filter_duplicate_resources(conf.get('resources', []))

return {'conf': conf, 'inputs': graph.get('inputs', {}), 'result': [_patch_uid(uid) for uid in result], 'graph': nodes}


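# Walks the dependency closure of each kept result uid and returns only the nodes reached; the result nodes themselves get their uid passed through _patch_uid.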
def _strip_unused_nodes(graph_nodes: list, result: set[str]) -> list[dict]:
by_uid = {n['uid']: n for n in graph_nodes}

def visit(uid):
if uid in by_uid:
node = by_uid.pop(uid)
yield node
for dep in node['deps']:
yield from visit(dep)

result_nodes: list[dict] = []
for uid in result:
for node in visit(uid):
if node['uid'] == uid:
node['uid'] = _patch_uid(uid)
result_nodes.append(node)

return result_nodes


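# Drops resources with a repeated 'pattern', keeping the first occurrence.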
def _filter_duplicate_resources(resources):
v = set()
result = []
for x in resources:
if x['pattern'] not in v:
v.add(x['pattern'])
result.append(x)
return result


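# Loads the input graph, strips it down to the failed uids and writes the result to --out-graph.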
def process_graph(opts, uids_filter: set[str]) -> None:
print('Load graph...')
with open(opts.in_graph) as f:
in_graph = json.load(f)
print('Strip graph...')
out_graph = _strip_graph(in_graph, uids_filter)
print('Save graph...')
with open(opts.out_graph, 'w') as f:
json.dump(out_graph, f, indent=2)
print('Process graph...OK')


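# Applies the same stripping to the context: the 'tests' and 'graph' sections are filtered by the failed uids, everything else is copied as is.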
def process_context(opts, uids_filter: set[str]) -> None:
print('Load context...')
with open(opts.in_context) as f:
in_context = json.load(f)
out_context = {}
print('Strip context...')
for k, v in in_context.items():
if k == 'tests':
new_tests = {}
for uid in v.keys():
if uid in uids_filter:
patched_uid = _patch_uid(uid)
# replaces = {uid: patched_uid}
# test = pickle.loads(six.ensure_binary(v[uid], encoding='latin-1'), encoding='utf-8')
# test.uid = patched_uid
# test._result_uids = [replaces.get(dep_uid, dep_uid) for dep_uid in test._result_uids]
# test._output_uids = [replaces.get(dep_uid, dep_uid) for dep_uid in test._output_uids]
# test.dep_uids = [replaces.get(dep_uid, dep_uid) for dep_uid in test.dep_uids]
# test.change_build_dep_uids({uid: patched_uid})
# new_tests[patched_uid] = six.ensure_str(pickle.dumps(test), encoding='latin-1')
new_tests[patched_uid] = v[uid]
out_context[k] = new_tests
elif k == 'graph':
out_context[k] = _strip_graph(v, uids_filter)
else:
out_context[k] = v
print('Save context...')
with open(opts.out_context, 'w') as f:
json.dump(out_context, f, indent=2)
print('Process context...OK')


if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--in-graph', '-G', type=str, help='Path to input graph', dest='in_graph', required=True)
parser.add_argument('--in-context', '-C', type=str, help='Path to input context', dest='in_context', required=True)
parser.add_argument('--out-graph', '-g', type=str, help='Path to result graph', dest='out_graph', required=True)
parser.add_argument('--out-context', '-c', type=str, help='Path to result context', dest='out_context', required=True)
parser.add_argument('--report', '-r', type=str, help='Path to jsonl report', dest='report', required=True)
parser.add_argument('--muted', '-m', type=str, help='Path to muted tests', dest='muted')
opts = parser.parse_args()
uids = get_failed_uids(opts)
process_graph(opts, uids)
process_context(opts, uids)
2 changes: 1 addition & 1 deletion .github/workflows/pr_check.yml
@@ -252,7 +252,7 @@ jobs:
run_tests: ${{ contains(fromJSON('["relwithdebinfo", "release-asan", "release-tsan", "release-msan"]'), matrix.build_preset) }}
test_size: ${{ matrix.test_size }}
test_threads: ${{ matrix.threads_count }}
put_build_results_to_cache: true
put_build_results_to_cache: false
additional_ya_make_args: -DDEBUGINFO_LINES_ONLY # we don't need full symbols in CI checks
secs: ${{ format('{{"AWS_KEY_ID":"{0}","AWS_KEY_VALUE":"{1}","REMOTE_CACHE_USERNAME":"{2}","REMOTE_CACHE_PASSWORD":"{3}","TELEGRAM_YDBOT_TOKEN":"{4}"}}',
secrets.AWS_KEY_ID, secrets.AWS_KEY_VALUE, secrets.REMOTE_CACHE_USERNAME, secrets.REMOTE_CACHE_PASSWORD, secrets.TELEGRAM_YDBOT_TOKEN ) }}