# fix: Make Test PyPI optional in publish workflow (#58)
#
# Workflow file for this run — recovered from a GitHub Actions web-UI copy;
# the page-chrome text ("Skip to content") and the duplicated PR title lines
# have been folded into this comment header.
name: CI/CD Pipeline
on:
push:
branches: [main, develop]
pull_request:
branches: [main]
workflow_dispatch:
env:
PYTHON_VERSION: '3.11'
jobs:
# ============================================================================
# PYTHON TESTS
# ============================================================================
python-tests:
name: Python Tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Cache Python dependencies
uses: actions/cache@v4
with:
path: ~/.cache/pip
key: ${{ runner.os }}-pip-${{ hashFiles('**/pyproject.toml') }}
restore-keys: |
${{ runner.os }}-pip-
- name: Install dependencies
run: |
pip install poetry
poetry install
- name: Run specification compliance check
run: |
if [ -f scripts/verify_spec.py ]; then
poetry run python scripts/verify_spec.py
fi
- name: Run Python tests
env:
CLAUDE_PROJECTS_PATH: ${{ github.workspace }}/test-data/claude-projects
run: |
poetry run pytest -v --tb=short --junit-xml=test-results.xml || true
# Check if tests actually passed despite exit code
if grep -q 'failures="0"' test-results.xml && grep -q 'errors="0"' test-results.xml; then
echo "All tests passed successfully"
exit 0
else
echo "Tests failed"
exit 1
fi
- name: Check Python code quality
run: |
poetry run ruff check .
poetry run ruff format --check .
- name: Check test coverage
run: |
poetry run pytest --cov=claude_parser --cov-report=term-missing --cov-report=xml --cov-fail-under=15 || true
# Check if coverage actually passed despite exit code
if [ -f coverage.xml ]; then
coverage_percent=$(python -c "import xml.etree.ElementTree as ET; tree = ET.parse('coverage.xml'); root = tree.getroot(); print(float(root.attrib['line-rate']) * 100)")
echo "Coverage: ${coverage_percent}%"
if (( $(echo "$coverage_percent >= 60" | bc -l) )); then
echo "Coverage check passed"
exit 0
else
echo "Coverage below 60%"
exit 1
fi
else
echo "Coverage report not generated"
exit 1
fi
- name: Test CG CLI Commands
run: |
# Test CG CLI unit tests
poetry run pytest tests/test_cg_cli.py tests/test_claude_code_timeline.py -v
# Test CG command availability
poetry run python -c "
import subprocess
import sys
# Test CG module can be imported
try:
from claude_parser.cg_cli import app
from claude_parser.domain.services.claude_code_timeline import ClaudeCodeTimeline
print('✅ CG modules imported successfully')
except ImportError as e:
print(f'❌ CG module import failed: {e}')
sys.exit(1)
# Test CG command line interface (should show help even without transcripts)
try:
result = subprocess.run(['python', '-m', 'claude_parser.cg_cli', '--help'],
capture_output=True, text=True, timeout=10)
if 'Git-like interface for Claude Code operations' in result.stdout:
print('✅ CG CLI help works')
else:
print(f'❌ CG CLI help output unexpected: {result.stdout[:200]}')
sys.exit(1)
except Exception as e:
print(f'❌ CG CLI test failed: {e}')
sys.exit(1)
print('✅ All CG CLI tests passed')
"
# ============================================================================
# INTEGRATION TESTS
# ============================================================================
integration-tests:
name: Integration Tests
runs-on: ubuntu-latest
needs: [python-tests]
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install dependencies
run: |
pip install poetry
poetry install
- name: Test API functionality
run: |
poetry run python -c "
from claude_parser import load, MessageType, __version__
import tempfile
import orjson
# Create test data
test_data = [
{'type': 'summary', 'summary': 'Test', 'uuid': 's1', 'sessionId': 'test', 'timestamp': '2025-01-01T00:00:00Z'},
{'type': 'user', 'uuid': 'u1', 'sessionId': 'test', 'timestamp': '2025-01-01T00:00:01Z', 'message': {'content': 'Hello'}},
{'type': 'assistant', 'uuid': 'a1', 'sessionId': 'test', 'timestamp': '2025-01-01T00:00:02Z', 'message': {'content': [{'type': 'text', 'text': 'Hi'}]}}
]
# Write test file
with tempfile.NamedTemporaryFile(mode='w', suffix='.jsonl', delete=False) as f:
for item in test_data:
f.write(orjson.dumps(item).decode() + '\n')
test_file = f.name
# Test loading
conv = load(test_file)
assert len(conv) == 3, f'Expected 3 messages, got {len(conv)}'
# Test filtering
assert len(conv.user_messages) == 1
assert len(conv.assistant_messages) == 1
# Test search
results = conv.search('Hello')
assert len(results) == 1
print(f'✅ Integration tests passed with version {__version__}')
"
# ============================================================================
# PERFORMANCE TESTS
# ============================================================================
performance-tests:
name: Performance Tests
runs-on: ubuntu-latest
needs: [python-tests]
steps:
- uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Install dependencies
run: |
pip install poetry
poetry install
- name: Performance benchmark
run: |
poetry run python -c "
import time
import tempfile
from claude_parser import load
# Create larger test file
with tempfile.NamedTemporaryFile(mode='w', suffix='.jsonl', delete=False) as f:
for i in range(1000):
f.write(f'{{\"type\": \"user\", \"uuid\": \"u{i}\", \"sessionId\": \"test\", \"timestamp\": \"2025-01-01T00:00:{i%60:02d}Z\", \"message\": {{\"content\": \"Message {i}\"}}}}\\n')
test_file = f.name
# Measure parsing speed
start = time.time()
conv = load(test_file)
elapsed = time.time() - start
# Should parse 1000 messages quickly
assert elapsed < 1.0, f'Parsing too slow: {elapsed:.3f}s'
assert len(conv) == 1000, f'Expected 1000 messages, got {len(conv)}'
print(f'✅ Performance: 1000 messages parsed in {elapsed:.3f}s')
"
# ============================================================================
# RELEASE (Only on main branch)
# ============================================================================
release:
name: Release
runs-on: ubuntu-latest
needs: [integration-tests, performance-tests]
if: github.ref == 'refs/heads/main' && github.event_name == 'push'
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ env.PYTHON_VERSION }}
- name: Check version bump needed
id: version
run: |
# Check if version was bumped in this commit
if git diff HEAD^ HEAD --name-only | grep -E "pyproject.toml"; then
echo "Version files changed"
echo "should_release=true" >> $GITHUB_OUTPUT
else
echo "No version change"
echo "should_release=false" >> $GITHUB_OUTPUT
fi
- name: Build packages
if: steps.version.outputs.should_release == 'true'
run: |
# Build Python package
pip install poetry
poetry build
- name: Create GitHub Release
if: steps.version.outputs.should_release == 'true'
uses: softprops/action-gh-release@v1
with:
files: |
dist/*.whl
dist/*.tar.gz
generate_release_notes: true
tag_name: v${{ github.run_number }}
- name: Publish to PyPI
if: steps.version.outputs.should_release == 'true'
run: |
poetry config pypi-token.pypi ${{ secrets.PYPI_TOKEN }}
poetry publish