# Command Testing

## Implementation Reference

**Source Files:**
- `apps/xec/test/commands/*.test.ts` - Command test files
- `packages/test-utils/src/config.ts` - Test configuration utilities
- `packages/test-utils/src/mocks.ts` - Mock objects and helpers
- `apps/xec/src/commands/base-command.ts` - Base command for testing

**Test Utilities:**
- `createTestConfig()` - Creates test configurations
- `createMockTarget()` - Creates mock targets
- `createTestCommand()` - Creates test command instances
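Of these helpers, `createTestCommand()` is the only one not demonstrated later in this section. A hypothetical sketch of how it might be called, assuming it packages the constructor arguments shown under Test Structure below; check `packages/test-utils/src/mocks.ts` for the actual signature:

```typescript
import { createTestCommand, createTestConfig } from '@xec-sh/test-utils';
import { MyCommand } from '../../src/commands/my-command';

// Hypothetical call shape, not the verified API:
const command = createTestCommand(MyCommand, {
  config: createTestConfig(),
  configPath: '/test/config.yaml',
  cwd: '/test/cwd',
});
```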
## Testing Architecture

### Test Structure

Command tests follow a consistent structure:
```typescript
// apps/xec/test/commands/my-command.test.ts
import { describe, it, expect, beforeEach, vi } from 'vitest';
import { createTestConfig } from '@xec-sh/test-utils';

import { MyCommand } from '../../src/commands/my-command';
import type { Config } from '../../src/config/types'; // path is illustrative; import your Config type

describe('MyCommand', () => {
  let command: MyCommand;
  let config: Config;

  beforeEach(() => {
    config = createTestConfig({
      // Custom config for tests
    });

    command = new MyCommand(
      config,
      '/test/config.yaml',
      false, // verbose
      false, // dryRun
      false, // quiet
      '/test/cwd'
    );
  });

  describe('execute', () => {
    it('should execute successfully with valid arguments', async () => {
      const args = ['arg1', 'arg2'];
      const flags = { flag1: true };

      await command.execute(args, flags);

      // Assertions
    });
  });
});
```
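The constructor takes three positional booleans, which are easy to mix up at call sites. A small file-local factory (a sketch, not part of the codebase) keeps each test self-documenting:

```typescript
// Hypothetical helper for this test file only: names the positional booleans.
interface TestFlags {
  verbose?: boolean;
  dryRun?: boolean;
  quiet?: boolean;
}

function buildCommand(config: Config, flags: TestFlags = {}): MyCommand {
  return new MyCommand(
    config,
    '/test/config.yaml',
    flags.verbose ?? false,
    flags.dryRun ?? false,
    flags.quiet ?? false,
    '/test/cwd'
  );
}

// e.g. const command = buildCommand(config, { dryRun: true });
```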
## Unit Testing

### Testing Command Logic

Test core command logic in isolation:
```typescript
describe('command logic', () => {
  it('should parse targets correctly', () => {
    const command = new TestCommand(config);

    const result = command.parseTargets(['user@host', 'docker:container']);

    expect(result.targets).toHaveLength(2);
    expect(result.targets[0].type).toBe('ssh');
    expect(result.targets[1].type).toBe('docker');
  });

  it('should validate arguments', async () => {
    const command = new TestCommand(config);

    await expect(
      command.execute([], {})
    ).rejects.toThrow(ValidationError);
  });
});
```
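When the parser handles several target syntaxes, Vitest's `it.each` keeps the cases in a single table. A sketch reusing only the two mappings shown above:

```typescript
it.each([
  ['user@host', 'ssh'],
  ['docker:container', 'docker'],
])('should classify %s as a %s target', (spec, expectedType) => {
  const command = new TestCommand(config);

  const result = command.parseTargets([spec]);

  expect(result.targets[0].type).toBe(expectedType);
});
```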
### Mocking Dependencies

Mock external dependencies:
```typescript
import { vi } from 'vitest';
import { $ } from '@xec-sh/core';

// Mock the execution engine. $ must expose .ssh/.docker as properties
// (not via its return value) for the assertions below to see them.
vi.mock('@xec-sh/core', () => ({
  $: Object.assign(vi.fn(), {
    ssh: vi.fn(() => ({
      execute: vi.fn().mockResolvedValue({ stdout: 'output' })
    })),
    docker: vi.fn(() => ({
      execute: vi.fn().mockResolvedValue({ stdout: 'output' })
    }))
  })
}));

describe('with mocked execution', () => {
  it('should call execution engine correctly', async () => {
    const command = new OnCommand(config);

    await command.execute(['user@host', 'ls'], {});

    expect($.ssh).toHaveBeenCalledWith(expect.objectContaining({
      host: 'host',
      user: 'user'
    }));
  });
});
```
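Module mocks keep their recorded calls across tests in the same file, so clear them between tests to keep assertions independent:

```typescript
import { beforeEach, vi } from 'vitest';

// Reset call history (implementations are kept) before every test.
beforeEach(() => {
  vi.clearAllMocks();
});
```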
## Integration Testing

### Testing with Real Execution

Use test containers for integration tests:
```typescript
import { SSHTestContainer } from '@xec-sh/test-utils';

describe('integration', () => {
  let container: SSHTestContainer;

  beforeAll(async () => {
    container = new SSHTestContainer();
    await container.start();
  });

  afterAll(async () => {
    await container.stop();
  });

  it('should execute on SSH target', async () => {
    const config = createTestConfig({
      targets: {
        test: {
          type: 'ssh',
          host: container.host,
          port: container.port,
          user: 'test',
          password: 'test'
        }
      }
    });

    const command = new OnCommand(config);
    await command.execute(['test', 'echo', 'hello'], {});

    // Verify output
  });
});
```
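The `// Verify output` step can be made concrete by wrapping the execution in `captureOutput`, described under Output Capture below:

```typescript
const output = await captureOutput(async () => {
  await command.execute(['test', 'echo', 'hello'], {});
});

expect(output.stdout).toContain('hello');
```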
### Testing with Docker
```typescript
import { DockerTestContainer } from '@xec-sh/test-utils';

describe('docker integration', () => {
  let container: DockerTestContainer;

  beforeAll(async () => {
    container = new DockerTestContainer('alpine:latest');
    await container.start();
  });

  afterAll(async () => {
    await container.stop();
  });

  it('should execute in container', async () => {
    const command = new InCommand(config);
    await command.execute([container.id, 'ls', '/'], {});

    // Verify output
  });
});
```
## Test Utilities

### Configuration Helpers

Create test configurations:
```typescript
import { createTestConfig } from '@xec-sh/test-utils';

const config = createTestConfig({
  targets: {
    local: { type: 'local' },
    ssh1: { type: 'ssh', host: 'host1' },
    docker1: { type: 'docker', container: 'container1' }
  },
  tasks: {
    test: {
      command: 'echo test',
      targets: ['local']
    }
  },
  defaults: {
    shell: '/bin/bash',
    timeout: 30000
  }
});
```
### Mock Targets

Create mock targets:
```typescript
import { createMockTarget } from '@xec-sh/test-utils';

const sshTarget = createMockTarget('ssh', {
  host: 'test.example.com',
  user: 'testuser',
  port: 22
});

const dockerTarget = createMockTarget('docker', {
  container: 'test-container',
  image: 'alpine:latest'
});
```
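Mock targets are plain data, so (assuming they match the shape of config target entries) they can be dropped straight into a test configuration:

```typescript
const config = createTestConfig({
  targets: {
    web: sshTarget,
    app: dockerTarget
  }
});
```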
### Output Capture

Capture command output:
```typescript
import { captureOutput } from '@xec-sh/test-utils';

it('should output correct message', async () => {
  const output = await captureOutput(async () => {
    await command.execute(['arg'], {});
  });

  expect(output.stdout).toContain('Expected message');
  expect(output.stderr).toBe('');
});
```
## Error Testing

### Testing Error Conditions

Test various error scenarios:
```typescript
describe('error handling', () => {
  it('should handle validation errors', async () => {
    const command = new MyCommand(config);

    await expect(
      command.execute(['invalid'], {})
    ).rejects.toThrow(ValidationError);
  });

  it('should handle connection errors', async () => {
    const config = createTestConfig({
      targets: {
        unreachable: {
          type: 'ssh',
          host: 'unreachable.invalid'
        }
      }
    });

    const command = new OnCommand(config);

    await expect(
      command.execute(['unreachable', 'ls'], {})
    ).rejects.toThrow(ConnectionError);
  });

  it('should handle timeout errors', async () => {
    const command = new MyCommand(config);

    await expect(
      command.execute(['long-running'], { timeout: 1 })
    ).rejects.toThrow(TimeoutError);
  });
});
```
### Exit Code Verification

Verify correct exit codes:
```typescript
import { getExitCode } from '@xec-sh/test-utils';

it('should exit with correct code', async () => {
  const exitCode = await getExitCode(async () => {
    await command.execute(['bad-arg'], {});
  });

  expect(exitCode).toBe(1); // ValidationError
});
```
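If you need this pattern outside the test-utils package, a plausible equivalent of `getExitCode` looks like the sketch below. This is an assumption about how command errors carry exit codes, not the library implementation:

```typescript
// Sketch: run the callback and translate a thrown error into an exit code.
async function getExitCodeSketch(fn: () => Promise<void>): Promise<number> {
  try {
    await fn();
    return 0;
  } catch (err) {
    // Assumes command errors expose an exitCode property; defaults to 1.
    return (err as { exitCode?: number }).exitCode ?? 1;
  }
}
```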
## Flag Testing

### Testing Command Flags

Test flag handling:
```typescript
describe('flags', () => {
  it('should handle verbose flag', async () => {
    const command = new MyCommand(config, '/config', true); // verbose

    const output = await captureOutput(async () => {
      await command.execute(['arg'], {});
    });

    expect(output.stdout).toContain('[DEBUG]');
  });

  it('should handle dry-run flag', async () => {
    const command = new MyCommand(config, '/config', false, true); // dryRun

    const spy = vi.spyOn(console, 'log');
    await command.execute(['arg'], {});

    expect(spy).toHaveBeenCalledWith(expect.stringContaining('[DRY-RUN]'));
  });

  it('should handle quiet flag', async () => {
    const command = new MyCommand(config, '/config', false, false, true); // quiet

    const output = await captureOutput(async () => {
      await command.execute(['arg'], {});
    });

    expect(output.stdout).toBe('');
  });
});
```
## Performance Testing

### Testing Command Performance

Measure execution time:
```typescript
import { measureTime } from '@xec-sh/test-utils';

describe('performance', () => {
  it('should complete within timeout', async () => {
    const { duration } = await measureTime(async () => {
      await command.execute(['arg'], {});
    });

    expect(duration).toBeLessThan(1000); // 1 second
  });

  it('should handle parallel execution efficiently', async () => {
    const targets = Array.from({ length: 10 }, (_, i) => `target${i}`);

    const { duration } = await measureTime(async () => {
      await command.execute(targets, { parallel: true });
    });

    // Should be faster than sequential (10 * 100ms)
    expect(duration).toBeLessThan(500);
  });
});
```
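Outside the test-utils package, a minimal `measureTime` can be inlined with `performance.now()` (a sketch, not the library implementation):

```typescript
async function measureTimeSketch<T>(
  fn: () => Promise<T>
): Promise<{ result: T; duration: number }> {
  const start = performance.now();
  const result = await fn();

  // duration is wall-clock time in milliseconds
  return { result, duration: performance.now() - start };
}
```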
### Memory Usage Testing

Monitor memory usage:
```typescript
import { measureMemory } from '@xec-sh/test-utils';

it('should not leak memory', async () => {
  const initialMemory = measureMemory();

  // Run command multiple times
  for (let i = 0; i < 100; i++) {
    await command.execute(['arg'], {});
  }

  const finalMemory = measureMemory();
  const memoryIncrease = finalMemory - initialMemory;

  // Should not increase significantly
  expect(memoryIncrease).toBeLessThan(10 * 1024 * 1024); // 10MB
});
```
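Heap measurements are noisy unless garbage is collected first. A plausible `measureMemory` (an assumption; check test-utils for the real implementation) forces a GC pass when Node is started with `--expose-gc`:

```typescript
function measureMemorySketch(): number {
  // gc() is only defined when Node runs with --expose-gc.
  (globalThis as { gc?: () => void }).gc?.();

  return process.memoryUsage().heapUsed; // bytes currently on the V8 heap
}
```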
## Test Coverage

### Coverage Requirements

Commands should maintain:
- Line Coverage: ≥90%
- Branch Coverage: ≥85%
- Function Coverage: ≥95%
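These thresholds can be enforced automatically so the suite fails when coverage drops. A sketch for a Vitest setup; the `v8` provider and the `thresholds` key (Vitest 1.x) are assumptions about this project's configuration:

```typescript
// vitest.config.ts
import { defineConfig } from 'vitest/config';

export default defineConfig({
  test: {
    coverage: {
      provider: 'v8',
      thresholds: {
        lines: 90,     // Line Coverage ≥90%
        branches: 85,  // Branch Coverage ≥85%
        functions: 95, // Function Coverage ≥95%
      },
    },
  },
});
```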
### Coverage Reports

Generate coverage reports:
```bash
# Run tests with coverage
yarn test:coverage

# View HTML report
open coverage/index.html
```