diff --git a/README.md b/README.md index 85053d9..455bd76 100644 --- a/README.md +++ b/README.md @@ -85,7 +85,7 @@ docksec Dockerfile --scan-only ## Features - Smart Analysis: AI explains what vulnerabilities mean for your specific setup -- Multiple LLM Providers: Support for OpenAI, Anthropic Claude, Google Gemini, and Ollama (local models) +- Multiple LLM Providers: Support for OpenAI, Anthropic Claude, Google Gemini, Ollama (local models), and Docker Model Runner - Multiple Scanners: Integrates Trivy, Hadolint, and Docker Scout - Security Scoring: Get a 0-100 score to track improvements - Multiple Formats: Export reports as HTML, PDF, JSON, or CSV @@ -131,6 +131,17 @@ export LLM_MODEL="llama3.1" export OLLAMA_BASE_URL="http://localhost:11434" ``` +### Docker Model Runner +```bash +# Start a model via Docker Model Runner (Requires Docker Desktop 4.40+) +docker model pull ai/smollm2 +docker model run ai/smollm2 + +# Run DockSec +export LLM_PROVIDER="docker-model-runner" +export LLM_MODEL="ai/smollm2" +``` + **External tools** (optional, for full scanning): ```bash # Install Trivy and Hadolint @@ -172,7 +183,7 @@ docksec Dockerfile --provider ollama --model llama3.1 | `dockerfile` | Path to Dockerfile | | `-i, --image` | Docker image to scan | | `-o, --output` | Output file path | -| `--provider` | LLM provider (openai, anthropic, google, ollama) | +| `--provider` | LLM provider (openai, anthropic, google, ollama, docker-model-runner) | | `--model` | Model name (e.g., gpt-4o, claude-3-5-sonnet-20241022) | | `--ai-only` | AI analysis only (no scanning) | | `--scan-only` | Scanning only (no AI) | @@ -184,7 +195,7 @@ Create a `.env` file for advanced configuration: ```bash # LLM Provider Configuration -LLM_PROVIDER=openai # Options: openai, anthropic, google, ollama +LLM_PROVIDER=openai # Options: openai, anthropic, google, ollama, docker-model-runner LLM_MODEL=gpt-4o # Model to use LLM_TEMPERATURE=0.0 # Temperature (0-1) @@ -239,6 +250,7 @@ DockSec runs security 
scanners locally, then uses AI to: - **Anthropic**: Claude 3.5 Sonnet, Claude 3 Opus - **Google**: Gemini 1.5 Pro, Gemini 1.5 Flash - **Ollama**: Llama 3.1, Mistral, Phi-3, and other local models +- **Docker Model Runner**: SmolLM2, Llama 3.2, Phi-3, and other local models All scanning happens on your machine. Only scan results (not your code) are sent to the AI provider when using AI features. @@ -274,7 +286,7 @@ Quick links: → Set appropriate API key for your provider (OPENAI_API_KEY, ANTHROPIC_API_KEY, GOOGLE_API_KEY) or use `--scan-only` mode **"Unsupported LLM provider"** -→ Valid providers: openai, anthropic, google, ollama. Set with `--provider` flag or LLM_PROVIDER env var +→ Valid providers: openai, anthropic, google, ollama, docker-model-runner. Set with `--provider` flag or LLM_PROVIDER env var **"Hadolint not found"** → Run `python -m docksec.setup_external_tools` diff --git a/config_manager.py b/config_manager.py index e94915d..cfc06d1 100644 --- a/config_manager.py +++ b/config_manager.py @@ -28,7 +28,7 @@ class DocksecConfig: - Scan parameters Attributes: - llm_provider: LLM provider to use (openai, anthropic, google, ollama) + llm_provider: LLM provider to use (openai, anthropic, google, ollama, docker-model-runner) openai_api_key: OpenAI API key for AI features anthropic_api_key: Anthropic API key for Claude google_api_key: Google API key for Gemini @@ -120,7 +120,7 @@ def _validate(self) -> None: ValueError: If configuration values are invalid """ # Validate LLM provider - valid_providers = ['openai', 'anthropic', 'google', 'ollama'] + valid_providers = ['openai', 'anthropic', 'google', 'ollama', 'docker-model-runner'] if self.llm_provider not in valid_providers: raise ValueError(f"Invalid llm_provider: {self.llm_provider}. 
Valid options: {valid_providers}") @@ -234,6 +234,9 @@ def get_api_key_for_provider(self) -> str: elif self.llm_provider == "ollama": return "" + elif self.llm_provider == "docker-model-runner": + return "no-key-required" + else: raise ValueError(f"Unsupported LLM provider: {self.llm_provider}") diff --git a/docksec.py b/docksec.py index 80a6948..3fa6ac1 100644 --- a/docksec.py +++ b/docksec.py @@ -43,7 +43,7 @@ def main() -> None: parser.add_argument('--ai-only', action='store_true', help='Run only AI-based recommendations (requires Dockerfile)') parser.add_argument('--scan-only', action='store_true', help='Run only Dockerfile/image scanning (requires --image)') parser.add_argument('--image-only', action='store_true', help='Scan only the Docker image without Dockerfile analysis') - parser.add_argument('--provider', choices=['openai', 'anthropic', 'google', 'ollama'], + parser.add_argument('--provider', choices=['openai', 'anthropic', 'google', 'ollama', 'docker-model-runner'], help='LLM provider to use (default: openai, can also set LLM_PROVIDER env var)') parser.add_argument('--model', help='Model name to use (e.g., gpt-4o, claude-3-5-sonnet-20241022, gemini-1.5-pro, llama3.1)') parser.add_argument('--version', action='version', version=f'DockSec {get_version()}') diff --git a/setup_external_tools.py b/setup_external_tools.py index e5b9717..8a60f4d 100644 --- a/setup_external_tools.py +++ b/setup_external_tools.py @@ -21,6 +21,17 @@ def check_command_exists(command): """Check if a command exists in the system PATH.""" return shutil.which(command) is not None +def check_docker_model_runner(): + """Verify that Docker Model Runner is available.""" + try: + result = subprocess.run( + ["docker", "model", "list"], + capture_output=True, text=True + ) + return result.returncode == 0 + except FileNotFoundError: + return False + def run_command(command, shell=False): """Run a command and return its output.""" try: @@ -188,5 +199,12 @@ def main(): else: print("Failed to 
install Trivy") + # Check Docker Model Runner + print("\nChecking Docker Model Runner...") + if check_docker_model_runner(): + print("Docker Model Runner is available.") + else: + print("Docker Model Runner is not available (requires Docker Desktop 4.40+).") + if __name__ == "__main__": main() \ No newline at end of file diff --git a/tests/test_utils.py b/tests/test_utils.py index a74acb1..df2268f 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -83,6 +83,35 @@ def test_get_llm_no_api_key(self, mock_get_config): with self.assertRaises(EnvironmentError): get_llm() + @patch('utils.ChatOpenAI') + @patch('config_manager.get_config') + def test_get_llm_docker_model_runner(self, mock_get_config, mock_chatopenai): + """Test LLM initialization for Docker Model Runner.""" + from utils import get_llm + + mock_config = Mock() + mock_config.llm_provider = "docker-model-runner" + mock_config.llm_model = "ai/smollm2" + mock_config.llm_temperature = 0.0 + mock_config.timeout_llm = 60 + mock_config.max_retries_llm = 2 + mock_get_config.return_value = mock_config + + mock_llm_instance = Mock() + mock_chatopenai.return_value = mock_llm_instance + + llm = get_llm() + + mock_chatopenai.assert_called_once_with( + model="ai/smollm2", + base_url="http://localhost:12434/engines/llama.cpp/v1", + api_key="no-key-required", + temperature=0.0, + request_timeout=60, + max_retries=2 + ) + self.assertIsNotNone(llm) + if __name__ == '__main__': unittest.main() diff --git a/utils.py b/utils.py index 3b45205..f8f9a21 100644 --- a/utils.py +++ b/utils.py @@ -139,6 +139,7 @@ def get_llm() -> Union[ChatOpenAI, 'ChatAnthropic', 'ChatGoogleGenerativeAI', 'C - Anthropic (claude-3-5-sonnet-20241022, claude-3-opus-20240229) - Google (gemini-1.5-pro, gemini-1.5-flash) - Ollama (llama3.1, mistral, phi3, local models) + - docker-model-runner (smollm2, llama3.2, phi3) Returns: LLM instance (ChatOpenAI, ChatAnthropic, ChatGoogleGenerativeAI, or ChatOllama) @@ -232,8 +233,20 @@ def get_llm() -> 
Union[ChatOpenAI, 'ChatAnthropic', 'ChatGoogleGenerativeAI', 'C logger.info(f"Ollama LLM initialized successfully with base URL: {config.ollama_base_url}") return llm + elif provider == "docker-model-runner": + llm = ChatOpenAI( + model=model, + base_url="http://localhost:12434/engines/llama.cpp/v1", + api_key="no-key-required", + temperature=temperature, + request_timeout=timeout, + max_retries=max_retries + ) + logger.info("Docker Model Runner LLM initialized successfully") + return llm + else: - raise ValueError(f"Unsupported LLM provider: {provider}. Supported: openai, anthropic, google, ollama") + raise ValueError(f"Unsupported LLM provider: {provider}. Supported: openai, anthropic, google, ollama, docker-model-runner") except Exception as e: logger.error(f"Failed to initialize LLM: {str(e)}") @@ -243,8 +256,8 @@ def get_llm() -> Union[ChatOpenAI, 'ChatAnthropic', 'ChatGoogleGenerativeAI', 'C console.print("2. Check your internet connection") console.print("3. Verify your account has available credits") console.print("4. Try using --scan-only mode if you don't need AI features") - console.print(f"5. Current provider: {config.llm_provider if 'config' in locals() else 'unknown'}") - console.print("6. Set LLM_PROVIDER environment variable to change provider (openai/anthropic/google/ollama)") + console.print(f"5. Current provider: {config.llm_provider if 'config' in locals() else 'unknown'}") + console.print("6. Set LLM_PROVIDER environment variable to change provider (openai/anthropic/google/ollama/docker-model-runner)") raise