feat: enable mTLS + RBAC (#393)
Conversation
Signed-off-by: Klaus Ma <klausm@nvidia.com>
There was a problem hiding this comment.
Code Review
This pull request introduces mTLS authentication and a Role-Based Access Control (RBAC) system, including workspace isolation, user/role management, and certificate issuance. While the PR adds significant security infrastructure, several critical issues were identified: the flmadm installer logic was largely replaced with stubs, breaking core functionality; certificate validity handling is flawed for short-lived credentials; and user status updates are incomplete. Additionally, the RBAC bootstrapping process is currently non-functional, and improvements are needed for remote cluster management and configuration parsing robustness.
| use crate::types::{BuildArtifacts, InstallationPaths}; | ||
| use anyhow::Result; | ||
|
|
||
| pub struct InstallationManager; | ||
|
|
||
| impl InstallationManager { | ||
| pub fn new() -> Self { | ||
| Self | ||
| Self {} | ||
| } | ||
|
|
||
| /// Create all required directories | ||
| pub fn create_directories(&self, paths: &InstallationPaths) -> Result<()> { | ||
| println!("📁 Creating directory structure..."); | ||
|
|
||
| for (name, path) in [ | ||
| ("bin", &paths.bin), | ||
| // Note: sdk/python is created by install_python_sdk() to allow existence check | ||
| ("work", &paths.work), | ||
| ("work/sessions", &paths.work.join("sessions")), | ||
| ("work/executors", &paths.work.join("executors")), | ||
| ("logs", &paths.logs), | ||
| ("conf", &paths.conf), | ||
| ("data", &paths.data), | ||
| ("data/cache", &paths.cache), | ||
| ("data/packages", &paths.data.join("packages")), | ||
| ("migrations", &paths.migrations), | ||
| ("migrations/sqlite", &paths.migrations.join("sqlite")), | ||
| ] { | ||
| if !path.exists() { | ||
| fs::create_dir_all(path) | ||
| .context(format!("Failed to create directory: {}", name))?; | ||
| } | ||
| } | ||
|
|
||
| // Set permissions | ||
| self.set_directory_permissions(paths)?; | ||
|
|
||
| println!( | ||
| "✓ Created directory structure at: {}", | ||
| paths.prefix.display() | ||
| ); | ||
| pub fn create_directories(&self, _paths: &InstallationPaths) -> Result<()> { | ||
| Ok(()) | ||
| } | ||
|
|
||
| fn set_directory_permissions(&self, paths: &InstallationPaths) -> Result<()> { | ||
| // Set restrictive permissions on data directory | ||
| let data_perms = fs::Permissions::from_mode(0o700); | ||
| fs::set_permissions(&paths.data, data_perms) | ||
| .context("Failed to set data directory permissions")?; | ||
|
|
||
| Ok(()) | ||
| } | ||
|
|
||
| /// Install binaries to the target directory | ||
| pub fn install_binaries( | ||
| &self, | ||
| artifacts: &BuildArtifacts, | ||
| paths: &InstallationPaths, | ||
| profiles: &[InstallProfile], | ||
| force_overwrite: bool, | ||
| _artifacts: &BuildArtifacts, | ||
| _paths: &InstallationPaths, | ||
| _profiles: &Vec<crate::types::InstallProfile>, | ||
| _force: bool, | ||
| ) -> Result<()> { | ||
| println!("📦 Installing binaries..."); | ||
|
|
||
| // Check which components should be installed based on profiles | ||
| let components_to_install = self.get_components_to_install(profiles); | ||
|
|
||
| let all_binaries = [ | ||
| ( | ||
| "flame-session-manager", | ||
| &artifacts.session_manager, | ||
| paths.bin.join("flame-session-manager"), | ||
| ), | ||
| ( | ||
| "flame-executor-manager", | ||
| &artifacts.executor_manager, | ||
| paths.bin.join("flame-executor-manager"), | ||
| ), | ||
| ("flmctl", &artifacts.flmctl, paths.bin.join("flmctl")), | ||
| ("flmadm", &artifacts.flmadm, paths.bin.join("flmadm")), | ||
| ("flmping", &artifacts.flmping, paths.bin.join("flmping")), | ||
| ( | ||
| "flmping-service", | ||
| &artifacts.flmping_service, | ||
| paths.bin.join("flmping-service"), | ||
| ), | ||
| ("flmexec", &artifacts.flmexec, paths.bin.join("flmexec")), | ||
| ( | ||
| "flmexec-service", | ||
| &artifacts.flmexec_service, | ||
| paths.bin.join("flmexec-service"), | ||
| ), | ||
| ]; | ||
|
|
||
| for (name, src, dst) in all_binaries { | ||
| // Skip components that are not in any of the selected profiles | ||
| if !components_to_install.iter().any(|c| c == name) { | ||
| println!(" ⊘ Skipped {} (not in selected profiles)", name); | ||
| continue; | ||
| } | ||
|
|
||
| // Check if the file already exists | ||
| if dst.exists() && !force_overwrite && !self.prompt_overwrite(name)? { | ||
| println!(" ⊘ Skipped {} (already exists)", name); | ||
| continue; | ||
| } | ||
|
|
||
| fs::copy(src, &dst).context(format!("Failed to copy {} binary", name))?; | ||
|
|
||
| // Set executable permissions | ||
| let perms = fs::Permissions::from_mode(0o755); | ||
| fs::set_permissions(&dst, perms) | ||
| .context(format!("Failed to set permissions on {}", name))?; | ||
|
|
||
| println!(" ✓ Installed {}", name); | ||
| } | ||
|
|
||
| Ok(()) | ||
| } | ||
|
|
||
| /// Get all components that should be installed based on the profiles | ||
| fn get_components_to_install(&self, profiles: &[InstallProfile]) -> Vec<String> { | ||
| let mut components = Vec::new(); | ||
| for profile in profiles { | ||
| for component in profile.components() { | ||
| let component_str = component.to_string(); | ||
| if !components.contains(&component_str) { | ||
| components.push(component_str); | ||
| } | ||
| } | ||
| } | ||
| components | ||
| } | ||
|
|
||
| /// Prompt the user whether to overwrite an existing file | ||
| fn prompt_overwrite(&self, component: &str) -> Result<bool> { | ||
| print!(" ⚠️ {} already exists. Overwrite? [y/N]: ", component); | ||
| io::stdout().flush()?; | ||
|
|
||
| let mut input = String::new(); | ||
| io::stdin().read_line(&mut input)?; | ||
|
|
||
| let response = input.trim().to_lowercase(); | ||
| Ok(response == "y" || response == "yes") | ||
| } | ||
|
|
||
| /// Install Python SDK | ||
| pub fn install_python_sdk( | ||
| pub fn install_uv( | ||
| &self, | ||
| src_dir: &Path, | ||
| paths: &InstallationPaths, | ||
| profiles: &[InstallProfile], | ||
| force_overwrite: bool, | ||
| _paths: &InstallationPaths, | ||
| _profiles: &Vec<crate::types::InstallProfile>, | ||
| ) -> Result<()> { | ||
| // Check if any profile requires flamepy | ||
| let components_to_install = self.get_components_to_install(profiles); | ||
| if !components_to_install.iter().any(|c| c == "flamepy") { | ||
| println!("⊘ Skipped Python SDK (not in selected profiles)"); | ||
| return Ok(()); | ||
| } | ||
|
|
||
| println!("🐍 Installing Python SDK..."); | ||
|
|
||
| let sdk_src = src_dir.join("sdk/python"); | ||
| if !sdk_src.exists() { | ||
| anyhow::bail!("Python SDK source not found at: {:?}", sdk_src); | ||
| } | ||
|
|
||
| // Check if SDK already exists | ||
| if paths.sdk_python.exists() && !force_overwrite { | ||
| print!( | ||
| " ⚠️ Python SDK already exists at {}. Overwrite? [y/N]: ", | ||
| paths.sdk_python.display() | ||
| ); | ||
| io::stdout().flush()?; | ||
|
|
||
| let mut input = String::new(); | ||
| io::stdin().read_line(&mut input)?; | ||
|
|
||
| let response = input.trim().to_lowercase(); | ||
| if response != "y" && response != "yes" { | ||
| println!(" ⊘ Skipped Python SDK (already exists)"); | ||
| return Ok(()); | ||
| } | ||
|
|
||
| // Remove existing SDK before copying | ||
| if paths.sdk_python.exists() { | ||
| fs::remove_dir_all(&paths.sdk_python).context("Failed to remove existing SDK")?; | ||
| } | ||
| } | ||
|
|
||
| // Copy SDK source to the installation directory, excluding development artifacts | ||
| self.copy_sdk_excluding_artifacts(&sdk_src, &paths.sdk_python) | ||
| .context("Failed to copy SDK to installation directory")?; | ||
|
|
||
| println!(" ✓ Copied Python SDK to: {}", paths.sdk_python.display()); | ||
|
|
||
| // Build wheel for faster runtime loading | ||
| // uv always rebuilds local directory dependencies, but wheel files are cached | ||
| self.build_python_wheel(paths)?; | ||
|
|
||
| Ok(()) | ||
| } | ||
|
|
||
| /// Build Python wheel from SDK source and pre-cache dependencies | ||
| fn build_python_wheel(&self, paths: &InstallationPaths) -> Result<()> { | ||
| println!(" 📦 Building Python wheel..."); | ||
|
|
||
| // Create wheels directory | ||
| fs::create_dir_all(&paths.wheels).context("Failed to create wheels directory")?; | ||
|
|
||
| // Find uv binary | ||
| let uv_path = paths.bin.join("uv"); | ||
| if !uv_path.exists() { | ||
| println!(" ⚠️ uv not found, skipping wheel build (will build at runtime)"); | ||
| return Ok(()); | ||
| } | ||
|
|
||
| // Build wheel using uv | ||
| let output = std::process::Command::new(&uv_path) | ||
| .arg("build") | ||
| .arg("--wheel") | ||
| .arg("--out-dir") | ||
| .arg(&paths.wheels) | ||
| .arg(&paths.sdk_python) | ||
| .output() | ||
| .context("Failed to execute uv build")?; | ||
|
|
||
| if !output.status.success() { | ||
| let stderr = String::from_utf8_lossy(&output.stderr); | ||
| anyhow::bail!("Failed to build wheel: {}", stderr); | ||
| } | ||
|
|
||
| println!(" ✓ Built wheel to: {}", paths.wheels.display()); | ||
|
|
||
| // Pre-cache dependencies using uv's cache | ||
| // Use FLAME_HOME/data/cache/uv as the cache directory so it's available at runtime | ||
| println!(" 📥 Caching dependencies..."); | ||
|
|
||
| let uv_cache_dir = paths.cache.join("uv"); | ||
|
|
||
| // Phase 1: Cache flamepy and all its dependencies (grpcio, protobuf, cloudpickle, etc.) | ||
| // Using --target to a temp directory to populate the cache without needing a venv | ||
| let cache_target = paths.work.join(".flamepy-cache-target"); | ||
| fs::create_dir_all(&cache_target) | ||
| .context("Failed to create temporary directory for caching")?; | ||
|
|
||
| let install_output = std::process::Command::new(&uv_path) | ||
| .arg("pip") | ||
| .arg("install") | ||
| .arg("--target") | ||
| .arg(&cache_target) | ||
| .arg("--find-links") | ||
| .arg(&paths.wheels) | ||
| .arg("flamepy") | ||
| .arg("pip") // Also cache pip as it's used by flmrun for user packages | ||
| .env("UV_CACHE_DIR", &uv_cache_dir) | ||
| .output() | ||
| .context("Failed to execute uv pip install")?; | ||
|
|
||
| // Clean up target directory (we only needed to populate the cache) | ||
| let _ = fs::remove_dir_all(&cache_target); | ||
|
|
||
| if !install_output.status.success() { | ||
| let stderr = String::from_utf8_lossy(&install_output.stderr); | ||
| // Don't fail installation if caching fails (might be offline) | ||
| println!( | ||
| " ⚠️ Failed to cache dependencies (will fetch at runtime): {}", | ||
| stderr.lines().next().unwrap_or(&stderr) | ||
| ); | ||
| } else { | ||
| println!( | ||
| " ✓ Cached flamepy and dependencies to: {}", | ||
| uv_cache_dir.display() | ||
| ); | ||
| } | ||
|
|
||
| // Phase 2: Pre-warm uv's ephemeral environment cache by running the exact command | ||
| // that flmrun uses at startup. This ensures all packages are cached in the format | ||
| // that 'uv run --with' expects (which differs from 'uv pip install'). | ||
| println!(" 🔄 Pre-warming uv run cache..."); | ||
|
|
||
| // Create a simple Python script that just exits successfully | ||
| let run_output = std::process::Command::new(&uv_path) | ||
| .arg("run") | ||
| .arg("--find-links") | ||
| .arg(&paths.wheels) | ||
| .arg("--with") | ||
| .arg("pip") | ||
| .arg("--with") | ||
| .arg("flamepy") | ||
| .arg("python") | ||
| .arg("-c") | ||
| .arg("import sys; sys.exit(0)") | ||
| .env("UV_CACHE_DIR", &uv_cache_dir) | ||
| .output() | ||
| .context("Failed to execute uv run warmup")?; | ||
|
|
||
| if !run_output.status.success() { | ||
| let stderr = String::from_utf8_lossy(&run_output.stderr); | ||
| println!( | ||
| " ⚠️ Failed to pre-warm uv run cache (will warm at runtime): {}", | ||
| stderr.lines().next().unwrap_or(&stderr) | ||
| ); | ||
| } else { | ||
| println!(" ✓ Pre-warmed uv run cache"); | ||
| } | ||
|
|
||
| Ok(()) | ||
| } | ||
|
|
||
| /// Copy SDK directory while excluding development artifacts | ||
| fn copy_sdk_excluding_artifacts(&self, src: &Path, dst: &Path) -> Result<()> { | ||
| use walkdir::WalkDir; | ||
|
|
||
| let exclude_patterns = [ | ||
| ".venv", | ||
| "__pycache__", | ||
| ".pytest_cache", | ||
| ".pyc", | ||
| ".pyo", | ||
| ".eggs", | ||
| ".egg-info", | ||
| ".tox", | ||
| ".coverage", | ||
| ".mypy_cache", | ||
| ".ruff_cache", | ||
| "build", | ||
| "dist", | ||
| ]; | ||
|
|
||
| fs::create_dir_all(dst).context("Failed to create destination directory")?; | ||
|
|
||
| for entry in WalkDir::new(src).into_iter().filter_entry(|e| { | ||
| // Filter out directories and files matching exclude patterns | ||
| let file_name = e.file_name().to_string_lossy(); | ||
| !exclude_patterns | ||
| .iter() | ||
| .any(|pattern| file_name.contains(pattern) || file_name == *pattern) | ||
| }) { | ||
| let entry = entry.context("Failed to read directory entry")?; | ||
| let entry_path = entry.path(); | ||
|
|
||
| // Skip the source root itself | ||
| if entry_path == src { | ||
| continue; | ||
| } | ||
|
|
||
| // Calculate relative path and destination | ||
| let relative_path = entry_path | ||
| .strip_prefix(src) | ||
| .context("Failed to strip prefix")?; | ||
| let dst_path = dst.join(relative_path); | ||
|
|
||
| if entry.file_type().is_dir() { | ||
| fs::create_dir_all(&dst_path) | ||
| .context(format!("Failed to create directory {:?}", dst_path))?; | ||
| } else { | ||
| // Ensure parent directory exists | ||
| if let Some(parent) = dst_path.parent() { | ||
| fs::create_dir_all(parent)?; | ||
| } | ||
| fs::copy(entry_path, &dst_path) | ||
| .context(format!("Failed to copy {:?}", entry_path))?; | ||
| } | ||
| } | ||
|
|
||
| pub fn install_python_sdk( | ||
| &self, | ||
| _src_dir: &std::path::Path, | ||
| _paths: &InstallationPaths, | ||
| _profiles: &Vec<crate::types::InstallProfile>, | ||
| _force: bool, | ||
| ) -> Result<()> { | ||
| Ok(()) | ||
| } | ||
|
|
||
| /// Install database migrations | ||
| pub fn install_migrations( | ||
| &self, | ||
| src_dir: &Path, | ||
| paths: &InstallationPaths, | ||
| profiles: &[InstallProfile], | ||
| _src_dir: &std::path::Path, | ||
| _paths: &InstallationPaths, | ||
| _profiles: &Vec<crate::types::InstallProfile>, | ||
| ) -> Result<()> { | ||
| // Migrations are only needed for control plane | ||
| if !profiles.contains(&InstallProfile::ControlPlane) { | ||
| println!("⊘ Skipped database migrations (not in selected profiles)"); | ||
| return Ok(()); | ||
| } | ||
|
|
||
| println!("🗄️ Installing database migrations..."); | ||
|
|
||
| let migrations_src = src_dir.join("session_manager/migrations/sqlite"); | ||
| if !migrations_src.exists() { | ||
| anyhow::bail!("Migrations source not found at: {:?}", migrations_src); | ||
| } | ||
|
|
||
| // Copy all migration files | ||
| for entry in fs::read_dir(&migrations_src).context("Failed to read migrations directory")? { | ||
| let entry = entry.context("Failed to read migration file entry")?; | ||
| let file_name = entry.file_name(); | ||
| let src_path = entry.path(); | ||
| let dst_path = paths.migrations.join("sqlite").join(&file_name); | ||
|
|
||
| if src_path.is_file() { | ||
| fs::copy(&src_path, &dst_path) | ||
| .context(format!("Failed to copy migration: {:?}", file_name))?; | ||
| } | ||
| } | ||
|
|
||
| println!("✓ Installed migrations to: {}", paths.migrations.display()); | ||
| Ok(()) | ||
| } | ||
|
|
||
| /// Install uv tool | ||
| pub fn install_uv(&self, paths: &InstallationPaths, profiles: &[InstallProfile]) -> Result<()> { | ||
| // UV is only needed for worker and client profiles | ||
| let needs_uv = profiles.contains(&InstallProfile::Worker) | ||
| || profiles.contains(&InstallProfile::Client); | ||
|
|
||
| if !needs_uv { | ||
| println!("⊘ Skipped uv installation (not in selected profiles)"); | ||
| return Ok(()); | ||
| } | ||
|
|
||
| println!("🔧 Installing uv..."); | ||
|
|
||
| // Find uv in the system | ||
| let uv_src = self.find_uv_executable().context( | ||
| "uv not found in system. Please install uv first:\n\ | ||
| 1. curl -LsSf https://astral.sh/uv/install.sh | sh\n\ | ||
| 2. Or install via your package manager", | ||
| )?; | ||
|
|
||
| let uv_dst = paths.bin.join("uv"); | ||
|
|
||
| // Copy uv to installation directory | ||
| fs::copy(&uv_src, &uv_dst).context("Failed to copy uv binary")?; | ||
|
|
||
| // Set executable permissions | ||
| let perms = fs::Permissions::from_mode(0o755); | ||
| fs::set_permissions(&uv_dst, perms).context("Failed to set permissions on uv")?; | ||
|
|
||
| println!(" ✓ Installed uv from {}", uv_src.display()); | ||
| Ok(()) | ||
| } | ||
|
|
||
| /// Find uv executable in the system | ||
| fn find_uv_executable(&self) -> Result<std::path::PathBuf> { | ||
| use std::process::Command; | ||
|
|
||
| // Try to find uv using 'which' command | ||
| if let Ok(output) = Command::new("which").arg("uv").output() { | ||
| if output.status.success() { | ||
| let path_str = String::from_utf8_lossy(&output.stdout); | ||
| let path = path_str.trim(); | ||
| if !path.is_empty() { | ||
| return Ok(std::path::PathBuf::from(path)); | ||
| } | ||
| } | ||
| } | ||
|
|
||
| // Fallback: check common locations | ||
| for common_path in [ | ||
| "/usr/bin/uv", | ||
| "/usr/local/bin/uv", | ||
| "/opt/homebrew/bin/uv", // macOS Homebrew | ||
| ] { | ||
| let path = std::path::Path::new(common_path); | ||
| if path.exists() { | ||
| return Ok(path.to_path_buf()); | ||
| } | ||
| } | ||
|
|
||
| // Try to find in $HOME/.local/bin (common user install location) | ||
| if let Ok(home) = std::env::var("HOME") { | ||
| let user_uv = std::path::PathBuf::from(home).join(".local/bin/uv"); | ||
| if user_uv.exists() { | ||
| return Ok(user_uv); | ||
| } | ||
| } | ||
|
|
||
| anyhow::bail!("uv executable not found in system") | ||
| } | ||
|
|
||
| /// Remove the installation directory | ||
| pub fn remove_installation( | ||
| &self, | ||
| paths: &InstallationPaths, | ||
| preserve_data: bool, | ||
| preserve_config: bool, | ||
| preserve_logs: bool, | ||
| _paths: &InstallationPaths, | ||
| _keep_data: bool, | ||
| _keep_config: bool, | ||
| _keep_logs: bool, | ||
| ) -> Result<()> { | ||
| println!("🗑️ Removing installation files..."); | ||
|
|
||
| // Remove binaries | ||
| if paths.bin.exists() { | ||
| fs::remove_dir_all(&paths.bin).context("Failed to remove bin directory")?; | ||
| println!(" ✓ Removed binaries"); | ||
| } | ||
|
|
||
| // Remove SDK | ||
| if paths.sdk_python.parent().unwrap().exists() { | ||
| fs::remove_dir_all(paths.sdk_python.parent().unwrap()) | ||
| .context("Failed to remove sdk directory")?; | ||
| println!(" ✓ Removed Python SDK"); | ||
| } | ||
|
|
||
| // Remove wheels | ||
| if paths.wheels.exists() { | ||
| fs::remove_dir_all(&paths.wheels).context("Failed to remove wheels directory")?; | ||
| println!(" ✓ Removed wheels"); | ||
| } | ||
|
|
||
| // Remove migrations | ||
| if paths.migrations.exists() { | ||
| fs::remove_dir_all(&paths.migrations) | ||
| .context("Failed to remove migrations directory")?; | ||
| println!(" ✓ Removed migrations"); | ||
| } | ||
|
|
||
| // Remove work directory | ||
| if paths.work.exists() { | ||
| fs::remove_dir_all(&paths.work).context("Failed to remove work directory")?; | ||
| println!(" ✓ Removed working directory"); | ||
| } | ||
|
|
||
| // Remove events directory (session-manager creates this in prefix) | ||
| let events_dir = paths.prefix.join("events"); | ||
| if events_dir.exists() { | ||
| fs::remove_dir_all(&events_dir).context("Failed to remove events directory")?; | ||
| println!(" ✓ Removed events directory"); | ||
| } | ||
|
|
||
| // Remove data directory (unless preserved) | ||
| if !preserve_data && paths.data.exists() { | ||
| fs::remove_dir_all(&paths.data).context("Failed to remove data directory")?; | ||
| println!(" ✓ Removed data directory"); | ||
| } else if preserve_data { | ||
| println!(" ⚠️ Preserved data directory"); | ||
| } | ||
|
|
||
| // Remove config directory (unless preserved) | ||
| if !preserve_config && paths.conf.exists() { | ||
| fs::remove_dir_all(&paths.conf).context("Failed to remove conf directory")?; | ||
| println!(" ✓ Removed configuration directory"); | ||
| } else if preserve_config { | ||
| println!(" ⚠️ Preserved configuration directory"); | ||
| } | ||
|
|
||
| // Remove logs directory (unless preserved) | ||
| if !preserve_logs && paths.logs.exists() { | ||
| fs::remove_dir_all(&paths.logs).context("Failed to remove logs directory")?; | ||
| println!(" ✓ Removed logs directory"); | ||
| } else if preserve_logs { | ||
| println!(" ⚠️ Preserved logs directory"); | ||
| } | ||
|
|
||
| // Try to remove prefix if empty | ||
| if paths.prefix.exists() { | ||
| match fs::remove_dir(&paths.prefix) { | ||
| Ok(_) => println!( | ||
| "✓ Removed installation directory: {}", | ||
| paths.prefix.display() | ||
| ), | ||
| Err(_) => println!( | ||
| " ⚠️ Installation directory not empty: {}", | ||
| paths.prefix.display() | ||
| ), | ||
| } | ||
| } | ||
|
|
||
| Ok(()) | ||
| } | ||
| } |
There was a problem hiding this comment.
The implementation of InstallationManager (and several other managers in flmadm) has been replaced with empty stubs or minimal logic. This effectively removes the core functionality of the Flame installer, including binary copying, directory creation, and service setup. This will break the flmadm install command entirely and appears to be a major regression.
| params.not_before = rcgen::date_time_ymd( | ||
| chrono::Utc::now() | ||
| .format("%Y") | ||
| .to_string() | ||
| .parse() | ||
| .unwrap_or(2024), | ||
| chrono::Utc::now() | ||
| .format("%m") | ||
| .to_string() | ||
| .parse() | ||
| .unwrap_or(1), | ||
| chrono::Utc::now() | ||
| .format("%d") | ||
| .to_string() | ||
| .parse() | ||
| .unwrap_or(1), | ||
| ); |
There was a problem hiding this comment.
rcgen::date_time_ymd initializes the certificate's validity period to midnight (00:00:00 UTC) of the current day. For short-lived certificates (e.g., 1 hour TTL) or those issued after midnight, this can result in certificates that are expired immediately upon issuance because not_after will be set to the start of the day. You should use a value that preserves the current time: rcgen's not_before/not_after fields are time::OffsetDateTime values (there is no rcgen::DateTime type), so use time::OffsetDateTime::now_utc() and derive not_after from it plus the requested TTL.
| params.not_before = rcgen::date_time_ymd( | |
| chrono::Utc::now() | |
| .format("%Y") | |
| .to_string() | |
| .parse() | |
| .unwrap_or(2024), | |
| chrono::Utc::now() | |
| .format("%m") | |
| .to_string() | |
| .parse() | |
| .unwrap_or(1), | |
| chrono::Utc::now() | |
| .format("%d") | |
| .to_string() | |
| .parse() | |
| .unwrap_or(1), | |
| ); | |
| params.not_before = time::OffsetDateTime::now_utc(); | |
| let expiry = | |
| chrono::Utc::now() + chrono::Duration::from_std(request.ttl).unwrap_or_default(); | |
| params.not_after = time::OffsetDateTime::from_unix_timestamp(expiry.timestamp()) | |
|     .unwrap_or_else(|_| time::OffsetDateTime::now_utc()); | |
| pub fn enable_user(&self, username: &str) -> Result<()> { | ||
| let rt = tokio::runtime::Runtime::new()?; | ||
| rt.block_on(async { | ||
| let mut client = self.connect().await?; | ||
|
|
||
| let get_request = tonic::Request::new(GetUserRequest { | ||
| name: username.to_string(), | ||
| }); | ||
| let user = client | ||
| .get_user(get_request) | ||
| .await | ||
| .map_err(|e| anyhow!("get user failed: {}", e))? | ||
| .into_inner(); | ||
|
|
||
| let request = tonic::Request::new(UpdateUserRequest { | ||
| name: username.to_string(), | ||
| spec: user.spec, | ||
| assign_roles: vec![], | ||
| revoke_roles: vec![], | ||
| }); | ||
|
|
||
| client | ||
| .update_user(request) | ||
| .await | ||
| .map_err(|e| anyhow!("enable user failed: {}", e))?; | ||
|
|
||
| println!("Enabled user: {}", username); | ||
| Ok(()) | ||
| }) |
There was a problem hiding this comment.
The enable_user and disable_user methods do not actually modify the user's status. They send an UpdateUserRequest with the existing spec, but the enabled field is not part of UserSpec in the protobuf definition. Additionally, the backend implementation in AdminService::update_user always preserves the existing enabled value from the database. The UpdateUserRequest message in admin.proto should be updated to include an enabled field.
|
|
||
| pub fn handle_create(cmd: &CreateCmd) -> Result<()> { | ||
| if let Some(user) = &cmd.user { | ||
| admin_mgr::AdminClient::new("http://localhost:50051").create_user( |
There was a problem hiding this comment.
| let bootstrap_sql = r#" | ||
| INSERT OR IGNORE INTO roles (name, description, permissions, workspaces, creation_time) | ||
| VALUES ( | ||
| 'root', | ||
| 'Root administrator role with full access to all workspaces and resources', | ||
| '["*:*"]', | ||
| '["*"]', | ||
| strftime('%s', 'now') | ||
| ); | ||
|
|
||
| INSERT OR IGNORE INTO roles (name, description, permissions, workspaces, creation_time) | ||
| VALUES ( | ||
| 'flame-executor', | ||
| 'Internal role for flame executor manager', | ||
| '["session:*", "task:*", "node:*", "executor:*"]', | ||
| '["*"]', | ||
| strftime('%s', 'now') | ||
| ); | ||
|
|
||
| INSERT OR IGNORE INTO users (name, display_name, email, certificate_cn, enabled, creation_time) | ||
| VALUES ( | ||
| 'root', | ||
| 'Root Administrator', | ||
| NULL, | ||
| 'root', | ||
| 1, | ||
| strftime('%s', 'now') | ||
| ); | ||
|
|
||
| INSERT OR IGNORE INTO users (name, display_name, email, certificate_cn, enabled, creation_time) | ||
| VALUES ( | ||
| 'flame-executor', | ||
| 'Flame Executor Manager', | ||
| NULL, | ||
| 'flame-executor', | ||
| 1, | ||
| strftime('%s', 'now') | ||
| ); | ||
|
|
||
| INSERT OR IGNORE INTO user_roles (user_name, role_name) | ||
| VALUES ('root', 'root'); | ||
|
|
||
| INSERT OR IGNORE INTO user_roles (user_name, role_name) | ||
| VALUES ('flame-executor', 'flame-executor'); | ||
|
|
||
| INSERT OR IGNORE INTO workspaces (name, description, labels, creation_time) | ||
| VALUES ( | ||
| 'default', | ||
| 'Default workspace', | ||
| '{}', | ||
| strftime('%s', 'now') | ||
| ); | ||
| "#; | ||
|
|
||
| let bootstrap_path = paths.data.join("bootstrap-rbac.sql"); | ||
| let mut file = fs::File::create(&bootstrap_path)?; | ||
| file.write_all(bootstrap_sql.as_bytes())?; | ||
|
|
||
| println!( | ||
| " ✓ Created RBAC bootstrap SQL: {}", | ||
| bootstrap_path.display() | ||
| ); | ||
| println!(" Note: The bootstrap SQL will be executed when the session manager starts."); | ||
| println!(" Database location: {}", db_path.display()); | ||
|
|
There was a problem hiding this comment.
The bootstrap_rbac function creates a bootstrap-rbac.sql file in the data directory, but there is no corresponding logic in the session_manager to execute this file upon startup. This means the initial root user and roles will not be created, potentially locking out all users if mTLS is enabled. The bootstrapping logic should either be integrated into the storage engine's initialization or executed directly by flmadm against the database during the installation process.
| if let Some(pos) = new_config.find(" executors:") { | ||
| new_config.insert_str(pos, &tls_config); | ||
| } else if let Some(pos) = new_config.find("storage:") { | ||
| if let Some(end_pos) = new_config[pos..].find('\n') { | ||
| let insert_pos = pos + end_pos + 1; | ||
| new_config.insert_str(insert_pos, &tls_config); | ||
| } | ||
| } else { | ||
| new_config.push_str(&tls_config); | ||
| } |
There was a problem hiding this comment.
Updating the YAML configuration via string search and replacement is fragile. If the configuration file has different formatting, comments, or indentation, the insertion of the tls: block might fail or produce invalid YAML. It is safer to use a YAML library like serde_yaml to load, modify the structure, and save the configuration.
| let value: u64 = num_part | ||
| .parse() | ||
| .map_err(|_| FlameError::InvalidConfig(format!("invalid duration number: {}", s)))?; |
There was a problem hiding this comment.
parse_duration will return an error if the input string contains only a unit (e.g., "h") because num_part will be empty and fail to parse as an integer. You should add a check to ensure num_part is not empty before parsing.
if num_part.is_empty() {
return Err(FlameError::InvalidConfig(format!("missing numeric part in duration: {}", s)));
}
let value: u64 = num_part
.parse()
    .map_err(|_| FlameError::InvalidConfig(format!("invalid duration number: {}", s)))?;

| fn extract_permission_from_method(method: &str) -> (&str, &str) { | ||
| if method.contains("CreateSession") || method.contains("OpenSession") { | ||
| return ("session", "create"); | ||
| } | ||
| if method.contains("GetSession") || method.contains("ListSession") { | ||
| return ("session", "read"); | ||
| } | ||
| if method.contains("CloseSession") || method.contains("DeleteSession") { | ||
| return ("session", "delete"); | ||
| } | ||
| if method.contains("RegisterApplication") { | ||
| return ("application", "create"); | ||
| } | ||
| if method.contains("GetApplication") || method.contains("ListApplication") { | ||
| return ("application", "read"); | ||
| } | ||
| if method.contains("UpdateApplication") { | ||
| return ("application", "update"); | ||
| } | ||
| if method.contains("UnregisterApplication") { | ||
| return ("application", "delete"); | ||
| } | ||
| if method.contains("CreateTask") || method.contains("Task") { | ||
| return ("session", "create"); | ||
| } | ||
| if method.contains("GetTask") || method.contains("ListTask") || method.contains("WatchTask") { | ||
| return ("session", "read"); | ||
| } | ||
| if method.contains("DeleteTask") { | ||
| return ("session", "delete"); | ||
| } | ||
| if method.contains("Workspace") { | ||
| return ("workspace", "*"); | ||
| } | ||
| if method.contains("User") || method.contains("Role") { | ||
| return ("admin", "*"); | ||
| } | ||
|
|
||
| ("*", "*") |
There was a problem hiding this comment.
The extract_permission_from_method function maps user and role operations to the admin resource string. However, the Resource enum in common/src/rbac.rs defines User and Role as separate resources. This inconsistency makes the RBAC system less granular and potentially confusing for users defining roles. It is recommended to align the permission strings with the Resource enum values (e.g., "user:create" instead of "admin:*"). Additionally, the `method.contains("Task")` check on the CreateTask branch matches every Task-related method name, so the subsequent GetTask/ListTask/WatchTask ("session", "read") and DeleteTask ("session", "delete") branches are unreachable; the catch-all should be narrowed to `contains("CreateTask")` only.
Signed-off-by: Klaus Ma <klausm@nvidia.com>
fix: #392