refactor(desktop): rework default server initialization and connection handling (#16965)

Brendan Allan
2026-03-12 16:10:52 +08:00
committed by GitHub
parent 51835ecf90
commit b76ead3fe8
19 changed files with 404 additions and 588 deletions


@@ -12,12 +12,10 @@ mod window_customizer;
mod windows;
use crate::cli::CommandChild;
use futures::{
FutureExt, TryFutureExt,
future::{self, Shared},
};
use futures::{FutureExt, TryFutureExt};
use std::{
env,
future::Future,
net::TcpListener,
path::PathBuf,
process::Command,
@@ -35,7 +33,6 @@ use tokio::{
use crate::cli::{sqlite_migration::SqliteMigrationProgress, sync_cli};
use crate::constants::*;
use crate::server::get_saved_server_url;
use crate::windows::{LoadingWindow, MainWindow};
#[derive(Clone, serde::Serialize, specta::Type, Debug)]
@@ -43,7 +40,6 @@ struct ServerReadyData {
url: String,
username: Option<String>,
password: Option<String>,
is_sidecar: bool,
}
#[derive(Clone, Copy, serde::Serialize, specta::Type, Debug)]
@@ -65,27 +61,12 @@ struct InitState {
current: watch::Receiver<InitStep>,
}
#[derive(Clone)]
struct ServerState {
child: Arc<Mutex<Option<CommandChild>>>,
status: future::Shared<oneshot::Receiver<Result<ServerReadyData, String>>>,
}
impl ServerState {
pub fn new(
child: Option<CommandChild>,
status: Shared<oneshot::Receiver<Result<ServerReadyData, String>>>,
) -> Self {
Self {
child: Arc::new(Mutex::new(child)),
status,
}
}
pub fn set_child(&self, child: Option<CommandChild>) {
*self.child.lock().unwrap() = child;
}
}
/// Resolves with sidecar credentials as soon as the sidecar is spawned (before health check).
struct SidecarReady(futures::future::Shared<oneshot::Receiver<ServerReadyData>>);
#[tauri::command]
#[specta::specta]
@@ -110,26 +91,21 @@ fn kill_sidecar(app: AppHandle) {
tracing::info!("Killed server");
}
fn get_logs() -> String {
logging::tail()
}
#[tauri::command]
#[specta::specta]
async fn await_initialization(
state: State<'_, ServerState>,
state: State<'_, SidecarReady>,
init_state: State<'_, InitState>,
events: Channel<InitStep>,
) -> Result<ServerReadyData, String> {
let mut rx = init_state.current.clone();
let events = async {
let stream = async {
let e = *rx.borrow();
let _ = events.send(e);
while rx.changed().await.is_ok() {
let step = *rx.borrow_and_update();
let _ = events.send(step);
if matches!(step, InitStep::Done) {
@@ -138,10 +114,18 @@ async fn await_initialization(
}
};
future::join(state.status.clone(), events)
.await
.0
.map_err(|_| "Failed to get server status".to_string())?
// Wait for sidecar credentials (available immediately after spawn, before health check)
let data = async {
state
.inner()
.0
.clone()
.await
.map_err(|_| "Failed to get sidecar data".to_string())
};
let (result, _) = futures::future::join(data, stream).await;
result
}
#[tauri::command]
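// --- Illustrative sketch, not part of the commit ---
// SidecarReady wraps a futures::future::Shared oneshot receiver: the sidecar
// credentials are sent exactly once, right after the process is spawned, yet
// every clone of the shared receiver resolves to the same ServerReadyData, so
// any number of await_initialization calls can wait on it. A minimal
// standalone version of that pattern (futures::channel::oneshot is assumed
// here; the app's own oneshot import may differ):
use futures::{FutureExt, channel::oneshot, executor::block_on};

fn main() {
    let (tx, rx) = oneshot::channel::<String>();
    let rx = rx.shared(); // cloneable; every clone yields the same value

    // Send once, immediately after "spawning" the sidecar.
    tx.send("http://127.0.0.1:4096".to_string()).unwrap();

    let a = rx.clone();
    let b = rx;
    assert_eq!(block_on(a).unwrap(), "http://127.0.0.1:4096");
    assert_eq!(block_on(b).unwrap(), "http://127.0.0.1:4096");
}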
@@ -439,22 +423,35 @@ async fn initialize(app: AppHandle) {
setup_app(&app, init_rx);
spawn_cli_sync_task(app.clone());
let (server_ready_tx, server_ready_rx) = oneshot::channel();
let server_ready_rx = server_ready_rx.shared();
app.manage(ServerState::new(None, server_ready_rx.clone()));
// Spawn sidecar immediately - credentials are known before health check
let port = get_sidecar_port();
let hostname = "127.0.0.1";
let url = format!("http://{hostname}:{port}");
let password = uuid::Uuid::new_v4().to_string();
tracing::info!("Spawning sidecar on {url}");
let (child, health_check) =
server::spawn_local_server(app.clone(), hostname.to_string(), port, password.clone());
// Make sidecar credentials available immediately (before health check completes)
let (ready_tx, ready_rx) = oneshot::channel();
let _ = ready_tx.send(ServerReadyData {
url: url.clone(),
username: Some("opencode".to_string()),
password: Some(password),
});
app.manage(SidecarReady(ready_rx.shared()));
app.manage(ServerState {
child: Arc::new(Mutex::new(Some(child))),
});
let loading_window_complete = event_once_fut::<LoadingWindowComplete>(&app);
tracing::info!("Main and loading windows created");
// SQLite migration handling:
// We only do this if the sqlite db doesn't exist, and we're expecting the sidecar to create it
// First, we spawn a task that listens for SqliteMigrationProgress events that can
// come from any invocation of the sidecar CLI. The progress is captured by a stdout stream interceptor.
// Then in the loading task, we wait for sqlite migration to complete before
// starting our health check against the server, otherwise long migrations could result in a timeout.
let needs_sqlite_migration = !sqlite_file_exists();
let sqlite_done = needs_sqlite_migration.then(|| {
// We only do this if the sqlite db doesn't exist, and we're expecting the sidecar to create it.
// A separate loading window is shown for long migrations.
let needs_migration = !sqlite_file_exists();
let sqlite_done = needs_migration.then(|| {
tracing::info!(
path = %opencode_db_path().expect("failed to get db path").display(),
"Sqlite file not found, waiting for it to be generated"
@@ -480,80 +477,22 @@ async fn initialize(app: AppHandle) {
}))
});
// The loading task waits for SQLite migration (if needed) then for the sidecar health check.
// This is only used to drive the loading window progress - the main window is shown immediately.
let loading_task = tokio::spawn({
let app = app.clone();
async move {
tracing::info!("Setting up server connection");
let server_connection = setup_server_connection(app.clone()).await;
tracing::info!("Server connection setup");
// we delay spawning this future so that the timeout is created lazily
let cli_health_check = match server_connection {
ServerConnection::CLI {
child,
health_check,
url,
username,
password,
} => {
let app = app.clone();
Some(
async move {
let res = timeout(Duration::from_secs(30), health_check.0).await;
let err = match res {
Ok(Ok(Ok(()))) => None,
Ok(Ok(Err(e))) => Some(e),
Ok(Err(e)) => Some(format!("Health check task failed: {e}")),
Err(_) => Some("Health check timed out".to_string()),
};
if let Some(err) = err {
let _ = child.kill();
return Err(format!(
"Failed to spawn OpenCode Server ({err}). Logs:\n{}",
get_logs()
));
}
tracing::info!("CLI health check OK");
app.state::<ServerState>().set_child(Some(child));
Ok(ServerReadyData {
url,
username,
password,
is_sidecar: true,
})
}
.map(move |res| {
let _ = server_ready_tx.send(res);
}),
)
}
ServerConnection::Existing { url } => {
let _ = server_ready_tx.send(Ok(ServerReadyData {
url: url.to_string(),
username: None,
password: None,
is_sidecar: false,
}));
None
}
};
tracing::info!("server connection started");
if let Some(cli_health_check) = cli_health_check {
if let Some(sqlite_done_rx) = sqlite_done {
let _ = sqlite_done_rx.await;
}
tokio::spawn(cli_health_check);
if let Some(sqlite_done_rx) = sqlite_done {
let _ = sqlite_done_rx.await;
}
let _ = server_ready_rx.await;
// Wait for sidecar to become healthy (for loading window progress)
let res = timeout(Duration::from_secs(30), health_check.0).await;
match res {
Ok(Ok(Ok(()))) => tracing::info!("Sidecar health check OK"),
Ok(Ok(Err(e))) => tracing::error!("Sidecar health check failed: {e}"),
Ok(Err(e)) => tracing::error!("Sidecar health check task failed: {e}"),
Err(_) => tracing::error!("Sidecar health check timed out"),
}
tracing::info!("Loading task finished");
}
@@ -561,7 +500,8 @@ async fn initialize(app: AppHandle) {
.map_err(|_| ())
.shared();
let loading_window = if needs_sqlite_migration
// Show loading window for SQLite migrations if they take >1s
let loading_window = if needs_migration
&& timeout(Duration::from_secs(1), loading_task.clone())
.await
.is_err()
@@ -571,12 +511,12 @@ async fn initialize(app: AppHandle) {
sleep(Duration::from_secs(1)).await;
Some(loading_window)
} else {
tracing::debug!("Showing main window without loading window");
MainWindow::create(&app).expect("Failed to create main window");
None
};
// Create main window immediately - the web app handles its own loading/health gate
MainWindow::create(&app).expect("Failed to create main window");
let _ = loading_task.await;
tracing::info!("Loading done, completing initialisation");
@@ -584,12 +524,9 @@ async fn initialize(app: AppHandle) {
if loading_window.is_some() {
loading_window_complete.await;
tracing::info!("Loading window completed");
}
MainWindow::create(&app).expect("Failed to create main window");
if let Some(loading_window) = loading_window {
let _ = loading_window.close();
}
@@ -610,59 +547,6 @@ fn spawn_cli_sync_task(app: AppHandle) {
});
}
enum ServerConnection {
Existing {
url: String,
},
CLI {
url: String,
username: Option<String>,
password: Option<String>,
child: CommandChild,
health_check: server::HealthCheck,
},
}
async fn setup_server_connection(app: AppHandle) -> ServerConnection {
let custom_url = get_saved_server_url(&app).await;
tracing::info!(?custom_url, "Attempting server connection");
if let Some(url) = &custom_url
&& server::check_health_or_ask_retry(&app, url).await
{
tracing::info!(%url, "Connected to custom server");
// If the default server is already local, no need to also spawn a sidecar
if server::is_localhost_url(url) {
return ServerConnection::Existing { url: url.clone() };
}
// Remote default server: fall through and also spawn a local sidecar
}
let local_port = get_sidecar_port();
let hostname = "127.0.0.1";
let local_url = format!("http://{hostname}:{local_port}");
tracing::debug!(url = %local_url, "Checking health of local server");
if server::check_health(&local_url, None).await {
tracing::info!(url = %local_url, "Health check OK, using existing server");
return ServerConnection::Existing { url: local_url };
}
let password = uuid::Uuid::new_v4().to_string();
tracing::info!("Spawning new local server");
let (child, health_check) =
server::spawn_local_server(app, hostname.to_string(), local_port, password.clone());
ServerConnection::CLI {
url: local_url,
username: Some("opencode".to_string()),
password: Some(password),
child,
health_check,
}
}
fn get_sidecar_port() -> u32 {
option_env!("OPENCODE_PORT")


@@ -1,7 +1,6 @@
use std::time::{Duration, Instant};
use tauri::AppHandle;
use tauri_plugin_dialog::{DialogExt, MessageDialogButtons, MessageDialogResult};
use tauri_plugin_store::StoreExt;
use tokio::task::JoinHandle;
@@ -85,22 +84,6 @@ pub fn set_wsl_config(app: AppHandle, config: WslConfig) -> Result<(), String> {
Ok(())
}
pub async fn get_saved_server_url(app: &tauri::AppHandle) -> Option<String> {
if let Some(url) = get_default_server_url(app.clone()).ok().flatten() {
tracing::info!(%url, "Using desktop-specific custom URL");
return Some(url);
}
if let Some(cli_config) = cli::get_config(app).await
&& let Some(url) = get_server_url_from_config(&cli_config)
{
tracing::info!(%url, "Using custom server URL from config");
return Some(url);
}
None
}
pub fn spawn_local_server(
app: AppHandle,
hostname: String,
@@ -145,19 +128,27 @@ pub fn spawn_local_server(
pub struct HealthCheck(pub JoinHandle<Result<(), String>>);
pub async fn check_health(url: &str, password: Option<&str>) -> bool {
async fn check_health(url: &str, password: Option<&str>) -> bool {
let Ok(url) = reqwest::Url::parse(url) else {
return false;
};
let mut builder = reqwest::Client::builder().timeout(Duration::from_secs(7));
if url_is_localhost(&url) {
if url
.host_str()
.is_some_and(|host| {
host.eq_ignore_ascii_case("localhost")
|| host
.parse::<std::net::IpAddr>()
.is_ok_and(|ip| ip.is_loopback())
})
{
// Some environments set proxy variables (HTTP_PROXY/HTTPS_PROXY/ALL_PROXY) without
// excluding loopback. reqwest respects these by default, which can prevent the desktop
// app from reaching its own local sidecar server.
builder = builder.no_proxy();
};
}
let Ok(client) = builder.build() else {
return false;
@@ -177,77 +168,3 @@ pub async fn check_health(url: &str, password: Option<&str>) -> bool {
.map(|r| r.status().is_success())
.unwrap_or(false)
}
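// --- Illustrative sketch, not part of the commit ---
// The loopback test inlined into check_health above decides whether the
// health-check client should bypass HTTP_PROXY/HTTPS_PROXY/ALL_PROXY:
// reqwest honours those variables by default, so without .no_proxy() a
// system-wide proxy that does not exclude loopback can intercept requests to
// the app's own sidecar. A standalone version of the host test (the helper
// name is illustrative):
fn host_is_loopback(host: &str) -> bool {
    host.eq_ignore_ascii_case("localhost")
        || host
            .parse::<std::net::IpAddr>()
            .is_ok_and(|ip| ip.is_loopback())
}

fn main() {
    assert!(host_is_loopback("localhost"));
    assert!(host_is_loopback("127.0.0.1"));
    assert!(host_is_loopback("::1"));
    assert!(!host_is_loopback("example.com"));
    assert!(!host_is_loopback("0.0.0.0")); // wildcard bind address, not loopback
}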
pub fn is_localhost_url(url: &str) -> bool {
reqwest::Url::parse(url).is_ok_and(|u| url_is_localhost(&u))
}
fn url_is_localhost(url: &reqwest::Url) -> bool {
url.host_str().is_some_and(|host| {
host.eq_ignore_ascii_case("localhost")
|| host
.parse::<std::net::IpAddr>()
.is_ok_and(|ip| ip.is_loopback())
})
}
/// Converts a bind address hostname to a valid URL hostname for connection.
/// - `0.0.0.0` and `::` are wildcard bind addresses, not valid connect targets
/// - IPv6 addresses need brackets in URLs (e.g., `::1` -> `[::1]`)
fn normalize_hostname_for_url(hostname: &str) -> String {
// Wildcard bind addresses -> localhost equivalents
if hostname == "0.0.0.0" {
return "127.0.0.1".to_string();
}
if hostname == "::" {
return "[::1]".to_string();
}
// IPv6 addresses need brackets in URLs
if hostname.contains(':') && !hostname.starts_with('[') {
return format!("[{}]", hostname);
}
hostname.to_string()
}
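// --- Illustrative usage, not part of the commit ---
// Expected outputs of normalize_hostname_for_url for the cases its doc
// comment describes (test module name is illustrative):
#[cfg(test)]
mod normalize_hostname_tests {
    use super::normalize_hostname_for_url;

    #[test]
    fn maps_bind_addresses_to_connectable_hosts() {
        assert_eq!(normalize_hostname_for_url("0.0.0.0"), "127.0.0.1"); // wildcard IPv4 bind
        assert_eq!(normalize_hostname_for_url("::"), "[::1]"); // wildcard IPv6 bind
        assert_eq!(normalize_hostname_for_url("::1"), "[::1]"); // bare IPv6 gets brackets
        assert_eq!(normalize_hostname_for_url("example.com"), "example.com"); // unchanged
    }
}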
fn get_server_url_from_config(config: &cli::Config) -> Option<String> {
let server = config.server.as_ref()?;
let port = server.port?;
tracing::debug!(port, "server.port found in OC config");
let hostname = server
.hostname
.as_ref()
.map(|v| normalize_hostname_for_url(v))
.unwrap_or_else(|| "127.0.0.1".to_string());
Some(format!("http://{}:{}", hostname, port))
}
pub async fn check_health_or_ask_retry(app: &AppHandle, url: &str) -> bool {
tracing::debug!(%url, "Checking health");
loop {
if check_health(url, None).await {
return true;
}
const RETRY: &str = "Retry";
let res = app.dialog()
.message(format!("Could not connect to configured server:\n{}\n\nWould you like to retry or start a local server instead?", url))
.title("Connection Failed")
.buttons(MessageDialogButtons::OkCancelCustom(RETRY.to_string(), "Start Local".to_string()))
.blocking_show_with_result();
match res {
MessageDialogResult::Custom(name) if name == RETRY => {
continue;
}
_ => {
break;
}
}
}
false
}