assistant2: Fix thread history only working in one Zed window (#25119)
This PR fixes an issue where the thread history would only work in one
Zed window at a time.

The backing LMDB database can only be opened once per Zed instance.
However, there is a separate `ThreadStore` instance per Zed window.

To fix this, we create the `heed` environment once, store it as a global, and
then reference the same environment from all of the `ThreadStore`s.

Release Notes:

- N/A
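For context, the mechanism the fix relies on is GPUI's global storage: a value registered with `cx.set_global` exists exactly once per `App`, so every window can read the same handle. The sketch below illustrates that pattern only in shape; `ThreadsDatabase` is reduced to an empty stand-in type and the real `heed`/LMDB open is replaced by a dummy async block, so the exact signatures are the ones in `thread_store.rs` in the diff below, not these.

```rust
use std::sync::Arc;

use futures::future::{BoxFuture, Shared};
use futures::FutureExt as _;
use gpui::{App, Global, ReadGlobal};

// Stand-in for the real `ThreadsDatabase` defined in thread_store.rs.
struct ThreadsDatabase;

type DatabaseFuture =
    Shared<BoxFuture<'static, Result<Arc<ThreadsDatabase>, Arc<anyhow::Error>>>>;

// Newtype wrapper registered as a GPUI global: one value per `App`,
// shared by every window.
struct GlobalThreadsDatabase(DatabaseFuture);

impl Global for GlobalThreadsDatabase {}

fn init(cx: &mut App) {
    // In the real code this spawns `ThreadsDatabase::new` on the background
    // executor; a dummy future keeps the sketch self-contained.
    let database_future: DatabaseFuture =
        async move { Ok(Arc::new(ThreadsDatabase)) }.boxed().shared();

    // Stored once at startup...
    cx.set_global(GlobalThreadsDatabase(database_future));
}

// ...and cloned by any window's `ThreadStore` instead of each window
// opening its own LMDB environment.
fn global_future(cx: &mut App) -> DatabaseFuture {
    GlobalThreadsDatabase::global(cx).0.clone()
}
```

Keeping a `Shared` future in the global, rather than the opened database itself, preserves the asynchronous one-time open: the first caller awaits it, and later callers clone the already-resolved result.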
maxdeviant authored Feb 18, 2025
1 parent d0816ef commit 98ea659
Showing 3 changed files with 44 additions and 20 deletions.
1 change: 1 addition & 0 deletions crates/assistant2/src/assistant.rs
@@ -66,6 +66,7 @@ pub fn init(
cx: &mut App,
) {
AssistantSettings::register(cx);
thread_store::init(cx);
assistant_panel::init(cx);

inline_assistant::init(
3 changes: 3 additions & 0 deletions crates/assistant2/src/assistant_panel.rs
@@ -323,6 +323,9 @@ impl AssistantPanel {
}

fn open_history(&mut self, window: &mut Window, cx: &mut Context<Self>) {
self.thread_store
.update(cx, |thread_store, cx| thread_store.reload(cx))
.detach_and_log_err(cx);
self.active_view = ActiveView::History;
self.history.focus_handle(cx).focus(window);
cx.notify();
60 changes: 40 additions & 20 deletions crates/assistant2/src/thread_store.rs
@@ -9,7 +9,9 @@ use context_server::manager::ContextServerManager;
use context_server::{ContextServerFactoryRegistry, ContextServerTool};
use futures::future::{self, BoxFuture, Shared};
use futures::FutureExt as _;
use gpui::{prelude::*, App, BackgroundExecutor, Context, Entity, SharedString, Task};
use gpui::{
prelude::*, App, BackgroundExecutor, Context, Entity, Global, ReadGlobal, SharedString, Task,
};
use heed::types::SerdeBincode;
use heed::Database;
use language_model::Role;
@@ -19,14 +21,17 @@ use util::ResultExt as _;

use crate::thread::{MessageId, Thread, ThreadId};

pub fn init(cx: &mut App) {
ThreadsDatabase::init(cx);
}

pub struct ThreadStore {
#[allow(unused)]
project: Entity<Project>,
tools: Arc<ToolWorkingSet>,
context_server_manager: Entity<ContextServerManager>,
context_server_tool_ids: HashMap<Arc<str>, Vec<ToolId>>,
threads: Vec<SavedThreadMetadata>,
database_future: Shared<BoxFuture<'static, Result<Arc<ThreadsDatabase>, Arc<anyhow::Error>>>>,
}

impl ThreadStore {
@@ -41,24 +46,12 @@ impl ThreadStore {
ContextServerManager::new(context_server_factory_registry, project.clone(), cx)
});

let executor = cx.background_executor().clone();
let database_future = executor
.spawn({
let executor = executor.clone();
let database_path = paths::support_dir().join("threads/threads-db.0.mdb");
async move { ThreadsDatabase::new(database_path, executor) }
})
.then(|result| future::ready(result.map(Arc::new).map_err(Arc::new)))
.boxed()
.shared();

let this = Self {
project,
tools,
context_server_manager,
context_server_tool_ids: HashMap::default(),
threads: Vec::new(),
database_future,
};
this.register_context_server_handlers(cx);
this.reload(cx).detach_and_log_err(cx);
@@ -94,7 +87,7 @@ impl ThreadStore {
cx: &mut Context<Self>,
) -> Task<Result<Entity<Thread>>> {
let id = id.clone();
let database_future = self.database_future.clone();
let database_future = ThreadsDatabase::global_future(cx);
cx.spawn(|this, mut cx| async move {
let database = database_future.await.map_err(|err| anyhow!(err))?;
let thread = database
@@ -127,7 +120,7 @@ impl ThreadStore {
(id, thread)
});

let database_future = self.database_future.clone();
let database_future = ThreadsDatabase::global_future(cx);
cx.spawn(|this, mut cx| async move {
let database = database_future.await.map_err(|err| anyhow!(err))?;
database.save_thread(metadata, thread).await?;
Expand All @@ -138,7 +131,7 @@ impl ThreadStore {

pub fn delete_thread(&mut self, id: &ThreadId, cx: &mut Context<Self>) -> Task<Result<()>> {
let id = id.clone();
let database_future = self.database_future.clone();
let database_future = ThreadsDatabase::global_future(cx);
cx.spawn(|this, mut cx| async move {
let database = database_future.await.map_err(|err| anyhow!(err))?;
database.delete_thread(id.clone()).await?;
@@ -149,8 +142,8 @@
})
}

fn reload(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
let database_future = self.database_future.clone();
pub fn reload(&self, cx: &mut Context<Self>) -> Task<Result<()>> {
let database_future = ThreadsDatabase::global_future(cx);
cx.spawn(|this, mut cx| async move {
let threads = database_future
.await
@@ -253,13 +246,40 @@ pub struct SavedMessage {
pub text: String,
}

struct ThreadsDatabase {
struct GlobalThreadsDatabase(
Shared<BoxFuture<'static, Result<Arc<ThreadsDatabase>, Arc<anyhow::Error>>>>,
);

impl Global for GlobalThreadsDatabase {}

pub(crate) struct ThreadsDatabase {
executor: BackgroundExecutor,
env: heed::Env,
threads: Database<SerdeBincode<ThreadId>, SerdeBincode<SavedThread>>,
}

impl ThreadsDatabase {
fn global_future(
cx: &mut App,
) -> Shared<BoxFuture<'static, Result<Arc<ThreadsDatabase>, Arc<anyhow::Error>>>> {
GlobalThreadsDatabase::global(cx).0.clone()
}

fn init(cx: &mut App) {
let executor = cx.background_executor().clone();
let database_future = executor
.spawn({
let executor = executor.clone();
let database_path = paths::support_dir().join("threads/threads-db.0.mdb");
async move { ThreadsDatabase::new(database_path, executor) }
})
.then(|result| future::ready(result.map(Arc::new).map_err(Arc::new)))
.boxed()
.shared();

cx.set_global(GlobalThreadsDatabase(database_future));
}

pub fn new(path: PathBuf, executor: BackgroundExecutor) -> Result<Self> {
std::fs::create_dir_all(&path)?;

