author    Florian Klink <flokli@flokli.de>    2024-05-04T19·23+0300
committer flokli <flokli@flokli.de>           2024-05-04T21·27+0000
commit    ba00f0c6955fcd505cfa7ef06dc35b53ac14868a (patch)
tree      c695c21ee57bc51926fc24daf1ddba5dc8ca0796 /tvix
parent    f2f12d15568b068b7b38473c03b74275a7f43cee (diff)
refactor(tvix/*store): use DS: DirectoryService r/8073
We implement DirectoryService for Arc<DirectoryService> and
Box<DirectoryService>; this is sufficient.

Change-Id: I0a5a81cbc4782764406b5bca57f908ace6090737
Reviewed-on: https://cl.tvl.fyi/c/depot/+/11586
Tested-by: BuildkiteCI
Reviewed-by: Connor Brewster <cbrewster@hey.com>
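
As background for the bound changes below: generic code can require a plain
`DS: DirectoryService` instead of `DS: AsRef<dyn DirectoryService>` once the
trait has blanket impls for Arc and Box. A minimal sketch of that pattern,
using a deliberately cut-down, hypothetical trait surface (the real
tvix_castore DirectoryService is async and has more methods):

    use std::sync::Arc;

    // Hypothetical, reduced stand-in for tvix_castore's DirectoryService
    // trait; only one method is shown, and the real trait is async.
    trait DirectoryService {
        fn put_multiple_start(&self) -> DirectoryPutter;
    }

    // Hypothetical placeholder for the upload handle the service returns.
    struct DirectoryPutter;

    // Blanket impls: an Arc or Box around any DirectoryService (including
    // the trait object dyn DirectoryService) is itself a DirectoryService,
    // so callers no longer need the AsRef indirection.
    impl<T: DirectoryService + ?Sized> DirectoryService for Arc<T> {
        fn put_multiple_start(&self) -> DirectoryPutter {
            (**self).put_multiple_start()
        }
    }

    impl<T: DirectoryService + ?Sized> DirectoryService for Box<T> {
        fn put_multiple_start(&self) -> DirectoryPutter {
            (**self).put_multiple_start()
        }
    }

    // Usage: the plain trait bound accepts smart pointers directly.
    fn take_service<DS: DirectoryService>(ds: DS) -> DirectoryPutter {
        ds.put_multiple_start()
    }

    fn main() {
        struct Mem;
        impl DirectoryService for Mem {
            fn put_multiple_start(&self) -> DirectoryPutter {
                DirectoryPutter
            }
        }
        let svc: Arc<dyn DirectoryService> = Arc::new(Mem);
        let _ = take_service(svc); // Arc<dyn _> satisfies DS: DirectoryService
    }
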
Diffstat (limited to 'tvix')
 -rw-r--r--  tvix/castore/src/import/archive.rs |  2
 -rw-r--r--  tvix/castore/src/import/fs.rs      |  2
 -rw-r--r--  tvix/castore/src/import/mod.rs     |  4
 -rw-r--r--  tvix/glue/src/fetchers/mod.rs      |  6
 -rw-r--r--  tvix/store/src/import.rs           |  2
 -rw-r--r--  tvix/store/src/nar/import.rs       | 36
 6 files changed, 23 insertions(+), 29 deletions(-)
diff --git a/tvix/castore/src/import/archive.rs b/tvix/castore/src/import/archive.rs
index fb8ef9a50b..0d21481d40 100644
--- a/tvix/castore/src/import/archive.rs
+++ b/tvix/castore/src/import/archive.rs
@@ -84,7 +84,7 @@ pub async fn ingest_archive<BS, DS, R>(
 ) -> Result<Node, IngestionError<Error>>
 where
     BS: BlobService + Clone + 'static,
-    DS: AsRef<dyn DirectoryService>,
+    DS: DirectoryService,
     R: AsyncRead + Unpin,
 {
     // Since tarballs can have entries in any arbitrary order, we need to
diff --git a/tvix/castore/src/import/fs.rs b/tvix/castore/src/import/fs.rs
index b8cfac86f8..9d3ecfe6ab 100644
--- a/tvix/castore/src/import/fs.rs
+++ b/tvix/castore/src/import/fs.rs
@@ -35,7 +35,7 @@ pub async fn ingest_path<BS, DS, P>(
 where
     P: AsRef<std::path::Path> + std::fmt::Debug,
     BS: BlobService + Clone,
-    DS: AsRef<dyn DirectoryService>,
+    DS: DirectoryService,
 {
     let iter = WalkDir::new(path.as_ref())
         .follow_links(false)
diff --git a/tvix/castore/src/import/mod.rs b/tvix/castore/src/import/mod.rs
index 53ebc2b339..7b42644a27 100644
--- a/tvix/castore/src/import/mod.rs
+++ b/tvix/castore/src/import/mod.rs
@@ -49,7 +49,7 @@ pub async fn ingest_entries<DS, S, E>(
     mut entries: S,
 ) -> Result<Node, IngestionError<E>>
 where
-    DS: AsRef<dyn DirectoryService>,
+    DS: DirectoryService,
     S: Stream<Item = Result<IngestionEntry, E>> + Send + std::marker::Unpin,
     E: std::error::Error,
 {
@@ -90,7 +90,7 @@ where
                 // If we don't have one yet (as that's the first one to upload),
                 // initialize the putter.
                 maybe_directory_putter
-                    .get_or_insert_with(|| directory_service.as_ref().put_multiple_start())
+                    .get_or_insert_with(|| directory_service.put_multiple_start())
                     .put(directory)
                     .await
                     .map_err(|e| {
diff --git a/tvix/glue/src/fetchers/mod.rs b/tvix/glue/src/fetchers/mod.rs
index 9a884e51c4..342dfd84e8 100644
--- a/tvix/glue/src/fetchers/mod.rs
+++ b/tvix/glue/src/fetchers/mod.rs
@@ -172,8 +172,8 @@ async fn hash<D: Digest + std::io::Write>(
 
 impl<BS, DS, PS> Fetcher<BS, DS, PS>
 where
-    BS: AsRef<(dyn BlobService + 'static)> + Clone + Send + Sync + 'static,
-    DS: AsRef<(dyn DirectoryService + 'static)>,
+    BS: BlobService + Clone + 'static,
+    DS: DirectoryService + Clone,
     PS: PathInfoService,
 {
     /// Ingest the data from a specified [Fetch].
@@ -247,7 +247,7 @@ where
                 // Ingest the archive, get the root node
                 let node = tvix_castore::import::archive::ingest_archive(
                     self.blob_service.clone(),
-                    &self.directory_service,
+                    self.directory_service.clone(),
                     archive,
                 )
                 .await?;
diff --git a/tvix/store/src/import.rs b/tvix/store/src/import.rs
index 88cebc8be6..2331fd77ea 100644
--- a/tvix/store/src/import.rs
+++ b/tvix/store/src/import.rs
@@ -114,7 +114,7 @@ pub async fn import_path_as_nar_ca<BS, DS, PS, P>(
 where
     P: AsRef<Path> + std::fmt::Debug,
     BS: BlobService + Clone,
-    DS: AsRef<dyn DirectoryService>,
+    DS: DirectoryService,
     PS: AsRef<dyn PathInfoService>,
 {
     let root_node = ingest_path(blob_service, directory_service, path.as_ref())
diff --git a/tvix/store/src/nar/import.rs b/tvix/store/src/nar/import.rs
index 6f4dcdea5d..70f8137e89 100644
--- a/tvix/store/src/nar/import.rs
+++ b/tvix/store/src/nar/import.rs
@@ -24,19 +24,19 @@ pub fn read_nar<R, BS, DS>(
 ) -> io::Result<castorepb::node::Node>
 where
     R: BufRead + Send,
-    BS: AsRef<dyn BlobService>,
-    DS: AsRef<dyn DirectoryService>,
+    BS: BlobService + Clone,
+    DS: DirectoryService,
 {
     let handle = tokio::runtime::Handle::current();
 
-    let directory_putter = directory_service.as_ref().put_multiple_start();
+    let directory_putter = directory_service.put_multiple_start();
 
     let node = nix_compat::nar::reader::open(r)?;
-    let (root_node, mut directory_putter, _) = process_node(
+    let (root_node, mut directory_putter) = process_node(
         handle.clone(),
         "".into(), // this is the root node, it has an empty name
         node,
-        &blob_service,
+        blob_service,
         directory_putter,
     )?;
 
@@ -80,9 +80,9 @@ fn process_node<BS>(
     node: nar::reader::Node,
     blob_service: BS,
     directory_putter: Box<dyn DirectoryPutter>,
-) -> io::Result<(castorepb::node::Node, Box<dyn DirectoryPutter>, BS)>
+) -> io::Result<(castorepb::node::Node, Box<dyn DirectoryPutter>)>
 where
-    BS: AsRef<dyn BlobService>,
+    BS: BlobService + Clone,
 {
     Ok(match node {
         nar::reader::Node::Symlink { target } => (
@@ -91,7 +91,6 @@ where
                 target: target.into(),
             }),
             directory_putter,
-            blob_service,
         ),
         nar::reader::Node::File { executable, reader } => (
             castorepb::node::Node::File(process_file_reader(
@@ -99,19 +98,17 @@ where
                 name,
                 reader,
                 executable,
-                &blob_service,
+                blob_service,
             )?),
             directory_putter,
-            blob_service,
         ),
         nar::reader::Node::Directory(dir_reader) => {
-            let (directory_node, directory_putter, blob_service_back) =
+            let (directory_node, directory_putter) =
                 process_dir_reader(handle, name, dir_reader, blob_service, directory_putter)?;
 
             (
                 castorepb::node::Node::Directory(directory_node),
                 directory_putter,
-                blob_service_back,
             )
         }
     })
@@ -127,13 +124,13 @@ fn process_file_reader<BS>(
     blob_service: BS,
 ) -> io::Result<castorepb::FileNode>
 where
-    BS: AsRef<dyn BlobService>,
+    BS: BlobService,
 {
     // store the length. If we read any other length, reading will fail.
     let expected_len = file_reader.len();
 
     // prepare writing a new blob.
-    let blob_writer = handle.block_on(async { blob_service.as_ref().open_write().await });
+    let blob_writer = handle.block_on(async { blob_service.open_write().await });
 
     // write the blob.
     let mut blob_writer = {
@@ -168,24 +165,22 @@ fn process_dir_reader<BS>(
     mut dir_reader: nar::reader::DirReader,
     blob_service: BS,
     directory_putter: Box<dyn DirectoryPutter>,
-) -> io::Result<(castorepb::DirectoryNode, Box<dyn DirectoryPutter>, BS)>
+) -> io::Result<(castorepb::DirectoryNode, Box<dyn DirectoryPutter>)>
 where
-    BS: AsRef<dyn BlobService>,
+    BS: BlobService + Clone,
 {
     let mut directory = castorepb::Directory::default();
 
     let mut directory_putter = directory_putter;
-    let mut blob_service = blob_service;
     while let Some(entry) = dir_reader.next()? {
-        let (node, directory_putter_back, blob_service_back) = process_node(
+        let (node, directory_putter_back) = process_node(
             handle.clone(),
             entry.name.into(),
             entry.node,
-            blob_service,
+            blob_service.clone(),
             directory_putter,
         )?;
 
-        blob_service = blob_service_back;
         directory_putter = directory_putter_back;
 
         match node {
@@ -213,7 +208,6 @@ where
             size: directory_size,
         },
         directory_putter,
-        blob_service,
     ))
 }
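
A design note on the nar/import.rs hunks above: once BS carries a Clone
bound, each recursive call can take a cheap clone of the blob service handle
instead of threading ownership back out through the return tuple. A
self-contained sketch of that before/after shape, under the assumption that
service handles are cheap Arc-backed clones, with hypothetical names rather
than the real tvix types:

    // Hypothetical, Arc-backed handle: cloning is a cheap pointer copy.
    #[derive(Clone)]
    struct BlobServiceHandle;

    // Before: the service was moved in and returned so the caller could
    // keep using it for the next directory entry.
    fn process_entry_before<BS>(blob_service: BS) -> BS {
        blob_service
    }

    // After: with BS: Clone, the caller clones a handle per entry, so the
    // `blob_service = blob_service_back;` rebinding in the loop disappears.
    fn process_entry_after<BS: Clone>(_blob_service: BS) {}

    fn main() {
        let mut bs = BlobServiceHandle;
        for _entry in 0..3 {
            bs = process_entry_before(bs); // old style: thread ownership back
        }
        for _entry in 0..3 {
            process_entry_after(bs.clone()); // new style: clone per entry
        }
    }
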