Skip to content

Commit

Permalink
Clean up module tree
Browse files — browse the repository at this point in the history
  • Loading branch information
dmartin committed Jan 6, 2025
1 parent 3bb4e57 commit f125a31
Show file tree
Hide file tree
Showing 5 changed files with 63 additions and 96 deletions.
41 changes: 41 additions & 0 deletions src/backend.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
mod s3;
mod selfhosted;

pub(crate) use s3::S3Backend;
pub(crate) use selfhosted::SelfhostedBackend;

use std::{collections::HashMap, future::Future, path::Path};
use tokio::sync::mpsc::Receiver;

use crate::watcher::BuildEvent;

/// An artifact-serving backend (e.g. `S3Backend`, `SelfhostedBackend`).
///
/// A backend is constructed from string key/value options via [`Backend::new`]
/// and then driven by [`Backend::run`], which consumes build events from a
/// channel. The `Sized` bound lets `new` return `Self` by value.
pub trait Backend: Sized {
/// Create an instance of the backend if all required options are provided.
///
/// # Errors
///
/// Returns an error if an option required by the backend (for example
/// "bucket" for the S3 backend) is not present in `options`.
fn new(options: HashMap<String, String>) -> Result<Self, anyhow::Error>;

/// Run the backend.
///
/// The backend is not provided with the artifact directory (i.e. CMGR_ARTIFACT_DIR) itself, but
/// rather with a cache directory containing extracted artifact tarballs as subdirectories named
/// with the associated build ID. This cache directory is automatically kept up to date by a
/// background thread when the server is run as a binary.
///
/// When a backend runs, it should first perform any synchronization necessary in order to
/// reflect the current contents of the cache directory. For example, if the backend syncs files
/// to remote storage, any directories without matching .__checksum files should be re-uploaded,
/// and any remote directories which no longer exist in the cache should be removed.
///
/// After completing this initial synchronization, the backend should listen on the provided
/// channel for build events and take action accordingly. These events are produced when a build
/// with artifacts is (re-)created (BuildEvent::Update) or deleted (BuildEvent::Delete), and
/// contain the ID of the build. For example, a build's artifacts might be re-uploaded when an
/// Update event occurs, or deleted from remote storage when a Delete event occurs.
///
/// As there is the potential for race conditions when handling build events, backends must
/// process any events with the same build ID serially in the order of their arrival.
///
/// NOTE(review): returns `impl Future + Send` rather than being an `async fn`
/// so the returned future is guaranteed spawnable on a multi-threaded runtime.
fn run(
&self,
cache_dir: &Path,
rx: Receiver<BuildEvent>,
) -> impl Future<Output = Result<(), anyhow::Error>> + Send;
}
24 changes: 7 additions & 17 deletions src/s3.rs → src/backend/s3.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,5 @@
use crate::{
get_cache_dir_checksum, to_filename_str, Backend, BackendCreationError, BuildEvent,
CHECKSUM_FILENAME,
};
use crate::backend::Backend;
use crate::watcher::{get_cache_dir_checksum, to_filename_str, BuildEvent, CHECKSUM_FILENAME};
use aws_config::BehaviorVersion;
use aws_sdk_cloudfront::types::{InvalidationBatch, Paths};
use aws_sdk_s3::primitives::ByteStream;
Expand All @@ -14,25 +12,17 @@ use tokio::sync::mpsc::Receiver;
use walkdir::WalkDir;

#[derive(Debug)]
pub struct S3 {
pub struct S3Backend {
bucket: String,
path_prefix: String,
cloudfront_distribution: Option<String>,
}

impl Backend for S3 {
fn get_options() -> &'static [&'static str] {
&["bucket", "path-prefix", "cloudfront-distribution"]
}

fn get_required_options() -> &'static [&'static str] {
&["bucket"]
}

fn new(options: HashMap<String, String>) -> Result<Self, BackendCreationError> {
impl Backend for S3Backend {
fn new(options: HashMap<String, String>) -> Result<Self, anyhow::Error> {
let bucket = match options.get("bucket") {
Some(bucket_name) => bucket_name.to_string(),
None => return Err(BackendCreationError),
None => anyhow::bail!("required backend option \"bucket\" not provided"),
};
// If non-empty, path prefixes must include a trailing slash, but not a leading slash.
// A root path prefix ("/") must be replaced with an empty string to avoid duplicate leading
Expand Down Expand Up @@ -112,7 +102,7 @@ impl Backend for S3 {
}
}

impl S3 {
impl S3Backend {
/// Test that the current IAM user has all necessary permissions.
async fn test_permissions(
&self,
Expand Down
20 changes: 6 additions & 14 deletions src/selfhosted.rs → src/backend/selfhosted.rs
Original file line number Diff line number Diff line change
@@ -1,20 +1,20 @@
use crate::{Backend, BackendCreationError, BuildEvent};
use crate::backend::Backend;
use crate::watcher::BuildEvent;
use hyper::service::service_fn;
use hyper::{Request, Response};
use hyper_staticfile::{Body, Static};
use hyper_util::rt::TokioIo;
use log::{debug, info};
use std::collections::HashMap;
use std::convert::TryFrom;
use std::error::Error;
use std::fmt::Debug;
use std::net::SocketAddr;
use std::path::Path;
use tokio::net::TcpListener;
use tokio::sync::mpsc::Receiver;

#[derive(Debug)]
pub struct Selfhosted {
pub struct SelfhostedBackend {
address: String,
}

Expand Down Expand Up @@ -55,17 +55,9 @@ async fn handle_request<B>(
Ok(res)
}

impl Backend for Selfhosted {
fn get_options() -> &'static [&'static str] {
&["address"]
}

fn get_required_options() -> &'static [&'static str] {
&[]
}

fn new(options: HashMap<String, String>) -> Result<Self, BackendCreationError> {
let backend = Selfhosted {
impl Backend for SelfhostedBackend {
fn new(options: HashMap<String, String>) -> Result<Self, anyhow::Error> {
let backend = SelfhostedBackend {
address: options
.get("address")
.unwrap_or(&String::from("0.0.0.0:4201"))
Expand Down
10 changes: 7 additions & 3 deletions src/main.rs
Original file line number Diff line number Diff line change
@@ -1,10 +1,14 @@
mod backend;
mod watcher;

use backend::{Backend, S3Backend, SelfhostedBackend};
use clap::{Arg, ArgAction, Command};
use cmgr_artifact_server::{sync_cache, watch_dir, Backend, OptionParsingError, Selfhosted, S3};
use log::{debug, info};
use std::collections::HashMap;
use std::env;
use std::fs;
use std::path::PathBuf;
use watcher::{sync_cache, watch_dir, OptionParsingError};

#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
Expand Down Expand Up @@ -83,8 +87,8 @@ async fn main() -> Result<(), anyhow::Error> {
.to_lowercase()
.as_str()
{
"selfhosted" => Selfhosted::new(options)?.run(&cache_dir, rx).await,
"s3" => S3::new(options)?.run(&cache_dir, rx).await,
"selfhosted" => SelfhostedBackend::new(options)?.run(&cache_dir, rx).await,
"s3" => S3Backend::new(options)?.run(&cache_dir, rx).await,
_ => panic!("Unreachable - invalid backend"), // TODO: use enum instead
}?;
Ok(())
Expand Down
64 changes: 2 additions & 62 deletions src/lib.rs → src/watcher.rs
Original file line number Diff line number Diff line change
@@ -1,17 +1,11 @@
mod s3;
mod selfhosted;

use blake2::{Blake2b512, Digest};
use flate2::read::GzDecoder;
use log::{debug, info, trace};
use notify::{DebouncedEvent, RecommendedWatcher, Watcher};
pub use s3::S3;
pub use selfhosted::Selfhosted;
use std::collections::HashMap;
use std::error::Error;
use std::fmt::{Debug, Display};
use std::fs;
use std::future::Future;
use std::io::{Read, Seek};
use std::path::Path;
use std::path::PathBuf;
Expand All @@ -35,20 +29,6 @@ impl Display for OptionParsingError {
}
}

#[derive(Debug)]
pub struct BackendCreationError;

impl Error for BackendCreationError {}

impl Display for BackendCreationError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"Unable to initialize backend. Some required options were not provided."
)
}
}

/// Represents detected changes to artifact tarballs.
/// The included string is the build ID.
#[derive(Debug)]
Expand All @@ -58,46 +38,6 @@ pub enum BuildEvent {
Delete(String),
}

pub trait Backend: Sized {
// TODO: currently the get_options() methods are not actually called anywhere. It would be nice
// if they were used in the CLI help output or BackendCreationErrors.

/// Return a list of option keys supported by this backend.
fn get_options() -> &'static [&'static str];

/// Return a list of option keys required by this backend.
fn get_required_options() -> &'static [&'static str];

/// Create an instance of the backend if all required options are provided.
fn new(options: HashMap<String, String>) -> Result<Self, BackendCreationError>;

/// Run the backend.
///
/// The backend is not provided with the artifact directory (i.e. CMGR_ARTIFACT_DIR) itself, but
/// rather with a cache directory containing extracted artifact tarballs as subdirectories named
/// with the associated build ID. This cache directory is automatically kept up to date by a
/// background thread when the server is run as a binary.
///
/// When a backend runs, it should first perform any synchronization necessary in order to
/// reflect the current contents of the cache directory. For example, if the backend syncs files
/// to remote storage, any directories without matching .__checksum files should be re-uploaded,
/// and any remote directories which no longer exist in the cache should be removed.
///
/// After completing this initial synchronization, the backend should listen on the provided
/// channel for build events and take action accordingly. These events are produced when a build
/// with artifacts is (re-)created (BuildEvent::Update) or deleted (BuildEvent::Delete), and
/// contain the ID of the build. For example, a build's artifacts might be re-uploaded when an
/// Update event occurs, or deleted from remote storage when a Delete event occurs.
///
/// As there is the potential for race conditions when handling build events, backends must
/// process any events with the same build ID serially in the order of their arrival.
fn run(
&self,
cache_dir: &Path,
rx: Receiver<BuildEvent>,
) -> impl Future<Output = Result<(), anyhow::Error>> + Send;
}

/// Returns the checksum of an artifact tarball.
fn get_tarball_checksum(tarball: &Path) -> Result<Vec<u8>, std::io::Error> {
let mut hasher = Blake2b512::new();
Expand All @@ -119,7 +59,7 @@ fn get_tarball_checksum(tarball: &Path) -> Result<Vec<u8>, std::io::Error> {
pub const CHECKSUM_FILENAME: &str = ".__checksum";

/// Returns the tarball checksum stored inside a cache directory.
fn get_cache_dir_checksum(cache_dir: &Path) -> Result<Vec<u8>, std::io::Error> {
pub(crate) fn get_cache_dir_checksum(cache_dir: &Path) -> Result<Vec<u8>, std::io::Error> {
let mut checksum_path = PathBuf::from(cache_dir);
checksum_path.push(CHECKSUM_FILENAME);
fs::read(checksum_path)
Expand Down Expand Up @@ -155,7 +95,7 @@ fn extract_to(cache_dir: &Path, tarball: &Path) -> Result<(), std::io::Error> {

/// Converts a PathBuf to a filename string slice.
/// Panics if the conversion fails.
fn to_filename_str(path: &Path) -> &str {
pub(crate) fn to_filename_str(path: &Path) -> &str {
path.file_name()
.unwrap_or_else(|| panic!("Failed to get filename for path {:?}", &path))
.to_str()
Expand Down

0 comments on commit f125a31

Please sign in to comment.