diff --git a/.git_hooks/pre-commit b/.git_hooks/pre-commit new file mode 100755 index 000000000..105684ef7 --- /dev/null +++ b/.git_hooks/pre-commit @@ -0,0 +1,19 @@ +#!/usr/bin/env sh +if ! cargo fmt -- --check ; then + printf "\n" + printf "\033[0;31mpre-commit hook failed during:\033[0m\n" + printf "\033[0;31m\tcargo fmt -- --check\033[0m\n" + exit 1 +fi + +if ! cargo clippy --locked -- -D warnings ; then + printf "\n" + printf "\033[0;31mpre-commit hook failed during:\033[0m\n" + printf "\033[0;31m\tclippy --locked -- -D warning\033[0m\n" + exit 1 +fi + +printf "\n" +printf "\033[0;32mpre-commit hook succeeded\033[0m\n" +exit 0 + diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d0c2ce804..20f544924 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -61,7 +61,7 @@ jobs: run: cargo build --locked - name: Test - run: cargo test --locked -- --test-threads=1 + run: cargo test --locked - name: Clean up the database run: docker-compose down --volumes diff --git a/.gitignore b/.gitignore index d7823cbaf..5b7d9caab 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ target .vagrant .rustwide .rustwide-docker +.idea/ \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 0515be845..2239f0ec8 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -358,7 +358,7 @@ dependencies = [ "path-slash 0.1.3 (registry+https://github.com/rust-lang/crates.io-index)", "postgres 0.17.5 (registry+https://github.com/rust-lang/crates.io-index)", "procfs 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)", - "prometheus 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)", + "prometheus 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)", "r2d2 0.8.8 (registry+https://github.com/rust-lang/crates.io-index)", "r2d2_postgres 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)", "rand 0.7.3 
(registry+https://github.com/rust-lang/crates.io-index)", @@ -368,7 +368,7 @@ dependencies = [ "rusoto_core 0.45.0 (registry+https://github.com/rust-lang/crates.io-index)", "rusoto_credential 0.45.0 (registry+https://github.com/rust-lang/crates.io-index)", "rusoto_s3 0.45.0 (registry+https://github.com/rust-lang/crates.io-index)", - "rustwide 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)", + "rustwide 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)", "sass-rs 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)", "schemamama 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", "schemamama_postgres 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2079,14 +2079,14 @@ dependencies = [ [[package]] name = "prometheus" -version = "0.7.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ "cfg-if 0.1.10 (registry+https://github.com/rust-lang/crates.io-index)", "fnv 1.0.7 (registry+https://github.com/rust-lang/crates.io-index)", "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)", - "quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)", "spin 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)", + "thiserror 1.0.20 (registry+https://github.com/rust-lang/crates.io-index)", ] [[package]] @@ -2516,10 +2516,10 @@ dependencies = [ [[package]] name = "rustwide" -version = "0.7.1" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" dependencies = [ - "base64 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)", + "base64 0.12.1 
(registry+https://github.com/rust-lang/crates.io-index)", "failure 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)", "flate2 1.0.14 (registry+https://github.com/rust-lang/crates.io-index)", "fs2 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", @@ -2536,6 +2536,7 @@ dependencies = [ "serde_json 1.0.53 (registry+https://github.com/rust-lang/crates.io-index)", "tar 0.4.26 (registry+https://github.com/rust-lang/crates.io-index)", "tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)", + "thiserror 1.0.20 (registry+https://github.com/rust-lang/crates.io-index)", "tokio 0.2.22 (registry+https://github.com/rust-lang/crates.io-index)", "toml 0.5.6 (registry+https://github.com/rust-lang/crates.io-index)", "walkdir 2.3.1 (registry+https://github.com/rust-lang/crates.io-index)", @@ -3929,7 +3930,7 @@ dependencies = [ "checksum proc-macro2 1.0.13 (registry+https://github.com/rust-lang/crates.io-index)" = "53f5ffe53a6b28e37c9c1ce74893477864d64f74778a93a4beb43c8fa167f639" "checksum procedural-masquerade 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "8f1383dff4092fe903ac180e391a8d4121cc48f08ccf850614b0290c6673b69d" "checksum procfs 0.7.9 (registry+https://github.com/rust-lang/crates.io-index)" = "c434e93ef69c216e68e4f417c927b4f31502c3560b72cfdb6827e2321c5c6b3e" -"checksum prometheus 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "5567486d5778e2c6455b1b90ff1c558f29e751fc018130fa182e15828e728af1" +"checksum prometheus 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dd0ced56dee39a6e960c15c74dc48849d614586db2eaada6497477af7c7811cd" "checksum quick-error 1.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = 
"a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" "checksum quote 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "54a21852a652ad6f610c9510194f398ff6f8692e334fd1145fed931f7fbe44ea" "checksum r2d2 0.8.8 (registry+https://github.com/rust-lang/crates.io-index)" = "1497e40855348e4a8a40767d8e55174bce1e445a3ac9254ad44ad468ee0485af" @@ -3971,7 +3972,7 @@ dependencies = [ "checksum rustc-serialize 0.3.24 (registry+https://github.com/rust-lang/crates.io-index)" = "dcf128d1287d2ea9d80910b5f1120d0b8eede3fbf1abe91c40d39ea7d51e6fda" "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" "checksum rusttype 0.9.2 (registry+https://github.com/rust-lang/crates.io-index)" = "dc7c727aded0be18c5b80c1640eae0ac8e396abf6fa8477d96cb37d18ee5ec59" -"checksum rustwide 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)" = "6e97a51dfbb3333ed70d6373d7980165d20dbd84a8173ab184c2c4d8f27bd122" +"checksum rustwide 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "417d578ebc7fa963bcd06f365f7987c091abeba70eac22dba94b7fd922a95c09" "checksum ryu 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "ed3d612bc64430efeb3f7ee6ef26d590dce0c43249217bddc62112540c7941e1" "checksum safemem 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" "checksum same-file 1.0.6 (registry+https://github.com/rust-lang/crates.io-index)" = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" diff --git a/Cargo.toml b/Cargo.toml index d62891cfb..9b224d88c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,8 +30,8 @@ toml = "0.5" schemamama = "0.3" schemamama_postgres = "0.3" systemstat = "0.1.4" -prometheus = { 
version = "0.7.0", default-features = false } -rustwide = "0.7.1" +prometheus = { version = "0.9.0", default-features = false } +rustwide = "0.10.0" mime_guess = "2" dotenv = "0.15" zstd = "0.5" diff --git a/README.md b/README.md index bca9dc2d6..c84156f16 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,12 @@ The recommended way to develop docs.rs is a combination of `cargo run` for the main binary and [docker-compose](https://docs.docker.com/compose/) for the external services. This gives you reasonable incremental build times without having to add new users and packages to your host machine. +### Git Hooks +For ease of use, `git_hooks` directory contains useful `git hooks` to make your development easier. +```bash +cd .git/hooks && ln -s ../../.git_hooks/* . && cd ../.. +``` + ### Dependencies Docs.rs requires at least the following native C dependencies. @@ -133,7 +139,7 @@ See `cargo run -- --help` for a full list of commands. ```sh # This command will start web interface of docs.rs on http://localhost:3000 -cargo run start-web-server +cargo run -- start-web-server ``` #### `build` subcommand @@ -142,16 +148,16 @@ cargo run start-web-server # Builds and adds it into database # This is the main command to build and add a documentation into docs.rs. # For example, `docker-compose run web build crate regex 1.1.6` -cargo run web build crate +cargo run -- build crate # Builds every crate on crates.io and adds them into database # (beware: this may take months to finish) -cargo run web build world +cargo run -- build world # Builds a local package you have at and adds it to the database. # The package does not have to be on crates.io. # The package must be on the local filesystem, git urls are not allowed. -cargo build crate --local /path/to/source +cargo run -- build crate --local /path/to/source ``` #### `database` subcommand @@ -179,22 +185,22 @@ The database contains a blacklist of crates that should not be built. 
```sh # List the crates on the blacklist -cargo run web database blacklist list +cargo run -- database blacklist list # Adds to the blacklist -cargo run web database blacklist add +cargo run -- database blacklist add # Removes from the blacklist -cargo run web database blacklist remove +cargo run -- database blacklist remove ``` #### `daemon` subcommand ```sh # Run a persistent daemon which queues builds and starts a web server. -cargo run daemon --registry-watcher disabled +cargo run -- daemon --registry-watcher=disabled # Add crates to the queue -cargo run queue add +cargo run -- queue add ``` ### Contact diff --git a/git_hooks/pre-commit b/git_hooks/pre-commit new file mode 100755 index 000000000..8ae45b3f3 --- /dev/null +++ b/git_hooks/pre-commit @@ -0,0 +1,19 @@ +#!/usr/bin/env sh +cargo fmt -- --check +if [ "$?" != "0" ] ; then + echo "\n" + echo "\033[0;31mpre-commit hook failed during:\033[0m" + echo "\033[0;31m\tcargo fmt -- --check\033[0m" + exit 1 +fi +cargo clippy --locked -- -D warnings +if [ "$?" 
!= "0" ] ; then + echo "\n" + echo "\033[0;31mpre-commit hook failed during:\033[0m" + echo "\033[0;31m\tclippy --locked -- -D warning\033[0m" + exit 1 +fi + +echo "\n" +echo "\033[0;32mpre-commit hook succeeded\033[0m" +exit 0 \ No newline at end of file diff --git a/src/bin/cratesfyi.rs b/src/bin/cratesfyi.rs index bc6201bc7..a22d122c0 100644 --- a/src/bin/cratesfyi.rs +++ b/src/bin/cratesfyi.rs @@ -3,11 +3,12 @@ use std::fmt::Write; use std::path::PathBuf; use std::sync::Arc; -use cratesfyi::db::{self, add_path_into_database, Pool}; +use cratesfyi::db::{self, add_path_into_database, Pool, PoolClient}; use cratesfyi::index::Index; use cratesfyi::utils::{remove_crate_priority, set_crate_priority}; use cratesfyi::{ - BuildQueue, Config, DocBuilder, DocBuilderOptions, RustwideBuilder, Server, Storage, + BuildQueue, Config, Context, DocBuilder, DocBuilderOptions, Metrics, RustwideBuilder, Server, + Storage, }; use failure::{err_msg, Error, ResultExt}; use once_cell::sync::OnceCell; @@ -108,7 +109,7 @@ enum CommandLine { impl CommandLine { pub fn handle_args(self) -> Result<(), Error> { - let ctx = Context::new(); + let ctx = BinContext::new(); match self { Self::Build(build) => build.handle_args(ctx)?, @@ -116,14 +117,7 @@ impl CommandLine { socket_addr, reload_templates, } => { - Server::start( - Some(&socket_addr), - reload_templates, - ctx.pool()?, - ctx.config()?, - ctx.build_queue()?, - ctx.storage()?, - )?; + Server::start(Some(&socket_addr), reload_templates, &ctx)?; } Self::Daemon { foreground, @@ -133,13 +127,7 @@ impl CommandLine { log::warn!("--foreground was passed, but there is no need for it anymore"); } - cratesfyi::utils::start_daemon( - ctx.config()?, - ctx.pool()?, - ctx.build_queue()?, - ctx.storage()?, - registry_watcher == Toggle::Enabled, - )?; + cratesfyi::utils::start_daemon(&ctx, registry_watcher == Toggle::Enabled)?; } Self::Database { subcommand } => subcommand.handle_args(ctx)?, Self::Queue { subcommand } => subcommand.handle_args(ctx)?, 
@@ -177,7 +165,7 @@ enum QueueSubcommand { } impl QueueSubcommand { - pub fn handle_args(self, ctx: Context) -> Result<(), Error> { + pub fn handle_args(self, ctx: BinContext) -> Result<(), Error> { match self { Self::Add { crate_name, @@ -213,7 +201,7 @@ enum PrioritySubcommand { } impl PrioritySubcommand { - pub fn handle_args(self, ctx: Context) -> Result<(), Error> { + pub fn handle_args(self, ctx: BinContext) -> Result<(), Error> { match self { Self::Set { pattern, priority } => { set_crate_priority(&mut *ctx.conn()?, &pattern, priority) @@ -259,7 +247,7 @@ struct Build { } impl Build { - pub fn handle_args(self, ctx: Context) -> Result<(), Error> { + pub fn handle_args(self, ctx: BinContext) -> Result<(), Error> { let docbuilder = { let config = ctx.config()?; let mut doc_options = @@ -324,12 +312,13 @@ enum BuildSubcommand { } impl BuildSubcommand { - pub fn handle_args(self, ctx: Context, mut docbuilder: DocBuilder) -> Result<(), Error> { + pub fn handle_args(self, ctx: BinContext, mut docbuilder: DocBuilder) -> Result<(), Error> { match self { Self::World => { docbuilder.load_cache().context("Failed to load cache")?; - let mut builder = RustwideBuilder::init(ctx.pool()?, ctx.storage()?)?; + let mut builder = + RustwideBuilder::init(ctx.pool()?, ctx.metrics()?, ctx.storage()?)?; builder .build_world(&mut docbuilder) .context("Failed to build world")?; @@ -343,8 +332,9 @@ impl BuildSubcommand { local, } => { docbuilder.load_cache().context("Failed to load cache")?; - let mut builder = RustwideBuilder::init(ctx.pool()?, ctx.storage()?) - .context("failed to initialize rustwide")?; + let mut builder = + RustwideBuilder::init(ctx.pool()?, ctx.metrics()?, ctx.storage()?) 
+ .context("failed to initialize rustwide")?; if let Some(path) = local { builder @@ -380,14 +370,16 @@ impl BuildSubcommand { } } - let mut builder = RustwideBuilder::init(ctx.pool()?, ctx.storage()?)?; + let mut builder = + RustwideBuilder::init(ctx.pool()?, ctx.metrics()?, ctx.storage()?)?; builder .update_toolchain() .context("failed to update toolchain")?; } Self::AddEssentialFiles => { - let mut builder = RustwideBuilder::init(ctx.pool()?, ctx.storage()?)?; + let mut builder = + RustwideBuilder::init(ctx.pool()?, ctx.metrics()?, ctx.storage()?)?; builder .add_essential_files() .context("failed to add essential files")?; @@ -446,7 +438,7 @@ enum DatabaseSubcommand { } impl DatabaseSubcommand { - pub fn handle_args(self, ctx: Context) -> Result<(), Error> { + pub fn handle_args(self, ctx: BinContext) -> Result<(), Error> { match self { Self::Migrate { version } => { db::migrate(version, &mut *ctx.conn()?) @@ -514,7 +506,7 @@ enum BlacklistSubcommand { } impl BlacklistSubcommand { - fn handle_args(self, ctx: Context) -> Result<(), Error> { + fn handle_args(self, ctx: BinContext) -> Result<(), Error> { let mut conn = &mut *ctx.conn()?; match self { Self::List => { @@ -554,28 +546,40 @@ enum DeleteSubcommand { }, } -struct Context { +struct BinContext { build_queue: OnceCell>, storage: OnceCell>, config: OnceCell>, pool: OnceCell, + metrics: OnceCell>, } -impl Context { +impl BinContext { fn new() -> Self { Self { build_queue: OnceCell::new(), storage: OnceCell::new(), config: OnceCell::new(), pool: OnceCell::new(), + metrics: OnceCell::new(), } } + fn conn(&self) -> Result { + Ok(self.pool()?.get()?) + } +} + +impl Context for BinContext { fn build_queue(&self) -> Result, Error> { Ok(self .build_queue .get_or_try_init::<_, Error>(|| { - Ok(Arc::new(BuildQueue::new(self.pool()?, &*self.config()?))) + Ok(Arc::new(BuildQueue::new( + self.pool()?, + self.metrics()?, + &*self.config()?, + ))) })? 
.clone()) } @@ -584,7 +588,11 @@ impl Context { Ok(self .storage .get_or_try_init::<_, Error>(|| { - Ok(Arc::new(Storage::new(self.pool()?, &*self.config()?)?)) + Ok(Arc::new(Storage::new( + self.pool()?, + self.metrics()?, + &*self.config()?, + )?)) })? .clone()) } @@ -599,16 +607,14 @@ impl Context { fn pool(&self) -> Result { Ok(self .pool - .get_or_try_init::<_, Error>(|| Ok(Pool::new(&*self.config()?)?))? + .get_or_try_init::<_, Error>(|| Ok(Pool::new(&*self.config()?, self.metrics()?)?))? .clone()) } - fn conn( - &self, - ) -> Result< - r2d2::PooledConnection>, - Error, - > { - Ok(self.pool()?.get()?) + fn metrics(&self) -> Result, Error> { + Ok(self + .metrics + .get_or_try_init::<_, Error>(|| Ok(Arc::new(Metrics::new()?)))? + .clone()) } } diff --git a/src/build_queue.rs b/src/build_queue.rs index a5490abc6..869aa9b38 100644 --- a/src/build_queue.rs +++ b/src/build_queue.rs @@ -1,7 +1,8 @@ -use crate::config::Config; use crate::db::Pool; use crate::error::Result; +use crate::{Config, Metrics}; use log::error; +use std::sync::Arc; #[derive(Debug, Clone, Eq, PartialEq, serde::Serialize)] pub(crate) struct QueuedCrate { @@ -15,13 +16,15 @@ pub(crate) struct QueuedCrate { #[derive(Debug)] pub struct BuildQueue { db: Pool, + metrics: Arc, max_attempts: i32, } impl BuildQueue { - pub fn new(db: Pool, config: &Config) -> Self { + pub fn new(db: Pool, metrics: Arc, config: &Config) -> Self { BuildQueue { db, + metrics, max_attempts: config.build_attempts.into(), } } @@ -91,7 +94,7 @@ impl BuildQueue { }; let res = f(&to_process); - crate::web::metrics::TOTAL_BUILDS.inc(); + self.metrics.total_builds.inc(); match res { Ok(()) => { conn.execute("DELETE FROM queue WHERE id = $1;", &[&to_process.id])?; @@ -105,7 +108,7 @@ impl BuildQueue { let attempt: i32 = rows[0].get(0); if attempt >= self.max_attempts { - crate::web::metrics::FAILED_BUILDS.inc(); + self.metrics.failed_builds.inc(); } error!( @@ -196,6 +199,11 @@ mod tests { })?; assert!(!called, "there were still 
items in the queue"); + // Ensure metrics were recorded correctly + let metrics = env.metrics(); + assert_eq!(metrics.total_builds.get(), 9); + assert_eq!(metrics.failed_builds.get(), 1); + Ok(()) }) } diff --git a/src/config.rs b/src/config.rs index 936018b6a..d84f12872 100644 --- a/src/config.rs +++ b/src/config.rs @@ -30,6 +30,8 @@ pub struct Config { pub(crate) max_file_size_html: usize, // The most memory that can be used to parse an HTML file pub(crate) max_parse_memory: usize, + // Time between 'git gc --auto' calls in seconds + pub(crate) registry_gc_interval: u64, } impl Config { @@ -61,6 +63,7 @@ impl Config { // LOL HTML only uses as much memory as the size of the start tag! // https://github.com/rust-lang/docs.rs/pull/930#issuecomment-667729380 max_parse_memory: env("DOCSRS_MAX_PARSE_MEMORY", 5 * 1024 * 1024)?, + registry_gc_interval: env("DOCSRS_REGISTRY_GC_INTERVAL", 60 * 60)?, }) } diff --git a/src/context.rs b/src/context.rs new file mode 100644 index 000000000..cc18b1430 --- /dev/null +++ b/src/context.rs @@ -0,0 +1,12 @@ +use crate::db::Pool; +use crate::{BuildQueue, Config, Metrics, Storage}; +use failure::Error; +use std::sync::Arc; + +pub trait Context { + fn config(&self) -> Result, Error>; + fn build_queue(&self) -> Result, Error>; + fn storage(&self) -> Result, Error>; + fn pool(&self) -> Result; + fn metrics(&self) -> Result, Error>; +} diff --git a/src/db/add_package.rs b/src/db/add_package.rs index 2bc4062fb..ed5e1bfb9 100644 --- a/src/db/add_package.rs +++ b/src/db/add_package.rs @@ -5,7 +5,7 @@ use std::{ }; use crate::{ - docbuilder::BuildResult, + docbuilder::{BuildResult, DocCoverage}, error::Result, index::api::{CrateData, CrateOwner, ReleaseData}, storage::CompressionAlgorithm, @@ -130,6 +130,29 @@ pub(crate) fn add_package_into_database( Ok(release_id) } +pub(crate) fn add_doc_coverage( + conn: &mut Client, + release_id: i32, + doc_coverage: DocCoverage, +) -> Result { + debug!("Adding doc coverage into database"); 
+ let rows = conn.query( + "INSERT INTO doc_coverage (release_id, total_items, documented_items) + VALUES ($1, $2, $3) + ON CONFLICT (release_id) DO UPDATE + SET + total_items = $2, + documented_items = $3 + RETURNING release_id", + &[ + &release_id, + &doc_coverage.total_items, + &doc_coverage.documented_items, + ], + )?; + Ok(rows[0].get(0)) +} + /// Adds a build into database pub(crate) fn add_build_into_database( conn: &mut Client, diff --git a/src/db/delete.rs b/src/db/delete.rs index 0f354d3d3..be73ecfd4 100644 --- a/src/db/delete.rs +++ b/src/db/delete.rs @@ -54,6 +54,7 @@ const METADATA: &[(&str, &str)] = &[ ("keyword_rels", "rid"), ("builds", "rid"), ("compression_rels", "release"), + ("doc_coverage", "release_id"), ]; fn delete_version_from_database(conn: &mut Client, name: &str, version: &str) -> Result<(), Error> { diff --git a/src/db/migrate.rs b/src/db/migrate.rs index ad4361439..d658008b2 100644 --- a/src/db/migrate.rs +++ b/src/db/migrate.rs @@ -381,6 +381,23 @@ pub fn migrate(version: Option, conn: &mut Client) -> CratesfyiResult<( -- Nope, this is a pure database fix, no going back. " ), + migration!( + context, + // version + 16, + // description + "Create new table for doc coverage", + // upgrade query + " + CREATE TABLE doc_coverage ( + release_id INT UNIQUE REFERENCES releases(id), + total_items INT, + documented_items INT + ); + ", + // downgrade query + "DROP TABLE doc_coverage;" + ), ]; for migration in migrations { diff --git a/src/db/mod.rs b/src/db/mod.rs index e1f8db097..ecb326404 100644 --- a/src/db/mod.rs +++ b/src/db/mod.rs @@ -1,12 +1,13 @@ //! 
Database operations pub use self::add_package::update_crate_data_in_database; -pub(crate) use self::add_package::{add_build_into_database, add_package_into_database}; +pub(crate) use self::add_package::{ + add_build_into_database, add_doc_coverage, add_package_into_database, +}; pub use self::delete::{delete_crate, delete_version}; pub use self::file::add_path_into_database; pub use self::migrate::migrate; -pub(crate) use self::pool::PoolClient; -pub use self::pool::{Pool, PoolError}; +pub use self::pool::{Pool, PoolClient, PoolError}; mod add_package; pub mod blacklist; diff --git a/src/db/pool.rs b/src/db/pool.rs index db9b3d235..69abd697c 100644 --- a/src/db/pool.rs +++ b/src/db/pool.rs @@ -1,29 +1,35 @@ +use crate::metrics::Metrics; use crate::Config; use postgres::{Client, NoTls}; use r2d2_postgres::PostgresConnectionManager; +use std::sync::Arc; -pub(crate) type PoolClient = r2d2::PooledConnection>; +pub type PoolClient = r2d2::PooledConnection>; const DEFAULT_SCHEMA: &str = "public"; #[derive(Debug, Clone)] pub struct Pool { pool: r2d2::Pool>, + metrics: Arc, + max_size: u32, } impl Pool { - pub fn new(config: &Config) -> Result { - Self::new_inner(config, DEFAULT_SCHEMA) + pub fn new(config: &Config, metrics: Arc) -> Result { + Self::new_inner(config, metrics, DEFAULT_SCHEMA) } #[cfg(test)] - pub(crate) fn new_with_schema(config: &Config, schema: &str) -> Result { - Self::new_inner(config, schema) + pub(crate) fn new_with_schema( + config: &Config, + metrics: Arc, + schema: &str, + ) -> Result { + Self::new_inner(config, metrics, schema) } - fn new_inner(config: &Config, schema: &str) -> Result { - crate::web::metrics::MAX_DB_CONNECTIONS.set(config.max_pool_size as i64); - + fn new_inner(config: &Config, metrics: Arc, schema: &str) -> Result { let url = config .database_url .parse() @@ -36,14 +42,18 @@ impl Pool { .build(manager) .map_err(PoolError::PoolCreationFailed)?; - Ok(Pool { pool }) + Ok(Pool { + pool, + metrics, + max_size: config.max_pool_size, + 
}) } pub fn get(&self) -> Result { match self.pool.get() { Ok(conn) => Ok(conn), Err(err) => { - crate::web::metrics::FAILED_DB_CONNECTIONS.inc(); + self.metrics.failed_db_connections.inc(); Err(PoolError::ClientError(err)) } } @@ -56,6 +66,10 @@ impl Pool { pub(crate) fn idle_connections(&self) -> u32 { self.pool.state().idle_connections } + + pub(crate) fn max_size(&self) -> u32 { + self.max_size + } } #[derive(Debug)] diff --git a/src/docbuilder/mod.rs b/src/docbuilder/mod.rs index ede7d2ac8..fb1277be0 100644 --- a/src/docbuilder/mod.rs +++ b/src/docbuilder/mod.rs @@ -7,8 +7,8 @@ mod rustwide_builder; pub(crate) use self::limits::Limits; pub(self) use self::metadata::Metadata; -pub(crate) use self::rustwide_builder::BuildResult; pub use self::rustwide_builder::RustwideBuilder; +pub(crate) use self::rustwide_builder::{BuildResult, DocCoverage}; use crate::db::Pool; use crate::error::Result; diff --git a/src/docbuilder/queue.rs b/src/docbuilder/queue.rs index ae3bfc76e..900c7d67d 100644 --- a/src/docbuilder/queue.rs +++ b/src/docbuilder/queue.rs @@ -7,6 +7,10 @@ use crates_index_diff::ChangeKind; use log::{debug, error}; impl DocBuilder { + pub fn run_git_gc(&self) { + self.index.run_git_gc(); + } + /// Updates registry index repository and adds new crates into build queue. 
/// Returns the number of crates added pub fn get_new_crates(&mut self) -> Result { diff --git a/src/docbuilder/rustwide_builder.rs b/src/docbuilder/rustwide_builder.rs index c975238c9..474d37675 100644 --- a/src/docbuilder/rustwide_builder.rs +++ b/src/docbuilder/rustwide_builder.rs @@ -3,14 +3,15 @@ use super::Metadata; use crate::db::blacklist::is_blacklisted; use crate::db::file::add_path_into_database; use crate::db::{ - add_build_into_database, add_package_into_database, update_crate_data_in_database, Pool, + add_build_into_database, add_doc_coverage, add_package_into_database, + update_crate_data_in_database, Pool, }; use crate::docbuilder::{crates::crates_from_path, Limits}; use crate::error::Result; use crate::index::api::ReleaseData; use crate::storage::CompressionAlgorithms; -use crate::storage::Storage; use crate::utils::{copy_doc_dir, parse_rustc_version, CargoMetadata}; +use crate::{Metrics, Storage}; use failure::ResultExt; use log::{debug, info, warn, LevelFilter}; use rustwide::cmd::{Command, SandboxBuilder}; @@ -19,7 +20,7 @@ use rustwide::toolchain::ToolchainError; use rustwide::{Build, Crate, Toolchain, Workspace, WorkspaceBuilder}; use serde_json::Value; use std::borrow::Cow; -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use std::path::Path; use std::sync::Arc; @@ -73,12 +74,13 @@ pub struct RustwideBuilder { toolchain: Toolchain, db: Pool, storage: Arc, + metrics: Arc, rustc_version: String, cpu_limit: Option, } impl RustwideBuilder { - pub fn init(db: Pool, storage: Arc) -> Result { + pub fn init(db: Pool, metrics: Arc, storage: Arc) -> Result { use rustwide::cmd::SandboxImage; let env_workspace_path = ::std::env::var("CRATESFYI_RUSTWIDE_WORKSPACE"); let workspace_path = env_workspace_path @@ -114,6 +116,7 @@ impl RustwideBuilder { toolchain, db, storage, + metrics, rustc_version: String::new(), cpu_limit, }) @@ -398,11 +401,11 @@ impl RustwideBuilder { let has_examples = 
build.host_source_dir().join("examples").is_dir(); if res.result.successful { - crate::web::metrics::SUCCESSFUL_BUILDS.inc(); + self.metrics.successful_builds.inc(); } else if res.cargo_metadata.root().is_library() { - crate::web::metrics::FAILED_BUILDS.inc(); + self.metrics.failed_builds.inc(); } else { - crate::web::metrics::NON_LIBRARY_BUILDS.inc(); + self.metrics.non_library_builds.inc(); } let release_data = match doc_builder.index.api().get_release_data(name, version) { @@ -427,6 +430,10 @@ impl RustwideBuilder { algs, )?; + if let Some(doc_coverage) = res.result.doc_coverage { + add_doc_coverage(&mut conn, release_id, doc_coverage)?; + } + add_build_into_database(&mut conn, release_id, &res.result)?; // Some crates.io crate data is mutable, so we proactively update it during a release @@ -468,6 +475,55 @@ impl RustwideBuilder { Ok(()) } + fn get_coverage( + &self, + target: &str, + build: &Build, + metadata: &Metadata, + limits: &Limits, + ) -> Result> { + let rustdoc_flags = vec![ + "--output-format".to_string(), + "json".to_string(), + "--show-coverage".to_string(), + ]; + + #[derive(serde::Deserialize)] + struct FileCoverage { + total: i32, + with_docs: i32, + } + + let mut coverage = DocCoverage { + total_items: 0, + documented_items: 0, + }; + + self.prepare_command(build, target, metadata, limits, rustdoc_flags)? 
+ .process_lines(&mut |line, _| { + if line.starts_with('{') && line.ends_with('}') { + let parsed = match serde_json::from_str::>(line) { + Ok(parsed) => parsed, + Err(_) => return, + }; + for file in parsed.values() { + coverage.total_items += file.total; + coverage.documented_items += file.with_docs; + } + } + }) + .log_output(false) + .run()?; + + Ok( + if coverage.total_items == 0 && coverage.documented_items == 0 { + None + } else { + Some(coverage) + }, + ) + } + fn execute_build( &self, target: &str, @@ -479,16 +535,8 @@ impl RustwideBuilder { let cargo_metadata = CargoMetadata::load(&self.workspace, &self.toolchain, &build.host_source_dir())?; - let mut rustdoc_flags: Vec = vec![ - "-Z".to_string(), - "unstable-options".to_string(), - "--resource-suffix".to_string(), - format!("-{}", parse_rustc_version(&self.rustc_version)?), - "--static-root-path".to_string(), - "/".to_string(), - "--cap-lints".to_string(), - "warn".to_string(), - ]; + let mut rustdoc_flags = Vec::new(); + for dep in &cargo_metadata.root_dependencies() { rustdoc_flags.push("--extern-html-root-url".to_string()); rustdoc_flags.push(format!( @@ -498,63 +546,25 @@ impl RustwideBuilder { dep.version )); } - if let Some(package_rustdoc_args) = &metadata.rustdoc_args { - rustdoc_flags.append(&mut package_rustdoc_args.iter().map(|s| s.to_owned()).collect()); - } - let mut cargo_args = vec!["doc", "--lib", "--no-deps"]; - if target != HOST_TARGET { - // If the explicit target is not a tier one target, we need to install it. - if !TARGETS.contains(&target) { - // This is a no-op if the target is already installed. 
- self.toolchain.add_target(&self.workspace, target)?; - } - cargo_args.push("--target"); - cargo_args.push(target); - }; - - let tmp_jobs; - if let Some(cpu_limit) = self.cpu_limit { - tmp_jobs = format!("-j{}", cpu_limit); - cargo_args.push(&tmp_jobs); - } - let tmp; - if let Some(features) = &metadata.features { - cargo_args.push("--features"); - tmp = features.join(" "); - cargo_args.push(&tmp); - } - if metadata.all_features { - cargo_args.push("--all-features"); - } - if metadata.no_default_features { - cargo_args.push("--no-default-features"); - } + rustdoc_flags.extend(vec![ + "--resource-suffix".to_string(), + format!("-{}", parse_rustc_version(&self.rustc_version)?), + ]); let mut storage = LogStorage::new(LevelFilter::Info); storage.set_max_size(limits.max_log_size()); let successful = logging::capture(&storage, || { - build - .cargo() - .timeout(Some(limits.timeout())) - .no_output_timeout(None) - .env( - "RUSTFLAGS", - metadata - .rustc_args - .as_ref() - .map(|args| args.join(" ")) - .unwrap_or_default(), - ) - .env("RUSTDOCFLAGS", rustdoc_flags.join(" ")) - // For docs.rs detection from build script: - // https://github.com/rust-lang/docs.rs/issues/147 - .env("DOCS_RS", "1") - .args(&cargo_args) - .run() + self.prepare_command(build, target, metadata, limits, rustdoc_flags) + .and_then(|command| command.run().map_err(failure::Error::from)) .is_ok() }); + let doc_coverage = if successful { + self.get_coverage(target, build, metadata, limits)? + } else { + None + }; // If we're passed a default_target which requires a cross-compile, // cargo will put the output in `target//doc`. 
// However, if this is the default build, we don't want it there, @@ -574,12 +584,87 @@ impl RustwideBuilder { rustc_version: self.rustc_version.clone(), docsrs_version: format!("docsrs {}", crate::BUILD_VERSION), successful, + doc_coverage, }, cargo_metadata, target: target.to_string(), }) } + fn prepare_command<'ws, 'pl>( + &self, + build: &'ws Build, + target: &str, + metadata: &Metadata, + limits: &Limits, + rustdoc_flags_extras: Vec, + ) -> Result> { + let mut cargo_args = vec!["doc", "--lib", "--no-deps"]; + if target != HOST_TARGET { + // If the explicit target is not a tier one target, we need to install it. + if !TARGETS.contains(&target) { + // This is a no-op if the target is already installed. + self.toolchain.add_target(&self.workspace, target)?; + } + cargo_args.push("--target"); + cargo_args.push(target); + }; + + let tmp; + if let Some(cpu_limit) = self.cpu_limit { + tmp = format!("-j{}", cpu_limit); + cargo_args.push(&tmp); + } + + let tmp; + if let Some(features) = &metadata.features { + cargo_args.push("--features"); + tmp = features.join(" "); + cargo_args.push(&tmp); + } + if metadata.all_features { + cargo_args.push("--all-features"); + } + if metadata.no_default_features { + cargo_args.push("--no-default-features"); + } + + let mut rustdoc_flags = vec![ + "-Z".to_string(), + "unstable-options".to_string(), + "--static-root-path".to_string(), + "/".to_string(), + "--cap-lints".to_string(), + "warn".to_string(), + ]; + + if let Some(package_rustdoc_args) = &metadata.rustdoc_args { + rustdoc_flags.append(&mut package_rustdoc_args.clone()); + } + + rustdoc_flags.extend(rustdoc_flags_extras); + + let command = build + .cargo() + .timeout(Some(limits.timeout())) + .no_output_timeout(None) + .env( + "RUSTFLAGS", + metadata + .rustc_args + .as_ref() + .map(|args| args.join(" ")) + .unwrap_or_default(), + ) + .env("RUSTDOCFLAGS", rustdoc_flags.join(" ")) + // For docs.rs detection from build script: + // 
https://github.com/rust-lang/docs.rs/issues/147 + .env("DOCS_RS", "1") + .args(&cargo_args); + + Ok(command) + } + fn copy_docs( &self, target_dir: &Path, @@ -625,9 +710,19 @@ struct FullBuildResult { cargo_metadata: CargoMetadata, } +#[derive(Clone, Copy)] +pub(crate) struct DocCoverage { + /// The total items that could be documented in the current crate, used to calculate + /// documentation coverage. + pub(crate) total_items: i32, + /// The items of the crate that are documented, used to calculate documentation coverage. + pub(crate) documented_items: i32, +} + pub(crate) struct BuildResult { pub(crate) rustc_version: String, pub(crate) docsrs_version: String, pub(crate) build_log: String, pub(crate) successful: bool, + pub(crate) doc_coverage: Option, } diff --git a/src/index/mod.rs b/src/index/mod.rs index acb86a8fc..e00c2c357 100644 --- a/src/index/mod.rs +++ b/src/index/mod.rs @@ -1,4 +1,5 @@ use std::path::{Path, PathBuf}; +use std::process::Command; use url::Url; @@ -50,6 +51,14 @@ impl Index { Ok(Self { path, api }) } + pub fn run_git_gc(&self) { + let cmd = format!("cd {} && git gc --auto", self.path.to_str().unwrap()); + let gc = Command::new("sh").args(&["-c", cmd.as_str()]).output(); + if let Err(err) = gc { + log::error!("Failed to run `{}`: {:?}", cmd, err); + } + } + pub(crate) fn diff(&self) -> Result { let diff = crates_index_diff::Index::from_path_or_cloned(&self.path) .context("re-opening registry index for diff")?; diff --git a/src/lib.rs b/src/lib.rs index d25e79c5a..63eaa9ac7 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -4,18 +4,22 @@ pub use self::build_queue::BuildQueue; pub use self::config::Config; +pub use self::context::Context; pub use self::docbuilder::options::DocBuilderOptions; pub use self::docbuilder::DocBuilder; pub use self::docbuilder::RustwideBuilder; +pub use self::metrics::Metrics; pub use self::storage::Storage; pub use self::web::Server; mod build_queue; mod config; +mod context; pub mod db; mod 
docbuilder; mod error; pub mod index; +mod metrics; pub mod storage; #[cfg(test)] mod test; diff --git a/src/metrics/macros.rs b/src/metrics/macros.rs new file mode 100644 index 000000000..2571f5b1e --- /dev/null +++ b/src/metrics/macros.rs @@ -0,0 +1,80 @@ +pub(super) trait MetricFromOpts: Sized { + fn from_opts(opts: prometheus::Opts) -> Result; +} + +#[macro_export] +macro_rules! metrics { + ( + $vis:vis struct $name:ident { + $( + #[doc = $help:expr] + $(#[$meta:meta])* + $metric_vis:vis $metric:ident: $ty:ty $([$($label:expr),* $(,)?])? + ),* $(,)? + } + metrics visibility: $metric_vis:vis, + namespace: $namespace:expr, + ) => { + $vis struct $name { + registry: prometheus::Registry, + $( + $(#[$meta])* + $metric_vis $metric: $ty, + )* + } + impl $name { + $vis fn new() -> Result { + let registry = Registry::new(); + $( + $(#[$meta])* + let $metric = <$ty>::from_opts( + Opts::new(stringify!($metric), $help) + .namespace($namespace) + $(.variable_labels(vec![$($label.into()),*]))? + )?; + $(#[$meta])* + registry.register(Box::new($metric.clone()))?; + )* + Ok(Self { + registry, + $( + $(#[$meta])* + $metric, + )* + }) + } + } + impl std::fmt::Debug for $name { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", stringify!($name)) + } + } + }; +} + +#[macro_export] +macro_rules! 
load_metric_type { + ($name:ident as single) => { + use prometheus::$name; + impl MetricFromOpts for $name { + fn from_opts(opts: Opts) -> Result { + $name::with_opts(opts) + } + } + }; + ($name:ident as vec) => { + use prometheus::$name; + impl MetricFromOpts for $name { + fn from_opts(opts: Opts) -> Result { + $name::new( + opts.clone().into(), + opts.variable_labels + .iter() + .map(|s| s.as_str()) + .collect::>() + .as_slice(), + ) + } + } + }; +} diff --git a/src/metrics/mod.rs b/src/metrics/mod.rs new file mode 100644 index 000000000..3616ef657 --- /dev/null +++ b/src/metrics/mod.rs @@ -0,0 +1,99 @@ +#[macro_use] +mod macros; + +use self::macros::MetricFromOpts; +use crate::db::Pool; +use crate::BuildQueue; +use failure::Error; +use prometheus::{proto::MetricFamily, Opts, Registry}; + +load_metric_type!(IntGauge as single); +load_metric_type!(IntCounter as single); +load_metric_type!(IntCounterVec as vec); +load_metric_type!(HistogramVec as vec); + +metrics! { + pub struct Metrics { + /// Number of crates in the build queue + queued_crates_count: IntGauge, + /// Number of crates in the build queue that have a positive priority + prioritized_crates_count: IntGauge, + /// Number of crates that failed to build + failed_crates_count: IntGauge, + + /// The number of idle database connections + idle_db_connections: IntGauge, + /// The number of used database connections + used_db_connections: IntGauge, + /// The maximum number of database connections + max_db_connections: IntGauge, + /// Number of attempted and failed connections to the database + failed_db_connections: IntCounter, + + /// The number of currently opened file descriptors + #[cfg(target_os = "linux")] + open_file_descriptors: IntGauge, + /// The number of threads being used by docs.rs + #[cfg(target_os = "linux")] + running_threads: IntGauge, + + /// The traffic of various docs.rs routes + routes_visited: IntCounterVec["route"], + /// The response times of various docs.rs routes + response_time: HistogramVec["route"], + /// The
time it takes to render a rustdoc page + rustdoc_rendering_times: HistogramVec["step"], + + /// Number of crates built + total_builds: IntCounter, + /// Number of builds that successfully generated docs + successful_builds: IntCounter, + /// Number of builds that generated a compiler error + failed_builds: IntCounter, + /// Number of builds that did not complete due to not being a library + non_library_builds: IntCounter, + + /// Number of files uploaded to the storage backend + uploaded_files_total: IntCounter, + + /// The number of attempted files that failed due to a memory limit + html_rewrite_ooms: IntCounter, + } + + metrics visibility: pub(crate), + namespace: "docsrs", +} + +impl Metrics { + pub(crate) fn gather( + &self, + pool: &Pool, + queue: &BuildQueue, + ) -> Result, Error> { + self.idle_db_connections.set(pool.idle_connections() as i64); + self.used_db_connections.set(pool.used_connections() as i64); + self.max_db_connections.set(pool.max_size() as i64); + + self.queued_crates_count.set(queue.pending_count()? as i64); + self.prioritized_crates_count + .set(queue.prioritized_count()? as i64); + self.failed_crates_count.set(queue.failed_count()?
as i64); + + self.gather_system_performance(); + Ok(self.registry.gather()) + } + + #[cfg(not(target_os = "linux"))] + fn gather_system_performance(&self) {} + + #[cfg(target_os = "linux")] + fn gather_system_performance(&self) { + use procfs::process::Process; + + let process = Process::myself().unwrap(); + self.open_file_descriptors + .set(process.fd().unwrap().len() as i64); + self.running_threads + .set(process.stat().unwrap().num_threads as i64); + } +} diff --git a/src/storage/database.rs b/src/storage/database.rs index 9d6343607..6084f4306 100644 --- a/src/storage/database.rs +++ b/src/storage/database.rs @@ -1,16 +1,19 @@ use super::{Blob, StorageTransaction}; use crate::db::Pool; +use crate::Metrics; use chrono::{DateTime, NaiveDateTime, Utc}; use failure::Error; use postgres::Transaction; +use std::sync::Arc; pub(crate) struct DatabaseBackend { pool: Pool, + metrics: Arc, } impl DatabaseBackend { - pub(crate) fn new(pool: Pool) -> Self { - Self { pool } + pub(crate) fn new(pool: Pool, metrics: Arc) -> Self { + Self { pool, metrics } } pub(super) fn exists(&self, path: &str) -> Result { @@ -68,12 +71,14 @@ impl DatabaseBackend { pub(super) fn start_connection(&self) -> Result { Ok(DatabaseClient { conn: self.pool.get()?, + metrics: self.metrics.clone(), }) } } pub(super) struct DatabaseClient { conn: crate::db::PoolClient, + metrics: Arc, } impl DatabaseClient { @@ -82,12 +87,14 @@ impl DatabaseClient { ) -> Result, Error> { Ok(DatabaseStorageTransaction { transaction: self.conn.transaction()?, + metrics: &self.metrics, }) } } pub(super) struct DatabaseStorageTransaction<'a> { transaction: Transaction<'a>, + metrics: &'a Metrics, } impl<'a> StorageTransaction for DatabaseStorageTransaction<'a> { @@ -101,6 +108,7 @@ impl<'a> StorageTransaction for DatabaseStorageTransaction<'a> { SET mime = EXCLUDED.mime, content = EXCLUDED.content, compression = EXCLUDED.compression", &[&blob.path, &blob.mime, &blob.content, &compression], )?; + self.metrics.uploaded_files_total.inc(); } Ok(()) } 
diff --git a/src/storage/mod.rs b/src/storage/mod.rs index bdb643ffe..8e9939ec6 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -5,7 +5,7 @@ mod s3; pub use self::compression::{compress, decompress, CompressionAlgorithm, CompressionAlgorithms}; use self::database::DatabaseBackend; use self::s3::S3Backend; -use crate::{db::Pool, Config}; +use crate::{db::Pool, Config, Metrics}; use chrono::{DateTime, Utc}; use failure::{err_msg, Error}; use path_slash::PathExt; @@ -14,6 +14,7 @@ use std::{ ffi::OsStr, fmt, fs, path::{Path, PathBuf}, + sync::Arc, }; const MAX_CONCURRENT_UPLOADS: usize = 1000; @@ -76,29 +77,30 @@ pub struct Storage { } impl Storage { - pub fn new(pool: Pool, config: &Config) -> Result { + pub fn new(pool: Pool, metrics: Arc, config: &Config) -> Result { let backend = if let Some(c) = s3::s3_client() { - StorageBackend::S3(Box::new(S3Backend::new(c, config)?)) + StorageBackend::S3(Box::new(S3Backend::new(c, metrics, config)?)) } else { - StorageBackend::Database(DatabaseBackend::new(pool)) + StorageBackend::Database(DatabaseBackend::new(pool, metrics)) }; Ok(Storage { backend }) } #[cfg(test)] - pub(crate) fn temp_new_s3(config: &Config) -> Result { + pub(crate) fn temp_new_s3(metrics: Arc, config: &Config) -> Result { Ok(Storage { backend: StorageBackend::S3(Box::new(S3Backend::new( s3::s3_client().unwrap(), + metrics, config, )?)), }) } #[cfg(test)] - pub(crate) fn temp_new_db(pool: Pool) -> Result { + pub(crate) fn temp_new_db(pool: Pool, metrics: Arc) -> Result { Ok(Storage { - backend: StorageBackend::Database(DatabaseBackend::new(pool)), + backend: StorageBackend::Database(DatabaseBackend::new(pool, metrics)), }) } @@ -389,7 +391,7 @@ mod backend_tests { Ok(()) } - fn test_store_blobs(storage: &Storage) -> Result<(), Error> { + fn test_store_blobs(storage: &Storage, metrics: &Metrics) -> Result<(), Error> { const NAMES: &[&str] = &[ "a", "b", @@ -417,10 +419,12 @@ mod backend_tests { assert_eq!(blob.mime, actual.mime); } + 
assert_eq!(NAMES.len(), metrics.uploaded_files_total.get() as usize); + Ok(()) } - fn test_store_all(storage: &Storage) -> Result<(), Error> { + fn test_store_all(storage: &Storage, metrics: &Metrics) -> Result<(), Error> { let dir = tempfile::Builder::new() .prefix("docs.rs-upload-test") .tempdir()?; @@ -462,6 +466,8 @@ mod backend_tests { expected_algs.insert(CompressionAlgorithm::default()); assert_eq!(algs, expected_algs); + assert_eq!(2, metrics.uploaded_files_total.get()); + Ok(()) } @@ -557,7 +563,11 @@ mod backend_tests { // Remember to add the test name to the macro below when adding a new one. macro_rules! backend_tests { - (backends($env:ident) { $($backend:ident => $create:expr,)* } tests $tests:tt ) => { + ( + backends($env:ident) { $($backend:ident => $create:expr,)* } + tests $tests:tt + tests_with_metrics $tests_with_metrics:tt + ) => { $( mod $backend { use crate::test::TestEnvironment; @@ -569,6 +579,7 @@ mod backend_tests { } backend_tests!(@tests $tests); + backend_tests!(@tests_with_metrics $tests_with_metrics); } )* }; @@ -582,6 +593,16 @@ mod backend_tests { } )* }; + (@tests_with_metrics { $($test:ident,)* }) => { + $( + #[test] + fn $test() { + crate::test::wrapper(|env| { + super::$test(&*get_storage(env), &*env.metrics()) + }); + } + )* + }; } backend_tests! 
{ @@ -595,10 +616,13 @@ mod backend_tests { test_exists, test_get_object, test_get_too_big, - test_store_blobs, - test_store_all, test_delete_prefix, test_delete_percent, } + + tests_with_metrics { + test_store_blobs, + test_store_all, + } } } diff --git a/src/storage/s3.rs b/src/storage/s3.rs index b45813664..e8b541c1c 100644 --- a/src/storage/s3.rs +++ b/src/storage/s3.rs @@ -1,5 +1,5 @@ use super::{Blob, StorageTransaction}; -use crate::Config; +use crate::{Config, Metrics}; use chrono::{DateTime, NaiveDateTime, Utc}; use failure::Error; use futures_util::{ @@ -13,19 +13,24 @@ use rusoto_s3::{ DeleteObjectsRequest, GetObjectError, GetObjectRequest, HeadObjectError, HeadObjectRequest, ListObjectsV2Request, ObjectIdentifier, PutObjectRequest, S3Client, S3, }; -use std::{convert::TryInto, io::Write}; +use std::{convert::TryInto, io::Write, sync::Arc}; use tokio::runtime::Runtime; pub(super) struct S3Backend { client: S3Client, runtime: Runtime, bucket: String, + metrics: Arc, #[cfg(test)] temporary: bool, } impl S3Backend { - pub(super) fn new(client: S3Client, config: &Config) -> Result { + pub(super) fn new( + client: S3Client, + metrics: Arc, + config: &Config, + ) -> Result { let runtime = Runtime::new()?; #[cfg(test)] @@ -48,6 +53,7 @@ impl S3Backend { Ok(Self { client, runtime, + metrics, bucket: config.s3_bucket.clone(), #[cfg(test)] temporary: config.s3_bucket_is_temporary, @@ -173,7 +179,7 @@ impl<'a> StorageTransaction for S3StorageTransaction<'a> { ..Default::default() }) .map_ok(|_| { - crate::web::metrics::UPLOADED_FILES_TOTAL.inc_by(1); + self.s3.metrics.uploaded_files_total.inc(); }) .map_err(|err| { log::error!("Failed to upload blob to S3: {:?}", err); diff --git a/src/test/fakes.rs b/src/test/fakes.rs index 5cc10ca91..15cc910c4 100644 --- a/src/test/fakes.rs +++ b/src/test/fakes.rs @@ -1,5 +1,5 @@ use super::TestDatabase; -use crate::docbuilder::BuildResult; +use crate::docbuilder::{BuildResult, DocCoverage}; use crate::index::api::{CrateData, 
CrateOwner, ReleaseData}; use crate::storage::Storage; use crate::utils::{Dependency, MetadataPackage, Target}; @@ -59,6 +59,7 @@ impl<'a> FakeRelease<'a> { docsrs_version: "docs.rs 1.0.0 (000000000 1970-01-01)".into(), build_log: "It works!".into(), successful: true, + doc_coverage: None, }, source_files: Vec::new(), rustdoc_files: Vec::new(), @@ -182,6 +183,14 @@ impl<'a> FakeRelease<'a> { self } + pub(crate) fn coverage(mut self, documented_items: i32, total_items: i32) -> Self { + self.build_result.doc_coverage = Some(DocCoverage { + total_items, + documented_items, + }); + self + } + /// Returns the release_id pub(crate) fn create(self) -> Result { use std::fs; @@ -273,6 +282,9 @@ impl<'a> FakeRelease<'a> { &self.registry_crate_data, )?; crate::db::add_build_into_database(&mut db.conn(), release_id, &self.build_result)?; + if let Some(coverage) = self.build_result.doc_coverage { + crate::db::add_doc_coverage(&mut db.conn(), release_id, coverage)?; + } Ok(release_id) } diff --git a/src/test/mod.rs b/src/test/mod.rs index 5b0e4ffdf..2a6d7cdfb 100644 --- a/src/test/mod.rs +++ b/src/test/mod.rs @@ -3,8 +3,7 @@ mod fakes; use crate::db::{Pool, PoolClient}; use crate::storage::Storage; use crate::web::Server; -use crate::BuildQueue; -use crate::Config; +use crate::{BuildQueue, Config, Context, Metrics}; use failure::Error; use log::error; use once_cell::unsync::OnceCell; @@ -97,6 +96,7 @@ pub(crate) struct TestEnvironment { config: OnceCell>, db: OnceCell, storage: OnceCell>, + metrics: OnceCell>, frontend: OnceCell, s3: OnceCell>, storage_db: OnceCell>, @@ -117,6 +117,7 @@ impl TestEnvironment { config: OnceCell::new(), db: OnceCell::new(), storage: OnceCell::new(), + metrics: OnceCell::new(), frontend: OnceCell::new(), s3: OnceCell::new(), storage_db: OnceCell::new(), @@ -159,7 +160,13 @@ impl TestEnvironment { pub(crate) fn build_queue(&self) -> Arc { self.build_queue - .get_or_init(|| Arc::new(BuildQueue::new(self.db().pool(), &self.config()))) + .get_or_init(|| 
{ + Arc::new(BuildQueue::new( + self.db().pool(), + self.metrics(), + &self.config(), + )) + }) .clone() } @@ -173,29 +180,34 @@ impl TestEnvironment { self.storage .get_or_init(|| { Arc::new( - Storage::new(self.db().pool(), &*self.config()) + Storage::new(self.db().pool(), self.metrics(), &*self.config()) .expect("failed to initialize the storage"), ) }) .clone() } + pub(crate) fn metrics(&self) -> Arc { + self.metrics + .get_or_init(|| Arc::new(Metrics::new().expect("failed to initialize the metrics"))) + .clone() + } + pub(crate) fn db(&self) -> &TestDatabase { - self.db - .get_or_init(|| TestDatabase::new(&self.config()).expect("failed to initialize the db")) + self.db.get_or_init(|| { + TestDatabase::new(&self.config(), self.metrics()).expect("failed to initialize the db") + }) } pub(crate) fn frontend(&self) -> &TestFrontend { - self.frontend.get_or_init(|| { - TestFrontend::new(self.db(), self.config(), self.build_queue(), self.storage()) - }) + self.frontend.get_or_init(|| TestFrontend::new(&*self)) } pub(crate) fn s3(&self) -> Arc { self.s3 .get_or_init(|| { Arc::new( - Storage::temp_new_s3(&*self.config()) + Storage::temp_new_s3(self.metrics(), &*self.config()) .expect("failed to initialize the storage"), ) }) @@ -206,7 +218,7 @@ impl TestEnvironment { self.storage_db .get_or_init(|| { Arc::new( - Storage::temp_new_db(self.db().pool()) + Storage::temp_new_db(self.db().pool(), self.metrics()) .expect("failed to initialize the storage"), ) }) @@ -218,13 +230,35 @@ impl TestEnvironment { } } +impl Context for TestEnvironment { + fn config(&self) -> Result, Error> { + Ok(TestEnvironment::config(self)) + } + + fn build_queue(&self) -> Result, Error> { + Ok(TestEnvironment::build_queue(self)) + } + + fn storage(&self) -> Result, Error> { + Ok(TestEnvironment::storage(self)) + } + + fn pool(&self) -> Result { + Ok(self.db().pool()) + } + + fn metrics(&self) -> Result, Error> { + Ok(self.metrics()) + } +} + pub(crate) struct TestDatabase { pool: Pool, schema: 
String, } impl TestDatabase { - fn new(config: &Config) -> Result { + fn new(config: &Config, metrics: Arc) -> Result { // A random schema name is generated and used for the current connection. This allows each // test to create a fresh instance of the database to run within. let schema = format!("docs_rs_test_schema_{}", rand::random::()); @@ -263,7 +297,7 @@ impl TestDatabase { conn.batch_execute(&query)?; Ok(TestDatabase { - pool: Pool::new_with_schema(config, &schema)?, + pool: Pool::new_with_schema(config, metrics, &schema)?, schema, }) } @@ -297,22 +331,10 @@ pub(crate) struct TestFrontend { } impl TestFrontend { - fn new( - db: &TestDatabase, - config: Arc, - build_queue: Arc, - storage: Arc, - ) -> Self { + fn new(context: &dyn Context) -> Self { Self { - server: Server::start( - Some("127.0.0.1:0"), - false, - db.pool.clone(), - config, - build_queue, - storage, - ) - .expect("failed to start the web server"), + server: Server::start(Some("127.0.0.1:0"), false, context) + .expect("failed to start the web server"), client: Client::new(), } } diff --git a/src/utils/daemon.rs b/src/utils/daemon.rs index 990516f5d..af8436c94 100644 --- a/src/utils/daemon.rs +++ b/src/utils/daemon.rs @@ -3,28 +3,25 @@ //! 
This daemon will start web server, track new packages and build them use crate::{ - db::Pool, - storage::Storage, utils::{queue_builder, update_release_activity, GithubUpdater}, - BuildQueue, Config, DocBuilder, DocBuilderOptions, + Context, DocBuilder, DocBuilderOptions, }; use chrono::{Timelike, Utc}; use failure::Error; use log::{debug, error, info}; -use std::sync::Arc; use std::thread; -use std::time::Duration; +use std::time::{Duration, Instant}; -fn start_registry_watcher( - opts: DocBuilderOptions, - pool: Pool, - build_queue: Arc, -) -> Result<(), Error> { +fn start_registry_watcher(opts: DocBuilderOptions, context: &dyn Context) -> Result<(), Error> { + let pool = context.pool()?; + let build_queue = context.build_queue()?; + let config = context.config()?; thread::Builder::new() .name("registry index reader".to_string()) .spawn(move || { // space this out to prevent it from clashing against the queue-builder thread on launch thread::sleep(Duration::from_secs(30)); + let mut last_gc = Instant::now(); loop { let mut doc_builder = DocBuilder::new(opts.clone(), pool.clone(), build_queue.clone()); @@ -39,6 +36,10 @@ fn start_registry_watcher( } } + if last_gc.elapsed().as_secs() >= config.registry_gc_interval { + doc_builder.run_git_gc(); + last_gc = Instant::now(); + } thread::sleep(Duration::from_secs(60)); } })?; @@ -46,13 +47,8 @@ fn start_registry_watcher( Ok(()) } -pub fn start_daemon( - config: Arc, - db: Pool, - build_queue: Arc, - storage: Arc, - enable_registry_watcher: bool, -) -> Result<(), Error> { +pub fn start_daemon(context: &dyn Context, enable_registry_watcher: bool) -> Result<(), Error> { + let config = context.config()?; let dbopts = DocBuilderOptions::new(config.prefix.clone(), config.registry_index_path.clone()); // check paths once @@ -60,27 +56,24 @@ pub fn start_daemon( if enable_registry_watcher { // check new crates every minute - start_registry_watcher(dbopts.clone(), db.clone(), build_queue.clone())?; + 
start_registry_watcher(dbopts.clone(), context)?; } // build new crates every minute - let cloned_db = db.clone(); - let cloned_build_queue = build_queue.clone(); - let cloned_storage = storage.clone(); + let pool = context.pool()?; + let build_queue = context.build_queue()?; + let storage = context.storage()?; + let metrics = context.metrics()?; thread::Builder::new() .name("build queue reader".to_string()) .spawn(move || { - let doc_builder = DocBuilder::new( - dbopts.clone(), - cloned_db.clone(), - cloned_build_queue.clone(), - ); - queue_builder(doc_builder, cloned_db, cloned_build_queue, cloned_storage).unwrap(); + let doc_builder = DocBuilder::new(dbopts.clone(), pool.clone(), build_queue.clone()); + queue_builder(doc_builder, pool, build_queue, metrics, storage).unwrap(); }) .unwrap(); // update release activity everyday at 23:55 - let cloned_db = db.clone(); + let pool = context.pool()?; cron( "release activity updater", Duration::from_secs(60), @@ -88,14 +81,14 @@ pub fn start_daemon( let now = Utc::now(); if now.hour() == 23 && now.minute() == 55 { info!("Updating release activity"); - update_release_activity(&mut *cloned_db.get()?)?; + update_release_activity(&mut *pool.get()?)?; } Ok(()) }, )?; // update github stats every hour - let github_updater = GithubUpdater::new(&config, db.clone())?; + let github_updater = GithubUpdater::new(&config, context.pool()?)?; cron( "github stats updater", Duration::from_secs(60 * 60), @@ -105,12 +98,10 @@ pub fn start_daemon( }, )?; - // TODO: update ssl certificate every 3 months - // at least start web server info!("Starting web server"); - crate::Server::start(None, false, db, config, build_queue, storage)?; + crate::Server::start(None, false, context)?; Ok(()) } diff --git a/src/utils/queue_builder.rs b/src/utils/queue_builder.rs index 1eaef5478..95f5285cf 100644 --- a/src/utils/queue_builder.rs +++ b/src/utils/queue_builder.rs @@ -1,5 +1,6 @@ use crate::{ - db::Pool, docbuilder::RustwideBuilder, 
utils::pubsubhubbub, BuildQueue, DocBuilder, Storage, + db::Pool, docbuilder::RustwideBuilder, utils::pubsubhubbub, BuildQueue, DocBuilder, Metrics, + Storage, }; use failure::Error; use log::{debug, error, info, warn}; @@ -13,6 +14,7 @@ pub fn queue_builder( mut doc_builder: DocBuilder, db: Pool, build_queue: Arc, + metrics: Arc, storage: Arc, ) -> Result<(), Error> { /// Represents the current state of the builder thread. @@ -28,7 +30,7 @@ pub fn queue_builder( QueueInProgress(usize), } - let mut builder = RustwideBuilder::init(db, storage)?; + let mut builder = RustwideBuilder::init(db, metrics, storage)?; let mut status = BuilderState::Fresh; diff --git a/src/web/crate_details.rs b/src/web/crate_details.rs index 032ae5614..fb02b173a 100644 --- a/src/web/crate_details.rs +++ b/src/web/crate_details.rs @@ -44,6 +44,8 @@ pub struct CrateDetails { pub(crate) doc_targets: Vec, license: Option, documentation_url: Option, + total_items: Option, + documented_items: Option, } fn optional_markdown(markdown: &Option, serializer: S) -> Result @@ -97,9 +99,12 @@ impl CrateDetails { releases.doc_targets, releases.license, releases.documentation_url, - releases.default_target + releases.default_target, + doc_coverage.total_items, + doc_coverage.documented_items FROM releases INNER JOIN crates ON releases.crate_id = crates.id + LEFT JOIN doc_coverage ON doc_coverage.release_id = releases.id WHERE crates.name = $1 AND releases.version = $2;"; let rows = conn.query(query, &[&name, &version]).unwrap(); @@ -150,6 +155,9 @@ impl CrateDetails { .unwrap_or_else(Vec::new) }; + let documented_items: Option = krate.get("documented_items"); + let total_items: Option = krate.get("total_items"); + let mut crate_details = CrateDetails { name: krate.get("name"), version: krate.get("version"), @@ -180,6 +188,8 @@ impl CrateDetails { doc_targets, license: krate.get("license"), documentation_url: krate.get("documentation_url"), + documented_items: documented_items.map(|v| v as f32), + 
total_items: total_items.map(|v| v as f32), }; if let Some(repository_url) = crate_details.repository_url.clone() { diff --git a/src/web/extensions.rs b/src/web/extensions.rs index a8957f650..468e67b66 100644 --- a/src/web/extensions.rs +++ b/src/web/extensions.rs @@ -1,18 +1,33 @@ -use crate::config::Config; -use crate::db::Pool; -use crate::storage::Storage; use crate::web::page::TemplateData; -use crate::BuildQueue; +use crate::{db::Pool, BuildQueue, Config, Context, Metrics, Storage}; +use failure::Error; use iron::{BeforeMiddleware, IronResult, Request}; use std::sync::Arc; #[derive(Debug, Clone)] pub(super) struct InjectExtensions { - pub(super) build_queue: Arc, - pub(super) pool: Pool, - pub(super) config: Arc, - pub(super) storage: Arc, - pub(super) template_data: Arc, + build_queue: Arc, + pool: Pool, + config: Arc, + storage: Arc, + metrics: Arc, + template_data: Arc, +} + +impl InjectExtensions { + pub(super) fn new( + context: &dyn Context, + template_data: Arc, + ) -> Result { + Ok(Self { + build_queue: context.build_queue()?, + pool: context.pool()?, + config: context.config()?, + storage: context.storage()?, + metrics: context.metrics()?, + template_data, + }) + } } impl BeforeMiddleware for InjectExtensions { @@ -22,6 +37,7 @@ impl BeforeMiddleware for InjectExtensions { req.extensions.insert::(self.pool.clone()); req.extensions.insert::(self.config.clone()); req.extensions.insert::(self.storage.clone()); + req.extensions.insert::(self.metrics.clone()); req.extensions .insert::(self.template_data.clone()); @@ -41,4 +57,5 @@ key!(BuildQueue => Arc); key!(Pool => Pool); key!(Config => Arc); key!(Storage => Arc); +key!(Metrics => Arc); key!(TemplateData => Arc); diff --git a/src/web/metrics.rs b/src/web/metrics.rs index 0d0742593..dbb910499 100644 --- a/src/web/metrics.rs +++ b/src/web/metrics.rs @@ -1,182 +1,19 @@ use crate::db::Pool; use crate::BuildQueue; +use crate::Metrics; use iron::headers::ContentType; use iron::prelude::*; use 
iron::status::Status; -use once_cell::sync::Lazy; -use prometheus::{ - opts, register_counter, register_int_counter, register_int_gauge, Encoder, IntCounter, - IntGauge, TextEncoder, __register_gauge, register_int_counter_vec, IntCounterVec, - __register_counter_vec, histogram_opts, register_histogram_vec, HistogramVec, -}; +use prometheus::{Encoder, HistogramVec, TextEncoder}; use std::time::{Duration, Instant}; -static QUEUED_CRATES_COUNT: Lazy = Lazy::new(|| { - register_int_gauge!( - "docsrs_queued_crates_count", - "Number of crates in the build queue" - ) - .unwrap() -}); - -pub static PRIORITIZED_CRATES_COUNT: Lazy = Lazy::new(|| { - register_int_gauge!( - "docsrs_prioritized_crates_count", - "Number of crates in the build queue that have a positive priority" - ) - .unwrap() -}); - -static FAILED_CRATES_COUNT: Lazy = Lazy::new(|| { - register_int_gauge!( - "docsrs_failed_crates_count", - "Number of crates that failed to build" - ) - .unwrap() -}); - -pub static TOTAL_BUILDS: Lazy = - Lazy::new(|| register_int_counter!("docsrs_total_builds", "Number of crates built").unwrap()); - -pub static SUCCESSFUL_BUILDS: Lazy = Lazy::new(|| { - register_int_counter!( - "docsrs_successful_builds", - "Number of builds that successfully generated docs" - ) - .unwrap() -}); - -pub static FAILED_BUILDS: Lazy = Lazy::new(|| { - register_int_counter!( - "docsrs_failed_builds", - "Number of builds that generated a compile error" - ) - .unwrap() -}); - -pub static NON_LIBRARY_BUILDS: Lazy = Lazy::new(|| { - register_int_counter!( - "docsrs_non_library_builds", - "Number of builds that did not complete due to not being a library" - ) - .unwrap() -}); - -pub static UPLOADED_FILES_TOTAL: Lazy = Lazy::new(|| { - register_int_counter!( - "docsrs_uploaded_files_total", - "Number of files uploaded to S3 or stored in the database" - ) - .unwrap() -}); - -pub static ROUTES_VISITED: Lazy = Lazy::new(|| { - register_int_counter_vec!( - "docsrs_routes_visited", - "The traffic of various 
docs.rs routes", - &["route"] - ) - .unwrap() -}); - -pub static RESPONSE_TIMES: Lazy = Lazy::new(|| { - register_histogram_vec!( - "docsrs_response_time", - "The response times of various docs.rs routes", - &["route"] - ) - .unwrap() -}); - -pub static RUSTDOC_RENDERING_TIMES: Lazy = Lazy::new(|| { - register_histogram_vec!( - "docsrs_rustdoc_rendering_time", - "The time it takes to render a rustdoc page", - &["step"] - ) - .unwrap() -}); - -pub static FAILED_DB_CONNECTIONS: Lazy = Lazy::new(|| { - register_int_counter!( - "docsrs_failed_db_connections", - "Number of attempted and failed connections to the database" - ) - .unwrap() -}); - -pub static USED_DB_CONNECTIONS: Lazy = Lazy::new(|| { - register_int_gauge!( - "docsrs_used_db_connections", - "The number of used database connections" - ) - .unwrap() -}); - -pub static IDLE_DB_CONNECTIONS: Lazy = Lazy::new(|| { - register_int_gauge!( - "docsrs_idle_db_connections", - "The number of idle database connections" - ) - .unwrap() -}); - -pub static MAX_DB_CONNECTIONS: Lazy = Lazy::new(|| { - register_int_gauge!( - "docsrs_max_db_connections", - "The maximum database connections" - ) - .unwrap() -}); - -#[cfg(not(windows))] -pub static OPEN_FILE_DESCRIPTORS: Lazy = Lazy::new(|| { - register_int_gauge!( - "docsrs_open_file_descriptors", - "The number of currently opened file descriptors" - ) - .unwrap() -}); - -#[cfg(not(windows))] -pub static CURRENTLY_RUNNING_THREADS: Lazy = Lazy::new(|| { - register_int_gauge!( - "docsrs_running_threads", - "The number of threads being used by docs.rs" - ) - .unwrap() -}); - -pub static HTML_REWRITE_OOMS: Lazy = Lazy::new(|| { - register_int_gauge!( - "docsrs_html_rewrite_ooms", - "The number of attempted files that failed due to a memory limit" - ) - .unwrap() -}); - pub fn metrics_handler(req: &mut Request) -> IronResult { + let metrics = extension!(req, Metrics); let pool = extension!(req, Pool); let queue = extension!(req, BuildQueue); - 
USED_DB_CONNECTIONS.set(pool.used_connections() as i64); - IDLE_DB_CONNECTIONS.set(pool.idle_connections() as i64); - - QUEUED_CRATES_COUNT.set(ctry!(req, queue.pending_count()) as i64); - PRIORITIZED_CRATES_COUNT.set(ctry!(req, queue.prioritized_count()) as i64); - FAILED_CRATES_COUNT.set(ctry!(req, queue.failed_count()) as i64); - - #[cfg(target_os = "linux")] - { - use procfs::process::Process; - - let process = Process::myself().unwrap(); - OPEN_FILE_DESCRIPTORS.set(process.fd().unwrap().len() as i64); - CURRENTLY_RUNNING_THREADS.set(process.stat().unwrap().num_threads as i64); - } - let mut buffer = Vec::new(); - let families = prometheus::gather(); + let families = ctry!(req, metrics.gather(pool, &*queue)); ctry!(req, TextEncoder::new().encode(&families, &mut buffer)); let mut resp = Response::with(buffer); @@ -213,14 +50,16 @@ impl iron::Handler for RequestRecorder { let result = self.handler.handle(request); let resp_time = duration_to_seconds(start.elapsed()); - ROUTES_VISITED.with_label_values(&[&self.route_name]).inc(); - RESPONSE_TIMES + let metrics = extension!(request, Metrics); + metrics + .routes_visited + .with_label_values(&[&self.route_name]) + .inc(); + metrics + .response_time .with_label_values(&[&self.route_name]) .observe(resp_time); - #[cfg(test)] - tests::record_tests(&self.route_name); - result } } @@ -230,13 +69,13 @@ struct RenderingTime { step: &'static str, } -pub(crate) struct RenderingTimesRecorder { - metric: &'static HistogramVec, +pub(crate) struct RenderingTimesRecorder<'a> { + metric: &'a HistogramVec, current: Option, } -impl RenderingTimesRecorder { - pub(crate) fn new(metric: &'static HistogramVec) -> Self { +impl<'a> RenderingTimesRecorder<'a> { + pub(crate) fn new(metric: &'a HistogramVec) -> Self { Self { metric, current: None, @@ -260,7 +99,7 @@ impl RenderingTimesRecorder { } } -impl Drop for RenderingTimesRecorder { +impl Drop for RenderingTimesRecorder<'_> { fn drop(&mut self) { self.record_current(); } @@ -269,53 
+108,34 @@ impl Drop for RenderingTimesRecorder { #[cfg(test)] mod tests { use crate::test::{assert_success, wrapper}; - use once_cell::sync::Lazy; - use std::{ - collections::HashMap, - sync::{ - atomic::{AtomicUsize, Ordering}, - Mutex, - }, - }; - - static ROUTES_VISITED: AtomicUsize = AtomicUsize::new(0); - static RESPONSE_TIMES: Lazy>> = - Lazy::new(|| Mutex::new(HashMap::new())); - - pub fn record_tests(route: &str) { - ROUTES_VISITED.fetch_add(1, Ordering::SeqCst); - - let mut times = RESPONSE_TIMES.lock().unwrap(); - if let Some(requests) = times.get_mut(route) { - *requests += 1; - } else { - times.insert(route.to_owned(), 1); - } - } - - fn reset_records() { - ROUTES_VISITED.store(0, Ordering::SeqCst); - RESPONSE_TIMES.lock().unwrap().clear(); - } #[test] fn home_page() { wrapper(|env| { let frontend = env.frontend(); - - reset_records(); + let metrics = env.metrics(); frontend.get("/").send()?; frontend.get("/").send()?; - assert_eq!(ROUTES_VISITED.load(Ordering::SeqCst), 2); - assert_eq!(RESPONSE_TIMES.lock().unwrap().get("/"), Some(&2)); - - reset_records(); + assert_eq!(metrics.routes_visited.with_label_values(&["/"]).get(), 2); + assert_eq!( + metrics + .response_time + .with_label_values(&["/"]) + .get_sample_count(), + 2 + ); frontend.get("").send()?; frontend.get("").send()?; - assert_eq!(ROUTES_VISITED.load(Ordering::SeqCst), 2); - assert_eq!(RESPONSE_TIMES.lock().unwrap().get("/"), Some(&2)); + assert_eq!(metrics.routes_visited.with_label_values(&["/"]).get(), 4); + assert_eq!( + metrics + .response_time + .with_label_values(&["/"]) + .get_sample_count(), + 4 + ); Ok(()) }) @@ -325,6 +145,7 @@ mod tests { fn resources() { wrapper(|env| { let frontend = env.frontend(); + let metrics = env.metrics(); let routes = [ "/style.css", @@ -336,18 +157,25 @@ mod tests { ]; for route in routes.iter() { - reset_records(); - frontend.get(route).send()?; frontend.get(route).send()?; - - assert_eq!(ROUTES_VISITED.load(Ordering::SeqCst), 2); - assert_eq!( - 
RESPONSE_TIMES.lock().unwrap().get("static resource"), - Some(&2) - ); } + assert_eq!( + metrics + .routes_visited + .with_label_values(&["static resource"]) + .get(), + 12 + ); + assert_eq!( + metrics + .response_time + .with_label_values(&["static resource"]) + .get_sample_count(), + 12 + ); + Ok(()) }) } @@ -367,6 +195,7 @@ mod tests { .create()?; let frontend = env.frontend(); + let metrics = env.metrics(); let routes = [ ("/releases", "/releases"), @@ -381,13 +210,20 @@ mod tests { ]; for (route, correct) in routes.iter() { - reset_records(); - frontend.get(route).send()?; frontend.get(route).send()?; - assert_eq!(ROUTES_VISITED.load(Ordering::SeqCst), 2); - assert_eq!(RESPONSE_TIMES.lock().unwrap().get(*correct), Some(&2)); + assert_eq!( + metrics.routes_visited.with_label_values(&[*correct]).get(), + 2 + ); + assert_eq!( + metrics + .response_time + .with_label_values(&[*correct]) + .get_sample_count(), + 2 + ); } Ok(()) @@ -404,28 +240,36 @@ mod tests { .create()?; let frontend = env.frontend(); + let metrics = env.metrics(); let routes = ["/crate/rcc/0.0.0", "/crate/hexponent/0.2.0"]; for route in routes.iter() { - reset_records(); - frontend.get(route).send()?; frontend.get(route).send()?; - - assert_eq!(ROUTES_VISITED.load(Ordering::SeqCst), 2); - assert_eq!( - RESPONSE_TIMES.lock().unwrap().get("/crate/:name/:version"), - Some(&2) - ); } + assert_eq!( + metrics + .routes_visited + .with_label_values(&["/crate/:name/:version"]) + .get(), + 4 + ); + assert_eq!( + metrics + .response_time + .with_label_values(&["/crate/:name/:version"]) + .get_sample_count(), + 4 + ); + Ok(()) }) } #[test] - fn metrics() { + fn test_metrics_page_success() { wrapper(|env| { let web = env.frontend(); assert_success("/about/metrics", web) diff --git a/src/web/mod.rs b/src/web/mod.rs index 2afa558ef..c81e762ee 100644 --- a/src/web/mod.rs +++ b/src/web/mod.rs @@ -77,7 +77,7 @@ mod rustdoc; mod sitemap; mod source; -use crate::{config::Config, db::Pool, impl_webpage, BuildQueue, 
Storage}; +use crate::{impl_webpage, Context}; use chrono::{DateTime, Utc}; use extensions::InjectExtensions; use failure::Error; @@ -123,19 +123,10 @@ impl CratesfyiHandler { } fn new( - pool: Pool, - config: Arc, template_data: Arc, - build_queue: Arc, - storage: Arc, - ) -> CratesfyiHandler { - let inject_extensions = InjectExtensions { - build_queue, - pool, - config, - storage, - template_data, - }; + context: &dyn Context, + ) -> Result { + let inject_extensions = InjectExtensions::new(context, template_data)?; let routes = routes::build_routes(); let blacklisted_prefixes = routes.page_prefixes(); @@ -151,7 +142,7 @@ impl CratesfyiHandler { let static_handler = Static::new(prefix).cache(Duration::from_secs(STATIC_FILE_CACHE_DURATION)); - CratesfyiHandler { + Ok(CratesfyiHandler { shared_resource_handler: Box::new(shared_resources), router_handler: Box::new(router_chain), database_file_handler: Box::new(routes::BlockBlacklistedPrefixes::new( @@ -160,7 +151,7 @@ impl CratesfyiHandler { )), static_handler: Box::new(static_handler), inject_extensions, - } + }) } } @@ -394,51 +385,30 @@ impl Server { pub fn start( addr: Option<&str>, reload_templates: bool, - db: Pool, - config: Arc, - build_queue: Arc, - storage: Arc, + context: &dyn Context, ) -> Result { // Initialize templates - let template_data = Arc::new(TemplateData::new(&mut *db.get()?)?); + let template_data = Arc::new(TemplateData::new(&mut *context.pool()?.get()?)?); if reload_templates { - TemplateData::start_template_reloading(template_data.clone(), db.clone()); + TemplateData::start_template_reloading(template_data.clone(), context.pool()?); } - let server = Self::start_inner( - addr.unwrap_or(DEFAULT_BIND), - db, - config, - template_data, - build_queue, - storage, - ); + let server = Self::start_inner(addr.unwrap_or(DEFAULT_BIND), template_data, context)?; info!("Running docs.rs web server on http://{}", server.addr()); Ok(server) } fn start_inner( addr: &str, - pool: Pool, - config: Arc, 
template_data: Arc, - build_queue: Arc, - storage: Arc, - ) -> Self { - // poke all the metrics counters to instantiate and register them - metrics::TOTAL_BUILDS.inc_by(0); - metrics::SUCCESSFUL_BUILDS.inc_by(0); - metrics::FAILED_BUILDS.inc_by(0); - metrics::NON_LIBRARY_BUILDS.inc_by(0); - metrics::UPLOADED_FILES_TOTAL.inc_by(0); - metrics::FAILED_DB_CONNECTIONS.inc_by(0); - - let cratesfyi = CratesfyiHandler::new(pool, config, template_data, build_queue, storage); + context: &dyn Context, + ) -> Result { + let cratesfyi = CratesfyiHandler::new(template_data, context)?; let inner = Iron::new(cratesfyi) .http(addr) .unwrap_or_else(|_| panic!("Failed to bind to socket on {}", addr)); - Server { inner } + Ok(Server { inner }) } pub(crate) fn addr(&self) -> SocketAddr { @@ -677,6 +647,35 @@ mod test { }); } + #[test] + fn test_doc_coverage_for_crate_pages() { + wrapper(|env| { + env.fake_release() + .name("foo") + .version("0.0.1") + .source_file("test.rs", &[]) + .coverage(6, 10) + .create()?; + let web = env.frontend(); + + let foo_crate = kuchiki::parse_html().one(web.get("/crate/foo/0.0.1").send()?.text()?); + for value in &["60%", "6", "10"] { + assert!(foo_crate + .select(".pure-menu-item b") + .unwrap() + .any(|e| e.text_contents().contains(value))); + } + + let foo_doc = kuchiki::parse_html().one(web.get("/foo/0.0.1/foo").send()?.text()?); + assert!(foo_doc + .select(".pure-menu-link b") + .unwrap() + .any(|e| e.text_contents().contains("60%"))); + + Ok(()) + }); + } + #[test] fn test_show_clipboard_for_crate_pages() { wrapper(|env| { diff --git a/src/web/rustdoc.rs b/src/web/rustdoc.rs index 666a64089..d5cb0de22 100644 --- a/src/web/rustdoc.rs +++ b/src/web/rustdoc.rs @@ -4,10 +4,10 @@ use crate::{ db::Pool, utils, web::{ - crate_details::CrateDetails, error::Nope, file::File, match_version, metrics, - redirect_base, MatchSemver, + crate_details::CrateDetails, error::Nope, file::File, match_version, + metrics::RenderingTimesRecorder, redirect_base, 
MatchSemver, }, - Config, Storage, + Config, Metrics, Storage, }; use iron::{ headers::{CacheControl, CacheDirective, Expires, HttpDate}, @@ -207,13 +207,18 @@ impl RustdocPage { .get::() .expect("missing TemplateData from the request extensions"); + let metrics = req + .extensions + .get::() + .expect("missing Metrics from the request extensions"); + // Build the page of documentation let ctx = ctry!(req, tera::Context::from_serialize(self)); // Extract the head and body of the rustdoc file so that we can insert it into our own html // while logging OOM errors from html rewriting let html = match utils::rewrite_lol(rustdoc_html, max_parse_memory, ctx, templates) { Err(RewritingError::MemoryLimitExceeded(..)) => { - crate::web::metrics::HTML_REWRITE_OOMS.inc(); + metrics.html_rewrite_ooms.inc(); let config = extension!(req, Config); let err = failure::err_msg(format!( @@ -238,8 +243,8 @@ impl RustdocPage { /// This includes all HTML files for an individual crate, as well as the `search-index.js`, which is /// also crate-specific. pub fn rustdoc_html_server_handler(req: &mut Request) -> IronResult { - let mut rendering_time = - metrics::RenderingTimesRecorder::new(&metrics::RUSTDOC_RENDERING_TIMES); + let metrics = extension!(req, Metrics).clone(); + let mut rendering_time = RenderingTimesRecorder::new(&metrics.rustdoc_rendering_times); // Get the request parameters let router = extension!(req, Router); diff --git a/templates/crate/details.html b/templates/crate/details.html index 61d581a72..4868474a9 100644 --- a/templates/crate/details.html +++ b/templates/crate/details.html @@ -16,6 +16,13 @@
    + {%- if details.documented_items and details.total_items -%} + {% set percent = details.documented_items * 100 / details.total_items %} +
  • Coverage
  • +
  • {{ percent | round(precision=2) }}%
    + {{ details.documented_items }} out of {{ details.total_items }} items documented +
  • + {%- endif -%} {# List the release author's names and a link to their docs.rs profile #}
  • Authors
  • {%- for author in details.authors -%} diff --git a/templates/rustdoc/body.html b/templates/rustdoc/body.html index 7e95972d4..17ce5cf03 100644 --- a/templates/rustdoc/body.html +++ b/templates/rustdoc/body.html @@ -117,7 +117,7 @@
-
+ + {%- if krate.documented_items and krate.total_items -%} + {% set percent = krate.documented_items * 100 / krate.total_items %} + + {%- endif -%}