author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2024-06-10 22:10:37 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2024-06-10 22:10:37 +0000
commit     7ac906c66d6ee41d067b7fb7a94eae464dfb8b86 (patch)
tree       530deacbf3276559552bf498c503d9273f9f6f77
parent     8d0f2b4671afd5ed1c6ad809141a1f437cfd180c (diff)
parent     9edd3976ad9433caff47076945dc36b8ea9e578f (diff)
Snap for 11949379 from 9edd3976ad9433caff47076945dc36b8ea9e578f to build-tools-release
Change-Id: Id858677836b2645289f59df424fca00d75c5c4e4
-rw-r--r--  Android.bp                        20
-rw-r--r--  Cargo.lock                        42
-rw-r--r--  Cargo.toml                         2
-rw-r--r--  src/concurrent_linked_list.rs      6
-rw-r--r--  src/db.rs                        565
-rw-r--r--  src/densemap.rs                    8
-rw-r--r--  src/file_pool.rs                  15
-rw-r--r--  src/graph.rs                      30
-rw-r--r--  src/lib.rs                        11
-rw-r--r--  src/load.rs                      131
-rw-r--r--  src/progress.rs                    3
-rw-r--r--  src/run.rs                        15
-rw-r--r--  src/tools/mod.rs                   2
-rw-r--r--  src/tools/targets.rs             122
-rw-r--r--  src/trace.rs                     160
-rw-r--r--  src/work.rs                       36
-rw-r--r--  tests/e2e/basic.rs                16
-rw-r--r--  tests/e2e/missing.rs              28
-rw-r--r--  tests/e2e/mod.rs                   1
-rw-r--r--  tests/e2e/tools/mod.rs             1
-rw-r--r--  tests/e2e/tools/targets_test.rs  110
21 files changed, 882 insertions, 442 deletions
diff --git a/Android.bp b/Android.bp
index e436064..9cf8a8e 100644
--- a/Android.bp
+++ b/Android.bp
@@ -29,6 +29,13 @@ rust_library_host {
"libninja_frontend_protos",
"libprotobuf",
],
+ target: {
+ linux: {
+ rustlibs: [
+ "libtikv_jemallocator",
+ ],
+ },
+ },
}
rust_binary_host {
@@ -43,6 +50,12 @@ rust_binary_host {
"liblibc",
"librustc_hash",
],
+ sanitize: {
+ // It appears a sanitizer is now enabled by default that causes
+ // n2 to fail with errors about memory leaks. We need to debug this,
+ // but it may just be caused by our intentional use of std::mem::forget.
+ never: true,
+ },
}
rust_test_host {
@@ -65,6 +78,13 @@ rust_test_host {
"libninja_frontend_protos",
"libprotobuf",
],
+ target: {
+ linux: {
+ rustlibs: [
+ "libtikv_jemallocator",
+ ],
+ },
+ },
}
rust_test_host {
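
The sanitize comment above refers to n2's deliberate use of std::mem::forget. A minimal sketch of why that pattern trips LeakSanitizer (illustrative only, not n2's actual code):

    fn main() {
        // An allocation we intentionally never free, e.g. to skip a slow
        // Drop at process exit. LeakSanitizer reports this as a leak, which
        // is why the build rule above disables sanitizers for the binary.
        let big = vec![0u8; 1 << 20];
        std::mem::forget(big);
    }
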
diff --git a/Cargo.lock b/Cargo.lock
index 6d768d0..20f68a4 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -324,26 +324,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b"
[[package]]
-name = "jemalloc-sys"
-version = "0.5.4+5.3.0-patched"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac6c1946e1cea1788cbfde01c993b52a10e2da07f4bac608228d1bed20bfebf2"
-dependencies = [
- "cc",
- "libc",
-]
-
-[[package]]
-name = "jemallocator"
-version = "0.5.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a0de374a9f8e63150e6f5e8a60cc14c668226d7a347d8aee1a45766e3c4dd3bc"
-dependencies = [
- "jemalloc-sys",
- "libc",
-]
-
-[[package]]
name = "js-sys"
version = "0.3.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -394,13 +374,13 @@ dependencies = [
"argh",
"criterion",
"dashmap",
- "jemallocator",
"libc",
"protobuf",
"protobuf-codegen",
"rayon",
"rustc-hash",
"tempfile",
+ "tikv-jemallocator",
"windows-sys 0.48.0",
]
@@ -714,6 +694,26 @@ dependencies = [
]
[[package]]
+name = "tikv-jemalloc-sys"
+version = "0.5.4+5.3.0-patched"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1"
+dependencies = [
+ "cc",
+ "libc",
+]
+
+[[package]]
+name = "tikv-jemallocator"
+version = "0.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "965fe0c26be5c56c94e38ba547249074803efd52adfb66de62107d95aab3eaca"
+dependencies = [
+ "libc",
+ "tikv-jemalloc-sys",
+]
+
+[[package]]
name = "tinytemplate"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
diff --git a/Cargo.toml b/Cargo.toml
index 4daf964..993547c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -38,7 +38,7 @@ features = [
]
[target.'cfg(not(any(windows, target_arch = "wasm32")))'.dependencies]
-jemallocator = "0.5.0"
+tikv-jemallocator = "0.5.0"
[dev-dependencies]
tempfile = "3.6.0"
diff --git a/src/concurrent_linked_list.rs b/src/concurrent_linked_list.rs
index 0c15408..3415a08 100644
--- a/src/concurrent_linked_list.rs
+++ b/src/concurrent_linked_list.rs
@@ -19,7 +19,7 @@ struct ConcurrentLinkedListNode<T> {
}
impl<T> ConcurrentLinkedList<T> {
- pub fn new() -> Self {
+ pub const fn new() -> Self {
ConcurrentLinkedList {
head: AtomicPtr::new(null_mut()),
}
@@ -45,6 +45,10 @@ impl<T> ConcurrentLinkedList<T> {
}
}
+ pub fn is_empty(&self) -> bool {
+ self.head.load(Ordering::Relaxed).is_null()
+ }
+
pub fn iter(&self) -> impl Iterator<Item = &T> {
ConcurrentLinkedListIterator {
cur: self.head.load(Ordering::Relaxed),
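
Making new() a const fn is what allows a ConcurrentLinkedList to live in a static, which the trace.rs changes below rely on. A minimal in-crate sketch, assuming the API shown in this diff (new/prepend/iter/is_empty):

    // Hypothetical standalone usage; in this change the real static lives
    // in src/trace.rs as `static TRACE: ConcurrentLinkedList<TraceEvent>`.
    static EVENTS: ConcurrentLinkedList<u32> = ConcurrentLinkedList::new();

    fn record(v: u32) {
        EVENTS.prepend(v); // lock-free push onto the head
    }

    fn dump() {
        if !EVENTS.is_empty() {
            for v in EVENTS.iter() {
                println!("{v}");
            }
        }
    }
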
diff --git a/src/db.rs b/src/db.rs
index 7157f4b..02505fa 100644
--- a/src/db.rs
+++ b/src/db.rs
@@ -1,20 +1,24 @@
//! The n2 database stores information about previous builds for determining
//! which files are up to date.
+use crate::file_pool::FilePool;
use crate::graph;
use crate::{
densemap, densemap::DenseMap, graph::BuildId, graph::Graph, graph::Hashes, hash::BuildHash,
};
-use anyhow::{anyhow, bail};
+use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use std::collections::HashMap;
+use std::convert::TryInto;
+use std::fmt::Display;
use std::fs::File;
-use std::io::BufReader;
-use std::io::Read;
-use std::io::Write;
+use std::io::{BufWriter, Seek, Write};
use std::path::Path;
+use std::sync::atomic::AtomicUsize;
use std::sync::Arc;
-const VERSION: u32 = 1;
+const VERSION: u32 = 2;
+const DB_HEADER_SIZE: usize = 8;
+const DB_CHUNK_SIZE: usize = 1024 * 1024; // 1 MB
/// Files are identified by integers that are stable across n2 executions.
#[derive(Debug, Clone, Copy)]
@@ -40,78 +44,127 @@ pub struct IdMap {
db_ids: HashMap<*const graph::File, Id>,
}
-/// RecordWriter buffers writes into a Vec<u8>.
-/// We attempt to write a full record per underlying finish() to lessen the chance of writing partial records.
-#[derive(Default)]
-struct RecordWriter(Vec<u8>);
+// SAFETY: Send is not automatically implemented for IdMap because it has
+// a raw pointer in it. But we're only using those pointers as map keys,
+// and never dereferencing them, so they're essentially integers.
+unsafe impl Send for IdMap {}
+
+fn distance_to_end_of_chunk(offset: usize, header_size: usize, chunk_size: usize) -> usize {
+ let chunk_end = if offset < header_size {
+ header_size + chunk_size
+ } else if (offset - header_size) % chunk_size == 0 {
+ (offset - header_size + 1).next_multiple_of(chunk_size) + header_size
+ } else {
+ (offset - header_size).next_multiple_of(chunk_size) + header_size
+ };
+ chunk_end - offset
+}
-impl RecordWriter {
- fn write(&mut self, buf: &[u8]) {
- self.0.extend_from_slice(buf);
- }
+#[cfg(test)]
+#[test]
+fn test_distance_to_end_of_chunk() {
+ assert_eq!(distance_to_end_of_chunk(0, 8, 10), 18);
+ assert_eq!(distance_to_end_of_chunk(1, 8, 10), 17);
+ assert_eq!(distance_to_end_of_chunk(8, 8, 10), 10);
+ assert_eq!(distance_to_end_of_chunk(9, 8, 10), 9);
+ assert_eq!(distance_to_end_of_chunk(17, 8, 10), 1);
+ assert_eq!(distance_to_end_of_chunk(18, 8, 10), 10);
+ assert_eq!(distance_to_end_of_chunk(27, 8, 10), 1);
+ assert_eq!(distance_to_end_of_chunk(28, 8, 10), 10);
+ assert_eq!(distance_to_end_of_chunk(29, 8, 10), 9);
+}
- fn write_u16(&mut self, n: u16) {
- self.write(&n.to_le_bytes());
- }
+/// An opened database, ready for writes.
+pub struct Writer {
+ ids: IdMap,
+ w: BufWriter<File>,
+ offset: usize,
+}
- fn write_u24(&mut self, n: u32) {
- self.write(&n.to_le_bytes()[..3]);
+impl Writer {
+ fn create(path: &Path) -> std::io::Result<Self> {
+ let f = std::fs::File::create(path)?;
+ let mut w = Self::from_opened(IdMap::default(), f)?;
+ assert_eq!(w.offset, 0);
+ w.write_signature()?;
+ assert_eq!(w.offset, DB_HEADER_SIZE);
+ Ok(w)
}
- fn write_u64(&mut self, n: u64) {
- self.write(&n.to_le_bytes());
+ fn from_opened(ids: IdMap, mut w: File) -> std::io::Result<Self> {
+ let offset = w.seek(std::io::SeekFrom::End(0))?.try_into().unwrap();
+ Ok(Writer {
+ ids,
+ w: BufWriter::new(w),
+ offset,
+ })
}
- fn write_str(&mut self, s: &str) {
- self.write_u16(s.len() as u16);
- self.write(s.as_bytes());
+ fn write(&mut self, buf: &[u8]) -> std::io::Result<()> {
+ self.offset += buf.len();
+ self.w.write_all(buf)
}
- fn write_id(&mut self, id: Id) {
- if id.0 > (1 << 24) {
- panic!("too many fileids");
- }
- self.write_u24(id.0);
+ fn write_u16(&mut self, n: u16) -> std::io::Result<()> {
+ self.write(&n.to_le_bytes())
}
- fn finish(&self, w: &mut impl Write) -> std::io::Result<()> {
- w.write_all(&self.0)
+ fn write_u24(&mut self, n: u32) -> std::io::Result<()> {
+ self.write(&n.to_le_bytes()[..3])
}
-}
-/// An opened database, ready for writes.
-pub struct Writer {
- ids: IdMap,
- w: File,
-}
+ fn write_u64(&mut self, n: u64) -> std::io::Result<()> {
+ self.write(&n.to_le_bytes())
+ }
-impl Writer {
- fn create(path: &Path) -> std::io::Result<Self> {
- let f = std::fs::File::create(path)?;
- let mut w = Self::from_opened(IdMap::default(), f);
- w.write_signature()?;
- Ok(w)
+ fn write_str(&mut self, s: &str) -> std::io::Result<()> {
+ assert!(s.len() > 0);
+ self.write_u16(s.len() as u16)?;
+ self.write(s.as_bytes())
}
- fn from_opened(ids: IdMap, w: File) -> Self {
- Writer { ids, w }
+ fn write_id(&mut self, id: Id) -> std::io::Result<()> {
+ if id.0 > (1 << 24) {
+ panic!("too many fileids");
+ }
+ self.write_u24(id.0)
}
fn write_signature(&mut self) -> std::io::Result<()> {
- self.w.write_all("n2db".as_bytes())?;
- self.w.write_all(&u32::to_le_bytes(VERSION))
+ self.write("n2db".as_bytes())?;
+ self.write(&u32::to_le_bytes(VERSION))?;
+ // Must flush this because the writer's destructor may not be called.
+ // (we use std::mem::forget for performance reasons)
+ self.w.flush()?;
+ Ok(())
+ }
+
+ fn ensure_space_for_record(&mut self, record_size: usize) -> std::io::Result<()> {
+ if record_size > DB_CHUNK_SIZE {
+ return Err(std::io::Error::new(
+ std::io::ErrorKind::Other,
+ "Record was bigger than the database chunk size",
+ ));
+ }
+ let d2eoc = distance_to_end_of_chunk(self.offset, DB_HEADER_SIZE, DB_CHUNK_SIZE);
+ if record_size > d2eoc {
+ const ZEROS: [u8; DB_CHUNK_SIZE] = [0; DB_CHUNK_SIZE];
+ self.write(&ZEROS[..d2eoc])?;
+ }
+ Ok(())
}
fn write_path(&mut self, name: &str) -> std::io::Result<()> {
if name.len() >= 0b1000_0000_0000_0000 {
panic!("filename too long");
}
- let mut w = RecordWriter::default();
- w.write_str(&name);
- w.finish(&mut self.w)
+ self.ensure_space_for_record(2 + name.len())?;
+ self.write_str(&name)
+ // We don't need to flush paths because they're always written as
+ // part of a build, and the build will flush.
}
- fn ensure_id(&mut self, file: Arc<graph::File>) -> std::io::Result<Id> {
+ fn ensure_id(&mut self, file: &Arc<graph::File>) -> std::io::Result<Id> {
let id = match self.ids.db_ids.get(&(file.as_ref() as *const graph::File)) {
Some(&id) => id,
None => {
@@ -133,195 +186,317 @@ impl Writer {
hash: BuildHash,
) -> std::io::Result<()> {
let build = &graph.builds[id];
- let mut w = RecordWriter::default();
+
+ // Run ensure_id on all the ids we will use first, because ensure_id
+ // may write a filepath to the output file, and we don't want that
+ // write to occur in the middle of a build record.
let outs = build.outs();
+ let deps = build.discovered_ins();
+ for out in outs {
+ self.ensure_id(&out)?;
+ }
+ for dep in deps {
+ self.ensure_id(&dep)?;
+ }
+
+ let record_size = 2 + 3 * build.outs().len() + 2 + 3 * build.discovered_ins().len() + 8;
+ self.ensure_space_for_record(record_size)?;
+ let initial_offset = self.offset;
+
let mark = (outs.len() as u16) | 0b1000_0000_0000_0000;
- w.write_u16(mark);
+ self.write_u16(mark)?;
for out in outs {
- let id = self.ensure_id(out.clone())?;
- w.write_id(id);
+ let id = self.ensure_id(&out)?;
+ self.write_id(id)?;
}
- let deps = build.discovered_ins();
- w.write_u16(deps.len() as u16);
+ self.write_u16(deps.len() as u16)?;
for dep in deps {
- let id = self.ensure_id(dep.clone())?;
- w.write_id(id);
+ let id = self.ensure_id(&dep)?;
+ self.write_id(id)?;
}
- w.write_u64(hash.0);
- w.finish(&mut self.w)
+ self.write_u64(hash.0)?;
+ assert_eq!(initial_offset + record_size, self.offset);
+ // Flush after writing builds to try to ensure there's a valid
+ // database file even if n2 is killed while running.
+ self.w.flush()?;
+ Ok(())
}
}
-struct Reader<'a> {
- r: BufReader<&'a mut File>,
- ids: IdMap,
- graph: &'a mut Graph,
- hashes: &'a mut Hashes,
+trait ByteReader {
+ fn read_u16(&mut self) -> std::io::Result<u16>;
+ fn read_u24(&mut self) -> std::io::Result<u32>;
+ fn read_u64(&mut self) -> std::io::Result<u64>;
+ fn read_str(&mut self, len: usize) -> std::io::Result<String>;
+ fn read_id(&mut self) -> std::io::Result<Id> {
+ self.read_u24().map(Id)
+ }
}
-impl<'a> Reader<'a> {
+impl ByteReader for &[u8] {
fn read_u16(&mut self) -> std::io::Result<u16> {
- let mut buf: [u8; 2] = [0; 2];
- self.r.read_exact(&mut buf[..])?;
- Ok(u16::from_le_bytes(buf))
+ let result = match self.first_chunk::<2>() {
+ Some(c) => u16::from_le_bytes(*c),
+ None => {
+ return Err(std::io::Error::new(
+ std::io::ErrorKind::UnexpectedEof,
+ "EOF",
+ ))
+ }
+ };
+ *self = &self[2..];
+ Ok(result)
}
fn read_u24(&mut self) -> std::io::Result<u32> {
- let mut buf: [u8; 4] = [0; 4];
- self.r.read_exact(&mut buf[..3])?;
- Ok(u32::from_le_bytes(buf))
+ let result = match self.first_chunk::<3>() {
+ Some(c) => {
+ let buf: [u8; 4] = [c[0], c[1], c[2], 0];
+ u32::from_le_bytes(buf)
+ }
+ None => {
+ return Err(std::io::Error::new(
+ std::io::ErrorKind::UnexpectedEof,
+ "EOF",
+ ))
+ }
+ };
+ *self = &self[3..];
+ Ok(result)
}
fn read_u64(&mut self) -> std::io::Result<u64> {
- let mut buf: [u8; 8] = [0; 8];
- self.r.read_exact(&mut buf)?;
- Ok(u64::from_le_bytes(buf))
- }
-
- fn read_id(&mut self) -> std::io::Result<Id> {
- self.read_u24().map(Id)
+ let result = match self.first_chunk::<8>() {
+ Some(c) => u64::from_le_bytes(*c),
+ None => {
+ return Err(std::io::Error::new(
+ std::io::ErrorKind::UnexpectedEof,
+ "EOF",
+ ))
+ }
+ };
+ *self = &self[8..];
+ Ok(result)
}
fn read_str(&mut self, len: usize) -> std::io::Result<String> {
- let mut buf = vec![0; len];
- self.r.read_exact(buf.as_mut_slice())?;
+ if self.len() < len {
+ return Err(std::io::Error::new(
+ std::io::ErrorKind::UnexpectedEof,
+ "EOF",
+ ));
+ }
+ let mut buf = Vec::new();
+ buf.extend_from_slice(&self[..len]);
+ *self = &self[len..];
Ok(unsafe { String::from_utf8_unchecked(buf) })
}
+}
- fn read_path(&mut self, len: usize) -> std::io::Result<()> {
- let name = self.read_str(len)?;
- // No canonicalization needed, paths were written canonicalized.
- let file = self.graph.files.id_from_canonical(name);
- let dbid = self.ids.fileids.push(file.clone());
- self.ids
- .db_ids
- .insert(file.as_ref() as *const graph::File, dbid);
- Ok(())
- }
+#[derive(Debug)]
+enum ReadError {
+ InvalidSignature,
+ InvalidVersion,
+ IoError(std::io::Error),
+}
- fn read_build(&mut self, len: usize) -> std::io::Result<()> {
- // This record logs a build. We expect all the outputs to be
- // outputs of the same build id; if not, that means the graph has
- // changed since this log, in which case we just ignore it.
- //
- // It's possible we log a build that generates files A B, then
- // change the build file such that it only generates file A; this
- // logic will still attach the old dependencies to A, but it
- // shouldn't matter because the changed command line will cause us
- // to rebuild A regardless, and these dependencies are only used
- // to affect dirty checking, not build order.
-
- let mut unique_bid = None;
- let mut obsolete = false;
- for _ in 0..len {
- let fileid = self.read_id()?;
- if obsolete {
- // Even though we know we don't want this record, we must
- // keep reading to parse through it.
- continue;
- }
- match *self.ids.fileids[fileid].input.lock().unwrap() {
- None => {
- obsolete = true;
- }
- Some(bid) => {
- match unique_bid {
- None => unique_bid = Some(bid),
- Some(unique_bid) if unique_bid == bid => {
- // Ok, matches the existing id.
- }
- Some(_) => {
- // Mismatch.
- unique_bid = None;
- obsolete = true;
- }
- }
- }
- }
+impl Display for ReadError {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ ReadError::InvalidSignature => write!(f, "invalid database header"),
+ ReadError::InvalidVersion => write!(f, "invalid database version"),
+ ReadError::IoError(e) => e.fmt(f),
}
+ }
+}
- let len = self.read_u16()?;
- let mut deps = Vec::with_capacity(len as usize);
- for _ in 0..len {
- let id = self.read_id()?;
- deps.push(self.ids.fileids[id].clone());
- }
+impl std::error::Error for ReadError {}
- let hash = BuildHash(self.read_u64()?);
+impl From<std::io::Error> for ReadError {
+ fn from(value: std::io::Error) -> Self {
+ ReadError::IoError(value)
+ }
+}
- // unique_bid is set here if this record is valid.
- if let Some(id) = unique_bid {
- // Common case: only one associated build.
- self.graph.builds[id].set_discovered_ins(deps);
- self.hashes.set(id, hash);
- }
- Ok(())
+enum Record {
+ File(Arc<graph::File>),
+ Build {
+ outs: Vec<Id>,
+ discovered_deps: Vec<Id>,
+ hash: BuildHash,
+ },
+}
+
+fn read_path(data: &mut &[u8], len: usize, graph: &Graph) -> std::io::Result<Arc<graph::File>> {
+ let name = data.read_str(len)?;
+ // No canonicalization needed, paths were written canonicalized.
+ Ok(graph.files.id_from_canonical(name))
+}
+
+fn read_build(data: &mut &[u8], len: usize) -> std::io::Result<Record> {
+ // This record logs a build. We expect all the outputs to be
+ // outputs of the same build id; if not, that means the graph has
+ // changed since this log, in which case we just ignore it.
+ //
+ // It's possible we log a build that generates files A B, then
+ // change the build file such that it only generates file A; this
+ // logic will still attach the old dependencies to A, but it
+ // shouldn't matter because the changed command line will cause us
+ // to rebuild A regardless, and these dependencies are only used
+ // to affect dirty checking, not build order.
+
+ let mut outs = Vec::with_capacity(len);
+ for _ in 0..len {
+ outs.push(data.read_id()?);
}
- fn read_signature(&mut self) -> anyhow::Result<()> {
- let mut buf: [u8; 4] = [0; 4];
- self.r.read_exact(&mut buf[..])?;
- if buf.as_slice() != "n2db".as_bytes() {
- bail!("invalid db signature");
- }
- self.r.read_exact(&mut buf[..])?;
- let version = u32::from_le_bytes(buf);
- if version != VERSION {
- bail!("db version mismatch: got {version}, expected {VERSION}; TODO: db upgrades etc");
- }
- Ok(())
+ let len = data.read_u16()?;
+ let mut deps = Vec::with_capacity(len as usize);
+ for _ in 0..len {
+ deps.push(data.read_id()?);
+ }
+
+ Ok(Record::Build {
+ outs: outs,
+ discovered_deps: deps,
+ hash: BuildHash(data.read_u64()?),
+ })
+}
+
+fn read_signature(data: &[u8]) -> Result<(), ReadError> {
+ if data.len() < 8 {
+ return Err(ReadError::InvalidSignature);
+ }
+ if &data[..4] != "n2db".as_bytes() {
+ return Err(ReadError::InvalidSignature);
}
+ let version = u32::from_le_bytes(data[4..8].try_into().unwrap());
+ if version != VERSION {
+ return Err(ReadError::InvalidVersion);
+ }
+ Ok(())
+}
- fn read_file(&mut self) -> anyhow::Result<()> {
- self.read_signature()?;
- loop {
- let mut len = match self.read_u16() {
- Ok(r) => r,
- Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
- Err(err) => bail!(err),
- };
- let mask = 0b1000_0000_0000_0000;
- if len & mask == 0 {
- self.read_path(len as usize)?;
- } else {
- len &= !mask;
- self.read_build(len as usize)?;
+/// Reads an on-disk database, loading its state into the provided Graph/Hashes.
+fn read(path: &Path, graph: &mut Graph, hashes: &mut Hashes) -> Result<IdMap, ReadError> {
+ let file_pool = FilePool::new();
+ let data = file_pool.read_file(path)?;
+
+ read_signature(data)?;
+
+ // First, collect all the records from the file. We'll do this in parallel,
+ // splitting the database into fixed size chunks. The size of the chunks is
+ // hardcoded, and the writing code needs to take care to pad to the chunk
+ // size so that this split is valid.
+ let num_file_records = AtomicUsize::new(0);
+ let records = data[DB_HEADER_SIZE..]
+ .chunks(DB_CHUNK_SIZE)
+ .collect::<Vec<_>>()
+ .par_iter()
+ .flat_map(|chunk: &&[u8]| -> Vec<Result<Record, std::io::Error>> {
+ let mut chunk = *chunk;
+ let mut result = Vec::new();
+ loop {
+ let mut len = match chunk.read_u16() {
+ Ok(r) => r,
+ Err(err) if err.kind() == std::io::ErrorKind::UnexpectedEof => break,
+ Err(err) => {
+ result.push(Err(err));
+ break;
+ }
+ };
+ let mask = 0b1000_0000_0000_0000;
+ if len == 0 {
+ // do nothing, this is the padding at the end of the chunk
+ break;
+ } else if len & mask == 0 {
+ num_file_records.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
+ result
+ .push(read_path(&mut chunk, len as usize, graph).map(|x| Record::File(x)));
+ } else {
+ len &= !mask;
+ result.push(read_build(&mut chunk, len as usize));
+ }
+ }
+ result
+ })
+ .collect::<Result<Vec<Record>, std::io::Error>>()?;
+
+ // Drop the file_pool in parallel because munmap can be slow
+ rayon::spawn(|| drop(file_pool));
+
+ // Then, loop over all the records in serial, populating the id maps
+ // and filling out the hashes and discovered deps in the builds.
+ // The only reason we do this in serial is so that the file records are
+ // added in order; we could do those first and then the builds in parallel
+ // after if needed.
+ let num_file_records = num_file_records.load(std::sync::atomic::Ordering::Relaxed);
+ let mut ids = IdMap::default();
+ ids.fileids.reserve(num_file_records);
+ ids.db_ids.reserve(num_file_records);
+ for record in records.into_iter() {
+ match record {
+ Record::File(f) => {
+ let ptr = f.as_ref() as *const graph::File;
+ let id = ids.fileids.push(f);
+ ids.db_ids.insert(ptr, id);
+ }
+ Record::Build {
+ outs,
+ discovered_deps,
+ hash,
+ } => {
+ let mut unique_bid = None;
+ for out in outs {
+ match *ids.fileids[out].input.lock().unwrap() {
+ Some(bid) => match unique_bid {
+ None => unique_bid = Some(bid),
+ Some(x) if x == bid => {
+ // Ok, matches the existing id.
+ }
+ Some(_) => {
+ // mismatch, ignore this build
+ unique_bid = None;
+ break;
+ }
+ },
+ None => {
+ unique_bid = None;
+ break;
+ }
+ }
+ }
+ if let Some(id) = unique_bid {
+ let deps = discovered_deps
+ .into_iter()
+ .map(|d| ids.fileids[d].clone())
+ .collect();
+ // Common case: only one associated build.
+ graph.builds[id].set_discovered_ins(deps);
+ hashes.set(id, hash);
+ }
}
}
- Ok(())
}
- /// Reads an on-disk database, loading its state into the provided Graph/Hashes.
- fn read(f: &mut File, graph: &mut Graph, hashes: &mut Hashes) -> anyhow::Result<IdMap> {
- let mut r = Reader {
- r: std::io::BufReader::new(f),
- ids: IdMap::default(),
- graph,
- hashes,
- };
- r.read_file()?;
-
- Ok(r.ids)
- }
+ Ok(ids)
}
/// Opens or creates an on-disk database, loading its state into the provided Graph.
pub fn open(path: &Path, graph: &mut Graph, hashes: &mut Hashes) -> anyhow::Result<Writer> {
- match std::fs::OpenOptions::new()
- .read(true)
- .append(true)
- .open(path)
- {
- Ok(mut f) => {
- let ids = Reader::read(&mut f, graph, hashes)?;
- Ok(Writer::from_opened(ids, f))
- }
- Err(err) if err.kind() == std::io::ErrorKind::NotFound => {
- let w = Writer::create(path)?;
+ match read(path, graph, hashes) {
+ Ok(ids) => {
+ let f = std::fs::OpenOptions::new().append(true).open(path)?;
+ let w = Writer::from_opened(ids, f)?;
+ assert!(w.offset > 0);
Ok(w)
}
- Err(err) => Err(anyhow!(err)),
+ Err(ReadError::InvalidVersion) => Ok(Writer::create(path)?),
+ Err(ReadError::IoError(err)) if err.kind() == std::io::ErrorKind::NotFound => {
+ Ok(Writer::create(path)?)
+ }
+ Err(e) => Err(e)?,
}
}
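
The new v2 format pads records to fixed 1 MB chunk boundaries (DB_HEADER_SIZE/DB_CHUNK_SIZE) so the reader can split the file and parse chunks in parallel. A self-contained sketch of the boundary math, mirroring distance_to_end_of_chunk and its tests above (illustrative 8-byte header and 10-byte chunks):

    // Mirrors distance_to_end_of_chunk from src/db.rs: how many bytes remain
    // before the current chunk ends. A writer that cannot fit a whole record
    // in that space zero-pads to the boundary, so no record straddles chunks.
    fn distance_to_end_of_chunk(offset: usize, header_size: usize, chunk_size: usize) -> usize {
        let chunk_end = if offset < header_size {
            header_size + chunk_size
        } else if (offset - header_size) % chunk_size == 0 {
            (offset - header_size + 1).next_multiple_of(chunk_size) + header_size
        } else {
            (offset - header_size).next_multiple_of(chunk_size) + header_size
        };
        chunk_end - offset
    }

    fn main() {
        // At offset 17 only 1 byte remains in the first 10-byte chunk, so a
        // 2-byte record forces 1 byte of zero padding before being written.
        assert_eq!(distance_to_end_of_chunk(17, 8, 10), 1);
        // Immediately past a boundary, a full chunk is available again.
        assert_eq!(distance_to_end_of_chunk(18, 8, 10), 10);
    }
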
diff --git a/src/densemap.rs b/src/densemap.rs
index b95b044..e5da650 100644
--- a/src/densemap.rs
+++ b/src/densemap.rs
@@ -44,6 +44,10 @@ impl<K: Index, V> DenseMap<K, V> {
}
}
+ pub fn reserve(&mut self, additional: usize) {
+ self.vec.reserve(additional)
+ }
+
pub fn lookup(&self, k: K) -> Option<&V> {
self.vec.get(k.index())
}
@@ -57,6 +61,10 @@ impl<K: Index, V> DenseMap<K, V> {
self.vec.push(val);
id
}
+
+ pub fn values(&self) -> impl Iterator<Item = &V> + '_ {
+ self.vec.iter()
+ }
}
impl<K: Index, V: Clone> DenseMap<K, V> {
diff --git a/src/file_pool.rs b/src/file_pool.rs
index 499812c..a6e597c 100644
--- a/src/file_pool.rs
+++ b/src/file_pool.rs
@@ -1,4 +1,3 @@
-use anyhow::bail;
use core::slice;
use std::{path::Path, sync::Mutex};
@@ -30,7 +29,7 @@ mod mmap {
}
}
- pub fn read_file(&self, path: &Path) -> anyhow::Result<&[u8]> {
+ pub fn read_file(&self, path: &Path) -> std::io::Result<&[u8]> {
let page_size = unsafe { sysconf(_SC_PAGESIZE) } as usize;
let file = std::fs::File::open(path)?;
let fd = file.as_fd().as_raw_fd();
@@ -40,7 +39,10 @@ mod mmap {
// size + 1 to add a null terminator.
let addr = mmap(null_mut(), mapping_size, PROT_READ, MAP_PRIVATE, fd, 0);
if addr == MAP_FAILED {
- bail!("mmap failed");
+ return Err(std::io::Error::new(
+ std::io::ErrorKind::Other,
+ "mmap failed",
+ ));
}
let addr2 = mmap(
@@ -52,7 +54,10 @@ mod mmap {
0,
);
if addr2 == MAP_FAILED {
- bail!("mmap failed");
+ return Err(std::io::Error::new(
+ std::io::ErrorKind::Other,
+ "mmap failed",
+ ));
}
*(addr.add(mapping_size).sub(page_size) as *mut u8) = 0;
// The manpages say the extra bytes past the end of the file are
@@ -109,7 +114,7 @@ mod read {
}
}
- pub fn read_file(&self, path: &Path) -> anyhow::Result<&[u8]> {
+ pub fn read_file(&self, path: &Path) -> std::io::Result<&[u8]> {
let bytes = read_file_with_nul(path)?;
let addr = bytes.as_ptr();
let len = bytes.len();
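
The mmap path maps one page past the end of the file so it can plant a NUL terminator; the read-based fallback achieves the same by appending the terminator in memory. A minimal sketch of that fallback, mirroring the read_file_with_nul helper referenced above (assumed shape, not the exact implementation):

    use std::path::Path;

    // Hedged sketch of the non-mmap fallback: read the whole file and append
    // a NUL byte so parsers can rely on a terminator, matching what the mmap
    // path achieves by zeroing the byte just past the file's last page.
    fn read_file_with_nul(path: &Path) -> std::io::Result<Vec<u8>> {
        let mut bytes = std::fs::read(path)?;
        bytes.push(0);
        Ok(bytes)
    }
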
diff --git a/src/graph.rs b/src/graph.rs
index 20ff1c9..1fd89a7 100644
--- a/src/graph.rs
+++ b/src/graph.rs
@@ -334,24 +334,30 @@ impl Build {
}
fn get_binding(&self, key: &str, escape_special_vars: bool) -> Option<String> {
+ let scope = self.scope.as_ref().unwrap();
+
+ // Interestingly, the lookup order starts with build -> scope, but if
+ // the build doesn't have the binding, falls back to
+ // rule -> implicits -> build -> scope. A more logical order might be to
+ // always use rule -> (implicits ->) build -> scope, which does work
+ // for the android build, but yields incorrect descriptions: the
+ // description from the rule is used instead of the one from the build.
+ if let Some(x) = self.bindings.get(key) {
+ return Some(x.evaluate(&[], scope, self.scope_position));
+ }
+
let implicit_vars = BuildImplicitVars {
explicit_ins: &self.ins.ids[..self.ins.explicit],
explicit_outs: &self.outs.ids[..self.outs.explicit],
escape: escape_special_vars,
};
- let scope = self.scope.as_ref().unwrap();
+
let rule = scope.get_rule(&self.rule, self.scope_position).unwrap();
- Some(match rule.vars.get(key) {
- Some(val) => val.evaluate(
- &[&implicit_vars, &self.bindings],
- scope,
- self.scope_position,
- ),
- None => self
- .bindings
- .get(key)?
- .evaluate(&[], scope, self.scope_position),
- })
+ Some(rule.vars.get(key)?.evaluate(
+ &[&implicit_vars, &self.bindings],
+ scope,
+ self.scope_position
+ ))
}
pub fn get_rspfile(&self) -> anyhow::Result<Option<RspFile>> {
diff --git a/src/lib.rs b/src/lib.rs
index 64bbe08..ea8f614 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -21,13 +21,10 @@ mod signal;
mod smallmap;
mod task;
mod terminal;
+mod tools;
mod trace;
mod work;
-// TODO: Import Jemalloc into android and re-enable it here.
-// #[cfg(not(any(windows, target_arch = "wasm32")))]
-// use jemallocator::Jemalloc;
-
-// #[cfg(not(any(windows, target_arch = "wasm32")))]
-// #[global_allocator]
-// static GLOBAL: Jemalloc = Jemalloc;
+#[cfg(target_os = "linux")]
+#[global_allocator]
+static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
diff --git a/src/load.rs b/src/load.rs
index 5806195..ea99238 100644
--- a/src/load.rs
+++ b/src/load.rs
@@ -413,79 +413,78 @@ pub struct State {
pub fn read(build_filename: &str) -> anyhow::Result<State> {
let build_filename = canon_path(build_filename);
let file_pool = FilePool::new();
- let files = GraphFiles::default();
let num_threads = available_parallelism()?.get();
let pool = rayon::ThreadPoolBuilder::new()
.num_threads(num_threads)
.build()?;
- let (defaults, builddir, pools, builds) =
- trace::scope("loader.read_file", || -> anyhow::Result<_> {
- pool.scope(|_| {
- let mut results = subninja(num_threads, &files, &file_pool, build_filename, None)?;
-
- let mut pools = SmallMap::default();
- let mut defaults = Vec::new();
- let mut num_builds = 0;
- trace::scope("add pools and defaults", || -> anyhow::Result<()> {
- for clump in &mut results.clumps {
- for pool in &clump.pools {
- if !pools.insert_if_absent(pool.name.to_owned(), pool.depth) {
- bail!("duplicate pool {}", pool.name);
- }
- }
- for default in &mut clump.defaults {
- defaults.append(&mut default.evaluated);
+ trace::scope("loader.read_file", || -> anyhow::Result<_> {
+ pool.scope(|_| {
+ let files = GraphFiles::default();
+ let mut results = subninja(num_threads, &files, &file_pool, build_filename, None)?;
+
+ let mut pools = SmallMap::default();
+ let mut defaults = Vec::new();
+ let mut num_builds = 0;
+ trace::scope("add pools and defaults", || -> anyhow::Result<()> {
+ for clump in &mut results.clumps {
+ for pool in &clump.pools {
+ if !pools.insert_if_absent(pool.name.to_owned(), pool.depth) {
+ bail!("duplicate pool {}", pool.name);
}
- num_builds += clump.builds.len();
}
- Ok(())
- })?;
- let mut builds = trace::scope("allocate and concat builds", || {
- let mut builds = Vec::with_capacity(num_builds);
- for clump in &mut results.clumps {
- builds.append(&mut clump.builds);
+ for default in &mut clump.defaults {
+ defaults.append(&mut default.evaluated);
}
- builds
- });
- let builddir = results.builddir.take();
- drop(results);
- // Turns out munmap is rather slow, unmapping the android ninja
- // files takes ~150ms. Do this in parallel with initialize_build.
- rayon::spawn(move || {
- drop(file_pool);
- });
- trace::scope("initialize builds", move || {
- builds
- .par_iter_mut()
- .enumerate()
- .try_for_each(|(id, build)| {
- build.id = BuildId::from(id);
- graph::Graph::initialize_build(build)
- })?;
- Ok((defaults, builddir, pools, builds))
- })
+ num_builds += clump.builds.len();
+ }
+ Ok(())
+ })?;
+ let mut builds = trace::scope("allocate and concat builds", || {
+ let mut builds = Vec::with_capacity(num_builds);
+ for clump in &mut results.clumps {
+ builds.append(&mut clump.builds);
+ }
+ builds
+ });
+ let builddir = results.builddir.take();
+ drop(results);
+ // Turns out munmap is rather slow; unmapping the android ninja
+ // files takes ~150ms. Do this in parallel with initialize_build.
+ rayon::spawn(move || {
+ drop(file_pool);
+ });
+ trace::scope("initialize builds", || {
+ builds
+ .par_iter_mut()
+ .enumerate()
+ .try_for_each(|(id, build)| {
+ build.id = BuildId::from(id);
+ graph::Graph::initialize_build(build)
+ })
+ })?;
+
+ let mut graph = Graph::new(builds, files)?;
+ let mut hashes = graph::Hashes::default();
+
+ let db = trace::scope("db::open", || {
+ let mut db_path = PathBuf::from(".n2_db");
+ if let Some(builddir) = &builddir {
+ db_path = Path::new(&builddir).join(db_path);
+ if let Some(parent) = db_path.parent() {
+ std::fs::create_dir_all(parent)?;
+ }
+ };
+ db::open(&db_path, &mut graph, &mut hashes)
})
- })?;
-
- let mut graph = Graph::new(builds, files)?;
- let mut hashes = graph::Hashes::default();
- let db = trace::scope("db::open", || {
- let mut db_path = PathBuf::from(".n2_db");
- if let Some(builddir) = &builddir {
- db_path = Path::new(&builddir).join(db_path);
- if let Some(parent) = db_path.parent() {
- std::fs::create_dir_all(parent)?;
- }
- };
- db::open(&db_path, &mut graph, &mut hashes)
- })
- .map_err(|err| anyhow!("load .n2_db: {}", err))?;
-
- Ok(State {
- graph,
- db,
- hashes,
- default: defaults,
- pools,
+ .map_err(|err| anyhow!("load .n2_db: {}", err))?;
+
+ Ok(State {
+ graph,
+ db,
+ hashes,
+ default: defaults,
+ pools,
+ })
+ })
})
}
diff --git a/src/progress.rs b/src/progress.rs
index 03a527d..37bc72c 100644
--- a/src/progress.rs
+++ b/src/progress.rs
@@ -430,6 +430,9 @@ impl<'a> FrontendProtoProgress<'a> {
let size: u32 = proto.compute_size().try_into().unwrap();
self.output.write_raw_varint32(size)?;
proto.write_to_with_cached_sizes(&mut self.output)?;
+ // There may be a long gap before the next progress update, so flush
+ // the output immediately to get the message displayed in the UI quickly.
+ self.output.flush()?;
Ok(())
}
}
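
The progress stream is length-delimited: a varint size prefix followed by the encoded message, written above via the protobuf crate's write_raw_varint32. A hand-rolled sketch of that framing (illustrative; the real code uses the protobuf crate's stream):

    use std::io::{self, Write};

    // Hedged sketch of length-delimited framing: varint size, then payload.
    fn write_frame(w: &mut impl Write, payload: &[u8]) -> io::Result<()> {
        let mut n = payload.len() as u32;
        loop {
            // Encode 7 bits at a time, MSB set on all but the last byte.
            let byte = (n & 0x7f) as u8;
            n >>= 7;
            if n == 0 {
                w.write_all(&[byte])?;
                break;
            }
            w.write_all(&[byte | 0x80])?;
        }
        w.write_all(payload)?;
        w.flush() // the fix in this hunk: flush promptly so the UI updates
    }
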
diff --git a/src/run.rs b/src/run.rs
index 492acbb..558d7ad 100644
--- a/src/run.rs
+++ b/src/run.rs
@@ -1,10 +1,8 @@
use crate::{
- load,
- progress::{DumbConsoleProgress, FancyConsoleProgress, FrontendProtoProgress, Progress},
- terminal, trace, work,
+ load, progress::{DumbConsoleProgress, FancyConsoleProgress, FrontendProtoProgress, Progress}, terminal, tools, trace, work
};
use anyhow::anyhow;
-use std::{convert::TryInto, path::Path};
+use std::{convert::TryInto, path::Path, time::Instant};
fn build(
options: work::Options,
@@ -197,7 +195,7 @@ fn run_impl() -> anyhow::Result<i32> {
println!(" trace generate json performance trace");
return Ok(1);
}
- "trace" => trace::open("trace.json")?,
+ "trace" => trace::enable_tracing(),
_ => anyhow::bail!("unknown -d {:?}, use -d list to list", debug),
}
}
@@ -217,7 +215,8 @@ fn run_impl() -> anyhow::Result<i32> {
match tool.as_str() {
"list" => {
println!("subcommands:");
- println!(" (none yet, but see README if you're looking here trying to get CMake to work)");
+ println!(" targets list targets by their rule or depth in the DAG");
+ println!(" (see the README if you're looking here trying to get CMake to work)");
return Ok(1);
}
"compdb" if fake_ninja_compat => {
@@ -234,6 +233,7 @@ fn run_impl() -> anyhow::Result<i32> {
// on.
options.adopt = true;
}
+ "targets" => return tools::tool_targets(&args.build_file, &args.targets),
_ => {
anyhow::bail!("unknown -t {:?}, use -t list to list", tool);
}
@@ -268,7 +268,8 @@ fn run_impl() -> anyhow::Result<i32> {
}
pub fn run() -> anyhow::Result<i32> {
+ let start = Instant::now();
let res = run_impl();
- trace::close();
+ trace::write_trace_file("trace.json", start)?;
res
}
diff --git a/src/tools/mod.rs b/src/tools/mod.rs
new file mode 100644
index 0000000..9fbd69b
--- /dev/null
+++ b/src/tools/mod.rs
@@ -0,0 +1,2 @@
+mod targets;
+pub use targets::tool_targets;
diff --git a/src/tools/targets.rs b/src/tools/targets.rs
new file mode 100644
index 0000000..15c0c0b
--- /dev/null
+++ b/src/tools/targets.rs
@@ -0,0 +1,122 @@
+use crate::graph::File;
+use crate::load;
+use std::collections::BTreeSet;
+use std::sync::Arc;
+use anyhow::bail;
+
+// Implements the "targets" tool.
+//
+// The targets tool is rather convoluted. It has 3 modes. The mode to use is
+// the first argument, and the default is "depth".
+// - depth: prints a tree of files and their dependencies, starting from all
+// of the root nodes in the graph. An argument can be given to
+// specify the maximum depth to print out, the default is 1. A max
+// depth of 0 means "infinity".
+// - rule: the rule mode takes an argument that's the name of a rule. It will
+// print out all the files produced by builds using that rule. If the
+// rule argument is not given, it will instead print out all
+// "source files" in the graph, that is, files that are not produced
+// by any build.
+// - all: prints out the output files of all builds and the name of the rule
+// used to produce them.
+pub fn tool_targets(build_file: &str, args: &Vec<String>) -> anyhow::Result<i32> {
+ let state = load::read(build_file)?;
+ match args.first().map(|x| x.as_str()) {
+ Some("all") => tool_targets_all(&args[1..], state)?,
+ Some("rule") => tool_targets_rule(&args[1..], state)?,
+ Some("depth") => tool_targets_depth(&args[1..], state)?,
+ None => tool_targets_depth(args, state)?,
+ Some(mode) => bail!("unknown target tool mode {:?}, valid modes are \"rule\", \"depth\", or \"all\".", mode),
+ }
+ Ok(0)
+}
+
+fn tool_targets_all(args: &[String], state: load::State) -> anyhow::Result<()> {
+ if !args.is_empty() {
+ bail!("too many arguments to targets tool");
+ }
+ for build in state.graph.builds.values() {
+ for file in &build.outs.ids {
+ println!("{}: {}", file.name, build.rule)
+ }
+ }
+ Ok(())
+}
+
+fn tool_targets_rule(args: &[String], state: load::State) -> anyhow::Result<()> {
+ match args.len() {
+ 0 => {
+ let mut results = BTreeSet::new();
+ for build in state.graph.builds.values() {
+ for file in &build.ins.ids {
+ if file.input.lock().unwrap().is_none() {
+ results.insert(file.name.clone());
+ }
+ }
+ }
+ for result in &results {
+ println!("{}", result);
+ }
+ },
+ 1 => {
+ let rule = &args[0];
+ let mut results = Vec::new();
+ for build in state.graph.builds.values() {
+ if rule == &build.rule {
+ for file in &build.outs.ids {
+ results.push(file.name.clone());
+ }
+ }
+ }
+ results.sort_unstable();
+ for result in &results {
+ println!("{}", result);
+ }
+ },
+ _ => bail!("too many arguments to targets tool"),
+ };
+ Ok(())
+}
+
+fn tool_targets_depth(args: &[String], state: load::State) -> anyhow::Result<()> {
+ let max_depth = match args.len() {
+ 0 => 1,
+ 1 => args[0].parse::<i32>()?,
+ _ => bail!("too many arguments to targets tool"),
+ };
+ // The graph contains file entries for included ninja files, so
+ // we only consider files that are produced by a build.
+ let mut root_nodes = Vec::new();
+ for build in state.graph.builds.values() {
+ for file in &build.outs.ids {
+ if file.dependents.is_empty() {
+ root_nodes.push(file.clone());
+ }
+ }
+ }
+ // sort the root nodes because the builds in the graph are in a nondeterministic order
+ root_nodes.sort_unstable_by(|f1, f2| f1.name.cmp(&f2.name));
+ print_files_recursively(&state, &root_nodes, 0, max_depth);
+ Ok(())
+}
+
+/// Print all the files in the `files` argument, and then all of their
+/// dependencies, recursively. `depth` determines the indentation; start it
+/// at 0. max_depth limits the recursion depth, but if it's <= 0, the
+/// recursion depth will not be limited.
+fn print_files_recursively(state: &load::State, files: &[Arc<File>], depth: i32, max_depth: i32) {
+ for file in files {
+ for _ in 0..depth {
+ print!(" ");
+ }
+ if let Some(build_id) = *file.input.lock().unwrap() {
+ let build = state.graph.builds.lookup(build_id).unwrap();
+ println!("{}: {}", &file.name, build.rule);
+ if max_depth <= 0 || depth < max_depth - 1 {
+ print_files_recursively(state, build.ordering_ins(), depth+1, max_depth);
+ }
+ } else {
+ println!("{}", &file.name);
+ }
+ }
+}
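
For reference, the three modes map to invocations like these (mirroring the e2e tests added at the end of this change; myrule is the rule name those tests use):

    n2 -t targets              # depth mode with max depth 1 (the default)
    n2 -t targets depth 0      # print the whole DAG (0 means unlimited)
    n2 -t targets rule         # list source files not produced by any build
    n2 -t targets rule myrule  # outputs of every build using rule "myrule"
    n2 -t targets all          # every output file with the rule that makes it
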
diff --git a/src/trace.rs b/src/trace.rs
index c862bfc..e5c5e85 100644
--- a/src/trace.rs
+++ b/src/trace.rs
@@ -1,124 +1,96 @@
//! Chrome trace output.
+use std::borrow::Cow;
use std::fs::File;
-use std::io::{BufWriter, Write};
+use std::io::{self, BufWriter, Write};
+use std::sync::atomic::AtomicBool;
use std::time::Instant;
-static mut TRACE: Option<Trace> = None;
+use crate::concurrent_linked_list::ConcurrentLinkedList;
-pub struct Trace {
- start: Instant,
- w: BufWriter<File>,
- count: usize,
-}
+static TRACE: ConcurrentLinkedList<TraceEvent> = ConcurrentLinkedList::new();
+static ENABLED: AtomicBool = AtomicBool::new(false);
-impl Trace {
- fn new(path: &str) -> std::io::Result<Self> {
- let mut w = BufWriter::new(File::create(path)?);
- writeln!(w, "[")?;
- Ok(Trace {
- start: Instant::now(),
- w,
- count: 0,
- })
- }
- fn write_event_prefix(&mut self, name: &str, ts: Instant) {
- if self.count > 0 {
- write!(self.w, ",").unwrap();
+#[non_exhaustive] // potentially add instant events later
+pub enum TraceEvent {
+ Complete(CompleteEvent),
+}
+
+impl TraceEvent {
+ pub fn write_json(&self, w: &mut dyn std::io::Write, start: Instant) -> io::Result<()> {
+ match self {
+ TraceEvent::Complete(e) => {
+ write!(
+ w,
+ "{{\"pid\":0, \"name\":{:?}, \"ts\":{}, \"tid\": {}, \"ph\":\"X\", \"dur\":{}}}",
+ e.name,
+ e.start.duration_since(start).as_micros(),
+ e.tid,
+ e.end.duration_since(e.start).as_micros()
+ )
+ },
}
- self.count += 1;
- write!(
- self.w,
- "{{\"pid\":0, \"name\":{:?}, \"ts\":{}, ",
- name,
- ts.duration_since(self.start).as_micros(),
- )
- .unwrap();
}
+}
- pub fn write_complete(&mut self, name: &str, tid: usize, start: Instant, end: Instant) {
- self.write_event_prefix(name, start);
- writeln!(
- self.w,
- "\"tid\": {}, \"ph\":\"X\", \"dur\":{}}}",
- tid,
- end.duration_since(start).as_micros()
- )
- .unwrap();
- }
+pub struct CompleteEvent {
+ name: Cow<'static, str>,
+ tid: usize,
+ start: Instant,
+ end: Instant,
+}
- fn scope<T>(&mut self, name: &str, f: impl FnOnce() -> T) -> T {
- let start = Instant::now();
- let result = f();
- let end = Instant::now();
- self.write_complete(name, 0, start, end);
- result
+impl CompleteEvent {
+ pub fn new(name: &'static str, tid: usize, start: Instant, end: Instant) -> Self {
+ Self { name: Cow::Borrowed(name), tid, start, end }
}
-
- /*
- These functions were useful when developing, but are currently unused.
-
- pub fn write_instant(&mut self, name: &str) {
- self.write_event_prefix(name, Instant::now());
- writeln!(self.w, "\"ph\":\"i\"}}").unwrap();
+ pub fn new_owned(name: String, tid: usize, start: Instant, end: Instant) -> Self {
+ Self { name: Cow::Owned(name), tid, start, end }
}
+}
- pub fn write_counts<'a>(
- &mut self,
- name: &str,
- counts: impl Iterator<Item = &'a (&'a str, usize)>,
- ) {
- self.write_event_prefix(name, Instant::now());
- write!(self.w, "\"ph\":\"C\", \"args\":{{").unwrap();
- for (i, (name, count)) in counts.enumerate() {
- if i > 0 {
- write!(self.w, ",").unwrap();
- }
- write!(self.w, "\"{}\":{}", name, count).unwrap();
- }
- writeln!(self.w, "}}}}").unwrap();
- }
- */
+pub fn enable_tracing() {
+ ENABLED.store(true, std::sync::atomic::Ordering::Relaxed);
+}
- fn close(&mut self) {
- self.write_complete("main", 0, self.start, Instant::now());
- writeln!(self.w, "]").unwrap();
- self.w.flush().unwrap();
+pub fn trace(event: TraceEvent) {
+ if is_enabled() {
+ TRACE.prepend(event);
}
}
-pub fn open(path: &str) -> std::io::Result<()> {
- let trace = Trace::new(path)?;
- // Safety: accessing global mut, not threadsafe.
- unsafe {
- TRACE = Some(trace);
+pub fn write_trace_file(path: &str, start: Instant) -> std::io::Result<()> {
+ if !is_enabled() {
+ return Ok(())
}
+ let mut w = BufWriter::new(File::create(path)?);
+ writeln!(w, "[")?;
+ for (i, event) in TRACE.iter().enumerate() {
+ if i > 0 {
+ write!(w, ",")?;
+ }
+ event.write_json(&mut w, start)?;
+ writeln!(&mut w)?;
+ }
+ writeln!(w, "]")?;
Ok(())
}
#[inline]
-pub fn if_enabled(f: impl FnOnce(&mut Trace)) {
- // Safety: accessing global mut, not threadsafe.
- unsafe {
- match &mut TRACE {
- None => {}
- Some(t) => f(t),
- }
- }
+pub fn is_enabled() -> bool {
+ ENABLED.load(std::sync::atomic::Ordering::Relaxed)
}
#[inline]
pub fn scope<T>(name: &'static str, f: impl FnOnce() -> T) -> T {
- // Safety: accessing global mut, not threadsafe.
- unsafe {
- match &mut TRACE {
- None => f(),
- Some(t) => t.scope(name, f),
- }
+ if is_enabled() {
+ let start = Instant::now();
+ let result = f();
+ let end = Instant::now();
+ TRACE.prepend(TraceEvent::Complete(CompleteEvent::new(name, 0, start, end)));
+ result
+ } else {
+ f()
}
}
-
-pub fn close() {
- if_enabled(|t| t.close());
-}
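
With events now accumulated in the static list, the flow in src/run.rs above becomes: enable, record scopes, write once at exit. A minimal sketch using this module's API (shown unqualified; inside the crate these are trace::enable_tracing and friends):

    use std::time::Instant;

    fn main() -> std::io::Result<()> {
        let start = Instant::now();
        enable_tracing(); // set when the user passes -d trace
        let answer = scope("compute", || 40 + 2); // records a Complete event
        assert_eq!(answer, 42);
        // write_trace_file is a no-op unless tracing was enabled, so run()
        // can call it unconditionally.
        write_trace_file("trace.json", start)
    }
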
diff --git a/src/work.rs b/src/work.rs
index 9b5604c..118bac7 100644
--- a/src/work.rs
+++ b/src/work.rs
@@ -1,5 +1,6 @@
//! Build runner, choosing and executing tasks as determined by out of date inputs.
+use crate::trace::CompleteEvent;
use crate::{
canon::canon_path, db, densemap::DenseMap, graph::*, hash, process, progress,
progress::Progress, signal, smallmap::SmallMap, task, trace,
@@ -526,18 +527,31 @@ impl<'a> Work<'a> {
}
}
- let input_was_missing = self.graph.builds[id]
+ // Check for missing input files. This should not be possible, because
+ // they would have been requested to be built previously, and if their
+ // build had failed, the missing-output check below would already have
+ // failed.
+ let missing_input_file = self.graph.builds[id]
.dirtying_ins()
.iter()
- .any(|file| self.file_state.get(file.as_ref()).unwrap() == MTime::Missing);
+ .find(|file| self.file_state.get(file.as_ref()).unwrap() == MTime::Missing);
+ if let Some(missing) = missing_input_file {
+ anyhow::bail!(
+ "{}: input file missing: {}",
+ self.graph.builds[id].location,
+ missing.name
+ );
+ }
// Update any cached state of the output files to reflect their new state.
- let output_was_missing = self.stat_all_outputs(id)?.is_some();
-
- if input_was_missing || output_was_missing {
- // If a file is missing, don't record the build in in the db.
- // It will be considered dirty next time anyway due to the missing file.
- return Ok(());
+ // Also error out if any output files were not created by the command.
+ let missing_output_file = self.stat_all_outputs(id)?;
+ if let Some(missing) = missing_output_file {
+ anyhow::bail!(
+ "{}: output file missing after successful execution: {}",
+ self.graph.builds[id].location,
+ missing.name
+ );
}
let build = &self.graph.builds[id];
@@ -788,10 +802,10 @@ impl<'a> Work<'a> {
self.progress.task_output(id, line);
});
let build = &self.graph.builds[task.buildid];
- trace::if_enabled(|t| {
+ if trace::is_enabled() {
let desc = progress::build_message(build);
- t.write_complete(&desc, task.tid + 1, task.span.0, task.span.1);
- });
+ trace::trace(trace::TraceEvent::Complete(CompleteEvent::new_owned(desc, task.tid + 1, task.span.0, task.span.1)));
+ }
self.progress
.task_finished(task.buildid, build, &task.result);
diff --git a/tests/e2e/basic.rs b/tests/e2e/basic.rs
index 3bca1b3..d656738 100644
--- a/tests/e2e/basic.rs
+++ b/tests/e2e/basic.rs
@@ -291,20 +291,20 @@ rule touch_in
Ok(())
}
+// cfg(unix) because I don't have a Windows computer to test it on, but it's
+// really a Windows-focused test.
+#[cfg(unix)]
#[test]
fn showincludes() -> anyhow::Result<()> {
let space = TestSpace::new()?;
space.write(
"build.ninja",
- &[
- ECHO_RULE,
- "
-build out: echo
- text = Note: including file: foo
+ "
+rule myrule
+ command = touch out && echo Note: including file: foo
+build out: myrule
deps = msvc
",
- ]
- .join("\n"),
)?;
space.write("foo", "")?;
@@ -332,7 +332,7 @@ var = 123
rule custom
command = $cmd $var
build out: custom
- cmd = echo $var hello
+ cmd = touch out && echo $var hello
",
]
.join("\n"),
diff --git a/tests/e2e/missing.rs b/tests/e2e/missing.rs
index 1b78d36..9ad7f91 100644
--- a/tests/e2e/missing.rs
+++ b/tests/e2e/missing.rs
@@ -16,26 +16,26 @@ fn missing_input() -> anyhow::Result<()> {
Ok(())
}
+#[cfg(unix)]
#[test]
-fn missing_generated() -> anyhow::Result<()> {
+fn missing_output_file_gives_error() -> anyhow::Result<()> {
+ use anyhow::bail;
+
let space = TestSpace::new()?;
space.write(
"build.ninja",
- &[
- TOUCH_RULE,
- ECHO_RULE,
- "build mid: echo", // never writes output
- "build out: touch mid", // uses never-written output
- "",
- ]
- .join("\n"),
+ "
+rule myrule
+ command = touch out2
+build out: myrule
+",
)?;
- // https://github.com/evmar/n2/issues/69
-
- let out = space.run_expect(&mut n2_command(vec!["out"]))?;
- assert_output_contains(&out, "echo mid");
- assert_output_contains(&out, "touch out");
+ let out = space.run(&mut n2_command(vec!["out"]))?;
+ if out.status.success() {
+ bail!("expected error, got success");
+ }
+ assert_output_contains(&out, "output file missing after successful execution: out");
Ok(())
}
diff --git a/tests/e2e/mod.rs b/tests/e2e/mod.rs
index 3399701..bb267ee 100644
--- a/tests/e2e/mod.rs
+++ b/tests/e2e/mod.rs
@@ -6,6 +6,7 @@ mod discovered;
mod include_and_subninja;
mod missing;
mod regen;
+mod tools;
mod validations;
use anyhow::anyhow;
diff --git a/tests/e2e/tools/mod.rs b/tests/e2e/tools/mod.rs
new file mode 100644
index 0000000..54abf01
--- /dev/null
+++ b/tests/e2e/tools/mod.rs
@@ -0,0 +1 @@
+mod targets_test;
diff --git a/tests/e2e/tools/targets_test.rs b/tests/e2e/tools/targets_test.rs
new file mode 100644
index 0000000..208efa5
--- /dev/null
+++ b/tests/e2e/tools/targets_test.rs
@@ -0,0 +1,110 @@
+use crate::e2e::{n2_command, TestSpace};
+
+#[test]
+fn depth() -> anyhow::Result<()> {
+ let space = TestSpace::new()?;
+ space.write(
+ "build.ninja",
+ "
+rule myrule
+
+build a: myrule b c d
+build b: myrule
+build c: myrule
+build d: myrule e
+build e: myrule
+
+build other_top_level: myrule
+",
+ )?;
+ let output = space.run_expect(&mut n2_command(vec!["-t", "targets"]))?;
+ assert_eq!(String::from_utf8_lossy(&output.stdout), "a: myrule
+other_top_level: myrule
+");
+
+ let output = space.run_expect(&mut n2_command(vec!["-t", "targets", "depth"]))?;
+ assert_eq!(String::from_utf8_lossy(&output.stdout), "a: myrule
+other_top_level: myrule
+");
+
+ let output = space.run_expect(&mut n2_command(vec!["-t", "targets", "depth", "2"]))?;
+ assert_eq!(String::from_utf8_lossy(&output.stdout), "a: myrule
+ b: myrule
+ c: myrule
+ d: myrule
+other_top_level: myrule
+");
+
+ let output = space.run_expect(&mut n2_command(vec!["-t", "targets", "depth", "0"]))?;
+ assert_eq!(String::from_utf8_lossy(&output.stdout), "a: myrule
+ b: myrule
+ c: myrule
+ d: myrule
+ e: myrule
+other_top_level: myrule
+");
+
+ Ok(())
+}
+
+#[test]
+fn all() -> anyhow::Result<()> {
+ let space = TestSpace::new()?;
+ space.write(
+ "build.ninja",
+ "
+rule myrule
+rule myrule2
+
+build a: myrule b c d
+build b: myrule
+build c: myrule2
+build d: myrule2 e
+build e: myrule
+
+build other_top_level: myrule
+",
+ )?;
+ let output = space.run_expect(&mut n2_command(vec!["-t", "targets", "all"]))?;
+ assert_eq!(String::from_utf8_lossy(&output.stdout), "a: myrule
+b: myrule
+c: myrule2
+d: myrule2
+e: myrule
+other_top_level: myrule
+");
+
+ Ok(())
+}
+
+
+#[test]
+fn rule() -> anyhow::Result<()> {
+ let space = TestSpace::new()?;
+ space.write(
+ "build.ninja",
+ "
+rule myrule
+rule myrule2
+
+build a: myrule b c d in1
+build b: myrule in2 in3
+build c: myrule2
+build d: myrule2 e
+build e: myrule in3
+
+build other_top_level: myrule
+",
+ )?;
+ let output = space.run_expect(&mut n2_command(vec!["-t", "targets", "rule", "myrule2"]))?;
+ assert_eq!(String::from_utf8_lossy(&output.stdout), "c
+d
+");
+
+ let output = space.run_expect(&mut n2_command(vec!["-t", "targets", "rule"]))?;
+ assert_eq!(String::from_utf8_lossy(&output.stdout), "in1
+in2
+in3
+");
+ Ok(())
+}