@@ -36,7 +36,7 @@
/// When index and data are not interleaved: bytes of the revlog index.
/// When index and data are interleaved: bytes of the revlog index and
/// data.
- index_bytes: Box<dyn Deref<Target = [u8]> + Send>,
+ index: Index,
/// When index and data are not interleaved: bytes of the revlog data
data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
}
@@ -56,15 +56,13 @@
return Err(RevlogError::UnsuportedVersion(version));
}
- let is_inline = is_inline(&index_mmap);
-
- let index_bytes = Box::new(index_mmap);
+ let index = Index::new(Box::new(index_mmap))?;
// TODO load data only when needed
// Type annotation required: the compiler won't recognize Mmap as
// Deref<Target = [u8]> on its own.
let data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>> =
- if is_inline {
+ if index.is_inline() {
None
} else {
let data_path = index_path.with_extension("d");
@@ -73,31 +71,27 @@
Some(Box::new(data_mmap))
};
- Ok(Revlog {
- index_bytes,
- data_bytes,
- })
+ Ok(Revlog { index, data_bytes })
}
/// Return the number of entries in the `Revlog`.
pub fn len(&self) -> usize {
- self.index().len()
+ self.index.len()
}
/// Returns `true` if the `Revlog` has zero entries.
pub fn is_empty(&self) -> bool {
- self.index().is_empty()
+ self.index.is_empty()
}
/// Return the revision number of the given node.
#[timed]
pub fn get_node_rev(&self, node: &[u8]) -> Result<Revision, RevlogError> {
- let index = self.index();
// This is brute force. But it is fast enough for now.
// Optimization will come later.
for rev in (0..self.len() as Revision).rev() {
let index_entry =
- index.get_entry(rev).ok_or(RevlogError::Corrupted)?;
+ self.index.get_entry(rev).ok_or(RevlogError::Corrupted)?;
if node == index_entry.hash() {
return Ok(rev);
}
@@ -123,9 +117,10 @@
}
// TODO do not look twice in the index
- let index = self.index();
- let index_entry =
- index.get_entry(rev).ok_or(RevlogError::InvalidRevision)?;
+ let index_entry = self
+ .index
+ .get_entry(rev)
+ .ok_or(RevlogError::InvalidRevision)?;
let data: Vec<u8> = if delta_chain.is_empty() {
entry.data()?.into()
@@ -153,13 +148,12 @@
expected: &[u8],
data: &[u8],
) -> bool {
- let index = self.index();
- let e1 = index.get_entry(p1);
+ let e1 = self.index.get_entry(p1);
let h1 = match e1 {
Some(ref entry) => entry.hash(),
None => &NULL_NODE_ID,
};
- let e2 = index.get_entry(p2);
+ let e2 = self.index.get_entry(p2);
let h2 = match e2 {
Some(ref entry) => entry.hash(),
None => &NULL_NODE_ID,
@@ -187,30 +181,32 @@
Ok(patch.apply(&snapshot))
}
- /// Return the revlog index.
- pub fn index(&self) -> Index {
- let is_inline = self.data_bytes.is_none();
- Index::new(&self.index_bytes, is_inline)
- }
-
/// Return the revlog data.
fn data(&self) -> &[u8] {
match self.data_bytes {
Some(ref data_bytes) => &data_bytes,
- None => &self.index_bytes,
+ None => panic!(
+ "forgot to load the data or trying to access inline data"
+ ),
}
}
/// Get an entry of the revlog.
fn get_entry(&self, rev: Revision) -> Result<RevlogEntry, RevlogError> {
- let index = self.index();
- let index_entry =
- index.get_entry(rev).ok_or(RevlogError::InvalidRevision)?;
+ let index_entry = self
+ .index
+ .get_entry(rev)
+ .ok_or(RevlogError::InvalidRevision)?;
let start = index_entry.offset();
let end = start + index_entry.compressed_len();
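+ // For an inline revlog the chunk bytes live in the index file
+ // itself; otherwise they are read from the separate data file.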
+ let data = if self.index.is_inline() {
+ self.index.data(start, end)
+ } else {
+ &self.data()[start..end]
+ };
let entry = RevlogEntry {
rev,
- bytes: &self.data()[start..end],
+ bytes: data,
compressed_len: index_entry.compressed_len(),
uncompressed_len: index_entry.uncompressed_len(),
base_rev: if index_entry.base_revision() == rev {
@@ -247,38 +243,44 @@
// Raw revision data follows.
b'u' => Ok(Cow::Borrowed(&self.bytes[1..])),
// zlib (RFC 1950) data.
- b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data())),
+ b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data()?)),
// zstd data.
- b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data())),
+ b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
format_type => Err(RevlogError::UnknowDataFormat(format_type)),
}
}
- fn uncompressed_zlib_data(&self) -> Vec<u8> {
+ fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, RevlogError> {
let mut decoder = ZlibDecoder::new(self.bytes);
if self.is_delta() {
let mut buf = Vec::with_capacity(self.compressed_len);
- decoder.read_to_end(&mut buf).expect("corrupted zlib data");
- buf
+ decoder
+ .read_to_end(&mut buf)
+ .map_err(|_| RevlogError::Corrupted)?;
+ Ok(buf)
} else {
let mut buf = vec![0; self.uncompressed_len];
- decoder.read_exact(&mut buf).expect("corrupted zlib data");
- buf
+ decoder
+ .read_exact(&mut buf)
+ .map_err(|_| RevlogError::Corrupted)?;
+ Ok(buf)
}
}
- fn uncompressed_zstd_data(&self) -> Vec<u8> {
+ fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, RevlogError> {
if self.is_delta() {
let mut buf = Vec::with_capacity(self.compressed_len);
zstd::stream::copy_decode(self.bytes, &mut buf)
- .expect("corrupted zstd data");
- buf
+ .map_err(|_| RevlogError::Corrupted)?;
+ Ok(buf)
} else {
let mut buf = vec![0; self.uncompressed_len];
let len = zstd::block::decompress_to_buffer(self.bytes, &mut buf)
- .expect("corrupted zstd data");
- assert_eq!(len, self.uncompressed_len, "corrupted zstd data");
- buf
+ .map_err(|_| RevlogError::Corrupted)?;
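+ // decompress_to_buffer returns the number of bytes written; anything
+ // other than the expected uncompressed length means corrupted data.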
+ if len != self.uncompressed_len {
+ return Err(RevlogError::Corrupted);
+ }
+ Ok(buf)
}
}
@@ -289,14 +291,6 @@
}
}
-/// Value of the inline flag.
-pub fn is_inline(index_bytes: &[u8]) -> bool {
- match &index_bytes[0..=1] {
- [0, 0] | [0, 2] => false,
- _ => true,
- }
-}
-
/// Format version of the revlog.
pub fn get_version(index_bytes: &[u8]) -> u16 {
BigEndian::read_u16(&index_bytes[2..=3])
@@ -325,113 +319,12 @@
use super::super::index::IndexEntryBuilder;
- #[cfg(test)]
- pub struct RevlogBuilder {
- version: u16,
- is_general_delta: bool,
- is_inline: bool,
- offset: usize,
- index: Vec<Vec<u8>>,
- data: Vec<Vec<u8>>,
- }
-
- #[cfg(test)]
- impl RevlogBuilder {
- pub fn new() -> Self {
- Self {
- version: 2,
- is_inline: false,
- is_general_delta: true,
- offset: 0,
- index: vec![],
- data: vec![],
- }
- }
-
- pub fn with_inline(&mut self, value: bool) -> &mut Self {
- self.is_inline = value;
- self
- }
-
- pub fn with_general_delta(&mut self, value: bool) -> &mut Self {
- self.is_general_delta = value;
- self
- }
-
- pub fn with_version(&mut self, value: u16) -> &mut Self {
- self.version = value;
- self
- }
-
- pub fn push(
- &mut self,
- mut index: IndexEntryBuilder,
- data: Vec<u8>,
- ) -> &mut Self {
- if self.index.is_empty() {
- index.is_first(true);
- index.with_general_delta(self.is_general_delta);
- index.with_inline(self.is_inline);
- index.with_version(self.version);
- } else {
- index.with_offset(self.offset);
- }
- self.index.push(index.build());
- self.offset += data.len();
- self.data.push(data);
- self
- }
-
- pub fn build_inline(&self) -> Vec<u8> {
- let mut bytes =
- Vec::with_capacity(self.index.len() + self.data.len());
- for (index, data) in self.index.iter().zip(self.data.iter()) {
- bytes.extend(index);
- bytes.extend(data);
- }
- bytes
- }
- }
-
- #[test]
- fn is_not_inline_when_no_inline_flag_test() {
- let bytes = RevlogBuilder::new()
- .with_general_delta(false)
- .with_inline(false)
- .push(IndexEntryBuilder::new(), vec![])
- .build_inline();
-
- assert_eq!(is_inline(&bytes), false)
- }
-
- #[test]
- fn is_inline_when_inline_flag_test() {
- let bytes = RevlogBuilder::new()
- .with_general_delta(false)
- .with_inline(true)
- .push(IndexEntryBuilder::new(), vec![])
- .build_inline();
-
- assert_eq!(is_inline(&bytes), true)
- }
-
- #[test]
- fn is_inline_when_inline_and_generaldelta_flags_test() {
- let bytes = RevlogBuilder::new()
- .with_general_delta(true)
- .with_inline(true)
- .push(IndexEntryBuilder::new(), vec![])
- .build_inline();
-
- assert_eq!(is_inline(&bytes), true)
- }
-
#[test]
fn version_test() {
- let bytes = RevlogBuilder::new()
+ let bytes = IndexEntryBuilder::new()
+ .is_first(true)
.with_version(1)
- .push(IndexEntryBuilder::new(), vec![])
- .build_inline();
+ .build();
assert_eq!(get_version(&bytes), 1)
}
@@ -8,35 +8,35 @@
/// - a replacement when `!data.is_empty() && start < end`
/// - not doing anything when `data.is_empty() && start == end`
#[derive(Debug, Clone)]
-struct PatchFrag<'a> {
+struct Chunk<'a> {
/// The start position of the chunk of data to replace
- start: i32,
+ start: u32,
/// The end position of the chunk of data to replace (open end interval)
- end: i32,
+ end: u32,
/// The data replacing the chunk
data: &'a [u8],
}
-impl<'a> PatchFrag<'a> {
+impl Chunk<'_> {
/// Adjusted start of the chunk to replace.
///
- /// Offset allow to take into account the growth/shrinkage of data
+ /// The offset takes into account the growth/shrinkage of data
/// induced by previously applied chunks.
- fn start_offseted_by(&self, offset: i32) -> i32 {
- self.start + offset
+ fn start_offset_by(&self, offset: i32) -> i32 {
+ self.start as i32 + offset
}
/// Adjusted end of the chunk to replace.
///
- /// Offset allow to take into account the growth/shrinkage of data
+ /// The offset takes into account the growth/shrinkage of data
/// induced by previously applied chunks.
- fn end_offseted_by(&self, offset: i32) -> i32 {
- self.start_offseted_by(offset) + (self.data.len() as i32)
+ fn end_offset_by(&self, offset: i32) -> i32 {
+ self.start_offset_by(offset) + (self.data.len() as i32)
}
/// Length of the replaced chunk.
fn replaced_len(&self) -> i32 {
- self.end - self.start
+ (self.end - self.start) as i32
}
/// Length difference between the replacing data and the replaced data.
@@ -54,35 +54,35 @@
/// - ordered from the left-most replacement to the right-most replacement
/// - non-overlapping, meaning that two chunks cannot change the same
/// chunk of the patched data
- frags: Vec<PatchFrag<'a>>,
+ chunks: Vec<Chunk<'a>>,
}
impl<'a> PatchList<'a> {
/// Create a `PatchList` from bytes.
pub fn new(data: &'a [u8]) -> Self {
- let mut frags = vec![];
+ let mut chunks = vec![];
let mut data = data;
while !data.is_empty() {
- let start = BigEndian::read_i32(&data[0..]);
- let end = BigEndian::read_i32(&data[4..]);
- let len = BigEndian::read_i32(&data[8..]);
- assert!(0 <= start && start <= end && len >= 0);
- frags.push(PatchFrag {
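+ // Each chunk is prefixed by a 12-byte header: start, end and data
+ // length, all big-endian u32s, followed by the replacement data.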
+ let start = BigEndian::read_u32(&data[0..]);
+ let end = BigEndian::read_u32(&data[4..]);
+ let len = BigEndian::read_u32(&data[8..]);
+ assert!(start <= end);
+ chunks.push(Chunk {
start,
end,
data: &data[12..12 + (len as usize)],
});
data = &data[12 + (len as usize)..];
}
- PatchList { frags }
+ PatchList { chunks }
}
/// Return the final length of data after patching
/// given its initial length.
fn size(&self, initial_size: i32) -> i32 {
- self.frags
+ self.chunks
.iter()
- .fold(initial_size, |acc, frag| acc + frag.len_diff())
+ .fold(initial_size, |acc, chunk| acc + chunk.len_diff())
}
/// Apply the patch to some data.
@@ -90,7 +90,7 @@
let mut last: usize = 0;
let mut vec =
Vec::with_capacity(self.size(initial.len() as i32) as usize);
- for PatchFrag { start, end, data } in self.frags.iter() {
+ for Chunk { start, end, data } in self.chunks.iter() {
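+ // Copy the bytes left untouched since the previous chunk, then the
+ // replacement data, and remember where the replaced span ends.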
vec.extend(&initial[last..(*start as usize)]);
vec.extend(data.iter());
last = *end as usize;
@@ -105,7 +105,7 @@
/// as the changes introduced by one patch can be overridden by the next.
/// Combining patches optimizes the whole patching sequence.
fn combine(&mut self, other: &mut Self) -> Self {
- let mut frags = vec![];
+ let mut chunks = vec![];
// Keep track of each growth/shrinkage resulting from applying a chunk
// in order to adjust the start/end of subsequent chunks.
@@ -116,15 +116,15 @@
// For each chunk of `other`, chunks of `self` are processed
// until they start after the end of the current chunk.
- for PatchFrag { start, end, data } in other.frags.iter() {
+ for Chunk { start, end, data } in other.chunks.iter() {
// Add chunks of `self` that start before this chunk of `other`
// without overlap.
- while pos < self.frags.len()
- && self.frags[pos].end_offseted_by(offset) <= *start
+ while pos < self.chunks.len()
+ && self.chunks[pos].end_offset_by(offset) <= *start as i32
{
- let first = self.frags[pos].clone();
+ let first = self.chunks[pos].clone();
offset += first.len_diff();
- frags.push(first);
+ chunks.push(first);
pos += 1;
}
@@ -132,15 +132,15 @@
// with overlap.
// The left-most part of the data is added as an insertion chunk.
// The right-most part of the data is kept in the chunk.
- if pos < self.frags.len()
- && self.frags[pos].start_offseted_by(offset) < *start
+ if pos < self.chunks.len()
+ && self.chunks[pos].start_offset_by(offset) < *start as i32
{
- let first = &mut self.frags[pos];
+ let first = &mut self.chunks[pos];
let (data_left, data_right) = first.data.split_at(
- (*start - first.start_offseted_by(offset)) as usize,
+ (*start as i32 - first.start_offset_by(offset)) as usize,
);
- let left = PatchFrag {
+ let left = Chunk {
start: first.start,
end: first.start,
data: data_left,
@@ -150,7 +150,7 @@
offset += left.len_diff();
- frags.push(left);
+ chunks.push(left);
// The index is not incremented because the right-most part
// needs further examination.
@@ -167,23 +167,23 @@
// Discard the chunks of `self` that are totally overridden
// by the current chunk of `other`
- while pos < self.frags.len()
- && self.frags[pos].end_offseted_by(next_offset) <= *end
+ while pos < self.chunks.len()
+ && self.chunks[pos].end_offset_by(next_offset) <= *end as i32
{
- let first = &self.frags[pos];
+ let first = &self.chunks[pos];
next_offset += first.len_diff();
pos += 1;
}
// Truncate the left-most part of the chunk of `self` that overlaps
// the current chunk of `other`.
- if pos < self.frags.len()
- && self.frags[pos].start_offseted_by(next_offset) < *end
+ if pos < self.chunks.len()
+ && self.chunks[pos].start_offset_by(next_offset) < *end as i32
{
- let first = &mut self.frags[pos];
+ let first = &mut self.chunks[pos];
let how_much_to_discard =
- *end - first.start_offseted_by(next_offset);
+ *end as i32 - first.start_offset_by(next_offset);
first.data = &first.data[(how_much_to_discard as usize)..];
@@ -191,9 +191,9 @@
}
// Add the chunk of `other` with adjusted position.
- frags.push(PatchFrag {
- start: *start - offset,
- end: *end - next_offset,
+ chunks.push(Chunk {
+ start: (*start as i32 - offset) as u32,
+ end: (*end as i32 - next_offset) as u32,
data,
});
@@ -202,10 +202,10 @@
}
// Add remaining chunks of `self`.
- for elt in &self.frags[pos..] {
- frags.push(elt.clone());
+ for elt in &self.chunks[pos..] {
+ chunks.push(elt.clone());
}
- PatchList { frags }
+ PatchList { chunks }
}
}
@@ -213,7 +213,7 @@
pub fn fold_patch_lists<'a>(lists: &[PatchList<'a>]) -> PatchList<'a> {
if lists.len() <= 1 {
if lists.is_empty() {
- PatchList { frags: vec![] }
+ PatchList { chunks: vec![] }
} else {
lists[0].clone()
}
@@ -253,7 +253,7 @@
}
pub fn get(&mut self) -> &[u8] {
- &self.data[..]
+ &self.data
}
}
@@ -1,27 +1,31 @@
+use std::ops::Deref;
+
use byteorder::{BigEndian, ByteOrder};
+use crate::revlog::revlog::RevlogError;
use crate::revlog::{Revision, NULL_REVISION};
pub const INDEX_ENTRY_SIZE: usize = 64;
-/// A Revlog index
-#[derive(Debug)]
-pub struct Index<'a> {
- bytes: &'a [u8],
+/// A `revlog` index
+pub struct Index {
+ bytes: Box<dyn Deref<Target = [u8]> + Send>,
/// Offsets of starts of index blocks.
/// Only needed when the index is interleaved with data.
offsets: Option<Vec<usize>>,
}
-impl<'a> Index<'a> {
+impl Index {
/// Create an index from bytes.
/// Calculate the start of each entry when is_inline is true.
- pub fn new(bytes: &'a [u8], is_inline: bool) -> Self {
- if is_inline {
+ pub fn new(
+ bytes: Box<dyn Deref<Target = [u8]> + Send>,
+ ) -> Result<Self, RevlogError> {
+ if is_inline(&bytes) {
let mut offset: usize = 0;
let mut offsets = Vec::new();
- while (bytes.len() - offset) >= INDEX_ENTRY_SIZE {
+ while INDEX_ENTRY_SIZE + offset <= bytes.len() {
offsets.push(offset);
let end = offset + INDEX_ENTRY_SIZE;
let entry = IndexEntry {
@@ -32,18 +36,35 @@
offset += INDEX_ENTRY_SIZE + entry.compressed_len();
}
- Self {
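+ // The interleaved entries and data chunks must cover the whole
+ // file; leftover bytes mean a truncated or corrupted inline revlog.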
+ if offset != bytes.len() {
+ return Err(RevlogError::Corrupted);
+ }
+
+ Ok(Self {
bytes,
offsets: Some(offsets),
- }
+ })
} else {
- Self {
+ Ok(Self {
bytes,
offsets: None,
- }
+ })
}
}
+ /// Return `true` if the `revlog` is inline, `false` otherwise.
+ pub fn is_inline(&self) -> bool {
+ is_inline(&self.bytes)
+ }
+
+ /// Return a slice of bytes if the `revlog` is inline. Panics if not.
+ pub fn data(&self, start: usize, end: usize) -> &[u8] {
+ if !self.is_inline() {
+ panic!("tried to access data in the index of a revlog that is not inline");
+ }
+ &self.bytes[start..end]
+ }
+
/// Return the number of entries in the revlog index.
pub fn len(&self) -> usize {
if let Some(offsets) = &self.offsets {
@@ -98,11 +119,9 @@
let end = start + INDEX_ENTRY_SIZE;
let bytes = &self.bytes[start..end];
- // See IndexEntry for an explanation of this override.
- let offset_override = match rev {
- 0 => Some(0),
- _ => None,
- };
+ // Override the offset of the first revision, as its bytes are used
+ // for the index's metadata (saving space because it is always 0).
+ let offset_override = if rev == 0 { Some(0) } else { None };
Some(IndexEntry {
bytes,
@@ -128,14 +147,13 @@
}
impl<'a> IndexEntry<'a> {
- /// Return the offset of the data if not overridden by offset_override.
+ /// Return the offset of the data.
pub fn offset(&self) -> usize {
if let Some(offset_override) = self.offset_override {
offset_override
} else {
- let mut bytes = [0; 8];
- bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
- BigEndian::read_u64(&bytes[..]) as usize
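+ // The offset is stored in the first 6 bytes of the entry as a
+ // 48-bit big-endian integer: high 16 bits, then low 32 bits.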
+ ((BigEndian::read_u16(&self.bytes[0..2]) as usize) << 32)
+ + (BigEndian::read_u32(&self.bytes[2..6]) as usize)
}
}
@@ -174,6 +192,14 @@
}
}
+/// Value of the inline flag.
+pub fn is_inline(index_bytes: &[u8]) -> bool {
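+ // The first two bytes of the index hold the revlog feature flags:
+ // 1 is the inline-data flag and 2 is generaldelta, so [0, 0] and
+ // [0, 2] are the only values without the inline bit set.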
+ match &index_bytes[0..=1] {
+ [0, 0] | [0, 2] => false,
+ _ => true,
+ }
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -271,6 +297,39 @@
}
#[test]
+ fn is_not_inline_when_no_inline_flag_test() {
+ let bytes = IndexEntryBuilder::new()
+ .is_first(true)
+ .with_general_delta(false)
+ .with_inline(false)
+ .build();
+
+ assert_eq!(is_inline(&bytes), false)
+ }
+
+ #[test]
+ fn is_inline_when_inline_flag_test() {
+ let bytes = IndexEntryBuilder::new()
+ .is_first(true)
+ .with_general_delta(false)
+ .with_inline(true)
+ .build();
+
+ assert_eq!(is_inline(&bytes), true)
+ }
+
+ #[test]
+ fn is_inline_when_inline_and_generaldelta_flags_test() {
+ let bytes = IndexEntryBuilder::new()
+ .is_first(true)
+ .with_general_delta(true)
+ .with_inline(true)
+ .build();
+
+ assert_eq!(is_inline(&bytes), true)
+ }
+
+ #[test]
fn test_offset() {
let bytes = IndexEntryBuilder::new().with_offset(1).build();
let entry = IndexEntry {
@@ -14,6 +14,7 @@
pub mod manifest;
pub mod patch;
pub mod revlog;
+pub use revlog::RevlogError;
/// Mercurial revision numbers
///
@@ -711,6 +711,16 @@
]
[[package]]
+name = "time"
+version = "0.1.44"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "libc 0.2.77 (registry+https://github.com/rust-lang/crates.io-index)",
+ "wasi 0.10.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
+[[package]]
name = "twox-hash"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -744,6 +754,11 @@
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]]
+name = "wasi"
+version = "0.10.0+wasi-snapshot-preview1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+
+[[package]]
name = "winapi"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
@@ -880,11 +895,13 @@
"checksum termcolor 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb6bfa289a4d7c5766392812c0a1f4c1ba45afa1ad47803c11e1f407d846d75f"
"checksum textwrap 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "d326610f408c7a4eb6f51c37c330e496b08506c9457c9d34287ecc38809fb060"
"checksum thread_local 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14"
+"checksum time 0.1.44 (registry+https://github.com/rust-lang/crates.io-index)" = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255"
"checksum twox-hash 1.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56"
"checksum unicode-width 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "9337591893a19b88d8d87f2cec1e73fad5cdfd10e5a6f349f498ad6ea2ffb1e3"
"checksum unicode-xid 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f7fe0bb3479651439c9112f72b6c505038574c9fbb575ed1bf3b797fa39dd564"
"checksum vcpkg 0.2.10 (registry+https://github.com/rust-lang/crates.io-index)" = "6454029bf181f092ad1b853286f23e2c507d8e8194d01d92da4a55c274a5508c"
"checksum vec_map 0.8.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191"
+"checksum wasi 0.10.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f"
"checksum wasi 0.9.0+wasi-snapshot-preview1 (registry+https://github.com/rust-lang/crates.io-index)" = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519"
"checksum winapi 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)" = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"