Merge "aconfig: package.map file serialization" into main

Dennis Shen authored on 2023-12-19 18:57:24 +00:00, committed by Gerrit Code Review
2 changed files with 317 additions and 6 deletions

View File

@@ -14,11 +14,41 @@
* limitations under the License.
*/
-use anyhow::Result;
-use std::collections::{HashMap, HashSet};
+pub mod package_table;
+use anyhow::{anyhow, Result};
+use std::collections::{hash_map::DefaultHasher, HashMap, HashSet};
+use std::hash::{Hash, Hasher};
+use std::path::PathBuf;
use crate::commands::OutputFile;
use crate::protos::{ProtoParsedFlag, ProtoParsedFlags};
+use crate::storage::package_table::PackageTable;
+pub const FILE_VERSION: u32 = 1;
+pub const HASH_PRIMES: [u32; 29] = [
+7, 13, 29, 53, 97, 193, 389, 769, 1543, 3079, 6151, 12289, 24593, 49157, 98317, 196613, 393241,
+786433, 1572869, 3145739, 6291469, 12582917, 25165843, 50331653, 100663319, 201326611,
+402653189, 805306457, 1610612741,
+];
+/// Get the right hash table size given number of entries in the table. Use a
+/// load factor of 0.5 for performance.
+pub fn get_table_size(entries: u32) -> Result<u32> {
+HASH_PRIMES
+.iter()
+.find(|&&num| num >= 2 * entries)
+.copied()
+.ok_or(anyhow!("Number of packages is too large"))
+}
+/// Get the corresponding bucket index given the key and number of buckets
+pub fn get_bucket_index<T: Hash>(val: &T, num_buckets: u32) -> u32 {
+let mut s = DefaultHasher::new();
+val.hash(&mut s);
+(s.finish() % num_buckets as u64) as u32
+}
pub struct FlagPackage<'a> {
pub package_name: &'a str,
@@ -52,7 +82,7 @@ where
{
// group flags by package
let mut packages: Vec<FlagPackage<'a>> = Vec::new();
-let mut package_index: HashMap<&'a str, usize> = HashMap::new();
+let mut package_index: HashMap<&str, usize> = HashMap::new();
for parsed_flags in parsed_flags_vec_iter {
for parsed_flag in parsed_flags.parsed_flag.iter() {
let index = *(package_index.entry(parsed_flag.package()).or_insert(packages.len()));
@@ -76,14 +106,21 @@ where
}
pub fn generate_storage_files<'a, I>(
-_containser: &str,
+container: &str,
parsed_flags_vec_iter: I,
) -> Result<Vec<OutputFile>>
where
I: Iterator<Item = &'a ProtoParsedFlags>,
{
-let _packages = group_flags_by_package(parsed_flags_vec_iter);
-Ok(vec![])
+let packages = group_flags_by_package(parsed_flags_vec_iter);
+// create and serialize package map
+let package_table = PackageTable::new(container, &packages)?;
+let package_table_file_path = PathBuf::from("package.map");
+let package_table_file =
+OutputFile { contents: package_table.as_bytes(), path: package_table_file_path };
+Ok(vec![package_table_file])
}
#[cfg(test)]
@@ -91,6 +128,21 @@ mod tests {
use super::*;
use crate::Input;
+/// Read and parse bytes as u32
+pub fn read_u32_from_bytes(buf: &[u8], head: &mut usize) -> Result<u32> {
+let val = u32::from_le_bytes(buf[*head..*head + 4].try_into()?);
+*head += 4;
+Ok(val)
+}
+/// Read and parse bytes as string
+pub fn read_str_from_bytes(buf: &[u8], head: &mut usize) -> Result<String> {
+let num_bytes = read_u32_from_bytes(buf, head)? as usize;
+let val = String::from_utf8(buf[*head..*head + num_bytes].to_vec())?;
+*head += num_bytes;
+Ok(val)
+}
pub fn parse_all_test_flags() -> Vec<ProtoParsedFlags> {
let aconfig_files = [
(
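
Editor's note: the helpers added above size the hash table to the smallest listed prime that keeps the load factor at or below 0.5, and map a hashed key onto a bucket; collisions are later chained through PackageTableNode::next_offset. A standalone sketch of how the two helpers fit together, not part of the change itself (HASH_PRIMES is truncated and the Result return is simplified to Option so the sketch needs only std):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// First few entries of the HASH_PRIMES table from the change above.
const HASH_PRIMES: [u32; 5] = [7, 13, 29, 53, 97];

fn get_table_size(entries: u32) -> Option<u32> {
    // Smallest prime >= 2 * entries keeps the load factor at or below 0.5.
    HASH_PRIMES.iter().find(|&&num| num >= 2 * entries).copied()
}

fn get_bucket_index<T: Hash>(val: &T, num_buckets: u32) -> u32 {
    let mut s = DefaultHasher::new();
    val.hash(&mut s);
    (s.finish() % num_buckets as u64) as u32
}

fn main() {
    // Two packages -> smallest prime >= 4 is 7, so the table gets 7 buckets.
    let num_buckets = get_table_size(2).unwrap();
    assert_eq!(num_buckets, 7);
    // Each package name hashes to one of those buckets.
    let idx = get_bucket_index(&String::from("com.android.aconfig.storage.test_1"), num_buckets);
    assert!(idx < num_buckets);
    println!("bucket index: {idx}");
}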

View File

@@ -0,0 +1,259 @@
/*
* Copyright (C) 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::storage::{self, FlagPackage};
use anyhow::Result;
#[derive(PartialEq, Debug)]
pub struct PackageTableHeader {
pub version: u32,
pub container: String,
pub file_size: u32,
pub num_packages: u32,
pub bucket_offset: u32,
pub node_offset: u32,
}
impl PackageTableHeader {
fn new(container: &str, num_packages: u32) -> Self {
Self {
version: storage::FILE_VERSION,
container: String::from(container),
file_size: 0,
num_packages,
bucket_offset: 0,
node_offset: 0,
}
}
fn as_bytes(&self) -> Vec<u8> {
let mut result = Vec::new();
result.extend_from_slice(&self.version.to_le_bytes());
let container_bytes = self.container.as_bytes();
result.extend_from_slice(&(container_bytes.len() as u32).to_le_bytes());
result.extend_from_slice(container_bytes);
result.extend_from_slice(&self.file_size.to_le_bytes());
result.extend_from_slice(&self.num_packages.to_le_bytes());
result.extend_from_slice(&self.bucket_offset.to_le_bytes());
result.extend_from_slice(&self.node_offset.to_le_bytes());
result
}
}
#[derive(PartialEq, Debug)]
pub struct PackageTableNode {
pub package_name: String,
pub package_id: u32,
pub boolean_offset: u32,
pub next_offset: Option<u32>,
pub bucket_index: u32,
}
impl PackageTableNode {
fn new(package: &FlagPackage, num_buckets: u32) -> Self {
let bucket_index =
storage::get_bucket_index(&package.package_name.to_string(), num_buckets);
Self {
package_name: String::from(package.package_name),
package_id: package.package_id,
boolean_offset: package.boolean_offset,
next_offset: None,
bucket_index,
}
}
fn as_bytes(&self) -> Vec<u8> {
let mut result = Vec::new();
let name_bytes = self.package_name.as_bytes();
result.extend_from_slice(&(name_bytes.len() as u32).to_le_bytes());
result.extend_from_slice(name_bytes);
result.extend_from_slice(&self.package_id.to_le_bytes());
result.extend_from_slice(&self.boolean_offset.to_le_bytes());
result.extend_from_slice(&self.next_offset.unwrap_or(0).to_le_bytes());
result
}
}
pub struct PackageTable {
pub header: PackageTableHeader,
pub buckets: Vec<Option<u32>>,
pub nodes: Vec<PackageTableNode>,
}
impl PackageTable {
pub fn new(container: &str, packages: &[FlagPackage]) -> Result<Self> {
// create table
let num_packages = packages.len() as u32;
let num_buckets = storage::get_table_size(num_packages)?;
let mut table = Self {
header: PackageTableHeader::new(container, num_packages),
buckets: vec![None; num_buckets as usize],
nodes: packages.iter().map(|pkg| PackageTableNode::new(pkg, num_buckets)).collect(),
};
// sort nodes by bucket index for efficiency
table.nodes.sort_by(|a, b| a.bucket_index.cmp(&b.bucket_index));
// fill all node offset
let mut offset = 0;
for i in 0..table.nodes.len() {
let node_bucket_idx = table.nodes[i].bucket_index;
let next_node_bucket_idx = if i + 1 < table.nodes.len() {
Some(table.nodes[i + 1].bucket_index)
} else {
None
};
if table.buckets[node_bucket_idx as usize].is_none() {
table.buckets[node_bucket_idx as usize] = Some(offset);
}
offset += table.nodes[i].as_bytes().len() as u32;
if let Some(index) = next_node_bucket_idx {
if index == node_bucket_idx {
table.nodes[i].next_offset = Some(offset);
}
}
}
// fill table region offset
table.header.bucket_offset = table.header.as_bytes().len() as u32;
table.header.node_offset = table.header.bucket_offset + num_buckets * 4;
table.header.file_size = table.header.node_offset
+ table.nodes.iter().map(|x| x.as_bytes().len()).sum::<usize>() as u32;
Ok(table)
}
pub fn as_bytes(&self) -> Vec<u8> {
[
self.header.as_bytes(),
self.buckets.iter().map(|v| v.unwrap_or(0).to_le_bytes()).collect::<Vec<_>>().concat(),
self.nodes.iter().map(|v| v.as_bytes()).collect::<Vec<_>>().concat(),
]
.concat()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::storage::{
group_flags_by_package, tests::parse_all_test_flags, tests::read_str_from_bytes,
tests::read_u32_from_bytes,
};
impl PackageTableHeader {
// test only method to deserialize back into the header struct
fn from_bytes(bytes: &[u8]) -> Result<Self> {
let mut head = 0;
Ok(Self {
version: read_u32_from_bytes(bytes, &mut head)?,
container: read_str_from_bytes(bytes, &mut head)?,
file_size: read_u32_from_bytes(bytes, &mut head)?,
num_packages: read_u32_from_bytes(bytes, &mut head)?,
bucket_offset: read_u32_from_bytes(bytes, &mut head)?,
node_offset: read_u32_from_bytes(bytes, &mut head)?,
})
}
}
impl PackageTableNode {
// test only method to deserialize back into the node struct
fn from_bytes(bytes: &[u8], num_buckets: u32) -> Result<Self> {
let mut head = 0;
let mut node = Self {
package_name: read_str_from_bytes(bytes, &mut head)?,
package_id: read_u32_from_bytes(bytes, &mut head)?,
boolean_offset: read_u32_from_bytes(bytes, &mut head)?,
next_offset: match read_u32_from_bytes(bytes, &mut head)? {
0 => None,
val => Some(val),
},
bucket_index: 0,
};
node.bucket_index = storage::get_bucket_index(&node.package_name, num_buckets);
Ok(node)
}
}
pub fn create_test_package_table() -> Result<PackageTable> {
let caches = parse_all_test_flags();
let packages = group_flags_by_package(caches.iter());
PackageTable::new("system", &packages)
}
#[test]
// this test point locks down the table creation and each field
fn test_table_contents() {
let package_table = create_test_package_table();
assert!(package_table.is_ok());
let header: &PackageTableHeader = &package_table.as_ref().unwrap().header;
let expected_header = PackageTableHeader {
version: storage::FILE_VERSION,
container: String::from("system"),
file_size: 158,
num_packages: 2,
bucket_offset: 30,
node_offset: 58,
};
assert_eq!(header, &expected_header);
let buckets: &Vec<Option<u32>> = &package_table.as_ref().unwrap().buckets;
let expected: Vec<Option<u32>> = vec![Some(0), None, None, Some(50), None, None, None];
assert_eq!(buckets, &expected);
let nodes: &Vec<PackageTableNode> = &package_table.as_ref().unwrap().nodes;
assert_eq!(nodes.len(), 2);
let first_node_expected = PackageTableNode {
package_name: String::from("com.android.aconfig.storage.test_2"),
package_id: 1,
boolean_offset: 10,
next_offset: None,
bucket_index: 0,
};
assert_eq!(nodes[0], first_node_expected);
let second_node_expected = PackageTableNode {
package_name: String::from("com.android.aconfig.storage.test_1"),
package_id: 0,
boolean_offset: 0,
next_offset: None,
bucket_index: 3,
};
assert_eq!(nodes[1], second_node_expected);
}
#[test]
// this test point locks down the table serialization
fn test_serialization() {
let package_table = create_test_package_table();
assert!(package_table.is_ok());
let header: &PackageTableHeader = &package_table.as_ref().unwrap().header;
let reinterpreted_header = PackageTableHeader::from_bytes(&header.as_bytes());
assert!(reinterpreted_header.is_ok());
assert_eq!(header, &reinterpreted_header.unwrap());
let nodes: &Vec<PackageTableNode> = &package_table.as_ref().unwrap().nodes;
let num_buckets = storage::get_table_size(header.num_packages).unwrap();
for node in nodes.iter() {
let reinterpreted_node = PackageTableNode::from_bytes(&node.as_bytes(), num_buckets);
assert!(reinterpreted_node.is_ok());
assert_eq!(node, &reinterpreted_node.unwrap());
}
}
}
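
Editor's note: a standalone sketch of the byte-layout arithmetic behind the expected_header and bucket values locked down in test_table_contents above, not part of the change itself; it only restates sizes already implied by the as_bytes implementations and the test data:

fn main() {
    // Header: version (4) + container length (4) + "system" (6)
    //       + file_size (4) + num_packages (4) + bucket_offset (4) + node_offset (4).
    let header_size = 4 + 4 + "system".len() + 4 + 4 + 4 + 4;
    assert_eq!(header_size, 30); // bucket_offset in the test

    // Bucket array: 7 buckets (smallest prime >= 2 * 2 packages), 4 bytes each.
    let node_offset = header_size + 7 * 4;
    assert_eq!(node_offset, 58); // node_offset in the test

    // Node: name length (4) + 34-byte package name + package_id (4)
    //     + boolean_offset (4) + next_offset (4).
    let node_size = 4 + "com.android.aconfig.storage.test_1".len() + 4 + 4 + 4;
    assert_eq!(node_size, 50); // also the second bucket value, relative to the node section

    let file_size = node_offset + 2 * node_size;
    assert_eq!(file_size, 158); // file_size in the test
    println!("package.map (test data): header 0..30, buckets 30..58, nodes 58..158");
}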