Cleanup more bazel code

Bug: 315353489
Test: m nothing --no-skip-soong-tests
Change-Id: I6c6b4fcd559f4a45b1a96c70cbb5d6d1a615ccff
Cole Faust
2024-06-25 10:42:33 -07:00
parent 79b0eef5a1
commit 487b144fe0
16 changed files with 0 additions and 3215 deletions


@@ -12,7 +12,6 @@ bootstrap_go_package {
"soong",
"soong-aconfig",
"soong-android",
"soong-bazel",
"soong-java",
"soong-rust",
],


@@ -26,8 +26,6 @@ import (
"sort"
"strings"
"android/soong/bazel"
"github.com/google/blueprint"
"github.com/google/blueprint/proptools"
)
@@ -849,9 +847,6 @@ type ModuleBase struct {
// archPropRoot that is filled with arch specific values by the arch mutator.
archProperties [][]interface{}
// Properties specific to the Blueprint to BUILD migration.
bazelTargetModuleProperties bazel.BazelTargetModuleProperties
// Information about all the properties on the module that contains visibility rules that need
// checking.
visibilityPropertyInfo []visibilityProperty


@@ -9,7 +9,6 @@ bootstrap_go_package {
"blueprint",
"blueprint-parser",
"blueprint-proptools",
"soong-bazel",
"soong-starlark-format",
],
srcs: [


@@ -6,22 +6,17 @@ bootstrap_go_package {
name: "soong-bazel",
pkgPath: "android/soong/bazel",
srcs: [
"aquery.go",
"bazel_proxy.go",
"configurability.go",
"constants.go",
"properties.go",
"testing.go",
],
testSrcs: [
"aquery_test.go",
"properties_test.go",
],
pluginFor: [
"soong_build",
],
deps: [
"bazel_analysis_v2_proto",
"blueprint",
],
}


@@ -1,768 +0,0 @@
// Copyright 2020 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bazel
import (
"crypto/sha256"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"path/filepath"
"reflect"
"sort"
"strings"
"sync"
analysis_v2_proto "prebuilts/bazel/common/proto/analysis_v2"
"github.com/google/blueprint/metrics"
"github.com/google/blueprint/proptools"
"google.golang.org/protobuf/proto"
)
type artifactId int
type depsetId int
type pathFragmentId int
// KeyValuePair represents the KeyValuePair message from Bazel's aquery proto.
type KeyValuePair struct {
Key string
Value string
}
// AqueryDepset is a depset definition from Bazel's aquery response. This is
// akin to the `depSetOfFiles` in the response proto, except:
// - direct artifacts are enumerated by full path instead of by ID
// - it has a hash of the depset contents, instead of an int ID (for determinism)
//
// A depset is a data structure for efficient transitive handling of artifact
// paths. A single depset consists of one or more artifact paths and one or
// more "child" depsets.
type AqueryDepset struct {
ContentHash string
DirectArtifacts []string
TransitiveDepSetHashes []string
}
// BuildStatement contains information to register a build statement corresponding (one to one)
// with a Bazel action from Bazel's action graph.
type BuildStatement struct {
Command string
Depfile *string
OutputPaths []string
SymlinkPaths []string
Env []*analysis_v2_proto.KeyValuePair
Mnemonic string
// Inputs of this build statement, either as unexpanded depsets or expanded
// input paths. There should be no overlap between these fields; an input
// path should either be included as part of an unexpanded depset or a raw
// input path string, but not both.
InputDepsetHashes []string
InputPaths []string
FileContents string
// If ShouldRunInSbox is true, Soong will use sbox to create an isolated environment
// and run the mixed build action there.
ShouldRunInSbox bool
// A list of files to add as implicit deps to the outputs of this BuildStatement.
// Unlike most properties in BuildStatement, these paths must be relative to the root of
// the whole out/ folder, instead of relative to ctx.Config().BazelContext.OutputBase()
ImplicitDeps []string
IsExecutable bool
}
// A helper type for aquery processing which facilitates retrieval of artifact paths
// from their less readable Bazel representations (depsets and path fragments).
type aqueryArtifactHandler struct {
// Maps depset id to AqueryDepset, a representation of depset which is
// post-processed for middleman artifact handling, unhandled artifact
// dropping, content hashing, etc.
depsetIdToAqueryDepset map[depsetId]AqueryDepset
emptyDepsetIds map[depsetId]struct{}
// Maps content hash to AqueryDepset.
depsetHashToAqueryDepset map[string]AqueryDepset
// depsetHashToArtifactPathsCache is a memoization of depset flattening, because flattening
// may be an expensive operation.
depsetHashToArtifactPathsCache sync.Map
// Maps artifact ids to fully expanded paths.
artifactIdToPath map[artifactId]string
}
// The tokens should be substituted with the value specified here, instead of the
// one returned in 'substitutions' of the TemplateExpand action.
var templateActionOverriddenTokens = map[string]string{
// Uses "python3" for %python_binary% instead of the value returned by aquery
// which is "py3wrapper.sh". See removePy3wrapperScript.
"%python_binary%": "python3",
}
const (
middlemanMnemonic = "Middleman"
// The file name of py3wrapper.sh, which is used by py_binary targets.
py3wrapperFileName = "/py3wrapper.sh"
)
func indexBy[K comparable, V any](values []V, keyFn func(v V) K) map[K]V {
m := map[K]V{}
for _, v := range values {
m[keyFn(v)] = v
}
return m
}
func newAqueryHandler(aqueryResult *analysis_v2_proto.ActionGraphContainer) (*aqueryArtifactHandler, error) {
pathFragments := indexBy(aqueryResult.PathFragments, func(pf *analysis_v2_proto.PathFragment) pathFragmentId {
return pathFragmentId(pf.Id)
})
artifactIdToPath := make(map[artifactId]string, len(aqueryResult.Artifacts))
for _, artifact := range aqueryResult.Artifacts {
artifactPath, err := expandPathFragment(pathFragmentId(artifact.PathFragmentId), pathFragments)
if err != nil {
return nil, err
}
if artifact.IsTreeArtifact &&
!strings.HasPrefix(artifactPath, "bazel-out/io_bazel_rules_go/") &&
!strings.HasPrefix(artifactPath, "bazel-out/rules_java_builtin/") {
// Since we're using ninja as an executor, we can't use tree artifacts. Ninja only
// considers a file/directory "dirty" when its mtime changes. Directories' mtimes will
// only change when a file in the directory is added/removed, but not when files in
// the directory are changed, or when files in subdirectories are changed/added/removed.
// Bazel handles this by walking the directory and generating a hash for it after the
// action runs, which we would have to do as well if we wanted to support these
// artifacts in mixed builds.
//
// However, there are some bazel built-in rules that use tree artifacts. Allow those,
// but keep in mind that they'll have incrementality issues.
return nil, fmt.Errorf("tree artifacts are currently not supported in mixed builds: " + artifactPath)
}
artifactIdToPath[artifactId(artifact.Id)] = artifactPath
}
// Map middleman artifact ContentHash to input artifact depset ID.
// Middleman artifacts are treated as "substitute" artifacts for mixed builds. For example,
// if we find a middleman action which has inputs [foo, bar], and output [baz_middleman], then,
// for each other action which has input [baz_middleman], we add [foo, bar] to the inputs for
// that action instead.
middlemanIdToDepsetIds := map[artifactId][]uint32{}
for _, actionEntry := range aqueryResult.Actions {
if actionEntry.Mnemonic == middlemanMnemonic {
for _, outputId := range actionEntry.OutputIds {
middlemanIdToDepsetIds[artifactId(outputId)] = actionEntry.InputDepSetIds
}
}
}
depsetIdToDepset := indexBy(aqueryResult.DepSetOfFiles, func(d *analysis_v2_proto.DepSetOfFiles) depsetId {
return depsetId(d.Id)
})
aqueryHandler := aqueryArtifactHandler{
depsetIdToAqueryDepset: map[depsetId]AqueryDepset{},
depsetHashToAqueryDepset: map[string]AqueryDepset{},
depsetHashToArtifactPathsCache: sync.Map{},
emptyDepsetIds: make(map[depsetId]struct{}, 0),
artifactIdToPath: artifactIdToPath,
}
// Validate and adjust aqueryResult.DepSetOfFiles values.
for _, depset := range aqueryResult.DepSetOfFiles {
_, err := aqueryHandler.populateDepsetMaps(depset, middlemanIdToDepsetIds, depsetIdToDepset)
if err != nil {
return nil, err
}
}
return &aqueryHandler, nil
}
// Ensures that the handler's depsetIdToAqueryDepset map contains an entry for the given
// depset.
func (a *aqueryArtifactHandler) populateDepsetMaps(depset *analysis_v2_proto.DepSetOfFiles, middlemanIdToDepsetIds map[artifactId][]uint32, depsetIdToDepset map[depsetId]*analysis_v2_proto.DepSetOfFiles) (*AqueryDepset, error) {
if aqueryDepset, containsDepset := a.depsetIdToAqueryDepset[depsetId(depset.Id)]; containsDepset {
return &aqueryDepset, nil
}
transitiveDepsetIds := depset.TransitiveDepSetIds
directArtifactPaths := make([]string, 0, len(depset.DirectArtifactIds))
for _, id := range depset.DirectArtifactIds {
aId := artifactId(id)
path, pathExists := a.artifactIdToPath[aId]
if !pathExists {
return nil, fmt.Errorf("undefined input artifactId %d", aId)
}
// Filter out any inputs which are universally dropped, and swap middleman
// artifacts with their corresponding depsets.
if depsetsToUse, isMiddleman := middlemanIdToDepsetIds[aId]; isMiddleman {
// Swap middleman artifacts with their corresponding depsets and drop the middleman artifacts.
transitiveDepsetIds = append(transitiveDepsetIds, depsetsToUse...)
} else if strings.HasSuffix(path, py3wrapperFileName) ||
strings.HasPrefix(path, "../bazel_tools") {
continue
// Drop these artifacts.
// See go/python-binary-host-mixed-build for more details.
// 1) Drop py3wrapper.sh, just use python binary, the launcher script generated by the
// TemplateExpandAction handles everything necessary to launch a Python application.
// 2) ../bazel_tools: they have a MODIFY timestamp 10 years in the future and would cause the
// containing depset to always be considered newer than their outputs.
} else {
directArtifactPaths = append(directArtifactPaths, path)
}
}
childDepsetHashes := make([]string, 0, len(transitiveDepsetIds))
for _, id := range transitiveDepsetIds {
childDepsetId := depsetId(id)
childDepset, exists := depsetIdToDepset[childDepsetId]
if !exists {
if _, empty := a.emptyDepsetIds[childDepsetId]; empty {
continue
} else {
return nil, fmt.Errorf("undefined input depsetId %d (referenced by depsetId %d)", childDepsetId, depset.Id)
}
}
if childAqueryDepset, err := a.populateDepsetMaps(childDepset, middlemanIdToDepsetIds, depsetIdToDepset); err != nil {
return nil, err
} else if childAqueryDepset == nil {
continue
} else {
childDepsetHashes = append(childDepsetHashes, childAqueryDepset.ContentHash)
}
}
if len(directArtifactPaths) == 0 && len(childDepsetHashes) == 0 {
a.emptyDepsetIds[depsetId(depset.Id)] = struct{}{}
return nil, nil
}
aqueryDepset := AqueryDepset{
ContentHash: depsetContentHash(directArtifactPaths, childDepsetHashes),
DirectArtifacts: directArtifactPaths,
TransitiveDepSetHashes: childDepsetHashes,
}
a.depsetIdToAqueryDepset[depsetId(depset.Id)] = aqueryDepset
a.depsetHashToAqueryDepset[aqueryDepset.ContentHash] = aqueryDepset
return &aqueryDepset, nil
}
// getInputPaths flattens the depsets of the given IDs and returns all transitive
// input paths contained in these depsets.
// This is a potentially expensive operation, and should not be invoked except
// for actions which need specialized input handling.
func (a *aqueryArtifactHandler) getInputPaths(depsetIds []uint32) ([]string, error) {
var inputPaths []string
for _, id := range depsetIds {
inputDepSetId := depsetId(id)
depset := a.depsetIdToAqueryDepset[inputDepSetId]
inputArtifacts, err := a.artifactPathsFromDepsetHash(depset.ContentHash)
if err != nil {
return nil, err
}
for _, inputPath := range inputArtifacts {
inputPaths = append(inputPaths, inputPath)
}
}
return inputPaths, nil
}
func (a *aqueryArtifactHandler) artifactPathsFromDepsetHash(depsetHash string) ([]string, error) {
if result, exists := a.depsetHashToArtifactPathsCache.Load(depsetHash); exists {
return result.([]string), nil
}
if depset, exists := a.depsetHashToAqueryDepset[depsetHash]; exists {
result := depset.DirectArtifacts
for _, childHash := range depset.TransitiveDepSetHashes {
childArtifactIds, err := a.artifactPathsFromDepsetHash(childHash)
if err != nil {
return nil, err
}
result = append(result, childArtifactIds...)
}
a.depsetHashToArtifactPathsCache.Store(depsetHash, result)
return result, nil
} else {
return nil, fmt.Errorf("undefined input depset hash %s", depsetHash)
}
}
// AqueryBuildStatements returns a slice of BuildStatements and a slice of AqueryDepset
// which should be registered (and output to a ninja file) to correspond with Bazel's
// action graph, as described by the given action graph json proto.
// BuildStatements are one-to-one with actions in the given action graph, and AqueryDepsets
// are one-to-one with Bazel's depSetOfFiles objects.
func AqueryBuildStatements(aqueryJsonProto []byte, eventHandler *metrics.EventHandler) ([]*BuildStatement, []AqueryDepset, error) {
aqueryProto := &analysis_v2_proto.ActionGraphContainer{}
err := proto.Unmarshal(aqueryJsonProto, aqueryProto)
if err != nil {
return nil, nil, err
}
var aqueryHandler *aqueryArtifactHandler
{
eventHandler.Begin("init_handler")
defer eventHandler.End("init_handler")
aqueryHandler, err = newAqueryHandler(aqueryProto)
if err != nil {
return nil, nil, err
}
}
// allocate both length and capacity so each goroutine can write to an index independently without
// any need for synchronization for slice access.
buildStatements := make([]*BuildStatement, len(aqueryProto.Actions))
{
eventHandler.Begin("build_statements")
defer eventHandler.End("build_statements")
wg := sync.WaitGroup{}
var errOnce sync.Once
id2targets := make(map[uint32]string, len(aqueryProto.Targets))
for _, t := range aqueryProto.Targets {
id2targets[t.GetId()] = t.GetLabel()
}
for i, actionEntry := range aqueryProto.Actions {
wg.Add(1)
go func(i int, actionEntry *analysis_v2_proto.Action) {
if strings.HasPrefix(id2targets[actionEntry.TargetId], "@bazel_tools//") {
// bazel_tools artifacts are dropped from depsets in `populateDepsetMaps()`, so skip
// converting their actions to build statements as well.
buildStatements[i] = nil
} else if buildStatement, aErr := aqueryHandler.actionToBuildStatement(actionEntry); aErr != nil {
errOnce.Do(func() {
aErr = fmt.Errorf("%s: [%s] [%s]", aErr.Error(), actionEntry.GetMnemonic(), id2targets[actionEntry.TargetId])
err = aErr
})
} else {
// set build statement at an index rather than appending such that each goroutine does not
// impact other goroutines
buildStatements[i] = buildStatement
}
wg.Done()
}(i, actionEntry)
}
wg.Wait()
}
if err != nil {
return nil, nil, err
}
depsetsByHash := map[string]AqueryDepset{}
depsets := make([]AqueryDepset, 0, len(aqueryHandler.depsetIdToAqueryDepset))
{
eventHandler.Begin("depsets")
defer eventHandler.End("depsets")
for _, aqueryDepset := range aqueryHandler.depsetIdToAqueryDepset {
if prevEntry, hasKey := depsetsByHash[aqueryDepset.ContentHash]; hasKey {
// Two depsets collide on hash. Ensure that their contents are identical.
if !reflect.DeepEqual(aqueryDepset, prevEntry) {
return nil, nil, fmt.Errorf("two different depsets have the same hash: %v, %v", prevEntry, aqueryDepset)
}
} else {
depsetsByHash[aqueryDepset.ContentHash] = aqueryDepset
depsets = append(depsets, aqueryDepset)
}
}
}
eventHandler.Do("build_statement_sort", func() {
// Build Statements and depsets must be sorted by their content hash to
// preserve determinism between builds (this will result in consistent ninja file
// output). Note they are not sorted by their original IDs nor their Bazel ordering,
// as Bazel gives nondeterministic ordering / identifiers in aquery responses.
sort.Slice(buildStatements, func(i, j int) bool {
// Sort all nil statements to the end of the slice
if buildStatements[i] == nil {
return false
} else if buildStatements[j] == nil {
return true
}
// For build statements, compare output lists. In Bazel, each output file
// may only have one action which generates it, so this will provide
// a deterministic ordering.
outputs_i := buildStatements[i].OutputPaths
outputs_j := buildStatements[j].OutputPaths
if len(outputs_i) != len(outputs_j) {
return len(outputs_i) < len(outputs_j)
}
if len(outputs_i) == 0 {
// No outputs for these actions, so compare commands.
return buildStatements[i].Command < buildStatements[j].Command
}
// There may be multiple outputs, but the output ordering is deterministic.
return outputs_i[0] < outputs_j[0]
})
})
eventHandler.Do("depset_sort", func() {
sort.Slice(depsets, func(i, j int) bool {
return depsets[i].ContentHash < depsets[j].ContentHash
})
})
return buildStatements, depsets, nil
}
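For context on how this exported entry point was consumed, here is a minimal, hypothetical sketch of a caller; the aquery dump path and the use of a zero-value metrics.EventHandler are assumptions for illustration, not part of the removed code.

// Hypothetical caller of AqueryBuildStatements; the aquery dump path is made up.
package main

import (
    "fmt"
    "os"

    "android/soong/bazel"

    "github.com/google/blueprint/metrics"
)

func main() {
    // Assumed location of a `bazel aquery --output=proto` dump.
    data, err := os.ReadFile("out/aquery_output.pb")
    if err != nil {
        panic(err)
    }
    eventHandler := &metrics.EventHandler{}
    statements, depsets, err := bazel.AqueryBuildStatements(data, eventHandler)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%d build statements, %d depsets\n", len(statements), len(depsets))
}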
// depsetContentHash computes and returns a SHA256 checksum of the contents of
// the given depset. This content hash may serve as the depset's identifier.
// Using a content hash for an identifier is superior for determinism. (For example,
// using an integer identifier which depends on the order in which the depsets are
// created would result in nondeterministic depset IDs.)
func depsetContentHash(directPaths []string, transitiveDepsetHashes []string) string {
h := sha256.New()
// Use newline as delimiter, as paths cannot contain newline.
h.Write([]byte(strings.Join(directPaths, "\n")))
h.Write([]byte(strings.Join(transitiveDepsetHashes, "")))
fullHash := base64.RawURLEncoding.EncodeToString(h.Sum(nil))
return fullHash
}
func (a *aqueryArtifactHandler) depsetContentHashes(inputDepsetIds []uint32) ([]string, error) {
var hashes []string
for _, id := range inputDepsetIds {
dId := depsetId(id)
if aqueryDepset, exists := a.depsetIdToAqueryDepset[dId]; !exists {
if _, empty := a.emptyDepsetIds[dId]; !empty {
return nil, fmt.Errorf("undefined (not even empty) input depsetId %d", dId)
}
} else {
hashes = append(hashes, aqueryDepset.ContentHash)
}
}
return hashes, nil
}
// escapes the args received from aquery and creates a command string
func commandString(actionEntry *analysis_v2_proto.Action) string {
argsEscaped := make([]string, len(actionEntry.Arguments))
for i, arg := range actionEntry.Arguments {
if arg == "" {
// If this is an empty string, emit ''
// and not
// 1. (a literal empty string), or
// 2. `''\'''\'''` (an escaped version of '').
//
// If we had used (1), the empty argument would disappear into whitespace when we strings.Join
argsEscaped[i] = "''"
} else {
argsEscaped[i] = proptools.ShellEscapeIncludingSpaces(arg)
}
}
return strings.Join(argsEscaped, " ")
}
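To make the empty-argument handling above concrete, a small in-package sketch (commandString is unexported); the Action literal and the expected command string are illustrative assumptions:

// Illustrative only: shows how commandString preserves empty and space-containing args.
func illustrateCommandString() {
    action := &analysis_v2_proto.Action{
        Arguments: []string{"touch", "", "out dir/file"},
    }
    // The empty argument survives as '' and the space-containing one is quoted,
    // so the joined command should look like (assumption): touch '' 'out dir/file'
    fmt.Println(commandString(action))
}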
func (a *aqueryArtifactHandler) normalActionBuildStatement(actionEntry *analysis_v2_proto.Action) (*BuildStatement, error) {
command := commandString(actionEntry)
inputDepsetHashes, err := a.depsetContentHashes(actionEntry.InputDepSetIds)
if err != nil {
return nil, err
}
outputPaths, depfile, err := a.getOutputPaths(actionEntry)
if err != nil {
return nil, err
}
buildStatement := &BuildStatement{
Command: command,
Depfile: depfile,
OutputPaths: outputPaths,
InputDepsetHashes: inputDepsetHashes,
Env: actionEntry.EnvironmentVariables,
Mnemonic: actionEntry.Mnemonic,
}
if buildStatement.Mnemonic == "GoToolchainBinaryBuild" {
// Unlike b's execution root, mixed build execution root contains a symlink to prebuilts/go
// This causes issues for `GOCACHE=$(mktemp -d) go build ...`
// To prevent this, sandbox this action in mixed builds as well
buildStatement.ShouldRunInSbox = true
}
return buildStatement, nil
}
func (a *aqueryArtifactHandler) templateExpandActionBuildStatement(actionEntry *analysis_v2_proto.Action) (*BuildStatement, error) {
outputPaths, depfile, err := a.getOutputPaths(actionEntry)
if err != nil {
return nil, err
}
if len(outputPaths) != 1 {
return nil, fmt.Errorf("Expect 1 output to template expand action, got: output %q", outputPaths)
}
expandedTemplateContent := expandTemplateContent(actionEntry)
// The expandedTemplateContent is escaped for being used in double quotes and shell unescape,
// and the new line characters (\n) are also changed to \\n which avoids some Ninja escape on \n, which might
// change \n to space and mess up the format of Python programs.
// sed is used to convert \\n back to \n before saving to output file.
// See go/python-binary-host-mixed-build for more details.
command := fmt.Sprintf(`/bin/bash -c 'echo "%[1]s" | sed "s/\\\\n/\\n/g" > %[2]s && chmod a+x %[2]s'`,
escapeCommandlineArgument(expandedTemplateContent), outputPaths[0])
inputDepsetHashes, err := a.depsetContentHashes(actionEntry.InputDepSetIds)
if err != nil {
return nil, err
}
buildStatement := &BuildStatement{
Command: command,
Depfile: depfile,
OutputPaths: outputPaths,
InputDepsetHashes: inputDepsetHashes,
Env: actionEntry.EnvironmentVariables,
Mnemonic: actionEntry.Mnemonic,
}
return buildStatement, nil
}
func (a *aqueryArtifactHandler) fileWriteActionBuildStatement(actionEntry *analysis_v2_proto.Action) (*BuildStatement, error) {
outputPaths, _, err := a.getOutputPaths(actionEntry)
var depsetHashes []string
if err == nil {
depsetHashes, err = a.depsetContentHashes(actionEntry.InputDepSetIds)
}
if err != nil {
return nil, err
}
return &BuildStatement{
Depfile: nil,
OutputPaths: outputPaths,
Env: actionEntry.EnvironmentVariables,
Mnemonic: actionEntry.Mnemonic,
InputDepsetHashes: depsetHashes,
FileContents: actionEntry.FileContents,
IsExecutable: actionEntry.IsExecutable,
}, nil
}
func (a *aqueryArtifactHandler) symlinkTreeActionBuildStatement(actionEntry *analysis_v2_proto.Action) (*BuildStatement, error) {
outputPaths, _, err := a.getOutputPaths(actionEntry)
if err != nil {
return nil, err
}
inputPaths, err := a.getInputPaths(actionEntry.InputDepSetIds)
if err != nil {
return nil, err
}
if len(inputPaths) != 1 || len(outputPaths) != 1 {
return nil, fmt.Errorf("Expect 1 input and 1 output to symlink action, got: input %q, output %q", inputPaths, outputPaths)
}
// The actual command is generated in bazelSingleton.GenerateBuildActions
return &BuildStatement{
Depfile: nil,
OutputPaths: outputPaths,
Env: actionEntry.EnvironmentVariables,
Mnemonic: actionEntry.Mnemonic,
InputPaths: inputPaths,
}, nil
}
type bazelSandwichJson struct {
Target string `json:"target"`
DependOnTarget *bool `json:"depend_on_target,omitempty"`
ImplicitDeps []string `json:"implicit_deps"`
}
func (a *aqueryArtifactHandler) unresolvedSymlinkActionBuildStatement(actionEntry *analysis_v2_proto.Action) (*BuildStatement, error) {
outputPaths, depfile, err := a.getOutputPaths(actionEntry)
if err != nil {
return nil, err
}
if len(actionEntry.InputDepSetIds) != 0 || len(outputPaths) != 1 {
return nil, fmt.Errorf("expected 0 inputs and 1 output to symlink action, got: input %q, output %q", actionEntry.InputDepSetIds, outputPaths)
}
target := actionEntry.UnresolvedSymlinkTarget
if target == "" {
return nil, fmt.Errorf("expected an unresolved_symlink_target, but didn't get one")
}
if filepath.Clean(target) != target {
return nil, fmt.Errorf("expected %q, got %q", filepath.Clean(target), target)
}
if strings.HasPrefix(target, "/") {
return nil, fmt.Errorf("no absolute symlinks allowed: %s", target)
}
out := outputPaths[0]
outDir := filepath.Dir(out)
var implicitDeps []string
if strings.HasPrefix(target, "bazel_sandwich:") {
j := bazelSandwichJson{}
err := json.Unmarshal([]byte(target[len("bazel_sandwich:"):]), &j)
if err != nil {
return nil, err
}
if proptools.BoolDefault(j.DependOnTarget, true) {
implicitDeps = append(implicitDeps, j.Target)
}
implicitDeps = append(implicitDeps, j.ImplicitDeps...)
dotDotsToReachCwd := ""
if outDir != "." {
dotDotsToReachCwd = strings.Repeat("../", strings.Count(outDir, "/")+1)
}
target = proptools.ShellEscapeIncludingSpaces(j.Target)
target = "{DOTDOTS_TO_OUTPUT_ROOT}" + dotDotsToReachCwd + target
} else {
target = proptools.ShellEscapeIncludingSpaces(target)
}
outDir = proptools.ShellEscapeIncludingSpaces(outDir)
out = proptools.ShellEscapeIncludingSpaces(out)
// Use absolute paths, because some soong actions don't play well with relative paths (for example, `cp -d`).
command := fmt.Sprintf("mkdir -p %[1]s && rm -f %[2]s && ln -sf %[3]s %[2]s", outDir, out, target)
symlinkPaths := outputPaths[:]
buildStatement := &BuildStatement{
Command: command,
Depfile: depfile,
OutputPaths: outputPaths,
Env: actionEntry.EnvironmentVariables,
Mnemonic: actionEntry.Mnemonic,
SymlinkPaths: symlinkPaths,
ImplicitDeps: implicitDeps,
}
return buildStatement, nil
}
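For reference, a sketch of how a `bazel_sandwich:`-prefixed symlink target is decoded by the logic above; the target string is a made-up example, and the helper is in-package since bazelSandwichJson is unexported:

// Hypothetical bazel_sandwich target string, decoded the same way as above.
func illustrateBazelSandwichTarget() {
    target := `bazel_sandwich:{"target":"some/bazel/output","implicit_deps":["other/output"]}`
    var j bazelSandwichJson
    if err := json.Unmarshal([]byte(target[len("bazel_sandwich:"):]), &j); err != nil {
        panic(err)
    }
    // DependOnTarget is omitted here, so proptools.BoolDefault treats it as true.
    fmt.Println(j.Target, j.ImplicitDeps)
}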
func (a *aqueryArtifactHandler) symlinkActionBuildStatement(actionEntry *analysis_v2_proto.Action) (*BuildStatement, error) {
outputPaths, depfile, err := a.getOutputPaths(actionEntry)
if err != nil {
return nil, err
}
inputPaths, err := a.getInputPaths(actionEntry.InputDepSetIds)
if err != nil {
return nil, err
}
if len(inputPaths) != 1 || len(outputPaths) != 1 {
return nil, fmt.Errorf("Expect 1 input and 1 output to symlink action, got: input %q, output %q", inputPaths, outputPaths)
}
out := outputPaths[0]
outDir := proptools.ShellEscapeIncludingSpaces(filepath.Dir(out))
out = proptools.ShellEscapeIncludingSpaces(out)
in := filepath.Join("$PWD", proptools.ShellEscapeIncludingSpaces(inputPaths[0]))
// Use absolute paths, because some soong actions don't play well with relative paths (for example, `cp -d`).
command := fmt.Sprintf("mkdir -p %[1]s && rm -f %[2]s && ln -sf %[3]s %[2]s", outDir, out, in)
symlinkPaths := outputPaths[:]
buildStatement := &BuildStatement{
Command: command,
Depfile: depfile,
OutputPaths: outputPaths,
InputPaths: inputPaths,
Env: actionEntry.EnvironmentVariables,
Mnemonic: actionEntry.Mnemonic,
SymlinkPaths: symlinkPaths,
}
return buildStatement, nil
}
func (a *aqueryArtifactHandler) getOutputPaths(actionEntry *analysis_v2_proto.Action) (outputPaths []string, depfile *string, err error) {
for _, outputId := range actionEntry.OutputIds {
outputPath, exists := a.artifactIdToPath[artifactId(outputId)]
if !exists {
err = fmt.Errorf("undefined outputId %d", outputId)
return
}
ext := filepath.Ext(outputPath)
if ext == ".d" {
if depfile != nil {
err = fmt.Errorf("found multiple potential depfiles %q, %q", *depfile, outputPath)
return
} else {
depfile = &outputPath
}
} else {
outputPaths = append(outputPaths, outputPath)
}
}
return
}
// expandTemplateContent substitutes the tokens in a template.
func expandTemplateContent(actionEntry *analysis_v2_proto.Action) string {
replacerString := make([]string, len(actionEntry.Substitutions)*2)
for i, pair := range actionEntry.Substitutions {
value := pair.Value
if val, ok := templateActionOverriddenTokens[pair.Key]; ok {
value = val
}
replacerString[i*2] = pair.Key
replacerString[i*2+1] = value
}
replacer := strings.NewReplacer(replacerString...)
return replacer.Replace(actionEntry.TemplateContent)
}
// \->\\, $->\$, `->\`, "->\", \n->\\n, '->'"'"'
var commandLineArgumentReplacer = strings.NewReplacer(
`\`, `\\`,
`$`, `\$`,
"`", "\\`",
`"`, `\"`,
"\n", "\\n",
`'`, `'"'"'`,
)
func escapeCommandlineArgument(str string) string {
return commandLineArgumentReplacer.Replace(str)
}
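A short in-package sketch of the escaping table above; the input string is arbitrary and the expected output is an assumption spelled out in the comment:

// Illustrates escapeCommandlineArgument on a string containing quotes, $ and a newline.
func illustrateEscaping() {
    in := "echo \"$HOME\"\nls"
    // Expected (assumption): echo \"\$HOME\"\nls  -- the newline becomes a literal \n,
    // which templateExpandActionBuildStatement later converts back with sed.
    fmt.Println(escapeCommandlineArgument(in))
}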
func (a *aqueryArtifactHandler) actionToBuildStatement(actionEntry *analysis_v2_proto.Action) (*BuildStatement, error) {
switch actionEntry.Mnemonic {
// Middleman actions are not handled like other actions; they are handled separately as a
// preparatory step so that their inputs may be relayed to actions depending on middleman
// artifacts.
case middlemanMnemonic:
return nil, nil
// PythonZipper is a bogus action returned by aquery; ignore it (b/236198693)
case "PythonZipper":
return nil, nil
// Skip "Fail" actions, which are placeholder actions designed to always fail.
case "Fail":
return nil, nil
case "BaselineCoverage":
return nil, nil
case "Symlink", "SolibSymlink", "ExecutableSymlink":
return a.symlinkActionBuildStatement(actionEntry)
case "TemplateExpand":
if len(actionEntry.Arguments) < 1 {
return a.templateExpandActionBuildStatement(actionEntry)
}
case "FileWrite", "SourceSymlinkManifest", "RepoMappingManifest":
return a.fileWriteActionBuildStatement(actionEntry)
case "SymlinkTree":
return a.symlinkTreeActionBuildStatement(actionEntry)
case "UnresolvedSymlink":
return a.unresolvedSymlinkActionBuildStatement(actionEntry)
}
if len(actionEntry.Arguments) < 1 {
return nil, errors.New("received action with no command")
}
return a.normalActionBuildStatement(actionEntry)
}
func expandPathFragment(id pathFragmentId, pathFragmentsMap map[pathFragmentId]*analysis_v2_proto.PathFragment) (string, error) {
var labels []string
currId := id
// Only positive IDs are valid for path fragments. An ID of zero indicates a terminal node.
for currId > 0 {
currFragment, ok := pathFragmentsMap[currId]
if !ok {
return "", fmt.Errorf("undefined path fragment id %d", currId)
}
labels = append([]string{currFragment.Label}, labels...)
parentId := pathFragmentId(currFragment.ParentId)
if currId == parentId {
return "", fmt.Errorf("fragment cannot refer to itself as parent %#v", currFragment)
}
currId = parentId
}
return filepath.Join(labels...), nil
}
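To make the parent-chain walk concrete, an in-package, test-style sketch with made-up fragment IDs; expandPathFragment is unexported, so this only compiles inside the bazel package:

// Hypothetical fragment table: 3 ("foo.o") -> 2 ("bin") -> 1 ("bazel-out").
func illustrateExpandPathFragment() {
    fragments := map[pathFragmentId]*analysis_v2_proto.PathFragment{
        1: {Id: 1, Label: "bazel-out"},
        2: {Id: 2, Label: "bin", ParentId: 1},
        3: {Id: 3, Label: "foo.o", ParentId: 2},
    }
    path, err := expandPathFragment(3, fragments)
    if err != nil {
        panic(err)
    }
    fmt.Println(path) // bazel-out/bin/foo.o
}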

File diff suppressed because it is too large.


@@ -1,237 +0,0 @@
// Copyright 2023 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bazel
import (
"bytes"
"encoding/gob"
"fmt"
"net"
os_lib "os"
"os/exec"
"path/filepath"
"strings"
"time"
)
// Logs events of ProxyServer.
type ServerLogger interface {
Fatal(v ...interface{})
Fatalf(format string, v ...interface{})
Println(v ...interface{})
}
// CmdRequest is a request to the Bazel Proxy server.
type CmdRequest struct {
// Args to the Bazel command.
Argv []string
// Environment variables to pass to the Bazel invocation. Strings should be of
// the form "KEY=VALUE".
Env []string
}
// CmdResponse is a response from the Bazel Proxy server.
type CmdResponse struct {
Stdout string
Stderr string
ErrorString string
}
// ProxyClient is a client which can issue Bazel commands to the Bazel
// proxy server. Requests are issued (and responses received) via a unix socket.
// See ProxyServer for more details.
type ProxyClient struct {
outDir string
}
// ProxyServer is a server which runs as a background goroutine. Each
// request to the server describes a Bazel command which the server should run.
// The server then issues the Bazel command, and returns a response describing
// the stdout/stderr of the command.
// Client-server communication is done via a unix socket under the output
// directory.
// The server is intended to circumvent sandboxing for subprocesses of the
// build. The build orchestrator (soong_ui) can launch a server to exist outside
// of sandboxing, and sandboxed processes (such as soong_build) can issue
// bazel commands through this socket tunnel. This allows a sandboxed process
// to issue bazel requests to a bazel that resides outside of the sandbox. This
// is particularly useful to maintain a persistent Bazel server which lives
// past the duration of a single build.
// The ProxyServer will only live as long as soong_ui does; the
// underlying Bazel server will live past the duration of the build.
type ProxyServer struct {
logger ServerLogger
outDir string
workspaceDir string
bazeliskVersion string
// The server goroutine will listen on this channel and stop handling requests
// once it is written to.
done chan struct{}
}
// NewProxyClient is a constructor for a ProxyClient.
func NewProxyClient(outDir string) *ProxyClient {
return &ProxyClient{
outDir: outDir,
}
}
func unixSocketPath(outDir string) string {
return filepath.Join(outDir, "bazelsocket.sock")
}
// IssueCommand issues a request to the Bazel Proxy Server to issue a Bazel
// request. Returns a response describing the output from the Bazel process
// (if the Bazel process had an error, then the response will include an error).
// Returns an error if there was an issue with the connection to the Bazel Proxy
// server.
func (b *ProxyClient) IssueCommand(req CmdRequest) (CmdResponse, error) {
var resp CmdResponse
var err error
// Use a 1 second timeout when dialing. This is chosen to be a relatively
// short timeout, because the proxy server should accept requests quite
// quickly.
d := net.Dialer{Timeout: 1 * time.Second}
var conn net.Conn
conn, err = d.Dial("unix", unixSocketPath(b.outDir))
if err != nil {
return resp, err
}
defer conn.Close()
enc := gob.NewEncoder(conn)
if err = enc.Encode(req); err != nil {
return resp, err
}
dec := gob.NewDecoder(conn)
err = dec.Decode(&resp)
return resp, err
}
// NewProxyServer is a constructor for a ProxyServer.
func NewProxyServer(logger ServerLogger, outDir string, workspaceDir string, bazeliskVersion string) *ProxyServer {
if len(bazeliskVersion) > 0 {
logger.Println("** Using Bazelisk for this build, due to env var USE_BAZEL_VERSION=" + bazeliskVersion + " **")
}
return &ProxyServer{
logger: logger,
outDir: outDir,
workspaceDir: workspaceDir,
done: make(chan struct{}),
bazeliskVersion: bazeliskVersion,
}
}
func ExecBazel(bazelPath string, workspaceDir string, request CmdRequest) (stdout []byte, stderr []byte, cmdErr error) {
bazelCmd := exec.Command(bazelPath, request.Argv...)
bazelCmd.Dir = workspaceDir
bazelCmd.Env = request.Env
stderrBuffer := &bytes.Buffer{}
bazelCmd.Stderr = stderrBuffer
if output, err := bazelCmd.Output(); err != nil {
cmdErr = fmt.Errorf("bazel command failed: %s\n---command---\n%s\n---env---\n%s\n---stderr---\n%s---",
err, bazelCmd, strings.Join(bazelCmd.Env, "\n"), stderrBuffer)
} else {
stdout = output
}
stderr = stderrBuffer.Bytes()
return
}
func (b *ProxyServer) handleRequest(conn net.Conn) error {
defer conn.Close()
dec := gob.NewDecoder(conn)
var req CmdRequest
if err := dec.Decode(&req); err != nil {
return fmt.Errorf("Error decoding request: %s", err)
}
if len(b.bazeliskVersion) > 0 {
req.Env = append(req.Env, "USE_BAZEL_VERSION="+b.bazeliskVersion)
}
stdout, stderr, cmdErr := ExecBazel("./build/bazel/bin/bazel", b.workspaceDir, req)
errorString := ""
if cmdErr != nil {
errorString = cmdErr.Error()
}
resp := CmdResponse{string(stdout), string(stderr), errorString}
enc := gob.NewEncoder(conn)
if err := enc.Encode(&resp); err != nil {
return fmt.Errorf("Error encoding response: %s", err)
}
return nil
}
func (b *ProxyServer) listenUntilClosed(listener net.Listener) error {
for {
// Check for connections every 1 second. This is a blocking operation, so
// if the server is closed, the goroutine will not fully close until this
// deadline is reached. Thus, this deadline is short (but not too short
// so that the routine churns).
listener.(*net.UnixListener).SetDeadline(time.Now().Add(time.Second))
conn, err := listener.Accept()
select {
case <-b.done:
return nil
default:
}
if err != nil {
if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() {
// Timeout is normal and expected while waiting for client to establish
// a connection.
continue
} else {
b.logger.Fatalf("Listener error: %s", err)
}
}
err = b.handleRequest(conn)
if err != nil {
b.logger.Fatal(err)
}
}
}
// Start initializes the server unix socket and (in a separate goroutine)
// handles requests on the socket until the server is closed. Returns an error
// if a failure occurs during initialization. Will log any post-initialization
// errors to the server's logger.
func (b *ProxyServer) Start() error {
unixSocketAddr := unixSocketPath(b.outDir)
if err := os_lib.RemoveAll(unixSocketAddr); err != nil {
return fmt.Errorf("couldn't remove socket '%s': %s", unixSocketAddr, err)
}
listener, err := net.Listen("unix", unixSocketAddr)
if err != nil {
return fmt.Errorf("error listening on socket '%s': %s", unixSocketAddr, err)
}
go b.listenUntilClosed(listener)
return nil
}
// Close shuts down the server. This will stop the server from listening for
// additional requests.
func (b *ProxyServer) Close() {
b.done <- struct{}{}
}
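A minimal, hypothetical sketch of wiring the client and server above together: *log.Logger satisfies ServerLogger, and the out directory, workspace path, and bazel arguments are placeholders. Note the server shells out to ./build/bazel/bin/bazel under the workspace, so this only works inside a full Android tree.

// Hypothetical wiring of ProxyServer and ProxyClient; paths and args are examples only.
package main

import (
    "log"
    "os"

    "android/soong/bazel"
)

func main() {
    logger := log.New(os.Stderr, "", log.LstdFlags) // *log.Logger satisfies ServerLogger
    server := bazel.NewProxyServer(logger, "out", ".", "") // "" means no bazelisk version override
    if err := server.Start(); err != nil {
        panic(err)
    }
    defer server.Close()

    client := bazel.NewProxyClient("out")
    resp, err := client.IssueCommand(bazel.CmdRequest{
        Argv: []string{"info", "output_base"},
        Env:  os.Environ(),
    })
    if err != nil {
        panic(err)
    }
    logger.Println(resp.Stdout, resp.ErrorString)
}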


@@ -1,30 +0,0 @@
package bazel
type RunName string
// Below is a list of bazel execution run names used throughout the
// Platform Build systems. Each run name represents a unique key
// to query the bazel metrics.
const (
// Perform a bazel build of the phony root to generate symlink forests
// for dependencies of the bazel build.
BazelBuildPhonyRootRunName = RunName("bazel-build-phony-root")
// Perform aquery of the bazel build root to retrieve action information.
AqueryBuildRootRunName = RunName("aquery-buildroot")
// Perform cquery of the Bazel build root and its dependencies.
CqueryBuildRootRunName = RunName("cquery-buildroot")
// Run bazel as a ninja executor
BazelNinjaExecRunName = RunName("bazel-ninja-exec")
SoongInjectionDirName = "soong_injection"
GeneratedBazelFileWarning = "# GENERATED FOR BAZEL FROM SOONG. DO NOT EDIT."
)
// String returns the name of the run.
func (c RunName) String() string {
return string(c)
}


@@ -1,17 +0,0 @@
package {
default_applicable_licenses: ["Android-Apache-2.0"],
}
bootstrap_go_package {
name: "soong-cquery",
pkgPath: "android/soong/bazel/cquery",
srcs: [
"request_type.go",
],
pluginFor: [
"soong_build",
],
testSrcs: [
"request_type_test.go",
],
}


@@ -1,426 +0,0 @@
package cquery
import (
"encoding/json"
"fmt"
"strings"
)
var (
GetOutputFiles = &getOutputFilesRequestType{}
GetCcInfo = &getCcInfoType{}
GetApexInfo = &getApexInfoType{}
GetCcUnstrippedInfo = &getCcUnstrippedInfoType{}
GetPrebuiltFileInfo = &getPrebuiltFileInfo{}
)
type CcAndroidMkInfo struct {
LocalStaticLibs []string
LocalWholeStaticLibs []string
LocalSharedLibs []string
}
type CcInfo struct {
CcAndroidMkInfo
OutputFiles []string
CcObjectFiles []string
CcSharedLibraryFiles []string
CcStaticLibraryFiles []string
Includes []string
SystemIncludes []string
Headers []string
// Archives owned by the current target (not by its dependencies). These will
// be a subset of OutputFiles. (For static libraries, this will be equal to OutputFiles,
// but a general cc_library will also have dynamic libraries in its output files.)
RootStaticArchives []string
// Dynamic libraries (.so files) created by the current target. These will
// be a subset of OutputFiles. (For shared libraries, this will be equal to OutputFiles,
// but a general cc_library will also have dynamic libraries in its output files.)
RootDynamicLibraries []string
TidyFiles []string
TocFile string
UnstrippedOutput string
AbiDiffFiles []string
}
type getOutputFilesRequestType struct{}
// Name returns a string name for this request type. Such request type names must be unique,
// and must only consist of alphanumeric characters.
func (g getOutputFilesRequestType) Name() string {
return "getOutputFiles"
}
// StarlarkFunctionBody returns a starlark function body to process this request type.
// The returned string is the body of a Starlark function which obtains
// all request-relevant information about a target and returns a string containing
// this information.
// The function should have the following properties:
// - The arguments are `target` (a configured target) and `id_string` (the label + configuration).
// - The return value must be a string.
// - The function body should not be indented outside of its own scope.
func (g getOutputFilesRequestType) StarlarkFunctionBody() string {
return "return ', '.join([f.path for f in target.files.to_list()])"
}
// ParseResult returns a value obtained by parsing the result of the request's Starlark function.
// The given rawString must correspond to the string output which was created by evaluating the
// Starlark given in StarlarkFunctionBody.
func (g getOutputFilesRequestType) ParseResult(rawString string) []string {
return splitOrEmpty(rawString, ", ")
}
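The contract documented above (a unique alphanumeric Name, a StarlarkFunctionBody that takes `target` and `id_string` and returns a string, and a ParseResult) is enough to sketch a new request type; the getLabel request below is hypothetical and only illustrates the shape:

// Hypothetical request type, not part of the removed code; follows the documented contract.
type getLabelType struct{}

// Name must be unique among request types and purely alphanumeric.
func (g getLabelType) Name() string {
    return "getLabel"
}

// StarlarkFunctionBody receives `target` and `id_string` and must return a string.
func (g getLabelType) StarlarkFunctionBody() string {
    return "return str(target.label)"
}

// ParseResult converts the raw Starlark output back into a Go value.
func (g getLabelType) ParseResult(rawString string) string {
    return strings.TrimSpace(rawString)
}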
type getCcInfoType struct{}
// Name returns a string name for this request type. Such request type names must be unique,
// and must only consist of alphanumeric characters.
func (g getCcInfoType) Name() string {
return "getCcInfo"
}
// StarlarkFunctionBody returns a starlark function body to process this request type.
// The returned string is the body of a Starlark function which obtains
// all request-relevant information about a target and returns a string containing
// this information.
// The function should have the following properties:
// - The arguments are `target` (a configured target) and `id_string` (the label + configuration).
// - The return value must be a string.
// - The function body should not be indented outside of its own scope.
func (g getCcInfoType) StarlarkFunctionBody() string {
return `
outputFiles = [f.path for f in target.files.to_list()]
p = providers(target)
cc_info = p.get("CcInfo")
if not cc_info:
fail("%s did not provide CcInfo" % id_string)
includes = cc_info.compilation_context.includes.to_list()
system_includes = cc_info.compilation_context.system_includes.to_list()
headers = [f.path for f in cc_info.compilation_context.headers.to_list()]
ccObjectFiles = []
staticLibraries = []
rootStaticArchives = []
linker_inputs = cc_info.linking_context.linker_inputs.to_list()
static_info_tag = "//build/bazel/rules/cc:cc_library_static.bzl%CcStaticLibraryInfo"
if static_info_tag in p:
static_info = p[static_info_tag]
ccObjectFiles = [f.path for f in static_info.objects]
rootStaticArchives = [static_info.root_static_archive.path]
else:
for linker_input in linker_inputs:
for library in linker_input.libraries:
for object in library.objects:
ccObjectFiles += [object.path]
if library.static_library:
staticLibraries.append(library.static_library.path)
if linker_input.owner == target.label:
rootStaticArchives.append(library.static_library.path)
sharedLibraries = []
rootSharedLibraries = []
shared_info_tag = "//build/bazel/rules/cc:cc_library_shared.bzl%CcSharedLibraryOutputInfo"
stubs_tag = "//build/bazel/rules/cc:cc_stub_library.bzl%CcStubInfo"
unstripped_tag = "//build/bazel/rules/cc:stripped_cc_common.bzl%CcUnstrippedInfo"
unstripped = ""
if shared_info_tag in p:
shared_info = p[shared_info_tag]
path = shared_info.output_file.path
sharedLibraries.append(path)
rootSharedLibraries += [path]
unstripped = path
if unstripped_tag in p:
unstripped = p[unstripped_tag].unstripped.path
elif stubs_tag in p:
rootSharedLibraries.extend([f.path for f in target.files.to_list()])
else:
for linker_input in linker_inputs:
for library in linker_input.libraries:
if library.dynamic_library:
path = library.dynamic_library.path
sharedLibraries.append(path)
if linker_input.owner == target.label:
rootSharedLibraries.append(path)
toc_file = ""
toc_file_tag = "//build/bazel/rules/cc:generate_toc.bzl%CcTocInfo"
if toc_file_tag in p:
toc_file = p[toc_file_tag].toc.path
else:
# NOTE: It's OK if there's no ToC, as Soong just uses it for optimization
pass
tidy_files = []
clang_tidy_info = p.get("//build/bazel/rules/cc:clang_tidy.bzl%ClangTidyInfo")
if clang_tidy_info:
tidy_files = [v.path for v in clang_tidy_info.transitive_tidy_files.to_list()]
abi_diff_files = []
abi_diff_info = p.get("//build/bazel/rules/abi:abi_dump.bzl%AbiDiffInfo")
if abi_diff_info:
abi_diff_files = [f.path for f in abi_diff_info.diff_files.to_list()]
local_static_libs = []
local_whole_static_libs = []
local_shared_libs = []
androidmk_tag = "//build/bazel/rules/cc:cc_library_common.bzl%CcAndroidMkInfo"
if androidmk_tag in p:
androidmk_info = p[androidmk_tag]
local_static_libs = androidmk_info.local_static_libs
local_whole_static_libs = androidmk_info.local_whole_static_libs
local_shared_libs = androidmk_info.local_shared_libs
return json.encode({
"OutputFiles": outputFiles,
"CcObjectFiles": ccObjectFiles,
"CcSharedLibraryFiles": sharedLibraries,
"CcStaticLibraryFiles": staticLibraries,
"Includes": includes,
"SystemIncludes": system_includes,
"Headers": headers,
"RootStaticArchives": rootStaticArchives,
"RootDynamicLibraries": rootSharedLibraries,
"TidyFiles": [t for t in tidy_files],
"TocFile": toc_file,
"UnstrippedOutput": unstripped,
"AbiDiffFiles": abi_diff_files,
"LocalStaticLibs": [l for l in local_static_libs],
"LocalWholeStaticLibs": [l for l in local_whole_static_libs],
"LocalSharedLibs": [l for l in local_shared_libs],
})`
}
// ParseResult returns a value obtained by parsing the result of the request's Starlark function.
// The given rawString must correspond to the string output which was created by evaluating the
// Starlark given in StarlarkFunctionBody.
func (g getCcInfoType) ParseResult(rawString string) (CcInfo, error) {
var ccInfo CcInfo
if err := parseJson(rawString, &ccInfo); err != nil {
return ccInfo, err
}
return ccInfo, nil
}
// Query Bazel for the artifacts generated by the apex modules.
type getApexInfoType struct{}
// Name returns a string name for this request type. Such request type names must be unique,
// and must only consist of alphanumeric characters.
func (g getApexInfoType) Name() string {
return "getApexInfo"
}
// StarlarkFunctionBody returns a starlark function body to process this request type.
// The returned string is the body of a Starlark function which obtains
// all request-relevant information about a target and returns a string containing
// this information. The function should have the following properties:
// - The arguments are `target` (a configured target) and `id_string` (the label + configuration).
// - The return value must be a string.
// - The function body should not be indented outside of its own scope.
func (g getApexInfoType) StarlarkFunctionBody() string {
return `
info = providers(target).get("//build/bazel/rules/apex:apex_info.bzl%ApexInfo")
if not info:
fail("%s did not provide ApexInfo" % id_string)
bundle_key_info = info.bundle_key_info
container_key_info = info.container_key_info
signed_compressed_output = "" # no .capex if the apex is not compressible, cannot be None as it needs to be json encoded.
if info.signed_compressed_output:
signed_compressed_output = info.signed_compressed_output.path
mk_info = providers(target).get("//build/bazel/rules/apex:apex_info.bzl%ApexMkInfo")
if not mk_info:
fail("%s did not provide ApexMkInfo" % id_string)
tidy_files = []
clang_tidy_info = providers(target).get("//build/bazel/rules/cc:clang_tidy.bzl%ClangTidyInfo")
if clang_tidy_info:
tidy_files = [v.path for v in clang_tidy_info.transitive_tidy_files.to_list()]
return json.encode({
"signed_output": info.signed_output.path,
"signed_compressed_output": signed_compressed_output,
"unsigned_output": info.unsigned_output.path,
"provides_native_libs": [str(lib) for lib in info.provides_native_libs],
"requires_native_libs": [str(lib) for lib in info.requires_native_libs],
"bundle_key_info": [bundle_key_info.public_key.path, bundle_key_info.private_key.path],
"container_key_info": [container_key_info.pem.path, container_key_info.pk8.path, container_key_info.key_name],
"package_name": info.package_name,
"symbols_used_by_apex": info.symbols_used_by_apex.path,
"java_symbols_used_by_apex": info.java_symbols_used_by_apex.path,
"backing_libs": info.backing_libs.path,
"bundle_file": info.base_with_config_zip.path,
"installed_files": info.installed_files.path,
"make_modules_to_install": mk_info.make_modules_to_install,
"files_info": mk_info.files_info,
"tidy_files": [t for t in tidy_files],
})`
}
type ApexInfo struct {
// From the ApexInfo provider
SignedOutput string `json:"signed_output"`
SignedCompressedOutput string `json:"signed_compressed_output"`
UnsignedOutput string `json:"unsigned_output"`
ProvidesLibs []string `json:"provides_native_libs"`
RequiresLibs []string `json:"requires_native_libs"`
BundleKeyInfo []string `json:"bundle_key_info"`
ContainerKeyInfo []string `json:"container_key_info"`
PackageName string `json:"package_name"`
SymbolsUsedByApex string `json:"symbols_used_by_apex"`
JavaSymbolsUsedByApex string `json:"java_symbols_used_by_apex"`
BackingLibs string `json:"backing_libs"`
BundleFile string `json:"bundle_file"`
InstalledFiles string `json:"installed_files"`
TidyFiles []string `json:"tidy_files"`
// From the ApexMkInfo provider
MakeModulesToInstall []string `json:"make_modules_to_install"`
PayloadFilesInfo []map[string]string `json:"files_info"`
}
// ParseResult returns a value obtained by parsing the result of the request's Starlark function.
// The given rawString must correspond to the string output which was created by evaluating the
// Starlark given in StarlarkFunctionBody.
func (g getApexInfoType) ParseResult(rawString string) (ApexInfo, error) {
var info ApexInfo
err := parseJson(rawString, &info)
return info, err
}
// getCcUnstrippedInfoType implements cqueryRequest interface. It handles the
// interaction with `bazel cquery` to retrieve CcUnstrippedInfo provided
// by the `cc_binary` and `cc_shared_library` rules.
type getCcUnstrippedInfoType struct{}
func (g getCcUnstrippedInfoType) Name() string {
return "getCcUnstrippedInfo"
}
func (g getCcUnstrippedInfoType) StarlarkFunctionBody() string {
return `
p = providers(target)
output_path = target.files.to_list()[0].path
unstripped = output_path
unstripped_tag = "//build/bazel/rules/cc:stripped_cc_common.bzl%CcUnstrippedInfo"
if unstripped_tag in p:
unstripped_info = p[unstripped_tag]
unstripped = unstripped_info.unstripped[0].files.to_list()[0].path
local_static_libs = []
local_whole_static_libs = []
local_shared_libs = []
androidmk_tag = "//build/bazel/rules/cc:cc_library_common.bzl%CcAndroidMkInfo"
if androidmk_tag in p:
androidmk_info = p[androidmk_tag]
local_static_libs = androidmk_info.local_static_libs
local_whole_static_libs = androidmk_info.local_whole_static_libs
local_shared_libs = androidmk_info.local_shared_libs
tidy_files = []
clang_tidy_info = p.get("//build/bazel/rules/cc:clang_tidy.bzl%ClangTidyInfo")
if clang_tidy_info:
tidy_files = [v.path for v in clang_tidy_info.transitive_tidy_files.to_list()]
return json.encode({
"OutputFile": output_path,
"UnstrippedOutput": unstripped,
"LocalStaticLibs": [l for l in local_static_libs],
"LocalWholeStaticLibs": [l for l in local_whole_static_libs],
"LocalSharedLibs": [l for l in local_shared_libs],
"TidyFiles": [t for t in tidy_files],
})
`
}
// ParseResult returns a value obtained by parsing the result of the request's Starlark function.
// The given rawString must correspond to the string output which was created by evaluating the
// Starlark given in StarlarkFunctionBody.
func (g getCcUnstrippedInfoType) ParseResult(rawString string) (CcUnstrippedInfo, error) {
var info CcUnstrippedInfo
err := parseJson(rawString, &info)
return info, err
}
type CcUnstrippedInfo struct {
CcAndroidMkInfo
OutputFile string
UnstrippedOutput string
TidyFiles []string
}
// splitOrEmpty is a modification of strings.Split() that returns an empty list
// if the given string is empty.
func splitOrEmpty(s string, sep string) []string {
if len(s) < 1 {
return []string{}
} else {
return strings.Split(s, sep)
}
}
// parseJson decodes json string into the fields of the receiver.
// Unknown attribute name causes panic.
func parseJson(jsonString string, info interface{}) error {
decoder := json.NewDecoder(strings.NewReader(jsonString))
decoder.DisallowUnknownFields() // useful to detect typos, e.g. in unit tests
err := decoder.Decode(info)
if err != nil {
return fmt.Errorf("cannot parse cquery result '%s': %s", jsonString, err)
}
return nil
}
type getPrebuiltFileInfo struct{}
// Name returns a string name for this request type. Such request type names must be unique,
// and must only consist of alphanumeric characters.
func (g getPrebuiltFileInfo) Name() string {
return "getPrebuiltFileInfo"
}
// StarlarkFunctionBody returns a starlark function body to process this request type.
// The returned string is the body of a Starlark function which obtains
// all request-relevant information about a target and returns a string containing
// this information.
// The function should have the following properties:
// - The arguments are `target` (a configured target) and `id_string` (the label + configuration).
// - The return value must be a string.
// - The function body should not be indented outside of its own scope.
func (g getPrebuiltFileInfo) StarlarkFunctionBody() string {
return `
p = providers(target)
prebuilt_file_info = p.get("//build/bazel/rules:prebuilt_file.bzl%PrebuiltFileInfo")
if not prebuilt_file_info:
fail("%s did not provide PrebuiltFileInfo" % id_string)
return json.encode({
"Src": prebuilt_file_info.src.path,
"Dir": prebuilt_file_info.dir,
"Filename": prebuilt_file_info.filename,
"Installable": prebuilt_file_info.installable,
})`
}
type PrebuiltFileInfo struct {
// TODO: b/207489266 - Fully support all properties in prebuilt_file
Src string
Dir string
Filename string
Installable bool
}
// ParseResult returns a value obtained by parsing the result of the request's Starlark function.
// The given rawString must correspond to the string output which was created by evaluating the
// Starlark given in StarlarkFunctionBody.
func (g getPrebuiltFileInfo) ParseResult(rawString string) (PrebuiltFileInfo, error) {
var info PrebuiltFileInfo
err := parseJson(rawString, &info)
return info, err
}
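A short sketch of the round trip for the prebuilt-file request: the Starlark body above returns JSON, and ParseResult decodes it; the JSON literal below is a made-up example.

// Illustrative only: feeds a hand-written JSON blob through GetPrebuiltFileInfo.ParseResult.
func illustratePrebuiltFileRoundTrip() {
    raw := `{"Src":"a/b/file.txt","Dir":"etc","Filename":"file.txt","Installable":true}`
    info, err := GetPrebuiltFileInfo.ParseResult(raw)
    if err != nil {
        panic(err)
    }
    fmt.Println(info.Src, info.Dir, info.Filename, info.Installable)
}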


@@ -1,281 +0,0 @@
package cquery
import (
"encoding/json"
"reflect"
"strings"
"testing"
)
func TestGetOutputFilesParseResults(t *testing.T) {
t.Parallel()
testCases := []struct {
description string
input string
expectedOutput []string
}{
{
description: "no result",
input: "",
expectedOutput: []string{},
},
{
description: "one result",
input: "test",
expectedOutput: []string{"test"},
},
{
description: "splits on comma with space",
input: "foo, bar",
expectedOutput: []string{"foo", "bar"},
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
actualOutput := GetOutputFiles.ParseResult(tc.input)
if !reflect.DeepEqual(tc.expectedOutput, actualOutput) {
t.Errorf("expected %#v != actual %#v", tc.expectedOutput, actualOutput)
}
})
}
}
func TestGetCcInfoParseResults(t *testing.T) {
t.Parallel()
testCases := []struct {
description string
inputCcInfo CcInfo
expectedOutput CcInfo
}{
{
description: "no result",
inputCcInfo: CcInfo{},
expectedOutput: CcInfo{},
},
{
description: "all items set",
inputCcInfo: CcInfo{
OutputFiles: []string{"out1", "out2"},
CcObjectFiles: []string{"object1", "object2"},
CcSharedLibraryFiles: []string{"shared_lib1", "shared_lib2"},
CcStaticLibraryFiles: []string{"static_lib1", "static_lib2"},
Includes: []string{".", "dir/subdir"},
SystemIncludes: []string{"system/dir", "system/other/dir"},
Headers: []string{"dir/subdir/hdr.h"},
RootStaticArchives: []string{"rootstaticarchive1"},
RootDynamicLibraries: []string{"rootdynamiclibrary1"},
TocFile: "lib.so.toc",
},
expectedOutput: CcInfo{
OutputFiles: []string{"out1", "out2"},
CcObjectFiles: []string{"object1", "object2"},
CcSharedLibraryFiles: []string{"shared_lib1", "shared_lib2"},
CcStaticLibraryFiles: []string{"static_lib1", "static_lib2"},
Includes: []string{".", "dir/subdir"},
SystemIncludes: []string{"system/dir", "system/other/dir"},
Headers: []string{"dir/subdir/hdr.h"},
RootStaticArchives: []string{"rootstaticarchive1"},
RootDynamicLibraries: []string{"rootdynamiclibrary1"},
TocFile: "lib.so.toc",
},
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
jsonInput, _ := json.Marshal(tc.inputCcInfo)
actualOutput, err := GetCcInfo.ParseResult(string(jsonInput))
if err != nil {
t.Errorf("error parsing result: %q", err)
} else if err == nil && !reflect.DeepEqual(tc.expectedOutput, actualOutput) {
t.Errorf("expected %#v\n!= actual %#v", tc.expectedOutput, actualOutput)
}
})
}
}
func TestGetCcInfoParseResultsError(t *testing.T) {
t.Parallel()
testCases := []struct {
description string
input string
expectedError string
}{
{
description: "not json",
input: ``,
expectedError: `cannot parse cquery result '': EOF`,
},
{
description: "invalid field",
input: `{
"toc_file": "dir/file.so.toc"
}`,
expectedError: `json: unknown field "toc_file"`,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
_, err := GetCcInfo.ParseResult(tc.input)
if !strings.Contains(err.Error(), tc.expectedError) {
t.Errorf("expected string %q in error message, got %q", tc.expectedError, err)
}
})
}
}
func TestGetApexInfoParseResults(t *testing.T) {
t.Parallel()
testCases := []struct {
description string
input string
expectedOutput ApexInfo
}{
{
description: "no result",
input: "{}",
expectedOutput: ApexInfo{},
},
{
description: "one result",
input: `{
"signed_output":"my.apex",
"unsigned_output":"my.apex.unsigned",
"requires_native_libs":["//bionic/libc:libc","//bionic/libdl:libdl"],
"bundle_key_info":["foo.pem", "foo.privkey"],
"container_key_info":["foo.x509.pem", "foo.pk8", "foo"],
"package_name":"package.name",
"symbols_used_by_apex": "path/to/my.apex_using.txt",
"backing_libs":"path/to/backing.txt",
"bundle_file": "dir/bundlefile.zip",
"installed_files":"path/to/installed-files.txt",
"provides_native_libs":[],
"make_modules_to_install": ["foo","bar"]
}`,
expectedOutput: ApexInfo{
// ApexInfo
SignedOutput: "my.apex",
UnsignedOutput: "my.apex.unsigned",
RequiresLibs: []string{"//bionic/libc:libc", "//bionic/libdl:libdl"},
ProvidesLibs: []string{},
BundleKeyInfo: []string{"foo.pem", "foo.privkey"},
ContainerKeyInfo: []string{"foo.x509.pem", "foo.pk8", "foo"},
PackageName: "package.name",
SymbolsUsedByApex: "path/to/my.apex_using.txt",
BackingLibs: "path/to/backing.txt",
BundleFile: "dir/bundlefile.zip",
InstalledFiles: "path/to/installed-files.txt",
// ApexMkInfo
MakeModulesToInstall: []string{"foo", "bar"},
},
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
actualOutput, err := GetApexInfo.ParseResult(tc.input)
if err != nil {
t.Errorf("Unexpected error %q", err)
}
if !reflect.DeepEqual(tc.expectedOutput, actualOutput) {
t.Errorf("expected %#v != actual %#v", tc.expectedOutput, actualOutput)
}
})
}
}
func TestGetApexInfoParseResultsError(t *testing.T) {
t.Parallel()
testCases := []struct {
description string
input string
expectedError string
}{
{
description: "not json",
input: ``,
expectedError: `cannot parse cquery result '': EOF`,
},
{
description: "invalid field",
input: `{
"fake_field": "path/to/file"
}`,
expectedError: `json: unknown field "fake_field"`,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
_, err := GetApexInfo.ParseResult(tc.input)
if !strings.Contains(err.Error(), tc.expectedError) {
t.Errorf("expected string %q in error message, got %q", tc.expectedError, err)
}
})
}
}
func TestGetCcUnstrippedParseResults(t *testing.T) {
t.Parallel()
testCases := []struct {
description string
input string
expectedOutput CcUnstrippedInfo
}{
{
description: "no result",
input: "{}",
expectedOutput: CcUnstrippedInfo{},
},
{
description: "one result",
input: `{"OutputFile":"myapp", "UnstrippedOutput":"myapp_unstripped"}`,
expectedOutput: CcUnstrippedInfo{
OutputFile: "myapp",
UnstrippedOutput: "myapp_unstripped",
},
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
actualOutput, err := GetCcUnstrippedInfo.ParseResult(tc.input)
if err != nil {
t.Errorf("Unexpected error %q", err)
}
if !reflect.DeepEqual(tc.expectedOutput, actualOutput) {
t.Errorf("expected %#v != actual %#v", tc.expectedOutput, actualOutput)
}
})
}
}
func TestGetCcUnstrippedParseResultsErrors(t *testing.T) {
t.Parallel()
testCases := []struct {
description string
input string
expectedError string
}{
{
description: "not json",
input: ``,
expectedError: `cannot parse cquery result '': EOF`,
},
{
description: "invalid field",
input: `{
"fake_field": "path/to/file"
}`,
expectedError: `json: unknown field "fake_field"`,
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
_, err := GetCcUnstrippedInfo.ParseResult(tc.input)
if !strings.Contains(err.Error(), tc.expectedError) {
t.Errorf("expected string %q in error message, got %q", tc.expectedError, err)
}
})
}
}

go.mod

@@ -5,6 +5,5 @@ go 1.22
require (
github.com/google/blueprint v0.0.0
google.golang.org/protobuf v0.0.0
prebuilts/bazel/common/proto/analysis_v2 v0.0.0
go.starlark.net v0.0.0
)


@@ -5,8 +5,6 @@ use (
../../external/go-cmp
../../external/golang-protobuf
../../external/starlark-go
../../prebuilts/bazel/common/proto/analysis_v2
../../prebuilts/bazel/common/proto/build
../blueprint
)
@@ -15,7 +13,5 @@ replace (
github.com/google/blueprint v0.0.0 => ../blueprint
github.com/google/go-cmp v0.0.0 => ../../external/go-cmp
google.golang.org/protobuf v0.0.0 => ../../external/golang-protobuf
prebuilts/bazel/common/proto/analysis_v2 v0.0.0 => ../../prebuilts/bazel/common/proto/analysis_v2
prebuilts/bazel/common/proto/build v0.0.0 => ../../prebuilts/bazel/common/proto/build
go.starlark.net v0.0.0 => ../../external/starlark-go
)


@@ -15,7 +15,6 @@ bootstrap_go_package {
"paths_test.go",
],
deps: [
"soong-bazel",
"golang-protobuf-proto",
],
}


@@ -18,8 +18,6 @@ package shared
import (
"path/filepath"
"android/soong/bazel"
)
// A SharedPaths represents a list of paths that are shared between
@@ -49,11 +47,3 @@ func JoinPath(base string, rest ...string) string {
func TempDirForOutDir(outDir string) (tempPath string) {
return filepath.Join(outDir, ".temp")
}
// BazelMetricsFilename returns the bazel profile filename based
// on the action name. This is to help to store a set of bazel
// profiles since bazel may execute multiple times during a single
// build.
func BazelMetricsFilename(s SharedPaths, actionName bazel.RunName) string {
return filepath.Join(s.BazelMetricsDir(), actionName.String()+"_bazel_profile.gz")
}


@@ -27,7 +27,6 @@ import (
"android/soong/ui/tracer"
"android/soong/bazel"
"android/soong/ui/metrics"
"android/soong/ui/metrics/metrics_proto"
"android/soong/ui/status"
@@ -603,10 +602,6 @@ func runSoong(ctx Context, config Config) {
checkEnvironmentFile(ctx, soongBuildEnv, config.UsedEnvFile(soongBuildTag))
// Remove bazel files in the event that bazel is disabled for the build.
// These files may have been left over from a previous bazel-enabled build.
cleanBazelFiles(config)
if config.JsonModuleGraph() {
checkEnvironmentFile(ctx, soongBuildEnv, config.UsedEnvFile(jsonModuleGraphTag))
}
@@ -758,18 +753,6 @@ func loadSoongBuildMetrics(ctx Context, config Config, oldTimestamp time.Time) {
}
}
func cleanBazelFiles(config Config) {
files := []string{
shared.JoinPath(config.SoongOutDir(), "bp2build"),
shared.JoinPath(config.SoongOutDir(), "workspace"),
shared.JoinPath(config.SoongOutDir(), bazel.SoongInjectionDirName),
shared.JoinPath(config.OutDir(), "bazel")}
for _, f := range files {
os.RemoveAll(f)
}
}
func runMicrofactory(ctx Context, config Config, name string, pkg string, mapping map[string]string) {
ctx.BeginTrace(metrics.RunSoong, name)
defer ctx.EndTrace()