Merge pull request #13 from inconshreveable/experimental

Proposed changes after experimenting with batchconvert
jimt 2013-11-12 01:41:44 -08:00
commit f43e6bd89c
6 changed files with 85 additions and 60 deletions

View File

@ -10,6 +10,17 @@ import (
"path/filepath"
)
// InputConfig defines options on an asset directory to be converted.
type InputConfig struct {
// Path defines a directory containing asset files to be included
// in the generated output.
Path string
// Recursive defines whether subdirectories of Path
// should be recursively included in the conversion.
Recursive bool
}
// Config defines a set of options for the asset conversion.
type Config struct {
// Name of the package to use. Defaults to 'main'.
@ -21,10 +32,9 @@ type Config struct {
// and must follow the build tags syntax specified by the go tool.
Tags string
// Input defines the directory path, containing all asset files.
// This may contain sub directories, which will be included in the
// conversion.
Input string
// Input defines the directory paths containing the asset files, as well
// as whether to recursively process assets in any subdirectories.
Input []InputConfig
// Output defines the output file for the generated code.
// If left empty, this defaults to 'bindata.go' in the current
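For context, a minimal sketch of how the new multi-input configuration might be driven from Go rather than from the command line. It assumes the import path github.com/jteeuwen/go-bindata used by the CLI in this PR; the asset directories are hypothetical.

package main

import (
	"log"

	"github.com/jteeuwen/go-bindata"
)

func main() {
	cfg := &bindata.Config{
		Package: "main",
		Output:  "bindata.go",
		// One InputConfig per asset directory; recursion is now a
		// per-directory choice instead of a single global flag.
		Input: []bindata.InputConfig{
			{Path: "data/templates", Recursive: true}, // hypothetical paths
			{Path: "data/static", Recursive: false},
		},
	}
	if err := bindata.Translate(cfg); err != nil {
		log.Fatal(err)
	}
}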
@ -130,13 +140,15 @@ func (c *Config) validate() error {
return fmt.Errorf("Missing package name")
}
stat, err := os.Lstat(c.Input)
if err != nil {
return fmt.Errorf("Input path: %v", err)
}
for _, input := range c.Input {
stat, err := os.Lstat(input.Path)
if err != nil {
return fmt.Errorf("Failed to stat input path '%s': %v", input.Path, err)
}
if !stat.IsDir() {
return fmt.Errorf("Input path is not a directory.")
if !stat.IsDir() {
return fmt.Errorf("Input path '%s' is not a directory.", input.Path)
}
}
if len(c.Output) == 0 {
@ -148,7 +160,7 @@ func (c *Config) validate() error {
c.Output = filepath.Join(cwd, "bindata.go")
}
stat, err = os.Lstat(c.Output)
stat, err := os.Lstat(c.Output)
if err != nil {
if !os.IsNotExist(err) {
return fmt.Errorf("Output path: %v", err)

View File

@ -26,9 +26,11 @@ func Translate(c *Config) error {
}
// Locate all the assets.
err = findFiles(c.Input, c.Prefix, c.Recursive, &toc)
if err != nil {
return err
for _, input := range c.Input {
err = findFiles(input.Path, c.Prefix, input.Recursive, &toc)
if err != nil {
return err
}
}
// Create output file.

View File

@ -30,29 +30,18 @@ func writeDebug(w io.Writer, toc []Asset) error {
// This targets debug builds.
func writeDebugHeader(w io.Writer) error {
_, err := fmt.Fprintf(w, `import (
"bytes"
"io"
"log"
"os"
"fmt"
"io/ioutil"
)
// bindata_read reads the given file from disk.
// It panics if anything went wrong.
func bindata_read(path, name string) []byte {
fd, err := os.Open(path)
// bindata_read reads the given file from disk. It returns
// an error on failure.
func bindata_read(path, name string) ([]byte, error) {
buf, err := ioutil.ReadFile(path)
if err != nil {
log.Fatalf("Read %%s: %%v", name, err)
err = fmt.Errorf("Error reading asset %%s at %%s: %%v", name, path, err)
}
defer fd.Close()
var buf bytes.Buffer
_, err = io.Copy(&buf, fd)
if err != nil {
log.Fatalf("Read %%s: %%v", name, err)
}
return buf.Bytes()
return buf, err
}
`)
@ -66,7 +55,7 @@ func writeDebugAsset(w io.Writer, asset *Asset) error {
_, err := fmt.Fprintf(w, `
// %s reads file data from disk.
// It returns an error on failure.
func %s() []byte {
func %s() ([]byte, error) {
return bindata_read(
%q,
%q,
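Filled in, the debug-mode accessor generated for a hypothetical asset stored under data/config.json would come out roughly as follows; the absolute path and the sanitized function name are illustrative, not taken from this PR.

// data_config_json reads file data from disk.
// It returns an error on failure.
func data_config_json() ([]byte, error) {
	return bindata_read(
		"/home/user/project/data/config.json",
		"data/config.json",
	)
}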

View File

@ -10,6 +10,7 @@ import (
"github.com/jteeuwen/go-bindata"
"os"
"path/filepath"
"strings"
)
func main() {
@ -41,7 +42,6 @@ func parseArgs() *bindata.Config {
flag.StringVar(&c.Tags, "tags", c.Tags, "Optional set of uild tags to include.")
flag.StringVar(&c.Prefix, "prefix", c.Prefix, "Optional path prefix to strip off asset names.")
flag.StringVar(&c.Package, "pkg", c.Package, "Package name to use in the generated code.")
flag.BoolVar(&c.Recursive, "r", c.Recursive, "Recursive processing of the target directory and all its sub-directories.")
flag.BoolVar(&c.NoMemCopy, "nomemcopy", c.NoMemCopy, "Use a .rodata hack to get rid of unnecessary memcopies. Refer to the documentation to see what implications this carries.")
flag.BoolVar(&c.NoCompress, "nocompress", c.NoCompress, "Assets will *not* be GZIP compressed when this flag is specified.")
flag.BoolVar(&version, "version", false, "Displays version information.")
@ -54,15 +54,32 @@ func parseArgs() *bindata.Config {
// Make sure we have in/output paths.
if flag.NArg() == 0 {
fmt.Fprintf(os.Stderr, "Missing input path.\n")
fmt.Fprintf(os.Stderr, "Missing <input dir>\n\n")
flag.Usage()
os.Exit(1)
}
c.Input = filepath.Clean(flag.Arg(0))
input := filepath.Clean(flag.Arg(0))
c.Input = []bindata.InputConfig{parseInput(input)}
if flag.NArg() > 1 {
c.Output = filepath.Clean(flag.Arg(1))
}
return c
}
// parseInput determines whether the given path has a recursive indicator and
// returns a new path with the recursive indicator chopped off if it does.
//
// ex:
// /path/to/foo/... -> (/path/to/foo, true)
// /path/to/bar -> (/path/to/bar, false)
func parseInput(path string) bindata.InputConfig {
if strings.HasSuffix(path, "/...") {
return bindata.InputConfig{Path: path[:len(path)-4], Recursive: true}
} else {
return bindata.InputConfig{Path: path, Recursive: false}
}
}
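In practice this replaces the removed -r flag: recursion is now requested per input directory by appending /... to its path, similar to the go tool's package patterns. For example, with hypothetical paths:

go-bindata data/... bindata.go   (convert data and all of its subdirectories)
go-bindata data bindata.go       (convert only files directly under data)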

View File

@ -78,13 +78,13 @@ func header_compressed_nomemcopy(w io.Writer) error {
_, err := fmt.Fprintf(w, `import (
"bytes"
"compress/gzip"
"fmt"
"io"
"log"
"reflect"
"unsafe"
)
func bindata_read(data, name string) []byte {
func bindata_read(data, name string) ([]byte, error) {
var empty [0]byte
sx := (*reflect.StringHeader)(unsafe.Pointer(&data))
b := empty[:]
@ -95,7 +95,7 @@ func bindata_read(data, name string) []byte {
gz, err := gzip.NewReader(bytes.NewBuffer(b))
if err != nil {
log.Fatalf("Read %%q: %%v", name, err)
return nil, fmt.Errorf("Read %%q: %%v", name, err)
}
var buf bytes.Buffer
@ -103,10 +103,10 @@ func bindata_read(data, name string) []byte {
gz.Close()
if err != nil {
log.Fatalf("Read %%q: %%v", name, err)
return nil, fmt.Errorf("Read %%q: %%v", name, err)
}
return buf.Bytes()
return buf.Bytes(), nil
}
`)
@ -117,14 +117,14 @@ func header_compressed_memcopy(w io.Writer) error {
_, err := fmt.Fprintf(w, `import (
"bytes"
"compress/gzip"
"fmt"
"io"
"log"
)
func bindata_read(data []byte, name string) []byte {
func bindata_read(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
log.Fatalf("Read %%q: %%v", name, err)
return nil, fmt.Errorf("Read %%q: %%v", name, err)
}
var buf bytes.Buffer
@ -132,10 +132,10 @@ func bindata_read(data []byte, name string) []byte {
gz.Close()
if err != nil {
log.Fatalf("Read %%q: %%v", name, err)
return nil, fmt.Errorf("Read %%q: %%v", name, err)
}
return buf.Bytes()
return buf.Bytes(), nil
}
`)
@ -144,11 +144,12 @@ func bindata_read(data []byte, name string) []byte {
func header_uncompressed_nomemcopy(w io.Writer) error {
_, err := fmt.Fprintf(w, `import (
"fmt"
"reflect"
"unsafe"
)
func bindata_read(data, name string) []byte {
func bindata_read(data, name string) ([]byte, error) {
var empty [0]byte
sx := (*reflect.StringHeader)(unsafe.Pointer(&data))
b := empty[:]
@ -156,7 +157,7 @@ func bindata_read(data, name string) []byte {
bx.Data = sx.Data
bx.Len = len(data)
bx.Cap = bx.Len
return b
return b, nil
}
`)
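The no-memcopy helpers above work by aliasing the bytes of a string constant compiled into the binary instead of copying them. A standalone sketch of that technique, illustrative only and not this PR's exact template (the function name is made up):

package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// stringBytes returns a []byte that shares the backing memory of s.
// Nothing is copied; the result must be treated as read-only, because
// string data may live in the binary's read-only data segment.
func stringBytes(s string) []byte {
	var empty [0]byte
	sx := (*reflect.StringHeader)(unsafe.Pointer(&s))
	b := empty[:]
	bx := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bx.Data = sx.Data
	bx.Len = len(s)
	bx.Cap = bx.Len
	return b
}

func main() {
	b := stringBytes("hello, bindata")
	fmt.Println(len(b), string(b)) // reading is fine; writing to b could fault
}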
@ -164,8 +165,11 @@ func bindata_read(data, name string) []byte {
}
func header_uncompressed_memcopy(w io.Writer) error {
// nop -- We require no imports or helper functions.
return nil
_, err := fmt.Fprintf(w, `import (
"fmt"
)
`)
return err
}
func compressed_nomemcopy(w io.Writer, asset *Asset, r io.Reader) error {
@ -184,7 +188,7 @@ func compressed_nomemcopy(w io.Writer, asset *Asset, r io.Reader) error {
_, err = fmt.Fprintf(w, `"
func %s() []byte {
func %s() ([]byte, error) {
return bindata_read(
_%s,
%q,
@ -196,7 +200,7 @@ func %s() []byte {
}
func compressed_memcopy(w io.Writer, asset *Asset, r io.Reader) error {
_, err := fmt.Fprintf(w, `func %s() []byte {
_, err := fmt.Fprintf(w, `func %s() ([]byte, error) {
return bindata_read([]byte{`, asset.Func)
if err != nil {
@ -234,7 +238,7 @@ func uncompressed_nomemcopy(w io.Writer, asset *Asset, r io.Reader) error {
_, err = fmt.Fprintf(w, `"
func %s() []byte {
func %s() ([]byte, error) {
return bindata_read(
_%s,
%q,
@ -246,7 +250,7 @@ func %s() []byte {
}
func uncompressed_memcopy(w io.Writer, asset *Asset, r io.Reader) error {
_, err := fmt.Fprintf(w, `func %s() []byte {
_, err := fmt.Fprintf(w, `func %s() ([]byte, error) {
return []byte{`, asset.Func)
if err != nil {
return err
@ -258,7 +262,7 @@ func uncompressed_memcopy(w io.Writer, asset *Asset, r io.Reader) error {
}
_, err = fmt.Fprintf(w, `
}
}, nil
}
`)
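For comparison with the debug-mode form shown earlier, the release-mode memcopy output for the same hypothetical asset would look roughly like this (byte values invented and abbreviated):

func data_config_json() ([]byte, error) {
	return []byte{0x7b, 0x22, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x3a /* ... */}, nil
}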

toc.go
View File

@ -30,16 +30,17 @@ func writeTOC(w io.Writer, toc []Asset) error {
func writeTOCHeader(w io.Writer) error {
_, err := fmt.Fprintf(w, `
// Asset loads and returns the asset for the given name.
// This returns nil of the asset could not be found.
func Asset(name string) []byte {
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
if f, ok := _bindata[name]; ok {
return f()
}
return nil
return nil, fmt.Errorf("Asset %%s not found", name)
}
// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string] func() []byte {
var _bindata = map[string] func() ([]byte, error) {
`)
return err
}
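Since Asset now returns ([]byte, error), callers that previously checked for nil must handle the error instead. A minimal consumer sketch, assuming the generated bindata.go lives in the caller's package and that a hypothetical asset named data/config.json was bundled:

package main

import (
	"fmt"
	"log"
)

func main() {
	// Asset is provided by the generated bindata.go in this package.
	data, err := Asset("data/config.json") // hypothetical asset name
	if err != nil {
		// The name is unknown, or (in debug builds) the file could not
		// be read from disk.
		log.Fatalf("asset: %v", err)
	}
	fmt.Printf("loaded %d bytes\n", len(data))
}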