Merge pull request #13 from inconshreveable/experimental
Proposed changes after experimenting with batchconvert
Commit f43e6bd89c
config.go (34 lines changed)

@@ -10,6 +10,17 @@ import (
 	"path/filepath"
 )
 
+// InputConfig defines options on a asset directory to be convert.
+type InputConfig struct {
+	// Path defines a directory containing asset files to be included
+	// in the generated output.
+	Path string
+
+	// Recusive defines whether subdirectories of Path
+	// should be recursively included in the conversion.
+	Recursive bool
+}
+
 // Config defines a set of options for the asset conversion.
 type Config struct {
 	// Name of the package to use. Defaults to 'main'.

@@ -21,10 +32,9 @@ type Config struct {
 	// and must follow the build tags syntax specified by the go tool.
 	Tags string
 
-	// Input defines the directory path, containing all asset files.
-	// This may contain sub directories, which will be included in the
-	// conversion.
-	Input string
+	// Input defines the directory path, containing all asset files as
+	// well as whether to recursively process assets in any sub directories.
+	Input []InputConfig
 
 	// Output defines the output file for the generated code.
 	// If left empty, this defaults to 'bindata.go' in the current

@@ -130,13 +140,15 @@ func (c *Config) validate() error {
 		return fmt.Errorf("Missing package name")
 	}
 
-	stat, err := os.Lstat(c.Input)
-	if err != nil {
-		return fmt.Errorf("Input path: %v", err)
-	}
+	for _, input := range c.Input {
+		stat, err := os.Lstat(input.Path)
+		if err != nil {
+			return fmt.Errorf("Failed to stat input path '%s': %v", input.Path, err)
+		}
 
-	if !stat.IsDir() {
-		return fmt.Errorf("Input path is not a directory.")
+		if !stat.IsDir() {
+			return fmt.Errorf("Input path '%s' is not a directory.", input.Path)
+		}
 	}
 
 	if len(c.Output) == 0 {

@@ -148,7 +160,7 @@ func (c *Config) validate() error {
 		c.Output = filepath.Join(cwd, "bindata.go")
 	}
 
-	stat, err = os.Lstat(c.Output)
+	stat, err := os.Lstat(c.Output)
 	if err != nil {
 		if !os.IsNotExist(err) {
 			return fmt.Errorf("Output path: %v", err)
convert.go

@@ -26,9 +26,11 @@ func Translate(c *Config) error {
 	}
 
 	// Locate all the assets.
-	err = findFiles(c.Input, c.Prefix, c.Recursive, &toc)
-	if err != nil {
-		return err
+	for _, input := range c.Input {
+		err = findFiles(input.Path, c.Prefix, input.Recursive, &toc)
+		if err != nil {
+			return err
+		}
 	}
 
 	// Create output file.
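Taken together, the Config and Translate changes let a single conversion run cover several asset directories, each with its own recursion setting. Below is a minimal sketch of driving the library with the new field, assuming the exported bindata.Config, bindata.InputConfig and bindata.Translate shown in this diff; the paths and package name are invented for the example.

package main

import (
	"log"

	"github.com/jteeuwen/go-bindata"
)

func main() {
	cfg := &bindata.Config{
		Package: "main",
		Output:  "bindata.go",
		Input: []bindata.InputConfig{
			{Path: "data/templates", Recursive: false}, // only files directly in data/templates
			{Path: "data/static", Recursive: true},     // include every subdirectory as well
		},
	}
	if err := bindata.Translate(cfg); err != nil {
		log.Fatalf("translate: %v", err)
	}
}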
debug.go (29 lines changed)

@@ -30,29 +30,18 @@ func writeDebug(w io.Writer, toc []Asset) error {
 // This targets debug builds.
 func writeDebugHeader(w io.Writer) error {
 	_, err := fmt.Fprintf(w, `import (
-	"bytes"
-	"io"
-	"log"
-	"os"
+	"fmt"
+	"io/ioutil"
 )
 
-// bindata_read reads the given file from disk.
-// It panics if anything went wrong.
-func bindata_read(path, name string) []byte {
-	fd, err := os.Open(path)
+// bindata_read reads the given file from disk. It returns
+// an error on failure.
+func bindata_read(path, name string) ([]byte, error) {
+	buf, err := ioutil.ReadFile(path)
 	if err != nil {
-		log.Fatalf("Read %%s: %%v", name, err)
+		err = fmt.Errorf("Error reading asset %%s at %%s: %%v", name, path, err)
 	}
-
-	defer fd.Close()
-
-	var buf bytes.Buffer
-	_, err = io.Copy(&buf, fd)
-	if err != nil {
-		log.Fatalf("Read %%s: %%v", name, err)
-	}
-
-	return buf.Bytes()
+	return buf, err
 }
 
 `)

@@ -66,7 +55,7 @@ func writeDebugAsset(w io.Writer, asset *Asset) error {
 	_, err := fmt.Fprintf(w, `
 // %s reads file data from disk.
 // It panics if something went wrong in the process.
-func %s() []byte {
+func %s() ([]byte, error) {
 	return bindata_read(
 		%q,
 		%q,
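As a rough illustration of the new debug templates: after writeDebugHeader and writeDebugAsset run, the generated debug-mode file (which also receives the fmt and io/ioutil imports from the header above) would contain code along these lines; the asset name, function name and absolute path are invented.

// bindata_read reads the given file from disk. It returns
// an error on failure.
func bindata_read(path, name string) ([]byte, error) {
	buf, err := ioutil.ReadFile(path)
	if err != nil {
		err = fmt.Errorf("Error reading asset %s at %s: %v", name, path, err)
	}
	return buf, err
}

// static_index_html reads file data from disk.
func static_index_html() ([]byte, error) {
	return bindata_read(
		"/home/user/project/static/index.html",
		"static/index.html",
	)
}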
go-bindata/main.go

@@ -10,6 +10,7 @@ import (
 	"github.com/jteeuwen/go-bindata"
 	"os"
 	"path/filepath"
+	"strings"
 )
 
 func main() {

@@ -41,7 +42,6 @@ func parseArgs() *bindata.Config {
 	flag.StringVar(&c.Tags, "tags", c.Tags, "Optional set of uild tags to include.")
 	flag.StringVar(&c.Prefix, "prefix", c.Prefix, "Optional path prefix to strip off asset names.")
 	flag.StringVar(&c.Package, "pkg", c.Package, "Package name to use in the generated code.")
-	flag.BoolVar(&c.Recursive, "r", c.Recursive, "Recursive processing of the target directory and all its sub-directories.")
 	flag.BoolVar(&c.NoMemCopy, "nomemcopy", c.NoMemCopy, "Use a .rodata hack to get rid of unnecessary memcopies. Refer to the documentation to see what implications this carries.")
 	flag.BoolVar(&c.NoCompress, "nocompress", c.NoCompress, "Assets will *not* be GZIP compressed when this flag is specified.")
 	flag.BoolVar(&version, "version", false, "Displays version information.")

@@ -54,15 +54,32 @@ func parseArgs() *bindata.Config {
 
 	// Make sure we have in/output paths.
 	if flag.NArg() == 0 {
-		fmt.Fprintf(os.Stderr, "Missing input path.\n")
+		fmt.Fprintf(os.Stderr, "Missing <input dir>\n\n")
+		flag.Usage()
 		os.Exit(1)
 	}
 
-	c.Input = filepath.Clean(flag.Arg(0))
+	input := filepath.Clean(flag.Arg(0))
 
+	c.Input = []bindata.InputConfig{parseInput(input)}
 	if flag.NArg() > 1 {
 		c.Output = filepath.Clean(flag.Arg(1))
 	}
 
 	return c
 }
+
+// parseRecursive determines whether the given path has a recrusive indicator and
+// returns a new path with the recursive indicator chopped off if it does.
+//
+// ex:
+//	/path/to/foo/... -> (/path/to/foo, true)
+//	/path/to/bar     -> (/path/to/bar, false)
+func parseInput(path string) bindata.InputConfig {
+	if strings.HasSuffix(path, "/...") {
+		return bindata.InputConfig{Path: path[:len(path)-4], Recursive: true}
+	} else {
+		return bindata.InputConfig{Path: path, Recursive: false}
+	}
+
+}
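The trailing "/..." marker is how the command line now expresses the Recursive option that the removed -r flag used to control. Below is a small self-contained sketch that mirrors the parseInput rule above; parseInput itself is unexported in the command, so the helper name and paths here are hypothetical.

package main

import (
	"fmt"
	"strings"

	"github.com/jteeuwen/go-bindata"
)

// toInput mirrors parseInput: a trailing "/..." marks the directory for
// recursive conversion and is stripped from the stored path.
func toInput(path string) bindata.InputConfig {
	if strings.HasSuffix(path, "/...") {
		return bindata.InputConfig{Path: strings.TrimSuffix(path, "/..."), Recursive: true}
	}
	return bindata.InputConfig{Path: path, Recursive: false}
}

func main() {
	fmt.Printf("%+v\n", toInput("data/static/...")) // {Path:data/static Recursive:true}
	fmt.Printf("%+v\n", toInput("data/static"))     // {Path:data/static Recursive:false}
}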
release.go (42 lines changed)

@@ -78,13 +78,13 @@ func header_compressed_nomemcopy(w io.Writer) error {
 	_, err := fmt.Fprintf(w, `import (
 	"bytes"
 	"compress/gzip"
+	"fmt"
 	"io"
-	"log"
 	"reflect"
 	"unsafe"
 )
 
-func bindata_read(data, name string) []byte {
+func bindata_read(data, name string) ([]byte, error) {
 	var empty [0]byte
 	sx := (*reflect.StringHeader)(unsafe.Pointer(&data))
 	b := empty[:]

@@ -95,7 +95,7 @@ func bindata_read(data, name string) []byte {
 
 	gz, err := gzip.NewReader(bytes.NewBuffer(b))
 	if err != nil {
-		log.Fatalf("Read %%q: %%v", name, err)
+		return nil, fmt.Errorf("Read %%q: %%v", name, err)
 	}
 
 	var buf bytes.Buffer

@@ -103,10 +103,10 @@ func bindata_read(data, name string) []byte {
 	gz.Close()
 
 	if err != nil {
-		log.Fatalf("Read %%q: %%v", name, err)
+		return nil, fmt.Errorf("Read %%q: %%v", name, err)
 	}
 
-	return buf.Bytes()
+	return buf.Bytes(), nil
 }
 
 `)

@@ -117,14 +117,14 @@ func header_compressed_memcopy(w io.Writer) error {
 	_, err := fmt.Fprintf(w, `import (
 	"bytes"
 	"compress/gzip"
+	"fmt"
 	"io"
-	"log"
 )
 
-func bindata_read(data []byte, name string) []byte {
+func bindata_read(data []byte, name string) ([]byte, error) {
 	gz, err := gzip.NewReader(bytes.NewBuffer(data))
 	if err != nil {
-		log.Fatalf("Read %%q: %%v", name, err)
+		return nil, fmt.Errorf("Read %%q: %%v", name, err)
 	}
 
 	var buf bytes.Buffer

@@ -132,10 +132,10 @@ func bindata_read(data []byte, name string) []byte {
 	gz.Close()
 
 	if err != nil {
-		log.Fatalf("Read %%q: %%v", name, err)
+		return nil, fmt.Errorf("Read %%q: %%v", name, err)
 	}
 
-	return buf.Bytes()
+	return buf.Bytes(), nil
 }
 
 `)

@@ -144,11 +144,12 @@ func bindata_read(data []byte, name string) []byte {
 
 func header_uncompressed_nomemcopy(w io.Writer) error {
 	_, err := fmt.Fprintf(w, `import (
+	"fmt"
 	"reflect"
 	"unsafe"
 )
 
-func bindata_read(data, name string) []byte {
+func bindata_read(data, name string) ([]byte, error) {
 	var empty [0]byte
 	sx := (*reflect.StringHeader)(unsafe.Pointer(&data))
 	b := empty[:]

@@ -156,7 +157,7 @@ func bindata_read(data, name string) []byte {
 	bx.Data = sx.Data
 	bx.Len = len(data)
 	bx.Cap = bx.Len
-	return b
+	return b, nil
 }
 
 `)

@@ -164,8 +165,11 @@ func bindata_read(data, name string) []byte {
 }
 
 func header_uncompressed_memcopy(w io.Writer) error {
-	// nop -- We require no imports or helper functions.
-	return nil
+	_, err := fmt.Fprintf(w, `import (
+	"fmt"
+)
+`)
+	return err
 }
 
 func compressed_nomemcopy(w io.Writer, asset *Asset, r io.Reader) error {

@@ -184,7 +188,7 @@ func compressed_nomemcopy(w io.Writer, asset *Asset, r io.Reader) error {
 
 	_, err = fmt.Fprintf(w, `"
 
-func %s() []byte {
+func %s() ([]byte, error) {
 	return bindata_read(
 		_%s,
 		%q,

@@ -196,7 +200,7 @@ func %s() []byte {
 }
 
 func compressed_memcopy(w io.Writer, asset *Asset, r io.Reader) error {
-	_, err := fmt.Fprintf(w, `func %s() []byte {
+	_, err := fmt.Fprintf(w, `func %s() ([]byte, error) {
 	return bindata_read([]byte{`, asset.Func)
 
 	if err != nil {

@@ -234,7 +238,7 @@ func uncompressed_nomemcopy(w io.Writer, asset *Asset, r io.Reader) error {
 
 	_, err = fmt.Fprintf(w, `"
 
-func %s() []byte {
+func %s() ([]byte, error) {
 	return bindata_read(
 		_%s,
 		%q,

@@ -246,7 +250,7 @@ func %s() []byte {
 }
 
 func uncompressed_memcopy(w io.Writer, asset *Asset, r io.Reader) error {
-	_, err := fmt.Fprintf(w, `func %s() []byte {
+	_, err := fmt.Fprintf(w, `func %s() ([]byte, error) {
 	return []byte{`, asset.Func)
 	if err != nil {
 		return err

@@ -258,7 +262,7 @@ func uncompressed_memcopy(w io.Writer, asset *Asset, r io.Reader) error {
 	}
 
 	_, err = fmt.Fprintf(w, `
-}
+}, nil
 }
 
 `)
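For illustration, with these template changes an uncompressed memcopy asset is rendered in the generated file roughly as follows; the function name and byte values are invented.

func static_logo_png() ([]byte, error) {
	return []byte{0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a}, nil
}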
toc.go (9 lines changed)

@@ -30,16 +30,17 @@ func writeTOC(w io.Writer, toc []Asset) error {
 func writeTOCHeader(w io.Writer) error {
 	_, err := fmt.Fprintf(w, `
 // Asset loads and returns the asset for the given name.
-// This returns nil of the asset could not be found.
-func Asset(name string) []byte {
+// It returns an error if the asset could not be found or
+// could not be loaded.
+func Asset(name string) ([]byte, error) {
 	if f, ok := _bindata[name]; ok {
 		return f()
 	}
-	return nil
+	return nil, fmt.Errorf("Asset %%s not found", name)
 }
 
 // _bindata is a table, holding each asset generator, mapped to its name.
-var _bindata = map[string] func() []byte {
+var _bindata = map[string] func() ([]byte, error) {
 `)
 	return err
 }
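With the new signature, code that consumes the generated package checks the returned error instead of testing for a nil slice. A minimal caller sketch, assuming a package that already contains a generated bindata.go; the asset name is an example.

package main

import (
	"fmt"
	"log"
)

func main() {
	data, err := Asset("static/index.html") // Asset is generated by go-bindata
	if err != nil {
		log.Fatalf("load asset: %v", err)
	}
	fmt.Printf("loaded %d bytes\n", len(data))
}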