diff --git a/cmd/doodad/commands/convert.go b/cmd/doodad/commands/convert.go index d73b655..b2ad7cf 100644 --- a/cmd/doodad/commands/convert.go +++ b/cmd/doodad/commands/convert.go @@ -340,6 +340,9 @@ func imageToChunker(img image.Image, chroma render.Color, palette *level.Palette sort.Strings(sortedColors) for _, hex := range sortedColors { if _, ok := newColors[hex]; ok { + if err := palette.AddSwatch(uniqueColor[hex]); err != nil { + log.Error("Could not add more colors to the palette: %s", err) + panic(err.Error()) + } - palette.Swatches = append(palette.Swatches, uniqueColor[hex]) } } diff --git a/pkg/balance/feature_flags.go b/pkg/balance/feature_flags.go index 6f9805b..a109dc0 100644 --- a/pkg/balance/feature_flags.go +++ b/pkg/balance/feature_flags.go @@ -1,5 +1,35 @@ package balance +// Hard-coded feature flags. +const ( + // Enable "v1.5" compression in the MapAccessor Chunker. + // + // The original MapAccessor encodes a chunk to json using syntax like + // {"x,y": index} mapping coordinates to palette swatches. + // + // With compression on, it is encoded to a byte stream of x,y,index + // triplets. The game can read both formats and will follow this flag + // on all saves. NOTE: this applies to when we still use JSON format. + // If BinaryChunkerEnabled, map accessors are always compressed as they + // are written to .bin files instead of .json. + CompressMapAccessor = true + + // Enable "v2" binary storage of Chunk data in Zipfiles. + // + // This is a separate toggle to the CompressMapAccessor. Some possible + // variations of these flags include: + // + // - CompressMapAccessor=true alone, will write the compressed bytes + // still wrapped in the JSON format as a Base64 encoded string. + // - With BinaryChunkerEnabled=true: all chunks are encoded to + // binary and put in the zip as .bin instead of as .json files. + // MapAccessor is always compressed in binary mode. 
+ // + // If you set both flags to false, level zipfiles will use the classic + // json chunk format as before on save. + BinaryChunkerEnabled = true +) + // Feature Flags to turn on/off experimental content. var Feature = feature{ ///////// diff --git a/pkg/editor_ui_popups.go b/pkg/editor_ui_popups.go index fe6902c..b7820e7 100644 --- a/pkg/editor_ui_popups.go +++ b/pkg/editor_ui_popups.go @@ -310,7 +310,12 @@ func (u *EditorUI) SetupPopups(d *Doodle) { }, OnAddColor: func() { // Adding a new color to the palette. - sw := pal.AddSwatch() + sw, err := pal.NewSwatch() + if err != nil { + modal.Alert("Couldn't add this swatch: %s", err).WithTitle("Limit Reached") + return + } + log.Info("Added new palette color: %+v", sw) // Awkward but... reload this very same window. diff --git a/pkg/level/chunk.go b/pkg/level/chunk.go index 46a461c..85e2cb7 100644 --- a/pkg/level/chunk.go +++ b/pkg/level/chunk.go @@ -1,6 +1,8 @@ package level import ( + "bytes" + "encoding/binary" "encoding/json" "fmt" "image" @@ -16,13 +18,13 @@ import ( // Types of chunks. const ( - MapType int = iota + MapType uint64 = iota GridType ) // Chunk holds a single portion of the pixel canvas. type Chunk struct { - Type int // map vs. 2D array. + Type uint64 // map vs. 2D array. Accessor // Values told to it from higher up, not stored in JSON. @@ -43,7 +45,7 @@ type Chunk struct { // JSONChunk holds a lightweight (interface-free) copy of the Chunk for // unmarshalling JSON files from disk. type JSONChunk struct { - Type int `json:"type"` + Type uint64 `json:"type"` Data json.RawMessage `json:"data"` BinData interface{} `json:"-"` } @@ -58,6 +60,8 @@ type Accessor interface { Set(render.Point, *Swatch) error Delete(render.Point) error Len() int + MarshalBinary() ([]byte, error) + UnmarshalBinary([]byte) error MarshalJSON() ([]byte, error) UnmarshalJSON([]byte) error } @@ -327,6 +331,8 @@ func (c *Chunk) Usage(size int) float64 { } // MarshalJSON writes the chunk to JSON. 
+// +// DEPRECATED: MarshalBinary will encode chunks to a tighter binary format. func (c *Chunk) MarshalJSON() ([]byte, error) { data, err := c.Accessor.MarshalJSON() if err != nil { @@ -343,6 +349,8 @@ func (c *Chunk) MarshalJSON() ([]byte, error) { // UnmarshalJSON loads the chunk from JSON and uses the correct accessor to // parse the inner details. +// +// DEPRECATED in favor of binary marshalling. func (c *Chunk) UnmarshalJSON(b []byte) error { // Parse it generically so we can hand off the inner "data" object to the // right accessor for unmarshalling. @@ -360,3 +368,47 @@ func (c *Chunk) UnmarshalJSON(b []byte) error { return fmt.Errorf("Chunk.UnmarshalJSON: unsupported chunk type '%d'", c.Type) } } + +// MarshalBinary encodes a chunk to binary format. +// +// The binary format consists of one Uvarint for the chunk Type and then followed +// by whatever binary representation that chunk type encodes its data with. +func (c *Chunk) MarshalBinary() ([]byte, error) { + var ( + compressed []byte + ) + + // Encode the chunk type first. + compressed = binary.AppendUvarint(compressed, c.Type) + + // Encode the rest of the chunk. + data, err := c.Accessor.MarshalBinary() + if err != nil { + return nil, err + } + + compressed = append(compressed, data...) + return compressed, nil +} + +// UnmarshalBinary decodes a chunk from binary format. +func (c *Chunk) UnmarshalBinary(b []byte) error { + var reader = bytes.NewBuffer(b) + + // Read off the type byte. + chunkType, err := binary.ReadUvarint(reader) + if err != nil { + return err + } + + // Read off the remaining data. + + // Decode the rest of the byte stream. 
+ switch chunkType { + case MapType: + c.Accessor = NewMapAccessor() + return c.Accessor.UnmarshalBinary(reader.Bytes()) + default: + return fmt.Errorf("Chunk.UnmarshalBinary: unsupported chunk type '%d'", chunkType) + } +} diff --git a/pkg/level/chunk_map.go b/pkg/level/chunk_map.go index 2a391e4..99329f7 100644 --- a/pkg/level/chunk_map.go +++ b/pkg/level/chunk_map.go @@ -1,11 +1,14 @@ package level import ( + "bytes" + "encoding/binary" "encoding/json" "errors" "fmt" "sync" + "git.kirsle.net/SketchyMaze/doodle/pkg/balance" "git.kirsle.net/go/render" ) @@ -121,10 +124,35 @@ func (a *MapAccessor) Delete(p render.Point) error { // When serialized, the key is the "X,Y" coordinate and the value is the // swatch index of the Palette, rather than redundantly serializing out the // Swatch object for every pixel. +// +// DEPRECATED: in the Zipfile format chunks will be saved as binary files +// instead of with their JSON wrappers, so MarshalJSON will be phased out. func (a *MapAccessor) MarshalJSON() ([]byte, error) { a.mu.Lock() defer a.mu.Unlock() + // Write in the new compressed format. + if balance.CompressMapAccessor { + var compressed []byte + for point, sw := range a.grid { + var ( + x = int64(point.X) + y = int64(point.Y) + sw = uint64(sw.index) + entry = []byte{} + ) + + entry = binary.AppendVarint(entry, x) + entry = binary.AppendVarint(entry, y) + entry = binary.AppendUvarint(entry, sw) + + compressed = append(compressed, entry...) + } + + out, err := json.Marshal(compressed) + return out, err + } + dict := map[string]int{} for point, sw := range a.grid { dict[point.String()] = sw.Index() @@ -135,16 +163,59 @@ func (a *MapAccessor) MarshalJSON() ([]byte, error) { } // UnmarshalJSON to convert the chunk map back from JSON. +// +// DEPRECATED: in the Zipfile format chunks will be saved as binary files +// instead of with their JSON wrappers, so MarshalJSON will be phased out. 
func (a *MapAccessor) UnmarshalJSON(b []byte) error { a.mu.Lock() defer a.mu.Unlock() - var dict map[string]int + // Transparently upgrade the compression algorithm for this level. + // - Old style was a map[string]int like {"123,456": 4} mapping + // a coordinate to a palette index. + // - Now, coords and palettes are uint8 constrained so we can + // really tighten this up. + // For transparent upgrade, try and parse it the old way first. + var ( + dict map[string]int // old-style + compressed []byte // new-style + ) err := json.Unmarshal(b, &dict) if err != nil { - return err + // Now try the new way. + err = json.Unmarshal(b, &compressed) + if err != nil { + return err + } } + // New format: decompress the byte stream. + if compressed != nil { + // log.Debug("MapAccessor.Unmarshal: Reading %d bytes of compressed chunk data", len(compressed)) + + var ( + reader = bytes.NewBuffer(compressed) + ) + + for { + var ( + x, err1 = binary.ReadVarint(reader) + y, err2 = binary.ReadVarint(reader) + sw, err3 = binary.ReadUvarint(reader) + ) + + if err1 != nil || err2 != nil || err3 != nil { + // log.Error("Break read loop: %s; %s; %s", err1, err2, err3) + break + } + + point := render.NewPoint(int(x), int(y)) + a.grid[point] = NewSparseSwatch(int(sw)) + } + return nil + } + + // Old format: read the dict in. for coord, index := range dict { point, err := render.ParsePoint(coord) if err != nil { @@ -155,3 +226,69 @@ func (a *MapAccessor) UnmarshalJSON(b []byte) error { return nil } + +/* +MarshalBinary converts the chunk data to a binary representation, for +better compression compared to JSON. + +In the binary format each chunk begins with one Varint (the chunk Type) +followed by whatever wire format the chunk needs given its type. + +This function is related to the CompressMapAccessor config constant: +the MapAccessor compression boils down each point to a series of packed +varints: the X, Y coord (varint) followed by palette index (Uvarint). 
+ +The output of this function is just the compressed MapAccessor stream. +*/ +func (a *MapAccessor) MarshalBinary() ([]byte, error) { + a.mu.Lock() + defer a.mu.Unlock() + + // Write in the new compressed format. + var compressed []byte + for point, sw := range a.grid { + var ( + x = int64(point.X) + y = int64(point.Y) + sw = uint64(sw.index) + entry = []byte{} + ) + + entry = binary.AppendVarint(entry, x) + entry = binary.AppendVarint(entry, y) + entry = binary.AppendUvarint(entry, sw) + + compressed = append(compressed, entry...) + } + + return compressed, nil +} + +// UnmarshalBinary will decode a compressed MapAccessor byte stream. +func (a *MapAccessor) UnmarshalBinary(compressed []byte) error { + a.mu.Lock() + defer a.mu.Unlock() + + // New format: decompress the byte stream. + //log.Debug("MapAccessor.Unmarshal: Reading %d bytes of compressed chunk data", len(compressed)) + + var reader = bytes.NewBuffer(compressed) + + for { + var ( + x, err1 = binary.ReadVarint(reader) + y, err2 = binary.ReadVarint(reader) + sw, err3 = binary.ReadUvarint(reader) + ) + + if err1 != nil || err2 != nil || err3 != nil { + // log.Error("Break read loop: %s; %s; %s", err1, err2, err3) + break + } + + point := render.NewPoint(int(x), int(y)) + a.grid[point] = NewSparseSwatch(int(sw)) + } + + return nil +} diff --git a/pkg/level/chunker_zipfile.go b/pkg/level/chunker_zipfile.go index e29e528..f6612d2 100644 --- a/pkg/level/chunker_zipfile.go +++ b/pkg/level/chunker_zipfile.go @@ -7,6 +7,7 @@ import ( "regexp" "strconv" + "git.kirsle.net/SketchyMaze/doodle/pkg/balance" "git.kirsle.net/SketchyMaze/doodle/pkg/log" "git.kirsle.net/go/render" ) @@ -15,7 +16,7 @@ import ( // chunks of large levels need be in active memory. 
var ( - zipChunkfileRegexp = regexp.MustCompile(`^chunks/(\d+)/(.+?)\.json$`) + zipChunkfileRegexp = regexp.MustCompile(`^chunks/(\d+)/(.+?)\.(bin|json)$`) ) // MigrateZipfile is called on save to migrate old-style ChunkMap @@ -24,7 +25,10 @@ var ( func (c *Chunker) MigrateZipfile(zf *zip.Writer) error { // Identify if any chunks in active memory had been completely erased. var ( + // Chunks that have become empty and are to be REMOVED from zip. erasedChunks = map[render.Point]interface{}{} + + // Unique chunks we added to the zip file so we don't add duplicates. chunksZipped = map[render.Point]interface{}{} ) for coord, chunk := range c.Chunks { @@ -42,8 +46,19 @@ func (c *Chunker) MigrateZipfile(zf *zip.Writer) error { for _, file := range c.Zipfile.File { m := zipChunkfileRegexp.FindStringSubmatch(file.Name) if len(m) > 0 { - mLayer, _ := strconv.Atoi(m[1]) - coord := m[2] + var ( + mLayer, _ = strconv.Atoi(m[1]) + coord = m[2] + ext = m[3] + ) + + // Will we need to do a format conversion now? + var reencode bool + if ext == "json" && balance.BinaryChunkerEnabled { + reencode = true + } else if ext == "bin" && !balance.BinaryChunkerEnabled { + reencode = true + } // Not our layer, not our problem. if mLayer != c.Layer { @@ -77,14 +92,27 @@ func (c *Chunker) MigrateZipfile(zf *zip.Writer) error { } // Verify that this chunk file in the old ZIP was not empty. - if chunk, err := ChunkFromZipfile(c.Zipfile, c.Layer, point); err == nil && chunk.Len() == 0 { + chunk, err := ChunkFromZipfile(c.Zipfile, c.Layer, point) + if err == nil && chunk.Len() == 0 { log.Debug("Skip chunk %s (old zipfile chunk was empty)", coord) continue } - log.Debug("Copy existing chunk %s", file.Name) - if err := zf.Copy(file); err != nil { - return err + // Are we simply copying the existing chunk, or re-encoding it too? 
+ if reencode && chunk != nil { + log.Debug("Re-encoding existing chunk %s into target format", file.Name) + if err := chunk.Inflate(c.pal); err != nil { + return fmt.Errorf("couldn't inflate cold storage chunk for reencode: %s", err) + } + + if err := chunk.ToZipfile(zf, mLayer, point); err != nil { + return err + } + } else { + log.Debug("Copy existing chunk %s", file.Name) + if err := zf.Copy(file); err != nil { + return err + } } } } @@ -104,9 +132,9 @@ continue } - filename := fmt.Sprintf("chunks/%d/%s.json", c.Layer, coord.String()) - log.Debug("Flush in-memory chunks to %s", filename) - chunk.ToZipfile(zf, filename) + // Are we encoding chunks as JSON? + log.Debug("Flush in-memory chunks %s to zip", coord) + chunk.ToZipfile(zf, c.Layer, coord) } // Flush the chunkmap out. @@ -136,18 +164,41 @@ func (c *Chunker) GCSize() int { } // ToZipfile writes just a chunk's data into a zipfile. -func (c *Chunk) ToZipfile(zf *zip.Writer, filename string) error { +// +// It will write a file like "chunks/{layer}/{coord}.json" if using JSON +// format or a .bin file for binary format based on the BinaryChunkerEnabled +// game config constant. +func (c *Chunk) ToZipfile(zf *zip.Writer, layer int, coord render.Point) error { + // File name? + ext := ".json" + if balance.BinaryChunkerEnabled { + ext = ".bin" + } + filename := fmt.Sprintf("chunks/%d/%s%s", layer, coord, ext) + writer, err := zf.Create(filename) if err != nil { return err } - json, err := c.MarshalJSON() - if err != nil { - return err + // Are we writing it as binary format? + var data []byte + if balance.BinaryChunkerEnabled { + if bytes, err := c.MarshalBinary(); err != nil { + return err + } else { + data = bytes + } + } else { + if json, err := c.MarshalJSON(); err != nil { + return err + } else { + data = json + } } - n, err := writer.Write(json) + // Write the file contents to zip whether binary or json. 
+ n, err := writer.Write(data) if err != nil { return err } @@ -158,21 +209,37 @@ func (c *Chunk) ToZipfile(zf *zip.Writer, filename string) error { // ChunkFromZipfile loads a chunk from a zipfile. func ChunkFromZipfile(zf *zip.Reader, layer int, coord render.Point) (*Chunk, error) { - filename := fmt.Sprintf("chunks/%d/%s.json", layer, coord) + // File names? + var ( + binfile = fmt.Sprintf("chunks/%d/%s.bin", layer, coord) + jsonfile = fmt.Sprintf("chunks/%d/%s.json", layer, coord) + chunk = NewChunk() + ) - file, err := zf.Open(filename) - if err != nil { - return nil, err - } + // Read from the new binary format. + if file, err := zf.Open(binfile); err == nil { + log.Debug("Reading binary compressed chunk from %s", binfile) + bin, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } - bin, err := ioutil.ReadAll(file) - if err != nil { - return nil, err - } + err = chunk.UnmarshalBinary(bin) + if err != nil { + return nil, err + } + } else if file, err := zf.Open(jsonfile); err == nil { + log.Debug("Reading JSON encoded chunk from %s", jsonfile) + bin, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } - var chunk = NewChunk() - err = chunk.UnmarshalJSON(bin) - if err != nil { + err = chunk.UnmarshalJSON(bin) + if err != nil { + return nil, err + } + } else { return nil, err } diff --git a/pkg/level/palette.go b/pkg/level/palette.go index 5264135..1cec028 100644 --- a/pkg/level/palette.go +++ b/pkg/level/palette.go @@ -1,11 +1,15 @@ package level import ( + "errors" "fmt" "git.kirsle.net/go/render" ) +// Palettes are limited to uint8 in length, to aid image compression. +const PaletteSizeLimit = 256 + // DefaultPalette returns a sensible default palette. func DefaultPalette() *Palette { return &Palette{ @@ -98,8 +102,8 @@ func (p *Palette) FlushCaches() { p.update() } -// AddSwatch adds a new swatch to the palette. -func (p *Palette) AddSwatch() *Swatch { +// NewSwatch adds a new swatch to the palette. 
+func (p *Palette) NewSwatch() (*Swatch, error) { p.update() var ( @@ -107,6 +111,10 @@ func (p *Palette) AddSwatch() *Swatch { name = fmt.Sprintf("color %d", len(p.Swatches)) ) + if index >= PaletteSizeLimit { + return nil, errors.New("only 256 colors are supported in a palette") + } + p.Swatches = append(p.Swatches, &Swatch{ Name: name, Color: render.Magenta, @@ -114,7 +122,22 @@ }) p.byName[name] = index - return p.Swatches[index] + return p.Swatches[index], nil +} + +// AddSwatch adds a new swatch to the palette. +func (p *Palette) AddSwatch(swatch *Swatch) error { + p.update() + + var index = len(p.Swatches) + if len(p.Swatches) >= PaletteSizeLimit { + return errors.New("only 256 colors are supported in a palette") + } + + p.Swatches = append(p.Swatches, swatch) + p.byName[swatch.Name] = index + + return nil +} // Get a swatch by name.