Binary format for chunks in zipfiles

commit 03cd1d4ca0 (parent 0d8933513e)
@@ -340,6 +340,10 @@ func imageToChunker(img image.Image, chroma render.Color, palette *level.Palette
 	sort.Strings(sortedColors)
 	for _, hex := range sortedColors {
 		if _, ok := newColors[hex]; ok {
+			if err := palette.AddSwatch(uniqueColor[hex]); err != nil {
+				log.Error("Could not add more colors to the palette: %s", err)
+				panic(err.Error())
+			}
 			palette.Swatches = append(palette.Swatches, uniqueColor[hex])
 		}
 	}
@@ -1,5 +1,35 @@
 package balance
+
+// Hard-coded feature flags.
+const (
+	// Enable "v1.5" compression in the MapAccessor Chunker.
+	//
+	// The original MapAccessor encodes a chunk to JSON using syntax like
+	// {"x,y": index}, mapping coordinates to palette swatches.
+	//
+	// With compression on, it is encoded to a byte stream of x,y,index
+	// triplets. The game can read both formats and will follow this flag
+	// on all saves. NOTE: this applies only while we still use the JSON
+	// format. If BinaryChunkerEnabled, map accessors are always compressed
+	// as they are written to .bin files instead of .json.
+	CompressMapAccessor = true
+
+	// Enable "v2" binary storage of Chunk data in Zipfiles.
+	//
+	// This is a separate toggle from CompressMapAccessor. Some possible
+	// variations of these flags include:
+	//
+	// - CompressMapAccessor=true alone will write the compressed bytes
+	//   still wrapped in the JSON format, as a Base64 encoded string.
+	// - With BinaryChunkerEnabled=true, all chunks are encoded to
+	//   binary and put in the zip as .bin instead of as .json files.
+	//   MapAccessor is always compressed in binary mode.
+	//
+	// If you set both flags to false, level zipfiles will use the classic
+	// JSON chunk format as before on save.
+	BinaryChunkerEnabled = true
+)

 // Feature Flags to turn on/off experimental content.
 var Feature = feature{
 	/////////
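Note on the flag interplay above: the "Base64 encoded string" case falls out of encoding/json itself, which marshals a []byte value to a Base64 string. A standalone sketch of one x,y,index triplet as packed varints (the values are hypothetical, not code from this commit):

    package main

    import (
        "encoding/binary"
        "encoding/json"
        "fmt"
    )

    func main() {
        // Pixel at (-2, 3) using palette index 4, in the triplet format
        // described by the CompressMapAccessor comment.
        var stream []byte
        stream = binary.AppendVarint(stream, -2) // zig-zag encoded: 0x03
        stream = binary.AppendVarint(stream, 3)  // 0x06
        stream = binary.AppendUvarint(stream, 4) // 0x04

        // With CompressMapAccessor alone, []byte lands in JSON as Base64.
        out, _ := json.Marshal(stream)
        fmt.Println(string(out)) // "AwYE" -- the bytes 03 06 04 in Base64
    }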
@@ -310,7 +310,12 @@ func (u *EditorUI) SetupPopups(d *Doodle) {
 		},
 		OnAddColor: func() {
 			// Adding a new color to the palette.
-			sw := pal.AddSwatch()
+			sw, err := pal.NewSwatch()
+			if err != nil {
+				modal.Alert("Couldn't add this swatch: %s", err).WithTitle("Limit Reached")
+				return
+			}
+
 			log.Info("Added new palette color: %+v", sw)

 			// Awkward but... reload this very same window.
@@ -1,6 +1,8 @@
 package level

 import (
+	"bytes"
+	"encoding/binary"
 	"encoding/json"
 	"fmt"
 	"image"
@@ -16,13 +18,13 @@ import (

 // Types of chunks.
 const (
-	MapType int = iota
+	MapType uint64 = iota
 	GridType
 )

 // Chunk holds a single portion of the pixel canvas.
 type Chunk struct {
-	Type int // map vs. 2D array.
+	Type uint64 // map vs. 2D array.
 	Accessor

 	// Values told to it from higher up, not stored in JSON.
@@ -43,7 +45,7 @@ type Chunk struct {
 // JSONChunk holds a lightweight (interface-free) copy of the Chunk for
 // unmarshalling JSON files from disk.
 type JSONChunk struct {
-	Type    int             `json:"type"`
+	Type    uint64          `json:"type"`
 	Data    json.RawMessage `json:"data"`
 	BinData interface{}     `json:"-"`
 }
@@ -58,6 +60,8 @@ type Accessor interface {
 	Set(render.Point, *Swatch) error
 	Delete(render.Point) error
 	Len() int
+	MarshalBinary() ([]byte, error)
+	UnmarshalBinary([]byte) error
 	MarshalJSON() ([]byte, error)
 	UnmarshalJSON([]byte) error
 }
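With MarshalBinary and UnmarshalBinary added to the Accessor interface, every accessor implementation must now provide them or the package stops compiling. A compile-time guard makes that explicit at the declaration site; a minimal sketch (the assertion itself is illustrative, not part of this commit):

    // Fails to compile if *MapAccessor ever stops satisfying Accessor.
    var _ Accessor = (*MapAccessor)(nil)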
@@ -327,6 +331,8 @@ func (c *Chunk) Usage(size int) float64 {
 }

 // MarshalJSON writes the chunk to JSON.
+//
+// DEPRECATED: MarshalBinary will encode chunks to a tighter binary format.
 func (c *Chunk) MarshalJSON() ([]byte, error) {
 	data, err := c.Accessor.MarshalJSON()
 	if err != nil {
@@ -343,6 +349,8 @@ func (c *Chunk) MarshalJSON() ([]byte, error) {

 // UnmarshalJSON loads the chunk from JSON and uses the correct accessor to
 // parse the inner details.
+//
+// DEPRECATED in favor of binary marshalling.
 func (c *Chunk) UnmarshalJSON(b []byte) error {
 	// Parse it generically so we can hand off the inner "data" object to the
 	// right accessor for unmarshalling.
@@ -360,3 +368,47 @@ func (c *Chunk) UnmarshalJSON(b []byte) error {
 		return fmt.Errorf("Chunk.UnmarshalJSON: unsupported chunk type '%d'", c.Type)
 	}
 }
+
+// MarshalBinary encodes a chunk to binary format.
+//
+// The binary format consists of one Uvarint for the chunk Type, followed
+// by whatever binary representation that chunk type encodes its data with.
+func (c *Chunk) MarshalBinary() ([]byte, error) {
+	var (
+		compressed []byte
+	)
+
+	// Encode the chunk type first.
+	compressed = binary.AppendUvarint(compressed, c.Type)
+
+	// Encode the rest of the chunk.
+	data, err := c.Accessor.MarshalBinary()
+	if err != nil {
+		return nil, err
+	}
+
+	compressed = append(compressed, data...)
+	return compressed, nil
+}
+
+// UnmarshalBinary decodes a chunk from binary format.
+func (c *Chunk) UnmarshalBinary(b []byte) error {
+	var reader = bytes.NewBuffer(b)
+
+	// Read off the type prefix.
+	chunkType, err := binary.ReadUvarint(reader)
+	if err != nil {
+		return err
+	}
+
+	// Decode the rest of the byte stream.
+	switch chunkType {
+	case MapType:
+		c.Accessor = NewMapAccessor()
+		return c.Accessor.UnmarshalBinary(reader.Bytes())
+	default:
+		return fmt.Errorf("Chunk.UnmarshalBinary: unsupported chunk type '%d'", chunkType)
+	}
+}
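A round-trip sketch of the two new Chunk methods (hypothetical test code, assuming it sits in the level package alongside NewChunk):

    func ExampleChunkBinaryRoundTrip() {
        c := NewChunk() // defaults to MapType with a MapAccessor

        data, err := c.MarshalBinary()
        if err != nil {
            panic(err)
        }

        // The stream leads with the chunk Type as a Uvarint...
        typ, _ := binary.Uvarint(data)
        fmt.Println(typ == MapType) // true

        // ...which is what UnmarshalBinary uses to pick the accessor.
        var c2 Chunk
        if err := c2.UnmarshalBinary(data); err != nil {
            panic(err)
        }
    }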
@@ -1,11 +1,14 @@
 package level

 import (
+	"bytes"
+	"encoding/binary"
 	"encoding/json"
 	"errors"
 	"fmt"
 	"sync"

+	"git.kirsle.net/SketchyMaze/doodle/pkg/balance"
 	"git.kirsle.net/go/render"
 )
@@ -121,10 +124,35 @@ func (a *MapAccessor) Delete(p render.Point) error {
 // When serialized, the key is the "X,Y" coordinate and the value is the
 // swatch index of the Palette, rather than redundantly serializing out the
 // Swatch object for every pixel.
+//
+// DEPRECATED: in the Zipfile format chunks will be saved as binary files
+// instead of with their JSON wrappers, so MarshalJSON will be phased out.
 func (a *MapAccessor) MarshalJSON() ([]byte, error) {
 	a.mu.Lock()
 	defer a.mu.Unlock()
+
+	// Write in the new compressed format.
+	if balance.CompressMapAccessor {
+		var compressed []byte
+		for point, sw := range a.grid {
+			var (
+				x     = int64(point.X)
+				y     = int64(point.Y)
+				sw    = uint64(sw.index)
+				entry = []byte{}
+			)
+
+			entry = binary.AppendVarint(entry, x)
+			entry = binary.AppendVarint(entry, y)
+			entry = binary.AppendUvarint(entry, sw)
+
+			compressed = append(compressed, entry...)
+		}
+
+		out, err := json.Marshal(compressed)
+		return out, err
+	}

 	dict := map[string]int{}
 	for point, sw := range a.grid {
 		dict[point.String()] = sw.Index()
@@ -135,16 +163,59 @@ func (a *MapAccessor) MarshalJSON() ([]byte, error) {
 }

 // UnmarshalJSON to convert the chunk map back from JSON.
+//
+// DEPRECATED: in the Zipfile format chunks will be saved as binary files
+// instead of with their JSON wrappers, so UnmarshalJSON will be phased out.
 func (a *MapAccessor) UnmarshalJSON(b []byte) error {
 	a.mu.Lock()
 	defer a.mu.Unlock()

-	var dict map[string]int
+	// Transparently upgrade the compression algorithm for this level.
+	// - Old style was a map[string]int like {"123,456": 4}, mapping
+	//   a coordinate to a palette index.
+	// - New style packs coordinates and palette indexes into varints
+	//   so we can really tighten this up.
+	// For a transparent upgrade, try to parse it the old way first.
+	var (
+		dict       map[string]int // old-style
+		compressed []byte         // new-style
+	)
 	err := json.Unmarshal(b, &dict)
+	if err != nil {
+		// Now try the new way.
+		err = json.Unmarshal(b, &compressed)
 		if err != nil {
 			return err
 		}
+	}
+
+	// New format: decompress the byte stream.
+	if compressed != nil {
+		// log.Debug("MapAccessor.Unmarshal: Reading %d bytes of compressed chunk data", len(compressed))
+
+		var (
+			reader = bytes.NewBuffer(compressed)
+		)
+
+		for {
+			var (
+				x, err1  = binary.ReadVarint(reader)
+				y, err2  = binary.ReadVarint(reader)
+				sw, err3 = binary.ReadUvarint(reader)
+			)
+
+			// Check errors before touching the grid: on the final pass
+			// all three reads fail (io.EOF) and x, y, sw are zero, which
+			// would otherwise plant a spurious pixel at (0,0).
+			if err1 != nil || err2 != nil || err3 != nil {
+				// log.Error("Break read loop: %s; %s; %s", err1, err2, err3)
+				break
+			}
+
+			point := render.NewPoint(int(x), int(y))
+			a.grid[point] = NewSparseSwatch(int(sw))
+		}
+		return nil
+	}
+
+	// Old format: read the dict in.
 	for coord, index := range dict {
 		point, err := render.ParsePoint(coord)
 		if err != nil {
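Why the two-pass parse above works: the old format is a JSON object and the new format is a JSON string (Base64-encoded bytes), so exactly one of the two json.Unmarshal calls can succeed. A standalone illustration (hypothetical input):

    var dict map[string]int
    err := json.Unmarshal([]byte(`"AwYE"`), &dict) // fails: a string is not an object

    var compressed []byte
    err = json.Unmarshal([]byte(`"AwYE"`), &compressed)
    // err == nil; compressed == []byte{0x03, 0x06, 0x04}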
@@ -155,3 +226,69 @@ func (a *MapAccessor) UnmarshalJSON(b []byte) error {

 	return nil
 }
+
+/*
+MarshalBinary converts the chunk data to a binary representation, for
+better compression compared to JSON.
+
+In the binary format each chunk begins with one Uvarint (the chunk Type)
+followed by whatever wire format the chunk needs given its type.
+
+This function is related to the CompressMapAccessor config constant:
+the MapAccessor compression boils down each point to a series of packed
+varints: the X, Y coord (varint) followed by palette index (Uvarint).
+
+The output of this function is just the compressed MapAccessor stream.
+*/
+func (a *MapAccessor) MarshalBinary() ([]byte, error) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	// Write in the new compressed format.
+	var compressed []byte
+	for point, sw := range a.grid {
+		var (
+			x     = int64(point.X)
+			y     = int64(point.Y)
+			sw    = uint64(sw.index)
+			entry = []byte{}
+		)
+
+		entry = binary.AppendVarint(entry, x)
+		entry = binary.AppendVarint(entry, y)
+		entry = binary.AppendUvarint(entry, sw)
+
+		compressed = append(compressed, entry...)
+	}
+
+	return compressed, nil
+}
+
+// UnmarshalBinary will decode a compressed MapAccessor byte stream.
+func (a *MapAccessor) UnmarshalBinary(compressed []byte) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	// New format: decompress the byte stream.
+	// log.Debug("MapAccessor.Unmarshal: Reading %d bytes of compressed chunk data", len(compressed))
+
+	var reader = bytes.NewBuffer(compressed)
+
+	for {
+		var (
+			x, err1  = binary.ReadVarint(reader)
+			y, err2  = binary.ReadVarint(reader)
+			sw, err3 = binary.ReadUvarint(reader)
+		)
+
+		// Check errors before touching the grid: on the final pass the
+		// reads fail (io.EOF) and would yield a spurious (0,0) pixel.
+		if err1 != nil || err2 != nil || err3 != nil {
+			// log.Error("Break read loop: %s; %s; %s", err1, err2, err3)
+			break
+		}
+
+		point := render.NewPoint(int(x), int(y))
+		a.grid[point] = NewSparseSwatch(int(sw))
+	}
+
+	return nil
+}
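Decoding one triplet back out, mirroring the wire format above (standalone sketch with hypothetical bytes, not repo code):

    reader := bytes.NewBuffer([]byte{0x03, 0x06, 0x04})
    x, _ := binary.ReadVarint(reader)   // -2 (zig-zag decoded)
    y, _ := binary.ReadVarint(reader)   // 3
    sw, _ := binary.ReadUvarint(reader) // 4
    fmt.Println(x, y, sw)               // -2 3 4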
@@ -7,6 +7,7 @@ import (
 	"regexp"
 	"strconv"

+	"git.kirsle.net/SketchyMaze/doodle/pkg/balance"
 	"git.kirsle.net/SketchyMaze/doodle/pkg/log"
 	"git.kirsle.net/go/render"
 )
@@ -15,7 +16,7 @@ import (
 // chunks of large levels need be in active memory.

 var (
-	zipChunkfileRegexp = regexp.MustCompile(`^chunks/(\d+)/(.+?)\.json$`)
+	zipChunkfileRegexp = regexp.MustCompile(`^chunks/(\d+)/(.+?)\.(bin|json)$`)
 )

 // MigrateZipfile is called on save to migrate old-style ChunkMap
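For reference, the widened pattern now captures the file extension as a third group; a quick sketch of what a match yields (the file name is hypothetical):

    m := zipChunkfileRegexp.FindStringSubmatch("chunks/0/120,-56.bin")
    // m[1] == "0" (layer), m[2] == "120,-56" (coord), m[3] == "bin" (format)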
@@ -24,7 +25,10 @@ var (
 func (c *Chunker) MigrateZipfile(zf *zip.Writer) error {
 	// Identify if any chunks in active memory had been completely erased.
 	var (
+		// Chunks that have become empty and are to be REMOVED from zip.
 		erasedChunks = map[render.Point]interface{}{}

+		// Unique chunks we added to the zip file so we don't add duplicates.
 		chunksZipped = map[render.Point]interface{}{}
 	)
 	for coord, chunk := range c.Chunks {
@@ -42,8 +46,19 @@ func (c *Chunker) MigrateZipfile(zf *zip.Writer) error {
 		for _, file := range c.Zipfile.File {
 			m := zipChunkfileRegexp.FindStringSubmatch(file.Name)
 			if len(m) > 0 {
-				mLayer, _ := strconv.Atoi(m[1])
-				coord := m[2]
+				var (
+					mLayer, _ = strconv.Atoi(m[1])
+					coord     = m[2]
+					ext       = m[3]
+				)
+
+				// Will we need to do a format conversion now?
+				var reencode bool
+				if ext == "json" && balance.BinaryChunkerEnabled {
+					reencode = true
+				} else if ext == "bin" && !balance.BinaryChunkerEnabled {
+					reencode = true
+				}

 				// Not our layer, not our problem.
 				if mLayer != c.Layer {
@@ -77,17 +92,30 @@ func (c *Chunker) MigrateZipfile(zf *zip.Writer) error {
 				}

 				// Verify that this chunk file in the old ZIP was not empty.
-				if chunk, err := ChunkFromZipfile(c.Zipfile, c.Layer, point); err == nil && chunk.Len() == 0 {
+				chunk, err := ChunkFromZipfile(c.Zipfile, c.Layer, point)
+				if err == nil && chunk.Len() == 0 {
 					log.Debug("Skip chunk %s (old zipfile chunk was empty)", coord)
 					continue
 				}
+
+				// Are we simply copying the existing chunk, or re-encoding it too?
+				if reencode {
+					log.Debug("Re-encoding existing chunk %s into target format", file.Name)
+					if err := chunk.Inflate(c.pal); err != nil {
+						return fmt.Errorf("couldn't inflate cold storage chunk for reencode: %s", err)
+					}
+
+					if err := chunk.ToZipfile(zf, mLayer, point); err != nil {
+						return err
+					}
+				} else {
 					log.Debug("Copy existing chunk %s", file.Name)
 					if err := zf.Copy(file); err != nil {
 						return err
 					}
+				}
 			}
 		}
 	} else {
 		log.Debug("Chunker.MigrateZipfile: the drawing did not give me a zipfile!")
 	}
@@ -104,9 +132,9 @@ func (c *Chunker) MigrateZipfile(zf *zip.Writer) error {
 			continue
 		}

-		filename := fmt.Sprintf("chunks/%d/%s.json", c.Layer, coord.String())
-		log.Debug("Flush in-memory chunks to %s", filename)
-		chunk.ToZipfile(zf, filename)
+		// ToZipfile now picks the .bin or .json file name for us.
+		log.Debug("Flush in-memory chunk %s to zip", coord)
+		chunk.ToZipfile(zf, c.Layer, coord)
 	}

 	// Flush the chunkmap out.
@@ -136,18 +164,41 @@ func (c *Chunker) GCSize() int {
 }

 // ToZipfile writes just a chunk's data into a zipfile.
-func (c *Chunk) ToZipfile(zf *zip.Writer, filename string) error {
+//
+// It writes a file like "chunks/{layer}/{coord}.json" when using the JSON
+// format, or a .bin file for the binary format, based on the
+// BinaryChunkerEnabled game config constant.
+func (c *Chunk) ToZipfile(zf *zip.Writer, layer int, coord render.Point) error {
+	// File name?
+	ext := ".json"
+	if balance.BinaryChunkerEnabled {
+		ext = ".bin"
+	}
+	filename := fmt.Sprintf("chunks/%d/%s%s", layer, coord, ext)
+
 	writer, err := zf.Create(filename)
 	if err != nil {
 		return err
 	}

-	json, err := c.MarshalJSON()
-	if err != nil {
-		return err
+	// Are we writing it as binary format?
+	var data []byte
+	if balance.BinaryChunkerEnabled {
+		if bytes, err := c.MarshalBinary(); err != nil {
+			return err
+		} else {
+			data = bytes
+		}
+	} else {
+		if json, err := c.MarshalJSON(); err != nil {
+			return err
+		} else {
+			data = json
+		}
 	}

-	n, err := writer.Write(json)
+	// Write the file contents to zip whether binary or json.
+	n, err := writer.Write(data)
 	if err != nil {
 		return err
 	}
@@ -158,23 +209,39 @@ func (c *Chunk) ToZipfile(zf *zip.Writer, layer int, coord render.Point) error {

 // ChunkFromZipfile loads a chunk from a zipfile.
 func ChunkFromZipfile(zf *zip.Reader, layer int, coord render.Point) (*Chunk, error) {
-	filename := fmt.Sprintf("chunks/%d/%s.json", layer, coord)
-
-	file, err := zf.Open(filename)
-	if err != nil {
-		return nil, err
-	}
+	// File names?
+	var (
+		binfile  = fmt.Sprintf("chunks/%d/%s.bin", layer, coord)
+		jsonfile = fmt.Sprintf("chunks/%d/%s.json", layer, coord)
+		chunk    = NewChunk()
+	)

+	// Read from the new binary format.
+	if file, err := zf.Open(binfile); err == nil {
+		log.Debug("Reading binary compressed chunk from %s", binfile)
+		bin, err := ioutil.ReadAll(file)
+		if err != nil {
+			return nil, err
+		}
+
+		err = chunk.UnmarshalBinary(bin)
+		if err != nil {
+			return nil, err
+		}
+	} else if file, err := zf.Open(jsonfile); err == nil {
+		log.Debug("Reading JSON encoded chunk from %s", jsonfile)
 		bin, err := ioutil.ReadAll(file)
 		if err != nil {
 			return nil, err
 		}

-	var chunk = NewChunk()
 		err = chunk.UnmarshalJSON(bin)
 		if err != nil {
 			return nil, err
 		}
+	} else {
+		return nil, err
+	}

 	return chunk, nil
 }
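A hypothetical usage sketch for reading one chunk back out of a saved level (archive/zip from the standard library; the file name and coordinate are illustrative):

    r, err := zip.OpenReader("level.zip")
    if err != nil {
        panic(err)
    }
    defer r.Close()

    // Layer 0, chunk coordinate (0,0); tries .bin first, then .json.
    chunk, err := ChunkFromZipfile(&r.Reader, 0, render.NewPoint(0, 0))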
@@ -1,11 +1,15 @@
 package level

 import (
+	"errors"
 	"fmt"

 	"git.kirsle.net/go/render"
 )

+// Palettes are limited to uint8 in length, to aid image compression.
+const PaletteSizeLimit = 256

 // DefaultPalette returns a sensible default palette.
 func DefaultPalette() *Palette {
 	return &Palette{
@@ -98,8 +102,8 @@ func (p *Palette) FlushCaches() {
 	p.update()
 }

-// AddSwatch adds a new swatch to the palette.
-func (p *Palette) AddSwatch() *Swatch {
+// NewSwatch creates a new swatch and adds it to the palette.
+func (p *Palette) NewSwatch() (*Swatch, error) {
 	p.update()

 	var (
@@ -107,6 +111,10 @@ func (p *Palette) AddSwatch() *Swatch {
 		name = fmt.Sprintf("color %d", len(p.Swatches))
 	)

+	if index >= PaletteSizeLimit {
+		return nil, errors.New("only 256 colors are supported in a palette")
+	}
+
 	p.Swatches = append(p.Swatches, &Swatch{
 		Name:  name,
 		Color: render.Magenta,
@@ -114,7 +122,22 @@ func (p *Palette) AddSwatch() *Swatch {
 	})
 	p.byName[name] = index

-	return p.Swatches[index]
+	return p.Swatches[index], nil
+}
+
+// AddSwatch adds an existing swatch to the palette.
+func (p *Palette) AddSwatch(swatch *Swatch) error {
+	p.update()
+
+	var index = len(p.Swatches)
+	if len(p.Swatches) >= PaletteSizeLimit {
+		return errors.New("only 256 colors are supported in a palette")
+	}
+
+	p.Swatches = append(p.Swatches, swatch)
+	p.byName[swatch.Name] = index
+
+	return nil
 }

 // Get a swatch by name.
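A short usage sketch of the two entry points above (hypothetical values; the Swatch fields are the ones visible in this diff):

    pal := DefaultPalette()

    // NewSwatch allocates a "color N" placeholder, magenta by default.
    sw, err := pal.NewSwatch()
    if err != nil {
        // the palette already holds PaletteSizeLimit colors
    }
    _ = sw

    // AddSwatch registers a caller-built swatch, as imageToChunker does.
    err = pal.AddSwatch(&Swatch{Name: "lava", Color: render.Magenta})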