Zipfiles as File Format for Levels and Doodads

To further optimize memory usage for large levels, Levels and Doodads
can now read and write a ZIP file format on disk, with their chunks
stored as external files inside the zip.
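
Sketch of the resulting archive layout, based on the chunks/<layer>/<coordinate>.json
paths used in the new zipfile code below (doodads use doodad.json instead of
level.json; the exact chunk filename is whatever render.Point's string form is):

    example.level                 (a ZIP archive)
        level.json                the "header": palette, actors, settings, ...
        chunks/
            0/                    one folder per layer (levels only use layer 0)
                <coord>.json      one JSON file per chunk coordinate
                <coord>.json
                ...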

Existing doodads and levels can still load as normal, and will be
converted into ZIP files on the next save:

* The Chunker.ChunkMap, which used to hold ALL chunks in the main json/gz
  file, now becomes the cache of "hot chunks" loaded from the ZIP. If there
  is a ZIP file, chunks not accessed recently are flushed from the ChunkMap
  to save on memory (see the sketch after this list).
* During save, the ChunkMap is flushed to the ZIP along with any not-loaded
  chunks from the previous zipfile, so legacy levels "just work" when saved,
  and levels loaded FROM a ZIP manage their ChunkMap hot memory more
  carefully.
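
As an illustration, the cache-then-zip lookup boils down to roughly this
(condensed from the new Chunker.GetChunk further down; locking, palette
inflation and the LRU cap are omitted, and the function name here is only
for the sketch):

    // getChunkSketch: condensed illustration of Chunker.GetChunk.
    // Serve a chunk from the hot ChunkMap if cached, else fall back
    // to the zipfile and cache whatever was read.
    func (c *Chunker) getChunkSketch(p render.Point) (*Chunk, bool) {
        if chunk, ok := c.Chunks[p]; ok {
            c.logChunkAccess(p, chunk) // mark it "hot" this tick
            return chunk, true
        }
        if c.Zipfile != nil {
            if chunk, err := ChunkFromZipfile(c.Zipfile, c.Layer, p); err == nil {
                c.SetChunk(p, chunk) // cache it for next time
                return chunk, true
            }
        }
        return nil, false
    }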

Memory savings observed on "Azulian Tag - Forest.level":

* Before: the old level format loaded 1716 MB into RAM and showed a slow
  loading screen.
* After: the game used only 243 MB and the level loaded with a VERY FAST
  loading screen.

Updates to the F3 Debug Overlay:

* "Chunks: 20 in 45 out 20 cached" shows the count of chunks inside the
  viewport (bitmaps and textures loaded), chunks outside the viewport
  (textures freed but data kept), and the number of chunks currently hot
  cached in the ChunkMap (see the snippet below).
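
For reference, the overlay string is assembled from the Canvas metrics like
so (taken from the editor scene change in the diff below; the play scene does
the same with its own drawing):

    inside, outside := s.UI.Canvas.LoadUnloadMetrics()
    *s.debLoadingViewport = fmt.Sprintf("%d in %d out %d cached",
        inside, outside, s.UI.Canvas.Chunker().CacheSize())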

The `doodad` tool's edit-level and edit-doodad commands have a new --touch
flag to "touch" your existing levels and doodads and upgrade them to the
new format (or you can simply open and re-save them in-game):

    doodad edit-level --touch ./example.level
    doodad edit-doodad --touch ./example.doodad

The output from those commands and from `doodad show` should say
"File format: zipfile" in the Headers section.
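
For example (abbreviated; the other header fields are unchanged):

    ===== Level: example.level =====
    Headers:
      File format: zipfile
      File version: ...
      Game version: ...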

To do:

* File attachments (e.g. wallpapers) should also be stored in the ZIP.
Noah 2022-04-29 20:34:59 -07:00
parent 2d3f36379c
commit 93623e4e8a
22 changed files with 861 additions and 103 deletions


@@ -168,10 +168,7 @@ func imageToDrawing(c *cli.Context, chroma render.Color, inputFiles []string, ou
		img := images[i]
		log.Info("Converting extra layer %d", i)
		_, chunker := imageToChunker(img, chroma, palette, chunkSize)
-		doodad.Layers = append(doodad.Layers, doodads.Layer{
-			Name:    toLayerName(inputFiles[i]),
-			Chunker: chunker,
-		})
+		doodad.AddLayer(toLayerName(inputFiles[i]), chunker)
	}
}


@@ -59,6 +59,10 @@ func init() {
			Name:  "unlock",
			Usage: "remove the write-lock on the level file",
		},
+		&cli.BoolFlag{
+			Name:  "touch",
+			Usage: "simply load and re-save the doodad, to migrate it to a zipfile",
+		},
	},
	Action: func(c *cli.Context) error {
		if c.NArg() < 1 {
@@ -94,6 +98,11 @@ func editDoodad(c *cli.Context, filename string) error {
	 * Update level properties *
	 ***************************/

+	if c.Bool("touch") {
+		log.Info("Just touching and resaving the file")
+		modified = true
+	}
+
	if c.String("title") != "" {
		dd.Title = c.String("title")
		log.Info("Set title: %s", dd.Title)


@@ -59,6 +59,10 @@ func init() {
			Name:  "remove-actor",
			Usage: "Remove all instances of the actor from the level. Value is their filename or UUID.",
		},
+		&cli.BoolFlag{
+			Name:  "touch",
+			Usage: "simply load and re-save the level, to migrate it to a zipfile",
+		},
	},
	Action: func(c *cli.Context) error {
		if c.NArg() < 1 {
@@ -94,6 +98,11 @@ func editLevel(c *cli.Context, filename string) error {
	 * Update level properties *
	 ***************************/

+	if c.Bool("touch") {
+		log.Info("Just touching and resaving the file")
+		modified = true
+	}
+
	if c.String("title") != "" {
		lvl.Title = c.String("title")
		log.Info("Set title: %s", lvl.Title)


@@ -92,9 +92,16 @@ func showLevel(c *cli.Context, filename string) error {
		}
	}

+	// Is it a new zipfile format?
+	var fileType = "json or gzip"
+	if lvl.Zipfile != nil {
+		fileType = "zipfile"
+	}
+
	fmt.Printf("===== Level: %s =====\n", filename)
	fmt.Println("Headers:")
+	fmt.Printf("  File format: %s\n", fileType)
	fmt.Printf("  File version: %d\n", lvl.Version)
	fmt.Printf("  Game version: %s\n", lvl.GameVersion)
	fmt.Printf("  Level title: %s\n", lvl.Title)
@@ -170,9 +177,16 @@ func showDoodad(c *cli.Context, filename string) error {
		return nil
	}

+	// Is it a new zipfile format?
+	var fileType = "json or gzip"
+	if dd.Zipfile != nil {
+		fileType = "zipfile"
+	}
+
	fmt.Printf("===== Doodad: %s =====\n", filename)
	fmt.Println("Headers:")
+	fmt.Printf("  File format: %s\n", fileType)
	fmt.Printf("  File version: %d\n", dd.Version)
	fmt.Printf("  Game version: %s\n", dd.GameVersion)
	fmt.Printf("  Doodad title: %s\n", dd.Title)


@@ -41,10 +41,6 @@ var Boolprops = map[string]Boolprop{
		Get: func() bool { return usercfg.Current.HorizontalToolbars },
		Set: func(v bool) { usercfg.Current.HorizontalToolbars = v },
	},
-	"compress-drawings": {
-		Get: func() bool { return CompressDrawings },
-		Set: func(v bool) { CompressDrawings = v },
-	},
	"eager-render": {
		Get: func() bool { return EagerRenderLevelChunks },
		Set: func(v bool) { EagerRenderLevelChunks = v },


@@ -6,6 +6,15 @@ import (
	"git.kirsle.net/go/render"
)

+// Format for level and doodad files.
+type Format int
+
+const (
+	FormatJSON    Format = iota // v0: plain json files
+	FormatGZip                  // v1: gzip compressed json files
+	FormatZipfile               // v2: zip archive with external chunks
+)
+
// Numbers.
var (
	// Window dimensions.
@@ -87,7 +96,14 @@ var (
	EmbeddedWallpaperBasePath = "assets/wallpapers/"

	// File formats: save new levels and doodads gzip compressed
-	CompressDrawings = true
+	DrawingFormat = FormatZipfile
+
+	// Zipfile drawings: max size of the LRU cache for loading chunks from
+	// a zip file. Normally the chunker discards chunks not loaded in a
+	// recent tick, but when iterating the full level this limits the max
+	// size of loaded chunks before some will be freed to make room.
+	// 0 = do not cap the cache.
+	ChunkerLRUCacheMax = 0

	// Play Mode Touchscreen controls.
	PlayModeIdleTimeout = 2200 * time.Millisecond


@@ -52,6 +52,25 @@ func New(size int) *Doodad {
	}
}

+// AddLayer adds a new layer to the doodad. Call this rather than appending
+// your own layer so it points the Zipfile and layer number in. The chunker
+// is optional - pass nil and a new blank chunker is created.
+func (d *Doodad) AddLayer(name string, chunker *level.Chunker) Layer {
+	if chunker == nil {
+		chunker = level.NewChunker(d.ChunkSize())
+	}
+
+	layer := Layer{
+		Name:    name,
+		Chunker: chunker,
+	}
+	layer.Chunker.Layer = len(d.Layers)
+	d.Layers = append(d.Layers, layer)
+	d.Inflate()
+	return layer
+}
+
// Teardown cleans up texture cache memory when the doodad is no longer needed by the game.
func (d *Doodad) Teardown() {
	var (
@@ -101,7 +120,8 @@ func (d *Doodad) Rect() render.Rect {
// Inflate attaches the pixels to their swatches after loading from disk.
func (d *Doodad) Inflate() {
	d.Palette.Inflate()
-	for _, layer := range d.Layers {
+	for i, layer := range d.Layers {
+		layer.Chunker.Layer = i
		layer.Chunker.Inflate(d.Palette)
	}
}

pkg/doodads/fmt_zipfile.go (new file, 110 lines)

@@ -0,0 +1,110 @@
package doodads
import (
"archive/zip"
"bytes"
"encoding/json"
"fmt"
"git.kirsle.net/apps/doodle/pkg/balance"
"git.kirsle.net/apps/doodle/pkg/log"
)
// ToZipfile serializes the doodad into zipfile format.
func (d *Doodad) ToZipfile() ([]byte, error) {
fh := bytes.NewBuffer([]byte{})
zipper := zip.NewWriter(fh)
defer zipper.Close()
// Migrate the Chunker caches into the zipfile.
for _, layer := range d.Layers {
if err := layer.Chunker.MigrateZipfile(zipper); err != nil {
return nil, fmt.Errorf("MigrateZipfile: %s", err)
}
}
// Write the header json.
{
header, err := d.AsJSON()
if err != nil {
return nil, err
}
writer, err := zipper.Create("doodad.json")
if err != nil {
return nil, err
}
if n, err := writer.Write(header); err != nil {
return nil, err
} else {
log.Debug("Written doodad.json to zipfile: %d bytes", n)
}
}
zipper.Close()
// Refresh our Zipfile reader from the zipper we just wrote.
bin := fh.Bytes()
if err := d.ReloadZipfile(bin); err != nil {
log.Error("ReloadZipfile: %s", err)
}
return fh.Bytes(), nil
}
// FromZipfile reads a doodad from zipfile format.
func FromZipfile(data []byte) (*Doodad, error) {
var (
doodad = New(balance.DoodadSize)
err = doodad.populateFromZipfile(data)
)
return doodad, err
}
// ReloadZipfile re-reads the level's zipfile after a write.
func (d *Doodad) ReloadZipfile(data []byte) error {
return d.populateFromZipfile(data)
}
// Common function between FromZipfile and ReloadZipFile.
func (d *Doodad) populateFromZipfile(data []byte) error {
var (
buf = bytes.NewReader(data)
zf *zip.Reader
decoder *json.Decoder
)
zf, err := zip.NewReader(buf, buf.Size())
if err != nil {
return err
}
// Read the doodad.json.
file, err := zf.Open("doodad.json")
if err != nil {
return err
}
decoder = json.NewDecoder(file)
err = decoder.Decode(d)
// Keep the zipfile reader handy.
d.Zipfile = zf
for i, layer := range d.Layers {
layer.Chunker.Layer = i
layer.Chunker.Zipfile = zf
}
return err
}
// Loop may be called each loop to allow the level to maintain its
// memory usage, e.g., for chunks not requested recently from a zipfile
// level to free those from RAM.
func (d *Doodad) Loop() error {
for _, layer := range d.Layers {
layer.Chunker.FreeCaches()
}
return nil
}


@@ -5,6 +5,7 @@ import (
	"encoding/json"
	"fmt"
	"io/ioutil"
+	"net/http"
	"path/filepath"

	"git.kirsle.net/apps/doodle/pkg/balance"
@@ -17,10 +18,20 @@
// and the return value is gz bytes and not the raw JSON.
func (d *Doodad) ToJSON() ([]byte, error) {
	// Gzip compressing?
-	if balance.CompressDrawings {
+	if balance.DrawingFormat == balance.FormatGZip {
		return d.ToGzip()
	}

+	// Zipfile?
+	if balance.DrawingFormat == balance.FormatZipfile {
+		return d.ToZipfile()
+	}
+
+	return d.AsJSON()
+}
+
+// AsJSON returns it just as JSON without any fancy gzip/zip magic.
+func (d *Doodad) AsJSON() ([]byte, error) {
	out := bytes.NewBuffer([]byte{})
	encoder := json.NewEncoder(out)
	if usercfg.Current.JSONIndent {
@@ -48,6 +59,12 @@ func FromJSON(filename string, data []byte) (*Doodad, error) {
		} else {
			doodad = gzd
		}
+	} else if http.DetectContentType(data) == "application/zip" {
+		if zipdoodad, err := FromZipfile(data); err != nil {
+			return nil, err
+		} else {
+			doodad = zipdoodad
+		}
	}

	// Inflate the chunk metadata to map the pixels to their palette indexes.


@@ -289,7 +289,7 @@ func (s *EditorScene) Loop(d *Doodle, ev *event.State) error {
	}
	if s.UI.Canvas != nil {
		inside, outside := s.UI.Canvas.LoadUnloadMetrics()
-		*s.debLoadingViewport = fmt.Sprintf("%d in %d out", inside, outside)
+		*s.debLoadingViewport = fmt.Sprintf("%d in %d out %d cached", inside, outside, s.UI.Canvas.Chunker().CacheSize())
	}

	// Has the window been resized?
// Has the window been resized? // Has the window been resized?


@@ -354,11 +354,10 @@ func (u *EditorUI) SetupPopups(d *Doodle) {
				u.layersWindow.Show()
			},
			OnAddLayer: func() {
-				layer := doodads.Layer{
-					Name:    fmt.Sprintf("layer %d", len(scene.Doodad.Layers)),
-					Chunker: level.NewChunker(scene.DoodadSize),
-				}
-				scene.Doodad.Layers = append(scene.Doodad.Layers, layer)
+				layer := scene.Doodad.AddLayer(
+					fmt.Sprintf("layer %d", len(scene.Doodad.Layers)),
+					nil,
+				)
				log.Info("Added new layer: %d %s",
					len(scene.Doodad.Layers), layer.Name)


@@ -226,7 +226,7 @@ func (c *Chunk) Teardown() int {
	if c.texture != nil {
		c.texture.Free()
-		c.texture = nil
+		c.texture = nil // NPE <- here
		freed++
	}


@@ -4,22 +4,28 @@ import (
	"encoding/json"
	"errors"
	"fmt"
+	"sync"

	"git.kirsle.net/go/render"
)

// MapAccessor implements a chunk accessor by using a map of points to their
// palette indexes. This is the simplest accessor and is best for sparse chunks.
-type MapAccessor map[render.Point]*Swatch
+type MapAccessor struct {
+	grid map[render.Point]*Swatch
+	mu   sync.RWMutex
+}

// NewMapAccessor initializes a MapAccessor.
-func NewMapAccessor() MapAccessor {
-	return MapAccessor{}
+func NewMapAccessor() *MapAccessor {
+	return &MapAccessor{
+		grid: map[render.Point]*Swatch{},
+	}
}

// Inflate the sparse swatches from their palette indexes.
-func (a MapAccessor) Inflate(pal *Palette) error {
-	for point, swatch := range a {
+func (a *MapAccessor) Inflate(pal *Palette) error {
+	for point, swatch := range a.grid {
		if swatch.IsSparse() {
			// Replace this with the correct swatch from the palette.
			if swatch.paletteIndex >= len(pal.Swatches) {
@@ -29,19 +35,24 @@ func (a MapAccessor) Inflate(pal *Palette) error {
					len(pal.Swatches),
				)
			}
-			a[point] = pal.Swatches[swatch.paletteIndex]
+
+			a.mu.Lock()
+			a.grid[point] = pal.Swatches[swatch.paletteIndex] // <- concurrent write
+			a.mu.Unlock()
		}
	}
	return nil
}

// Len returns the current size of the map, or number of pixels registered.
-func (a MapAccessor) Len() int {
-	return len(a)
+func (a *MapAccessor) Len() int {
+	a.mu.RLock()
+	defer a.mu.RUnlock()
+	return len(a.grid)
}

// IterViewport returns a channel to loop over pixels in the viewport.
-func (a MapAccessor) IterViewport(viewport render.Rect) <-chan Pixel {
+func (a *MapAccessor) IterViewport(viewport render.Rect) <-chan Pixel {
	pipe := make(chan Pixel)
	go func() {
		for px := range a.Iter() {
@@ -55,24 +66,29 @@ func (a MapAccessor) IterViewport(viewport render.Rect) <-chan Pixel {
}

// Iter returns a channel to loop over all points in this chunk.
-func (a MapAccessor) Iter() <-chan Pixel {
+func (a *MapAccessor) Iter() <-chan Pixel {
	pipe := make(chan Pixel)
	go func() {
-		for point, swatch := range a {
+		a.mu.Lock()
+		for point, swatch := range a.grid {
			pipe <- Pixel{
				X:      point.X,
				Y:      point.Y,
				Swatch: swatch,
			}
		}
+		a.mu.Unlock()
		close(pipe)
	}()
	return pipe
}

// Get a pixel from the map.
-func (a MapAccessor) Get(p render.Point) (*Swatch, error) {
-	pixel, ok := a[p]
+func (a *MapAccessor) Get(p render.Point) (*Swatch, error) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	pixel, ok := a.grid[p] // <- concurrent read and write
	if !ok {
		return nil, errors.New("no pixel")
	}
@@ -80,17 +96,23 @@ func (a MapAccessor) Get(p render.Point) (*Swatch, error) {
}

// Set a pixel on the map.
-func (a MapAccessor) Set(p render.Point, sw *Swatch) error {
-	a[p] = sw
+func (a *MapAccessor) Set(p render.Point, sw *Swatch) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	a.grid[p] = sw
	return nil
}

// Delete a pixel from the map.
-func (a MapAccessor) Delete(p render.Point) error {
-	if _, ok := a[p]; ok {
-		delete(a, p)
+func (a *MapAccessor) Delete(p render.Point) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
+	if _, ok := a.grid[p]; ok {
+		delete(a.grid, p)
		return nil
	}
	return errors.New("pixel was not there")
}
@@ -99,9 +121,12 @@ func (a MapAccessor) Delete(p render.Point) error {
// When serialized, the key is the "X,Y" coordinate and the value is the
// swatch index of the Palette, rather than redundantly serializing out the
// Swatch object for every pixel.
-func (a MapAccessor) MarshalJSON() ([]byte, error) {
+func (a *MapAccessor) MarshalJSON() ([]byte, error) {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
	dict := map[string]int{}
-	for point, sw := range a {
+	for point, sw := range a.grid {
		dict[point.String()] = sw.Index()
	}
@@ -110,7 +135,10 @@ func (a MapAccessor) MarshalJSON() ([]byte, error) {
}

// UnmarshalJSON to convert the chunk map back from JSON.
-func (a MapAccessor) UnmarshalJSON(b []byte) error {
+func (a *MapAccessor) UnmarshalJSON(b []byte) error {
+	a.mu.Lock()
+	defer a.mu.Unlock()
+
	var dict map[string]int
	err := json.Unmarshal(b, &dict)
	if err != nil {
@@ -122,7 +150,7 @@ func (a MapAccessor) UnmarshalJSON(b []byte) error {
		if err != nil {
			return fmt.Errorf("MapAccessor.UnmarshalJSON: %s", err)
		}
-		a[point] = NewSparseSwatch(index)
+		a.grid[point] = NewSparseSwatch(index)
	}

	return nil


@@ -1,11 +1,15 @@
package level

import (
+	"archive/zip"
	"encoding/json"
	"fmt"
	"math"
+	"sync"

+	"git.kirsle.net/apps/doodle/pkg/balance"
	"git.kirsle.net/apps/doodle/pkg/log"
+	"git.kirsle.net/apps/doodle/pkg/shmem"
	"git.kirsle.net/go/render"
)

@@ -13,8 +17,35 @@
// provides the API to interact with the pixels using their absolute coordinates
// while abstracting away the underlying details.
type Chunker struct {
+	// Layer is optional for the caller, levels use only 0 and
+	// doodads use them for frames. When chunks are exported to
+	// zipfile the Layer keeps them from overlapping.
+	Layer int
	Size int `json:"size"`

+	// A Zipfile reference for new-style levels and doodads which
+	// keep their chunks in external parts of a zip file.
+	Zipfile *zip.Reader `json:"-"`
+
+	// Chunks, oh boy.
+	// The v1 drawing format had all the chunks in the JSON file.
+	// New drawings write them to zips. Legacy drawings can be converted
+	// simply by loading and resaving: their Chunks loads from JSON and
+	// is committed to zipfile on save. This makes Chunks also a good
+	// cache even when we have a zipfile to fall back on.
	Chunks ChunkMap `json:"chunks"`

+	chunkMu sync.RWMutex
+
+	// If we have a zipfile, only keep chunks warm in memory if they
+	// are actively wanted by the game.
+	lastTick              uint64 // NOTE: tracks from shmem.Tick
+	chunkRequestsThisTick map[render.Point]interface{}
+	requestsN1            map[render.Point]interface{}
+	requestsN2            map[render.Point]interface{}
+	requestMu             sync.Mutex
+
+	// The palette reference from first call to Inflate()
+	pal *Palette
}

// NewChunker creates a new chunk manager with a given chunk size.
@@ -22,6 +53,10 @@ func NewChunker(size int) *Chunker {
	return &Chunker{
		Size:   size,
		Chunks: ChunkMap{},
+
+		chunkRequestsThisTick: map[render.Point]interface{}{},
+		requestsN1:            map[render.Point]interface{}{},
+		requestsN2:            map[render.Point]interface{}{},
	}
}

@@ -29,6 +64,10 @@ func NewChunker(size int) *Chunker {
// Sparse Swatches (which have only their palette index, from the file format
// on disk) to connect references to the swatches in the palette.
func (c *Chunker) Inflate(pal *Palette) error {
+	c.pal = pal
+
+	c.chunkMu.RLock()
+	defer c.chunkMu.RUnlock()
	for coord, chunk := range c.Chunks {
		chunk.Point = coord
		chunk.Size = c.Size
@@ -70,11 +109,70 @@ func (c *Chunker) IterViewport(viewport render.Rect) <-chan Pixel {
// IterChunks returns a channel to iterate over all chunks in the drawing.
func (c *Chunker) IterChunks() <-chan render.Point {
-	pipe := make(chan render.Point)
+	var (
+		pipe = make(chan render.Point)
+		sent = map[render.Point]interface{}{}
+	)
	go func() {
+		c.chunkMu.RLock()
+
+		// Send the chunk coords we have in working memory.
+		// v1 levels: had all their chunks there in their JSON data
+		// v2 levels: chunks are in zipfile, cached ones are here
		for point := range c.Chunks {
+			sent[point] = nil
			pipe <- point
		}
+		c.chunkMu.RUnlock()
+
+		// If we have a zipfile, send any remaining chunks that are
+		// in colder storage.
+		if c.Zipfile != nil {
+			for _, point := range ChunksInZipfile(c.Zipfile, c.Layer) {
+				if _, ok := sent[point]; ok {
+					continue // Already sent from active memory
+				}
+				pipe <- point
+			}
+		}
+
		close(pipe)
	}()
	return pipe
}

+/*
+IterChunksThemselves iterates all chunks in the drawing rather than coords.
+
+Note: this will mark every chunk as "touched" this frame, so in a zipfile
+level will load ALL chunks into memory.
+*/
+func (c *Chunker) IterChunksThemselves() <-chan *Chunk {
+	pipe := make(chan *Chunk)
+	go func() {
+		for coord := range c.IterChunks() {
+			if chunk, ok := c.GetChunk(coord); ok {
+				pipe <- chunk
+			}
+		}
+		close(pipe)
+	}()
+	return pipe
+}
+
+// IterCachedChunks iterates ONLY over the chunks currently cached in memory,
+// e.g. so they can be torn down without loading extra chunks by looping normally.
+func (c *Chunker) IterCachedChunks() <-chan *Chunk {
+	pipe := make(chan *Chunk)
+	go func() {
+		c.chunkMu.RLock()
+		defer c.chunkMu.RUnlock()
+
+		for _, chunk := range c.Chunks {
+			pipe <- chunk
+		}
+		close(pipe)
+	}()
+	return pipe
+}
@@ -128,7 +226,7 @@ func (c *Chunker) IterViewportChunks(viewport render.Rect) <-chan render.Point {
func (c *Chunker) IterPixels() <-chan Pixel {
	pipe := make(chan Pixel)
	go func() {
-		for _, chunk := range c.Chunks {
+		for chunk := range c.IterChunksThemselves() {
			for px := range chunk.Iter() {
				pipe <- px
			}
@@ -166,7 +264,7 @@ func (c *Chunker) WorldSizePositive() render.Rect {
// Bounds returns the boundary points of the lowest and highest chunk which
// have any data in them.
func (c *Chunker) Bounds() (low, high render.Point) {
-	for coord := range c.Chunks {
+	for coord := range c.IterChunks() {
		if coord.X < low.X {
			low.X = coord.X
		}
@@ -185,16 +283,168 @@
	return low, high
}

-// GetChunk gets a chunk at a certain position. Returns false if not found.
+/*
+GetChunk gets a chunk at a certain position. Returns false if not found.
+
+This should be the centralized function to request a Chunk from the Chunker
+(or IterChunksThemselves). On old-style levels all of the chunks were just
+in memory as part of the JSON struct, in Zip files we can load/unload them
+at will from external files.
+*/
func (c *Chunker) GetChunk(p render.Point) (*Chunk, bool) {
+	// It's currently cached in memory?
+	c.chunkMu.RLock()
	chunk, ok := c.Chunks[p]
+	c.chunkMu.RUnlock()
+
+	if ok {
+		// An empty chunk? We hang onto these until save time to commit
+		// the empty chunk to ZIP.
+		if chunk.Len() == 0 {
+			return nil, false
+		}
+
+		c.logChunkAccess(p, chunk) // for the LRU cache
		return chunk, ok
+	}
+
+	// Hit the zipfile for it.
+	if c.Zipfile != nil {
+		if chunk, err := ChunkFromZipfile(c.Zipfile, c.Layer, p); err == nil {
+			log.Debug("GetChunk(%s) cache miss, read from zip", p)
+			c.SetChunk(p, chunk)       // cache it
+			c.logChunkAccess(p, chunk) // for the LRU cache
+			if c.pal != nil {
+				chunk.Point = p
+				chunk.Size = c.Size
+				chunk.Inflate(c.pal)
+			}
+			return chunk, true
+		}
+	}
+
+	// Is our chunk cache getting too full? e.g. on full level
+	// sweeps where a whole zip file's worth of chunks are scanned.
+	if balance.ChunkerLRUCacheMax > 0 && len(c.Chunks) > balance.ChunkerLRUCacheMax {
+		log.Error("Chunks in memory (%d) exceeds LRU cache cap of %d, freeing random chunks")
+		c.chunkMu.Lock()
+		defer c.chunkMu.Unlock()
+
+		var (
+			i     = 0
+			limit = len(c.Chunks) - balance.ChunkerLRUCacheMax
+		)
+		for coord := range c.Chunks {
+			if i < limit {
+				delete(c.Chunks, coord)
+			}
+			i++
+		}
+	}
+
+	return nil, false
+}
+
+// LRU cache for chunks from zipfiles: log which chunks were accessed
+// this tick, so they can be compared to the tick prior, and then freed
+// up after that.
+func (c *Chunker) logChunkAccess(p render.Point, chunk *Chunk) {
+	// Record this point.
+	c.requestMu.Lock()
+	if c.chunkRequestsThisTick == nil {
+		c.chunkRequestsThisTick = map[render.Point]interface{}{}
+	}
+	c.chunkRequestsThisTick[p] = nil
+	c.requestMu.Unlock()
+}
+
+// FreeCaches unloads chunks that have not been requested in 2 frames.
+//
+// Only on chunkers that have zipfiles, old-style levels without zips
+// wouldn't be able to restore their chunks otherwise! Returns -1 if
+// no Zipfile, otherwise number of chunks freed.
+func (c *Chunker) FreeCaches() int {
+	if c.Zipfile == nil {
+		return -1
+	}
+
+	var thisTick = shmem.Tick
+
+	// Very first tick this chunker has seen?
+	if c.lastTick == 0 {
+		c.lastTick = thisTick
+	}
+
+	// A new tick?
+	if (thisTick-c.lastTick)%4 == 0 {
+		c.requestMu.Lock()
+		c.chunkMu.Lock()
+		defer c.requestMu.Unlock()
+		defer c.chunkMu.Unlock()
+
+		var (
+			requestsThisTick = c.chunkRequestsThisTick
+			requestsN2       = c.requestsN2
+			delete_coords    = []render.Point{}
+		)
+
+		// Chunks not requested this last tick, unload from the cache.
+		for coord := range requestsN2 {
+			// Old point not requested recently?
+			if _, ok := requestsThisTick[coord]; !ok {
+				delete_coords = append(delete_coords, coord)
+			}
+		}
+
+		for _, coord := range delete_coords {
+			c.FreeChunk(coord)
+		}
+
+		// Rotate the cached ticks and clean the slate.
+		c.requestsN2 = c.requestsN1
+		c.requestsN1 = requestsThisTick
+		c.chunkRequestsThisTick = map[render.Point]interface{}{}
+		c.lastTick = thisTick
+
+		return len(delete_coords)
+	}
+
+	return 0
+}
+
+// SetChunk writes the chunk into the cache dict and nothing more.
+//
+// This function should be the singular writer to the chunk cache.
+func (c *Chunker) SetChunk(p render.Point, chunk *Chunk) {
+	c.chunkMu.Lock()
+	c.Chunks[p] = chunk
+	c.chunkMu.Unlock()
+
+	c.logChunkAccess(p, chunk)
+}
+
+// FreeChunk unloads a chunk from active memory for zipfile-backed levels.
+//
+// Not thread safe: it is assumed the caller has the lock on c.Chunks.
+func (c *Chunker) FreeChunk(p render.Point) bool {
+	if c.Zipfile == nil {
+		return false
+	}
+
+	// Don't delete empty chunks, hang on until next zipfile save.
+	if chunk, ok := c.Chunks[p]; ok && chunk.Len() == 0 {
+		return false
+	}
+
+	delete(c.Chunks, p)
+	return true
+}

// Redraw marks every chunk as dirty and invalidates all their texture caches,
// forcing the drawing to re-generate from scratch.
func (c *Chunker) Redraw() {
-	for _, chunk := range c.Chunks {
+	for chunk := range c.IterChunksThemselves() {
		chunk.SetDirty()
	}
}
@@ -203,7 +453,7 @@ func (c *Chunker) Redraw() {
// the whole drawing for smooth gameplay rather than chunks lazy rendering as
// they enter the screen.
func (c *Chunker) Prerender() {
-	for _, chunk := range c.Chunks {
+	for chunk := range c.IterChunksThemselves() {
		_ = chunk.CachedBitmap(render.Invisible)
	}
}
@@ -218,7 +468,7 @@ func (c *Chunker) PrerenderN(n int) (remaining int) {
		modified      int // number modified this call
	)
-	for _, chunk := range c.Chunks {
+	for chunk := range c.IterChunksThemselves() {
		total++
		if chunk.bitmap != nil {
			totalRendered++
@@ -241,7 +491,7 @@ func (c *Chunker) PrerenderN(n int) (remaining int) {
func (c *Chunker) Get(p render.Point) (*Swatch, error) {
	// Compute the chunk coordinate.
	coord := c.ChunkCoordinate(p)
-	if chunk, ok := c.Chunks[coord]; ok {
+	if chunk, ok := c.GetChunk(coord); ok {
		return chunk.Get(p)
	}
	return nil, fmt.Errorf("no chunk %s exists for point %s", coord, p)
@@ -250,12 +500,12 @@ func (c *Chunker) Get(p render.Point) (*Swatch, error) {
// Set a pixel at the given coordinate.
func (c *Chunker) Set(p render.Point, sw *Swatch) error {
	coord := c.ChunkCoordinate(p)
-	chunk, ok := c.Chunks[coord]
+	chunk, ok := c.GetChunk(coord)
	if !ok {
		chunk = NewChunk()
-		c.Chunks[coord] = chunk
		chunk.Point = coord
		chunk.Size = c.Size
+		c.SetChunk(coord, chunk)
	}

	return chunk.Set(p, sw)
@@ -281,9 +531,8 @@ func (c *Chunker) SetRect(r render.Rect, sw *Swatch) error {
// Delete a pixel at the given coordinate.
func (c *Chunker) Delete(p render.Point) error {
	coord := c.ChunkCoordinate(p)
-	defer c.pruneChunk(coord)
-	if chunk, ok := c.Chunks[coord]; ok {
+	if chunk, ok := c.GetChunk(coord); ok {
		return chunk.Delete(p)
	}
	return fmt.Errorf("no chunk %s exists for point %s", coord, p)
@@ -308,17 +557,6 @@ func (c *Chunker) DeleteRect(r render.Rect) error {
	return nil
}

-// pruneChunk will remove an empty chunk from the chunk map, called after
-// delete operations.
-func (c *Chunker) pruneChunk(coord render.Point) {
-	if chunk, ok := c.Chunks[coord]; ok {
-		if chunk.Len() == 0 {
-			log.Info("Chunker.pruneChunk: %s has become empty", coord)
-			delete(c.Chunks, coord)
-		}
-	}
-}
-
// ChunkCoordinate computes a chunk coordinate from an absolute coordinate.
func (c *Chunker) ChunkCoordinate(abs render.Point) render.Point {
	if c.Size == 0 {

(new file, 196 lines)

@@ -0,0 +1,196 @@
package level
import (
"archive/zip"
"fmt"
"io/ioutil"
"regexp"
"strconv"
"git.kirsle.net/apps/doodle/pkg/log"
"git.kirsle.net/go/render"
)
// Zipfile interactions for the Chunker to cache and manage which
// chunks of large levels need be in active memory.
var (
zipChunkfileRegexp = regexp.MustCompile(`^chunks/(\d+)/(.+?)\.json$`)
)
// MigrateZipfile is called on save to migrate old-style ChunkMap
// chunks into external zipfile members and free up space in the
// master Level or Doodad struct.
func (c *Chunker) MigrateZipfile(zf *zip.Writer) error {
// Identify if any chunks in active memory had been completely erased.
var (
erasedChunks = map[render.Point]interface{}{}
chunksZipped = map[render.Point]interface{}{}
)
for coord, chunk := range c.Chunks {
if chunk.Len() == 0 {
log.Info("Chunker.MigrateZipfile: %s has become empty, remove from zip", coord)
erasedChunks[coord] = nil
}
}
// Copy all COLD STORED chunks from our original zipfile into the new one.
// These are chunks that are NOT actively loaded (those are written next),
// and erasedChunks are not written to the zipfile at all.
if c.Zipfile != nil {
log.Info("MigrateZipfile: Copying chunk files from old zip to new zip")
for _, file := range c.Zipfile.File {
m := zipChunkfileRegexp.FindStringSubmatch(file.Name)
if len(m) > 0 {
mLayer, _ := strconv.Atoi(m[1])
coord := m[2]
// Not our layer, not our problem.
if mLayer != c.Layer {
continue
}
point, err := render.ParsePoint(coord)
if err != nil {
return err
}
// Don't create zip files for empty (0 pixel) chunks.
if _, ok := erasedChunks[point]; ok {
log.Debug("Skip copying %s: chunk is empty", coord)
continue
}
// Don't ever write duplicate files.
if _, ok := chunksZipped[point]; ok {
log.Debug("Skip copying duplicate chunk %s", coord)
continue
}
chunksZipped[point] = nil
// Don't copy the chunks we have currently in memory: those
// are written next. Apparently zip files are allowed to
// have duplicate named members!
if _, ok := c.Chunks[point]; ok {
log.Debug("Skip chunk %s (in memory)", coord)
continue
}
log.Info("Copy existing chunk %s", file.Name)
if err := zf.Copy(file); err != nil {
return err
}
}
}
} else {
log.Warn("Chunker.MigrateZipfile: the drawing did not give me a zipfile!")
}
if len(c.Chunks) == 0 {
return nil
}
log.Info("MigrateZipfile: chunker has %d in memory, exporting to zipfile", len(c.Chunks))
// Flush in-memory chunks out to zipfile.
for coord, chunk := range c.Chunks {
filename := fmt.Sprintf("chunks/%d/%s.json", c.Layer, coord.String())
log.Info("Flush in-memory chunks to %s", filename)
chunk.ToZipfile(zf, filename)
}
// Flush the chunkmap out.
// TODO: do similar to move old attached files (wallpapers) too
c.Chunks = ChunkMap{}
return nil
}
// ClearChunkCache completely flushes the ChunkMap from memory. BE CAREFUL.
// If the level is a Zipfile the chunks will reload as needed, but old style
// levels this will nuke the whole drawing!
func (c *Chunker) ClearChunkCache() {
c.chunkMu.Lock()
c.Chunks = ChunkMap{}
c.chunkMu.Unlock()
}
// CacheSize returns the number of chunks in memory.
func (c *Chunker) CacheSize() int {
return len(c.Chunks)
}
// ToZipfile writes just a chunk's data into a zipfile.
func (c *Chunk) ToZipfile(zf *zip.Writer, filename string) error {
writer, err := zf.Create(filename)
if err != nil {
return err
}
json, err := c.MarshalJSON()
if err != nil {
return err
}
n, err := writer.Write(json)
if err != nil {
return err
}
log.Debug("Written chunk to zipfile: %s (%d bytes)", filename, n)
return nil
}
// ChunkFromZipfile loads a chunk from a zipfile.
func ChunkFromZipfile(zf *zip.Reader, layer int, coord render.Point) (*Chunk, error) {
filename := fmt.Sprintf("chunks/%d/%s.json", layer, coord)
file, err := zf.Open(filename)
if err != nil {
return nil, err
}
bin, err := ioutil.ReadAll(file)
if err != nil {
return nil, err
}
var chunk = NewChunk()
err = chunk.UnmarshalJSON(bin)
if err != nil {
return nil, err
}
return chunk, nil
}
// ChunksInZipfile returns the list of chunk coordinates in a zipfile.
func ChunksInZipfile(zf *zip.Reader, layer int) []render.Point {
var (
result = []render.Point{}
sLayer = fmt.Sprintf("%d", layer)
)
for _, file := range zf.File {
m := zipChunkfileRegexp.FindStringSubmatch(file.Name)
if len(m) > 0 {
var (
mLayer = m[1]
mPoint = m[2]
)
// Not our layer?
if mLayer != sLayer {
continue
}
if point, err := render.ParsePoint(mPoint); err == nil {
result = append(result, point)
} else {
log.Error("ChunksInZipfile: file '%s' didn't parse as a point: %s", file.Name, err)
}
}
}
return result
}


@@ -1,12 +1,13 @@
package level

import (
+	"archive/zip"
	"bytes"
	"compress/gzip"
	"encoding/json"
-	"errors"
	"fmt"
	"io/ioutil"
+	"net/http"

	"git.kirsle.net/apps/doodle/pkg/balance"
	"git.kirsle.net/apps/doodle/pkg/log"
@@ -32,8 +33,14 @@ func FromJSON(filename string, data []byte) (*Level, error) {
		} else {
			m = gzmap
		}
+	} else if http.DetectContentType(data) == "application/zip" {
+		if zipmap, err := FromZipfile(data); err != nil {
+			return nil, err
+		} else {
+			m = zipmap
+		}
	} else {
-		return nil, errors.New("invalid file format")
+		return nil, fmt.Errorf("invalid file format")
	}

	// Fill in defaults.
@@ -55,10 +62,20 @@ func FromJSON(filename string, data []byte) (*Level, error) {
// "gzip supported" in their descriptions.
func (m *Level) ToJSON() ([]byte, error) {
	// Gzip compressing?
-	if balance.CompressDrawings {
+	if balance.DrawingFormat == balance.FormatGZip {
		return m.ToGzip()
	}

+	// Zipfile?
+	if balance.DrawingFormat == balance.FormatZipfile {
+		return m.ToZipfile()
+	}
+
+	return m.AsJSON()
+}
+
+// AsJSON returns it just as JSON without any fancy gzip/zip magic.
+func (m *Level) AsJSON() ([]byte, error) {
	out := bytes.NewBuffer([]byte{})
	encoder := json.NewEncoder(out)
	if usercfg.Current.JSONIndent {
@@ -83,6 +100,50 @@ func (m *Level) ToGzip() ([]byte, error) {
	return handle.Bytes(), err
}

+// ToZipfile serializes the level as a ZIP archive and also migrates
+// data loaded from an older save into the new zip format.
+func (m *Level) ToZipfile() ([]byte, error) {
+	// If we do not have a Zipfile yet, migrate legacy data into one.
+	// if m.Zipfile == nil {
+
+	fh := bytes.NewBuffer([]byte{})
+	zipper := zip.NewWriter(fh)
+	defer zipper.Close()
+
+	// Migrate any legacy Chunker data into external files in the zip.
+	if err := m.Chunker.MigrateZipfile(zipper); err != nil {
+		return nil, fmt.Errorf("MigrateZipfile: %s", err)
+	}
+
+	// Write the header json.
+	{
+		header, err := m.AsJSON()
+		if err != nil {
+			return nil, err
+		}
+
+		writer, err := zipper.Create("level.json")
+		if err != nil {
+			return nil, fmt.Errorf("zipping index.js: %s", err)
+		}
+
+		if n, err := writer.Write(header); err != nil {
+			return nil, err
+		} else {
+			log.Debug("Written level.json to zipfile: %s bytes", n)
+		}
+	}
+
+	zipper.Close()
+
+	// Refresh our Zipfile reader from the zipper we just wrote.
+	bin := fh.Bytes()
+	if err := m.ReloadZipfile(bin); err != nil {
+		log.Error("ReloadZipfile: %s", err)
+	}
+
+	return fh.Bytes(), nil
+}
+
// FromGzip deserializes a gzip compressed level JSON.
func FromGzip(data []byte) (*Level, error) {
	// This function works, do not touch.
@@ -104,6 +165,49 @@ func FromGzip(data []byte) (*Level, error) {
	return level, nil
}

+// FromZipfile reads a level in zipfile format.
+func FromZipfile(data []byte) (*Level, error) {
+	var (
+		level = New()
+		err   = level.populateFromZipfile(data)
+	)
+	return level, err
+}
+
+// ReloadZipfile re-reads the level's zipfile after a write.
+func (m *Level) ReloadZipfile(data []byte) error {
+	return m.populateFromZipfile(data)
+}
+
+// Common function between FromZipfile and ReloadZipFile.
+func (m *Level) populateFromZipfile(data []byte) error {
+	var (
+		buf     = bytes.NewReader(data)
+		zf      *zip.Reader
+		decoder *json.Decoder
+	)
+
+	zf, err := zip.NewReader(buf, buf.Size())
+	if err != nil {
+		return err
+	}
+
+	// Read the level.json.
+	file, err := zf.Open("level.json")
+	if err != nil {
+		return err
+	}
+
+	decoder = json.NewDecoder(file)
+	err = decoder.Decode(m)
+
+	// Keep the zipfile reader handy.
+	m.Zipfile = zf
+	m.Chunker.Zipfile = zf
+
+	return err
+}
+
// LoadJSON loads a map from JSON file (gzip supported).
func LoadJSON(filename string) (*Level, error) {
	data, err := ioutil.ReadFile(filename)
@@ -128,3 +232,11 @@ func (m *Level) WriteJSON(filename string) error {
	return nil
}
+
+// Loop may be called each loop to allow the level to maintain its
+// memory usage, e.g., for chunks not requested recently from a zipfile
+// level to free those from RAM.
+func (m *Level) Loop() error {
+	m.Chunker.FreeCaches()
+	return nil
+}


@@ -4,19 +4,6 @@ import "git.kirsle.net/apps/doodle/pkg/log"

// Maintenance functions for the file format on disk.

-// PruneChunks cleans up any level chunks that have no pixel data.
-func (m *Level) PruneChunks() int {
-	var count int
-	for coord, chunk := range m.Chunker.Chunks {
-		if chunk.Len() == 0 {
-			log.Info("PruneChunks: %d has no pixels", coord)
-			delete(m.Chunker.Chunks, coord)
-			count++
-		}
-	}
-	return count
-}
-
// PruneLinks cleans up any Actor Links that can not be resolved in the
// level data. For example, if actors were linked in Edit Mode and one
// actor is deleted leaving a broken link.


@@ -1,7 +1,6 @@
package level

import (
-	"errors"
	"fmt"
	"io/ioutil"
	"runtime"
@@ -82,9 +81,8 @@ func LoadFile(filename string) (*Level, error) {
		return level, nil
	} else {
		log.Warn(err.Error())
+		return nil, err
	}
-
-	return nil, errors.New("invalid file type")
}

// WriteFile saves a level to disk in the user's config directory.
@@ -98,7 +96,6 @@ func (m *Level) WriteFile(filename string) error {
	m.GameVersion = branding.Version

	// Maintenance functions, clean up cruft before save.
-	m.PruneChunks()
	m.PruneLinks()

	bin, err := m.ToJSON()


@@ -1,6 +1,7 @@
package level

import (
+	"archive/zip"
	"encoding/json"
	"fmt"
	"os"
@@ -26,6 +27,13 @@ type Base struct {
	Author string `json:"author"`
	Locked bool   `json:"locked"`

+	// v2 level format: zip files with external chunks.
+	// (v0 was json text, v1 was gzip compressed json text).
+	// The game must load levels created using the previous
+	// formats, they will not have a Zipfile and will have
+	// Chunkers in memory from their (gz) json.
+	Zipfile *zip.Reader `json:"-"`
+
	// Every drawing type is able to embed other files inside of itself.
	Files FileSystem `json:"files"`
}
@@ -95,13 +103,12 @@ func (m *Level) Teardown() {
		textures int
	)

-	for coord := range m.Chunker.IterChunks() {
-		if chunk, ok := m.Chunker.GetChunk(coord); ok {
-			freed := chunk.Teardown()
-			chunks++
-			textures += freed
-		}
-	}
+	// Free any CACHED chunks' memory.
+	for chunk := range m.Chunker.IterCachedChunks() {
+		freed := chunk.Teardown()
+		chunks++
+		textures += freed
+	}

	log.Debug("Teardown level (%s): Freed %d textures across %d level chunks", m.Title, textures, chunks)
}


@@ -660,7 +660,7 @@ func (s *PlayScene) Loop(d *Doodle, ev *event.State) error {
	*s.debViewport = s.drawing.Viewport().String()
	*s.debScroll = s.drawing.Scroll.String()
	inside, outside := s.drawing.LoadUnloadMetrics()
-	*s.debLoadUnload = fmt.Sprintf("%d in %d out", inside, outside)
+	*s.debLoadUnload = fmt.Sprintf("%d in %d out %d cached", inside, outside, s.drawing.Chunker().CacheSize())

	// Update the timer.
	s.timerLabel.Text = savegame.FormatDuration(time.Since(s.startTime))


@@ -272,8 +272,15 @@ func (w *Canvas) Loop(ev *event.State) error {
	_ = w.loopConstrainScroll()

	// Every so often, eager-load/unload chunk bitmaps to save on memory.
-	w.LoadUnloadChunks()
+	if w.level != nil {
+		// Unloads bitmaps and textures every N frames...
+		w.LoadUnloadChunks()
+
+		// Unloads chunks themselves (from zipfile levels) that aren't
+		// recently accessed.
+		w.chunks.FreeCaches()
+	}

	// Remove any actors that were destroyed the previous tick.
	var newActors []*Actor
	for _, a := range w.actors {


@@ -45,20 +45,19 @@ func (w *Canvas) LoadUnloadChunks() {
		wg.Add(1)
		go func(i int) {
			for coord := range chunks {
-				if chunk, ok := w.level.Chunker.GetChunk(coord); ok {
-					chunk := chunk
-					if _, ok := chunksInside[coord]; ok {
-						// Preload its bitmap image.
+				if _, ok := chunksInside[coord]; ok {
+					// This chunk is INSIDE our viewport, preload its bitmap.
+					if chunk, ok := w.level.Chunker.GetChunk(coord); ok {
						_ = chunk.CachedBitmap(render.Invisible)
						resultInside++
-					} else {
-						// Unload its bitmap and texture.
-						chunksTeardown = append(chunksTeardown, chunk)
-						resultOutside++
+						continue
					}
				}
+
+				// Chunks outside the viewport, we won't load them and
+				// the Chunker will flush them out to (zip) file.
+				resultOutside++
			}
			wg.Done()
		}(i)
	}