Compare commits


No commits in common. "master" and "v0.0.1" have entirely different histories.

55 changed files with 473 additions and 603 deletions

.gitignore (2 changed lines)
View File

@ -1,2 +0,0 @@
lsconvert
*.sublime-workspace

View File

@ -1,6 +1,6 @@
The MIT License (MIT) The MIT License (MIT)
Copyright (c) 2020 lordwelch Copyright (c) 2015 Norbyte
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal

View File

@ -108,6 +108,7 @@ func (v Vec) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
switch i { switch i {
case 0: case 0:
name.Local = "x" name.Local = "x"
// start.Name = "float1"
case 1: case 1:
name.Local = "y" name.Local = "y"
start.Name.Local = "float2" start.Name.Local = "float2"
@ -384,7 +385,9 @@ func (na *NodeAttribute) FromString(str string) error {
} }
} }
var err error var (
err error
)
switch na.Type { switch na.Type {
case DTNone: case DTNone:

View File

@ -85,18 +85,21 @@ func MakeCompressionFlags(method CompressionMethod, level CompressionLevel) int
func Decompress(compressed io.Reader, uncompressedSize int, compressionFlags byte, chunked bool) io.ReadSeeker { func Decompress(compressed io.Reader, uncompressedSize int, compressionFlags byte, chunked bool) io.ReadSeeker {
switch CompressionMethod(compressionFlags & 0x0f) { switch CompressionMethod(compressionFlags & 0x0f) {
case CMNone: case CMNone:
// logger.Println("No compression")
if v, ok := compressed.(io.ReadSeeker); ok { if v, ok := compressed.(io.ReadSeeker); ok {
return v return v
} }
panic(errors.New("compressed must be an io.ReadSeeker if there is no compression")) panic(errors.New("compressed must be an io.ReadSeeker if there is no compression"))
case CMZlib: case CMZlib:
// logger.Println("zlib compression")
zr, _ := zlib.NewReader(compressed) zr, _ := zlib.NewReader(compressed)
v, _ := ioutil.ReadAll(zr) v, _ := ioutil.ReadAll(zr)
return bytes.NewReader(v) return bytes.NewReader(v)
case CMLZ4: case CMLZ4:
if chunked { if chunked {
// logger.Println("lz4 stream compressed")
zr := lz4.NewReader(compressed) zr := lz4.NewReader(compressed)
p := make([]byte, uncompressedSize) p := make([]byte, uncompressedSize)
_, err := zr.Read(p) _, err := zr.Read(p)
@ -105,7 +108,10 @@ func Decompress(compressed io.Reader, uncompressedSize int, compressionFlags byt
} }
return bytes.NewReader(p) return bytes.NewReader(p)
} }
// logger.Println("lz4 block compressed")
// panic(errors.New("not implemented"))
src, _ := ioutil.ReadAll(compressed) src, _ := ioutil.ReadAll(compressed)
// logger.Println(len(src))
dst := make([]byte, uncompressedSize*2) dst := make([]byte, uncompressedSize*2)
_, err := lz4.UncompressBlock(src, dst) _, err := lz4.UncompressBlock(src, dst)
if err != nil { if err != nil {
@ -141,7 +147,7 @@ func ReadAttribute(r io.ReadSeeker, name string, DT DataType, length uint, l log
pos int64 pos int64
n int n int
) )
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
switch DT { switch DT {
case DTNone: case DTNone:
@ -400,140 +406,3 @@ func (l *LimitedReadSeeker) Seek(offset int64, whence int) (int64, error) {
return -1, io.ErrNoProgress return -1, io.ErrNoProgress
} }
} }
func ReadTranslatedString(r io.ReadSeeker, version FileVersion, engineVersion uint32) (TranslatedString, error) {
var (
str TranslatedString
err error
)
if version >= VerBG3 || engineVersion == 0x4000001d {
var version uint16
err = binary.Read(r, binary.LittleEndian, &version)
if err != nil {
return str, err
}
str.Version = version
err = binary.Read(r, binary.LittleEndian, &version)
if err != nil {
return str, err
}
if version == 0 {
str.Value, err = ReadCString(r, int(str.Version))
if err != nil {
return str, err
}
str.Version = 0
} else {
_, _ = r.Seek(-2, io.SeekCurrent)
}
} else {
str.Version = 0
var (
vlength int32
v []byte
// n int
)
err = binary.Read(r, binary.LittleEndian, &vlength)
if err != nil {
return str, err
}
v = make([]byte, vlength)
_, err = r.Read(v)
if err != nil {
return str, err
}
str.Value = string(v)
}
var handleLength int32
err = binary.Read(r, binary.LittleEndian, &handleLength)
if err != nil {
return str, err
}
str.Handle, err = ReadCString(r, int(handleLength))
if err != nil {
return str, err
}
return str, nil
}
func ReadTranslatedFSString(r io.Reader, version FileVersion) (TranslatedFSString, error) {
var (
str = TranslatedFSString{}
err error
)
if version >= VerBG3 {
var version uint16
err = binary.Read(r, binary.LittleEndian, &version)
if err != nil {
return str, err
}
str.Version = version
} else {
str.Version = 0
var length int32
err = binary.Read(r, binary.LittleEndian, &length)
if err != nil {
return str, err
}
str.Value, err = ReadCString(r, int(length))
if err != nil {
return str, err
}
}
var handleLength int32
err = binary.Read(r, binary.LittleEndian, &handleLength)
if err != nil {
return str, err
}
str.Handle, err = ReadCString(r, int(handleLength))
if err != nil {
return str, err
}
var arguments int32
err = binary.Read(r, binary.LittleEndian, &arguments)
if err != nil {
return str, err
}
str.Arguments = make([]TranslatedFSStringArgument, 0, arguments)
for i := 0; i < int(arguments); i++ {
arg := TranslatedFSStringArgument{}
var argKeyLength int32
err = binary.Read(r, binary.LittleEndian, &argKeyLength)
if err != nil {
return str, err
}
arg.Key, err = ReadCString(r, int(argKeyLength))
if err != nil {
return str, err
}
arg.String, err = ReadTranslatedFSString(r, version)
if err != nil {
return str, err
}
var argValueLength int32
err = binary.Read(r, binary.LittleEndian, &argValueLength)
if err != nil {
return str, err
}
arg.Value, err = ReadCString(r, int(argValueLength))
if err != nil {
return str, err
}
str.Arguments = append(str.Arguments, arg)
}
return str, nil
}
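For reference, a minimal sketch of how the Decompress helper earlier in this file is typically driven, assuming the master-side package layout (import path git.narnian.us/lordwelch/lsgo): the LSF header records each section's on-disk and uncompressed sizes, and the low nibble of the compression-flags byte selects the method (none, zlib, or LZ4). The package and function names below are illustrative and not part of either branch.

// Illustrative only; the package and function names are not part of the diff.
package lsfutil

import (
	"io"

	"git.narnian.us/lordwelch/lsgo"
)

// decompressSection mirrors how the LSF reader (near the end of this
// comparison) drives Decompress for one section of the file.
func decompressSection(r io.ReadSeeker, sizeOnDisk, uncompressedSize uint32, flags byte, chunked bool) io.ReadSeeker {
	// Restrict reads to this section's bytes before inflating it.
	var section io.ReadSeeker = lsgo.LimitReadSeeker(r, int64(sizeOnDisk))
	if m := lsgo.CompressionFlagsToMethod(flags); m == lsgo.CMNone || m == lsgo.CMInvalid {
		// Stored raw; Decompress would hand the reader straight back anyway.
		return section
	}
	// chunked is true for the node/attribute sections on newer file versions
	// (hdr.Version >= VerChunkedCompress), false for the string table.
	return lsgo.Decompress(section, int(uncompressedSize), flags, chunked)
}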

View File

@ -1,20 +1,16 @@
package main package main
import ( import (
"bytes"
"encoding/xml" "encoding/xml"
"errors" "errors"
"flag" "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"strings" "strings"
"git.narnian.us/lordwelch/lsgo" "git.narnian.us/lordwelch/lsgo"
_ "git.narnian.us/lordwelch/lsgo/lsb"
_ "git.narnian.us/lordwelch/lsgo/lsf"
"github.com/go-kit/kit/log" "github.com/go-kit/kit/log"
"github.com/kr/pretty" "github.com/kr/pretty"
@ -39,6 +35,7 @@ func init() {
} }
func main() { func main() {
for _, v := range flag.Args() { for _, v := range flag.Args() {
fi, err := os.Stat(v) fi, err := os.Stat(v)
if err != nil { if err != nil {
@ -46,6 +43,7 @@ func main() {
os.Exit(1) os.Exit(1)
} }
switch { switch {
case !fi.IsDir(): case !fi.IsDir():
err = openLSF(v) err = openLSF(v)
if err != nil && !errors.As(err, &lsgo.HeaderError{}) { if err != nil && !errors.As(err, &lsgo.HeaderError{}) {
@ -77,7 +75,6 @@ func main() {
} }
} }
} }
func openLSF(filename string) error { func openLSF(filename string) error {
var ( var (
l *lsgo.Resource l *lsgo.Resource
@ -122,64 +119,16 @@ func openLSF(filename string) error {
func readLSF(filename string) (*lsgo.Resource, error) { func readLSF(filename string) (*lsgo.Resource, error) {
var ( var (
l lsgo.Resource l lsgo.Resource
r io.ReadSeeker f *os.File
file *os.File
fi os.FileInfo
err error err error
) )
switch filepath.Ext(filename) { f, err = os.Open(filename)
case ".lsf", ".lsb":
var b []byte
fi, err = os.Stat(filename)
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Arbitrary size, no lsf file should reach 100 MB (I haven't found one over 90 KB) defer f.Close()
// and if you don't have 100 MB of ram free you shouldn't be using this
if fi.Size() <= 100*1024*1024 {
b, err = ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
r = bytes.NewReader(b)
break
}
fallthrough
default:
b := make([]byte, 4)
file, err = os.Open(filename)
if err != nil {
return nil, err
}
defer file.Close()
_, err = file.Read(b) l, err = lsgo.ReadLSF(f)
if err != nil {
return nil, err
}
if !lsgo.SupportedFormat(b) {
return nil, lsgo.ErrFormat
}
_, err = file.Seek(0, io.SeekStart)
if err != nil {
return nil, err
}
fi, _ = os.Stat(filename)
// I have never seen a valid "ls*" file over 90 KB
if fi.Size() < 1*1024*1024 {
b, err = ioutil.ReadAll(file)
if err != nil {
return nil, err
}
r = bytes.NewReader(b)
} else {
r = file
}
}
l, _, err = lsgo.Decode(r)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -210,7 +159,9 @@ func marshalXML(l *lsgo.Resource) (string, error) {
} }
func writeXML(f io.StringWriter, n string) error { func writeXML(f io.StringWriter, n string) error {
var err error var (
err error
)
_, err = f.WriteString(strings.ToLower(xml.Header)) _, err = f.WriteString(strings.ToLower(xml.Header))
if err != nil { if err != nil {
return err return err
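As a usage sketch of the library behind this command, assuming the master-side layout on the left of this diff (package lsgo with lsb/lsf subpackages registered via blank imports): open the file, let Decode sniff the signature, and work with the returned Resource. The input file name is hypothetical.

package main

import (
	"fmt"
	"log"
	"os"

	"git.narnian.us/lordwelch/lsgo"
	// Blank imports register the lsb and lsf decoders with the format
	// registry, as in the imports shown above on the master side.
	_ "git.narnian.us/lordwelch/lsgo/lsb"
	_ "git.narnian.us/lordwelch/lsgo/lsf"
)

func main() {
	// Hypothetical input path; any .lsf or .lsb file would do.
	f, err := os.Open("example.lsf")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Decode sniffs the first bytes against the registered magic strings
	// and dispatches to the matching reader.
	res, name, err := lsgo.Decode(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("decoded as %s: %d region(s)\n", name, len(res.Regions))
}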

View File

@ -1,9 +1,6 @@
package lsgo package lsgo
import ( import "errors"
"errors"
"fmt"
)
type FileVersion uint32 type FileVersion uint32
@ -46,12 +43,3 @@ var (
ErrInvalidNameKey = errors.New("invalid name key") ErrInvalidNameKey = errors.New("invalid name key")
ErrKeyDoesNotMatch = errors.New("key for this node does not match") ErrKeyDoesNotMatch = errors.New("key for this node does not match")
) )
type HeaderError struct {
Expected string
Got []byte
}
func (he HeaderError) Error() string {
return fmt.Sprintf("Invalid LSF signature; expected % X, got % X", he.Expected, he.Got)
}

View File

@ -1,92 +0,0 @@
// Adapted from the image package
package lsgo
import (
"errors"
"fmt"
"io"
"os"
"sync"
"sync/atomic"
)
// ErrFormat indicates that decoding encountered an unknown format.
var ErrFormat = errors.New("lsgo: unknown format")
// A format holds an image format's name, magic header and how to decode it.
type format struct {
name, magic string
decode func(io.ReadSeeker) (Resource, error)
}
// Formats is the list of registered formats.
var (
formatsMu sync.Mutex
atomicFormats atomic.Value
)
// RegisterFormat registers an image format for use by Decode.
// Name is the name of the format, like "jpeg" or "png".
// Magic is the magic prefix that identifies the format's encoding. The magic
// string can contain "?" wildcards that each match any one byte.
// Decode is the function that decodes the encoded image.
// DecodeConfig is the function that decodes just its configuration.
func RegisterFormat(name, magic string, decode func(io.ReadSeeker) (Resource, error)) {
formatsMu.Lock()
formats, _ := atomicFormats.Load().([]format)
atomicFormats.Store(append(formats, format{name, magic, decode}))
formatsMu.Unlock()
}
// Match reports whether magic matches b. Magic may contain "?" wildcards.
func match(magic string, b []byte) bool {
if len(magic) != len(b) {
return false
}
for i, c := range b {
if magic[i] != c && magic[i] != '?' {
return false
}
}
return true
}
// Sniff determines the format of r's data.
func sniff(r io.ReadSeeker) format {
var (
b []byte = make([]byte, 4)
err error
)
formats, _ := atomicFormats.Load().([]format)
for _, f := range formats {
if len(b) < len(f.magic) {
fmt.Fprintln(os.Stderr, f.magic)
b = make([]byte, len(f.magic))
}
_, err = r.Read(b)
_, _ = r.Seek(0, io.SeekStart)
if err == nil && match(f.magic, b) {
return f
}
}
return format{}
}
func Decode(r io.ReadSeeker) (Resource, string, error) {
f := sniff(r)
if f.decode == nil {
return Resource{}, "", ErrFormat
}
m, err := f.decode(r)
return m, f.name, err
}
func SupportedFormat(signature []byte) bool {
formats, _ := atomicFormats.Load().([]format)
for _, f := range formats {
if match(f.magic, signature) {
return true
}
}
return false
}
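To illustrate the registry this file provides on the master side, a hedged sketch of how a new decoder would hook into it; the "demo" format, its magic string, and the stub Read function are hypothetical and exist only for illustration.

// Hypothetical example package; "demo", its magic and Read are illustrative.
package demo

import (
	"errors"
	"io"

	"git.narnian.us/lordwelch/lsgo"
)

// Signature is the 4-byte magic that sniffing compares against the start of
// the stream; "?" matches any single byte.
const Signature = "DEM?"

// Read has the shape RegisterFormat expects for a decoder; a real decoder
// would parse the stream into a Resource here.
func Read(r io.ReadSeeker) (lsgo.Resource, error) {
	return lsgo.Resource{}, errors.New("demo: decoder not implemented")
}

func init() {
	// After registration, lsgo.Decode sniffs the magic and dispatches to
	// Read, and lsgo.SupportedFormat reports true for matching signatures.
	lsgo.RegisterFormat("demo", Signature, Read)
}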

go.mod (8 changed lines)
View File

@ -2,12 +2,12 @@ module git.narnian.us/lordwelch/lsgo
go 1.15 go 1.15
replace github.com/pierrec/lz4/v4 v4.1.3 => ./third_party/lz4 replace github.com/pierrec/lz4/v4 v4.1.1 => ./lz4
require ( require (
github.com/go-kit/kit v0.10.0 github.com/go-kit/kit v0.10.0
github.com/google/uuid v1.1.4 github.com/google/uuid v1.1.2
github.com/kr/pretty v0.2.1 github.com/kr/pretty v0.2.1
github.com/pierrec/lz4/v4 v4.1.3 github.com/pierrec/lz4/v4 v4.1.1
gonum.org/v1/gonum v0.8.2 gonum.org/v1/gonum v0.8.1
) )

go.sum (8 changed lines)
View File

@ -86,8 +86,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.4 h1:0ecGp3skIrHWPNGPJDaBIghfA6Sp7Ruo2Io8eLKzWm0= github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.4/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@ -329,8 +329,8 @@ golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapK
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM= gonum.org/v1/gonum v0.8.1 h1:wGtP3yGpc5mCLOLeTeBdjeui9oZSz5De0eOjMLC/QuQ=
gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= gonum.org/v1/gonum v0.8.1/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=

View File

@ -1,4 +1,4 @@
package lsb package lsgo
import ( import (
"encoding/binary" "encoding/binary"
@ -7,101 +7,94 @@ import (
"io" "io"
"sort" "sort"
"git.narnian.us/lordwelch/lsgo"
"github.com/go-kit/kit/log" "github.com/go-kit/kit/log"
) )
const ( type LSBHeader struct {
Signature = "LSFM"
PreBG3Signature = "\x00\x00\x00\x40"
)
type Header struct {
Signature [4]byte Signature [4]byte
Size uint32 Size uint32
Endianness uint32 Endianness uint32
Unknown uint32 Unknown uint32
Version lsgo.LSMetadata Version LSMetadata
} }
func (h *Header) Read(r io.ReadSeeker) error { func (lsbh *LSBHeader) Read(r io.ReadSeeker) error {
var ( var (
l log.Logger l log.Logger
pos int64 pos int64
n int n int
err error err error
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsb", "part", "header") l = log.With(Logger, "component", "LS converter", "file type", "lsb", "part", "header")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
n, err = r.Read(h.Signature[:]) n, err = r.Read(lsbh.Signature[:])
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Signature", "read", n, "start position", pos, "value", fmt.Sprintf("%#x", h.Signature[:])) l.Log("member", "Signature", "read", n, "start position", pos, "value", fmt.Sprintf("%#x", lsbh.Signature[:]))
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.Size) err = binary.Read(r, binary.LittleEndian, &lsbh.Size)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Size", "read", n, "start position", pos, "value", h.Size) l.Log("member", "Size", "read", n, "start position", pos, "value", lsbh.Size)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.Endianness) err = binary.Read(r, binary.LittleEndian, &lsbh.Endianness)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Endianness", "read", n, "start position", pos, "value", h.Endianness) l.Log("member", "Endianness", "read", n, "start position", pos, "value", lsbh.Endianness)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.Unknown) err = binary.Read(r, binary.LittleEndian, &lsbh.Unknown)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Unknown", "read", n, "start position", pos, "value", h.Unknown) l.Log("member", "Unknown", "read", n, "start position", pos, "value", lsbh.Unknown)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.Version.Timestamp) err = binary.Read(r, binary.LittleEndian, &lsbh.Version.Timestamp)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Version.Timestamp", "read", n, "start position", pos, "value", h.Version.Timestamp) l.Log("member", "Version.Timestamp", "read", n, "start position", pos, "value", lsbh.Version.Timestamp)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.Version.Major) err = binary.Read(r, binary.LittleEndian, &lsbh.Version.Major)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Version.Major", "read", n, "start position", pos, "value", h.Version.Major) l.Log("member", "Version.Major", "read", n, "start position", pos, "value", lsbh.Version.Major)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.Version.Minor) err = binary.Read(r, binary.LittleEndian, &lsbh.Version.Minor)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Version.Minor", "read", n, "start position", pos, "value", h.Version.Minor) l.Log("member", "Version.Minor", "read", n, "start position", pos, "value", lsbh.Version.Minor)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.Version.Revision) err = binary.Read(r, binary.LittleEndian, &lsbh.Version.Revision)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Version.Revision", "read", n, "start position", pos, "value", h.Version.Revision) l.Log("member", "Version.Revision", "read", n, "start position", pos, "value", lsbh.Version.Revision)
err = binary.Read(r, binary.LittleEndian, &h.Version.Build) err = binary.Read(r, binary.LittleEndian, &lsbh.Version.Build)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Version.Build", "read", n, "start position", pos, "value", h.Version.Build) l.Log("member", "Version.Build", "read", n, "start position", pos, "value", lsbh.Version.Build)
pos += int64(n) pos += int64(n)
return nil return nil
@ -109,42 +102,43 @@ func (h *Header) Read(r io.ReadSeeker) error {
type IdentifierDictionary map[int]string type IdentifierDictionary map[int]string
func Read(r io.ReadSeeker) (lsgo.Resource, error) { func ReadLSB(r io.ReadSeeker) (Resource, error) {
var ( var (
hdr = &Header{} hdr = &LSBHeader{}
h = [4]byte{0x00, 0x00, 0x00, 0x40}
err error err error
d IdentifierDictionary d IdentifierDictionary
res lsgo.Resource res Resource
l log.Logger l log.Logger
pos int64 pos int64
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsb", "part", "file") l = log.With(Logger, "component", "LS converter", "file type", "lsb", "part", "file")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
l.Log("member", "header", "start position", pos) l.Log("member", "header", "start position", pos)
err = hdr.Read(r) err = hdr.Read(r)
if err != nil { if err != nil {
return lsgo.Resource{}, err return Resource{}, err
} }
if !(string(hdr.Signature[:]) == Signature || string(hdr.Signature[:]) == PreBG3Signature) { if !(hdr.Signature == [4]byte{'L', 'S', 'F', 'M'} || hdr.Signature == h) {
return lsgo.Resource{}, lsgo.HeaderError{ return Resource{}, HeaderError{
Expected: Signature, Expected: []byte("LSFM"),
Got: hdr.Signature[:], Got: hdr.Signature[:],
} }
} }
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
l.Log("member", "string dictionary", "start position", pos) l.Log("member", "string dictionary", "start position", pos)
d, err = ReadLSBDictionary(r, binary.LittleEndian) d, err = ReadLSBDictionary(r, binary.LittleEndian)
if err != nil { if err != nil {
return lsgo.Resource{}, err return Resource{}, err
} }
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
l.Log("member", "Regions", "start position", pos) l.Log("member", "Regions", "start position", pos)
res, err = ReadLSBRegions(r, d, binary.LittleEndian, lsgo.FileVersion(hdr.Version.Major)) res, err = ReadLSBRegions(r, d, binary.LittleEndian, FileVersion(hdr.Version.Major))
res.Metadata = hdr.Version res.Metadata = hdr.Version
return res, err return res, err
} }
@ -159,8 +153,8 @@ func ReadLSBDictionary(r io.ReadSeeker, endianness binary.ByteOrder) (Identifier
pos int64 pos int64
n int n int
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsb", "part", "dictionary") l = log.With(Logger, "component", "LS converter", "file type", "lsb", "part", "dictionary")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
err = binary.Read(r, endianness, &length) err = binary.Read(r, endianness, &length)
n = 4 n = 4
@ -185,7 +179,7 @@ func ReadLSBDictionary(r io.ReadSeeker, endianness binary.ByteOrder) (Identifier
l.Log("member", "stringLength", "read", n, "start position", pos, "value", stringLength) l.Log("member", "stringLength", "read", n, "start position", pos, "value", stringLength)
pos += int64(n) pos += int64(n)
str, err = lsgo.ReadCString(r, int(stringLength)) str, err = ReadCString(r, int(stringLength))
n += int(stringLength) n += int(stringLength)
if err != nil { if err != nil {
return dict, err return dict, err
@ -205,7 +199,7 @@ func ReadLSBDictionary(r io.ReadSeeker, endianness binary.ByteOrder) (Identifier
return dict, nil return dict, nil
} }
func ReadLSBRegions(r io.ReadSeeker, d IdentifierDictionary, endianness binary.ByteOrder, version lsgo.FileVersion) (lsgo.Resource, error) { func ReadLSBRegions(r io.ReadSeeker, d IdentifierDictionary, endianness binary.ByteOrder, version FileVersion) (Resource, error) {
var ( var (
regions []struct { regions []struct {
name string name string
@ -218,13 +212,13 @@ func ReadLSBRegions(r io.ReadSeeker, d IdentifierDictionary, endianness binary.B
pos int64 pos int64
n int n int
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsb", "part", "region") l = log.With(Logger, "component", "LS converter", "file type", "lsb", "part", "region")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
err = binary.Read(r, endianness, &regionCount) err = binary.Read(r, endianness, &regionCount)
n = 4 n = 4
if err != nil { if err != nil {
return lsgo.Resource{}, err return Resource{}, err
} }
l.Log("member", "regionCount", "read", n, "start position", pos, "value", regionCount) l.Log("member", "regionCount", "read", n, "start position", pos, "value", regionCount)
pos += int64(n) pos += int64(n)
@ -241,17 +235,17 @@ func ReadLSBRegions(r io.ReadSeeker, d IdentifierDictionary, endianness binary.B
err = binary.Read(r, endianness, &key) err = binary.Read(r, endianness, &key)
n = 4 n = 4
if err != nil { if err != nil {
return lsgo.Resource{}, err return Resource{}, err
} }
l.Log("member", "key", "read", n, "start position", pos, "value", d[int(key)], "key", key) l.Log("member", "key", "read", n, "start position", pos, "value", d[int(key)], "key", key)
pos += int64(n) pos += int64(n)
if regions[i].name, ok = d[int(key)]; !ok { if regions[i].name, ok = d[int(key)]; !ok {
return lsgo.Resource{}, lsgo.ErrInvalidNameKey return Resource{}, ErrInvalidNameKey
} }
err = binary.Read(r, endianness, &regions[i].offset) err = binary.Read(r, endianness, &regions[i].offset)
n = 4 n = 4
if err != nil { if err != nil {
return lsgo.Resource{}, err return Resource{}, err
} }
l.Log("member", "offset", "read", n, "start position", pos, "value", regions[i].offset) l.Log("member", "offset", "read", n, "start position", pos, "value", regions[i].offset)
pos += int64(n) pos += int64(n)
@ -259,11 +253,11 @@ func ReadLSBRegions(r io.ReadSeeker, d IdentifierDictionary, endianness binary.B
sort.Slice(regions, func(i, j int) bool { sort.Slice(regions, func(i, j int) bool {
return regions[i].offset < regions[j].offset return regions[i].offset < regions[j].offset
}) })
res := lsgo.Resource{ res := Resource{
Regions: make([]*lsgo.Node, 0, regionCount), Regions: make([]*Node, 0, regionCount),
} }
for _, re := range regions { for _, re := range regions {
var node *lsgo.Node var node *Node
node, err = readLSBNode(r, d, endianness, version, re.offset) node, err = readLSBNode(r, d, endianness, version, re.offset)
if err != nil { if err != nil {
return res, err return res, err
@ -274,12 +268,12 @@ func ReadLSBRegions(r io.ReadSeeker, d IdentifierDictionary, endianness binary.B
return res, nil return res, nil
} }
func readLSBNode(r io.ReadSeeker, d IdentifierDictionary, endianness binary.ByteOrder, version lsgo.FileVersion, offset uint32) (*lsgo.Node, error) { func readLSBNode(r io.ReadSeeker, d IdentifierDictionary, endianness binary.ByteOrder, version FileVersion, offset uint32) (*Node, error) {
var ( var (
key uint32 key uint32
attrCount uint32 attrCount uint32
childCount uint32 childCount uint32
node = new(lsgo.Node) node = new(Node)
err error err error
ok bool ok bool
@ -287,8 +281,8 @@ func readLSBNode(r io.ReadSeeker, d IdentifierDictionary, endianness binary.Byte
pos int64 pos int64
n int n int
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsb", "part", "node") l = log.With(Logger, "component", "LS converter", "file type", "lsb", "part", "node")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
if pos != int64(offset) && offset != 0 { if pos != int64(offset) && offset != 0 {
panic("shit") panic("shit")
@ -321,7 +315,7 @@ func readLSBNode(r io.ReadSeeker, d IdentifierDictionary, endianness binary.Byte
} }
l.Log("member", "childCount", "read", n, "start position", pos, "value", childCount) l.Log("member", "childCount", "read", n, "start position", pos, "value", childCount)
node.Attributes = make([]lsgo.NodeAttribute, int(attrCount)) node.Attributes = make([]NodeAttribute, int(attrCount))
for i := range node.Attributes { for i := range node.Attributes {
node.Attributes[i], err = readLSBAttribute(r, d, endianness, version) node.Attributes[i], err = readLSBAttribute(r, d, endianness, version)
@ -330,7 +324,7 @@ func readLSBNode(r io.ReadSeeker, d IdentifierDictionary, endianness binary.Byte
} }
} }
node.Children = make([]*lsgo.Node, int(childCount)) node.Children = make([]*Node, int(childCount))
for i := range node.Children { for i := range node.Children {
node.Children[i], err = readLSBNode(r, d, endianness, version, 0) node.Children[i], err = readLSBNode(r, d, endianness, version, 0)
if err != nil { if err != nil {
@ -340,12 +334,12 @@ func readLSBNode(r io.ReadSeeker, d IdentifierDictionary, endianness binary.Byte
return node, nil return node, nil
} }
func readLSBAttribute(r io.ReadSeeker, d IdentifierDictionary, endianness binary.ByteOrder, version lsgo.FileVersion) (lsgo.NodeAttribute, error) { func readLSBAttribute(r io.ReadSeeker, d IdentifierDictionary, endianness binary.ByteOrder, version FileVersion) (NodeAttribute, error) {
var ( var (
key uint32 key uint32
name string name string
attrType uint32 attrType uint32
attr lsgo.NodeAttribute attr NodeAttribute
err error err error
ok bool ok bool
) )
@ -354,21 +348,21 @@ func readLSBAttribute(r io.ReadSeeker, d IdentifierDictionary, endianness binary
return attr, err return attr, err
} }
if name, ok = d[int(key)]; !ok { if name, ok = d[int(key)]; !ok {
return attr, lsgo.ErrInvalidNameKey return attr, ErrInvalidNameKey
} }
err = binary.Read(r, endianness, &attrType) err = binary.Read(r, endianness, &attrType)
if err != nil { if err != nil {
return attr, err return attr, err
} }
return ReadLSBAttr(r, name, lsgo.DataType(attrType), endianness, version) return ReadLSBAttr(r, name, DataType(attrType), endianness, version)
} }
func ReadLSBAttr(r io.ReadSeeker, name string, dt lsgo.DataType, endianness binary.ByteOrder, version lsgo.FileVersion) (lsgo.NodeAttribute, error) { func ReadLSBAttr(r io.ReadSeeker, name string, dt DataType, endianness binary.ByteOrder, version FileVersion) (NodeAttribute, error) {
// LSF and LSB serialize the buffer types differently, so specialized // LSF and LSB serialize the buffer types differently, so specialized
// code is added to the LSB and LSf serializers, and the common code is // code is added to the LSB and LSf serializers, and the common code is
// available in BinUtils.ReadAttribute() // available in BinUtils.ReadAttribute()
var ( var (
attr = lsgo.NodeAttribute{ attr = NodeAttribute{
Type: dt, Type: dt,
Name: name, Name: name,
} }
@ -378,38 +372,38 @@ func ReadLSBAttr(r io.ReadSeeker, name string, dt lsgo.DataType, endianness bina
l log.Logger l log.Logger
pos int64 pos int64
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsb", "part", "attribute") l = log.With(Logger, "component", "LS converter", "file type", "lsb", "part", "attribute")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
switch dt { switch dt {
case lsgo.DTString, lsgo.DTPath, lsgo.DTFixedString, lsgo.DTLSString: // DTLSWString: case DTString, DTPath, DTFixedString, DTLSString: //, DTLSWString:
var v string var v string
err = binary.Read(r, endianness, &length) err = binary.Read(r, endianness, &length)
if err != nil { if err != nil {
return attr, err return attr, err
} }
v, err = lsgo.ReadCString(r, int(length)) v, err = ReadCString(r, int(length))
attr.Value = v attr.Value = v
l.Log("member", name, "read", length, "start position", pos, "value", attr.Value) l.Log("member", name, "read", length, "start position", pos, "value", attr.Value)
return attr, err return attr, err
case lsgo.DTWString: case DTWString:
panic("Not implemented") panic("Not implemented")
case lsgo.DTTranslatedString: case DTTranslatedString:
var v lsgo.TranslatedString var v TranslatedString
v, err = lsgo.ReadTranslatedString(r, version, 0) v, err = ReadTranslatedString(r, version, 0)
attr.Value = v attr.Value = v
l.Log("member", name, "read", length, "start position", pos, "value", attr.Value) l.Log("member", name, "read", length, "start position", pos, "value", attr.Value)
return attr, err return attr, err
case lsgo.DTTranslatedFSString: case DTTranslatedFSString:
panic("Not implemented") panic("Not implemented")
var v lsgo.TranslatedFSString var v TranslatedFSString
// v, err = ReadTranslatedFSString(r, Version) // v, err = ReadTranslatedFSString(r, Version)
attr.Value = v attr.Value = v
@ -417,7 +411,7 @@ func ReadLSBAttr(r io.ReadSeeker, name string, dt lsgo.DataType, endianness bina
return attr, err return attr, err
case lsgo.DTScratchBuffer: case DTScratchBuffer:
panic("Not implemented") panic("Not implemented")
v := make([]byte, length) v := make([]byte, length)
@ -429,11 +423,6 @@ func ReadLSBAttr(r io.ReadSeeker, name string, dt lsgo.DataType, endianness bina
return attr, err return attr, err
default: default:
return lsgo.ReadAttribute(r, name, dt, uint(length), l) return ReadAttribute(r, name, dt, uint(length), l)
} }
} }
func init() {
lsgo.RegisterFormat("lsb", Signature, Read)
lsgo.RegisterFormat("lsb", PreBG3Signature, Read)
}
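On the master side the LSB reader above is normally reached through the format registry, but it can also be called directly. A minimal sketch, with a hypothetical file name:

package main

import (
	"log"
	"os"

	"git.narnian.us/lordwelch/lsgo/lsb"
)

func main() {
	// Hypothetical path; lsb.Read checks for the "LSFM" signature or the
	// pre-BG3 "\x00\x00\x00\x40" signature, as shown above.
	f, err := os.Open("globals.lsb")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	res, err := lsb.Read(f)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("read %d region(s)", len(res.Regions))
}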

View File

@ -1,24 +1,61 @@
package lsf package lsgo
import ( import (
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"io" "io"
"strconv" "strconv"
"strings"
"git.narnian.us/lordwelch/lsgo"
"github.com/go-kit/kit/log" "github.com/go-kit/kit/log"
) )
const Signature = "LSOF" var (
LSFSignature = [4]byte{0x4C, 0x53, 0x4F, 0x46}
Logger log.Logger = log.NewNopLogger()
)
type Header struct { // NewFilter allows filtering of l
func NewFilter(f map[string][]string, l log.Logger) log.Logger {
return filter{
filter: f,
next: l,
}
}
type filter struct {
next log.Logger
filter map[string][]string
}
func (f filter) Log(keyvals ...interface{}) error {
var allowed = true // allow everything
for i := 0; i < len(keyvals)-1; i += 2 {
if v, ok := keyvals[i].(string); ok { // key
if fil, ok := f.filter[v]; ok { // key has a filter
if v, ok = keyvals[i+1].(string); ok { // value is a string
allowed = false // this key has a filter deny everything except what the filter allows
for _, fi := range fil {
if strings.Contains(v, fi) {
allowed = true
}
}
}
}
}
}
if allowed {
return f.next.Log(keyvals...)
}
return nil
}
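A short usage sketch of the NewFilter helper above, assuming the v0.0.1-side layout in which Logger and NewFilter live in package lsgo (on master the same Logger variable is referenced as lsgo.Logger from the lsb and lsf packages): install a filtered go-kit logger so only selected trace records from the readers are printed.

package main

import (
	"os"

	kitlog "github.com/go-kit/kit/log"

	"git.narnian.us/lordwelch/lsgo"
)

func main() {
	// Replace the default no-op logger with a filtered logfmt logger:
	// records carrying a "part" key pass only if the value contains
	// "header"; keys with no entry in the filter map are never filtered.
	lsgo.Logger = lsgo.NewFilter(map[string][]string{
		"part": {"header"},
	}, kitlog.NewLogfmtLogger(os.Stderr))

	// Subsequent reads (e.g. lsgo.ReadLSF) will then emit only the header
	// trace lines (member, read, start position, value) shown in this diff.
}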
type LSFHeader struct {
// LSOF file signature // LSOF file signature
Signature [4]byte Signature [4]byte
// Version of the LSOF file D:OS EE is version 1/2, D:OS 2 is version 3 // Version of the LSOF file D:OS EE is version 1/2, D:OS 2 is version 3
Version lsgo.FileVersion Version FileVersion
// Possibly version number? (major, minor, rev, build) // Possibly version number? (major, minor, rev, build)
EngineVersion uint32 EngineVersion uint32
@ -46,6 +83,7 @@ type Header struct {
// Compressed size of the raw value buffer // Compressed size of the raw value buffer
ValuesSizeOnDisk uint32 ValuesSizeOnDisk uint32
// summary
// Uses the same format as packages (see BinUtils.MakeCompressionFlags) // Uses the same format as packages (see BinUtils.MakeCompressionFlags)
CompressionFlags byte CompressionFlags byte
@ -58,158 +96,166 @@ type Header struct {
Extended uint32 Extended uint32
} }
func (h *Header) Read(r io.ReadSeeker) error { func (lsfh *LSFHeader) Read(r io.ReadSeeker) error {
var ( var (
l log.Logger l log.Logger
pos int64 pos int64
n int n int
err error err error
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsf", "part", "header") l = log.With(Logger, "component", "LS converter", "file type", "lsf", "part", "header")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
n, err = r.Read(h.Signature[:]) n, err = r.Read(lsfh.Signature[:])
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Signature", "read", n, "start position", pos, "value", string(h.Signature[:])) l.Log("member", "Signature", "read", n, "start position", pos, "value", string(lsfh.Signature[:]))
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.Version) err = binary.Read(r, binary.LittleEndian, &lsfh.Version)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Version", "read", n, "start position", pos, "value", h.Version) l.Log("member", "Version", "read", n, "start position", pos, "value", lsfh.Version)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.EngineVersion) err = binary.Read(r, binary.LittleEndian, &lsfh.EngineVersion)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "EngineVersion", "read", n, "start position", pos, "value", fmt.Sprintf("%d.%d.%d.%d", (h.EngineVersion&0xf0000000)>>28, (h.EngineVersion&0xf000000)>>24, (h.EngineVersion&0xff0000)>>16, (h.EngineVersion&0xffff))) l.Log("member", "EngineVersion", "read", n, "start position", pos, "value", fmt.Sprintf("%d.%d.%d.%d", (lsfh.EngineVersion&0xf0000000)>>28, (lsfh.EngineVersion&0xf000000)>>24, (lsfh.EngineVersion&0xff0000)>>16, (lsfh.EngineVersion&0xffff)))
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.StringsUncompressedSize) err = binary.Read(r, binary.LittleEndian, &lsfh.StringsUncompressedSize)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "StringsUncompressedSize", "read", n, "start position", pos, "value", h.StringsUncompressedSize) l.Log("member", "StringsUncompressedSize", "read", n, "start position", pos, "value", lsfh.StringsUncompressedSize)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.StringsSizeOnDisk) err = binary.Read(r, binary.LittleEndian, &lsfh.StringsSizeOnDisk)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "StringsSizeOnDisk", "read", n, "start position", pos, "value", h.StringsSizeOnDisk) l.Log("member", "StringsSizeOnDisk", "read", n, "start position", pos, "value", lsfh.StringsSizeOnDisk)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.NodesUncompressedSize) err = binary.Read(r, binary.LittleEndian, &lsfh.NodesUncompressedSize)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "NodesUncompressedSize", "read", n, "start position", pos, "value", h.NodesUncompressedSize) l.Log("member", "NodesUncompressedSize", "read", n, "start position", pos, "value", lsfh.NodesUncompressedSize)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.NodesSizeOnDisk) err = binary.Read(r, binary.LittleEndian, &lsfh.NodesSizeOnDisk)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "NodesSizeOnDisk", "read", n, "start position", pos, "value", h.NodesSizeOnDisk) l.Log("member", "NodesSizeOnDisk", "read", n, "start position", pos, "value", lsfh.NodesSizeOnDisk)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.AttributesUncompressedSize) err = binary.Read(r, binary.LittleEndian, &lsfh.AttributesUncompressedSize)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "AttributesUncompressedSize", "read", n, "start position", pos, "value", h.AttributesUncompressedSize) l.Log("member", "AttributesUncompressedSize", "read", n, "start position", pos, "value", lsfh.AttributesUncompressedSize)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.AttributesSizeOnDisk) err = binary.Read(r, binary.LittleEndian, &lsfh.AttributesSizeOnDisk)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "AttributesSizeOnDisk", "read", n, "start position", pos, "value", h.AttributesSizeOnDisk) l.Log("member", "AttributesSizeOnDisk", "read", n, "start position", pos, "value", lsfh.AttributesSizeOnDisk)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.ValuesUncompressedSize) err = binary.Read(r, binary.LittleEndian, &lsfh.ValuesUncompressedSize)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "ValuesUncompressedSize", "read", n, "start position", pos, "value", h.ValuesUncompressedSize) l.Log("member", "ValuesUncompressedSize", "read", n, "start position", pos, "value", lsfh.ValuesUncompressedSize)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.ValuesSizeOnDisk) err = binary.Read(r, binary.LittleEndian, &lsfh.ValuesSizeOnDisk)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "ValuesSizeOnDisk", "read", n, "start position", pos, "value", h.ValuesSizeOnDisk) l.Log("member", "ValuesSizeOnDisk", "read", n, "start position", pos, "value", lsfh.ValuesSizeOnDisk)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.CompressionFlags) err = binary.Read(r, binary.LittleEndian, &lsfh.CompressionFlags)
n = 1 n = 1
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "CompressionFlags", "read", n, "start position", pos, "value", h.CompressionFlags) l.Log("member", "CompressionFlags", "read", n, "start position", pos, "value", lsfh.CompressionFlags)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.Unknown2) err = binary.Read(r, binary.LittleEndian, &lsfh.Unknown2)
n = 1 n = 1
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Unknown2", "read", n, "start position", pos, "value", h.Unknown2) l.Log("member", "Unknown2", "read", n, "start position", pos, "value", lsfh.Unknown2)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.Unknown3) err = binary.Read(r, binary.LittleEndian, &lsfh.Unknown3)
n = 2 n = 2
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Unknown3", "read", n, "start position", pos, "value", h.Unknown3) l.Log("member", "Unknown3", "read", n, "start position", pos, "value", lsfh.Unknown3)
pos += int64(n) pos += int64(n)
err = binary.Read(r, binary.LittleEndian, &h.Extended) err = binary.Read(r, binary.LittleEndian, &lsfh.Extended)
n = 4 n = 4
if err != nil { if err != nil {
return err return err
} }
l.Log("member", "Extended", "read", n, "start position", pos, "value", h.Extended) l.Log("member", "Extended", "read", n, "start position", pos, "value", lsfh.Extended)
pos += int64(n) pos += int64(n)
if !h.IsCompressed() { if !lsfh.IsCompressed() {
h.NodesSizeOnDisk = h.NodesUncompressedSize lsfh.NodesSizeOnDisk = lsfh.NodesUncompressedSize
h.AttributesSizeOnDisk = h.AttributesUncompressedSize lsfh.AttributesSizeOnDisk = lsfh.AttributesUncompressedSize
h.StringsSizeOnDisk = h.StringsUncompressedSize lsfh.StringsSizeOnDisk = lsfh.StringsUncompressedSize
h.ValuesSizeOnDisk = h.ValuesUncompressedSize lsfh.ValuesSizeOnDisk = lsfh.ValuesUncompressedSize
} }
return nil return nil
} }
func (h Header) IsCompressed() bool { func (lsfh LSFHeader) IsCompressed() bool {
return lsgo.CompressionFlagsToMethod(h.CompressionFlags) != lsgo.CMNone && lsgo.CompressionFlagsToMethod(h.CompressionFlags) != lsgo.CMInvalid return CompressionFlagsToMethod(lsfh.CompressionFlags) != CMNone && CompressionFlagsToMethod(lsfh.CompressionFlags) != CMInvalid
} }
type NodeEntry struct { type NodeEntry struct {
Long bool Long bool
// summary
// (16-bit MSB: index into name hash table, 16-bit LSB: offset in hash chain) // (16-bit MSB: index into name hash table, 16-bit LSB: offset in hash chain)
NameHashTableIndex uint32 NameHashTableIndex uint32
// summary
// (-1: node has no attributes) // (-1: node has no attributes)
FirstAttributeIndex int32 FirstAttributeIndex int32
// summary
// (-1: this node is a root region) // (-1: this node is a root region)
ParentIndex int32 ParentIndex int32
// summary
// (-1: this is the last node) // (-1: this is the last node)
NextSiblingIndex int32 NextSiblingIndex int32
} }
@ -228,11 +274,12 @@ func (ne *NodeEntry) readShort(r io.ReadSeeker) error {
err error err error
n int n int
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsf", "part", "short node") l = log.With(Logger, "component", "LS converter", "file type", "lsf", "part", "short node")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
err = binary.Read(r, binary.LittleEndian, &ne.NameHashTableIndex) err = binary.Read(r, binary.LittleEndian, &ne.NameHashTableIndex)
n = 4 n = 4
if err != nil { if err != nil {
// logger.Println(err, "ne.NameHashTableIndex", ne.NameHashTableIndex)
return err return err
} }
l.Log("member", "NameHashTableIndex", "read", n, "start position", pos, "value", strconv.Itoa(ne.NameIndex())+" "+strconv.Itoa(ne.NameOffset())) l.Log("member", "NameHashTableIndex", "read", n, "start position", pos, "value", strconv.Itoa(ne.NameIndex())+" "+strconv.Itoa(ne.NameOffset()))
@ -241,6 +288,7 @@ func (ne *NodeEntry) readShort(r io.ReadSeeker) error {
err = binary.Read(r, binary.LittleEndian, &ne.FirstAttributeIndex) err = binary.Read(r, binary.LittleEndian, &ne.FirstAttributeIndex)
n = 4 n = 4
if err != nil { if err != nil {
// logger.Println(err, "ne.FirstAttributeIndex", ne.FirstAttributeIndex)
return err return err
} }
l.Log("member", "NameHashTableIndex", "read", n, "start position", pos, "value", ne.FirstAttributeIndex) l.Log("member", "NameHashTableIndex", "read", n, "start position", pos, "value", ne.FirstAttributeIndex)
@ -249,6 +297,7 @@ func (ne *NodeEntry) readShort(r io.ReadSeeker) error {
err = binary.Read(r, binary.LittleEndian, &ne.ParentIndex) err = binary.Read(r, binary.LittleEndian, &ne.ParentIndex)
n = 4 n = 4
if err != nil { if err != nil {
// logger.Println(err, "ne.ParentIndex", ne.ParentIndex)
return err return err
} }
l.Log("member", "NameHashTableIndex", "read", n, "start position", pos, "value", ne.ParentIndex) l.Log("member", "NameHashTableIndex", "read", n, "start position", pos, "value", ne.ParentIndex)
@ -263,8 +312,8 @@ func (ne *NodeEntry) readLong(r io.ReadSeeker) error {
err error err error
n int n int
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsf", "part", "long node") l = log.With(Logger, "component", "LS converter", "file type", "lsf", "part", "long node")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
err = binary.Read(r, binary.LittleEndian, &ne.NameHashTableIndex) err = binary.Read(r, binary.LittleEndian, &ne.NameHashTableIndex)
n = 4 n = 4
if err != nil { if err != nil {
@ -310,6 +359,7 @@ func (ne NodeEntry) NameOffset() int {
// Processed node information for a node in the LSF file // Processed node information for a node in the LSF file
type NodeInfo struct { type NodeInfo struct {
// summary
// (-1: this node is a root region) // (-1: this node is a root region)
ParentIndex int ParentIndex int
@ -320,6 +370,8 @@ type NodeInfo struct {
// Offset in hash chain // Offset in hash chain
NameOffset int NameOffset int
// summary
// (-1: node has no attributes) // (-1: node has no attributes)
FirstAttributeIndex int FirstAttributeIndex int
} }
@ -327,16 +379,23 @@ type NodeInfo struct {
// attribute extension in the LSF file // attribute extension in the LSF file
type AttributeEntry struct { type AttributeEntry struct {
Long bool Long bool
// summary
// (16-bit MSB: index into name hash table, 16-bit LSB: offset in hash chain) // (16-bit MSB: index into name hash table, 16-bit LSB: offset in hash chain)
NameHashTableIndex uint32 NameHashTableIndex uint32
// summary
// 26-bit MSB: Length of this attribute // 26-bit MSB: Length of this attribute
TypeAndLength uint32 TypeAndLength uint32
// summary
// Note: These indexes are assigned seemingly arbitrarily, and are not necessarily indices into the node list // Note: These indexes are assigned seemingly arbitrarily, and are not necessarily indices into the node list
NodeIndex int32 NodeIndex int32
// summary
// Note: These indexes are assigned seemingly arbitrarily, and are not necessarily indices into the node list // Note: These indexes are assigned seemingly arbitrarily, and are not necessarily indices into the node list
NextAttributeIndex int32 NextAttributeIndex int32
@ -358,8 +417,8 @@ func (ae *AttributeEntry) readShort(r io.ReadSeeker) error {
err error err error
n int n int
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsf", "part", "short attribute") l = log.With(Logger, "component", "LS converter", "file type", "lsf", "part", "short attribute")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
err = binary.Read(r, binary.LittleEndian, &ae.NameHashTableIndex) err = binary.Read(r, binary.LittleEndian, &ae.NameHashTableIndex)
n = 4 n = 4
@ -395,8 +454,8 @@ func (ae *AttributeEntry) readLong(r io.ReadSeeker) error {
err error err error
n int n int
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsf", "part", "long attribute") l = log.With(Logger, "component", "LS converter", "file type", "lsf", "part", "long attribute")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
err = binary.Read(r, binary.LittleEndian, &ae.NameHashTableIndex) err = binary.Read(r, binary.LittleEndian, &ae.NameHashTableIndex)
n = 4 n = 4
@ -444,8 +503,8 @@ func (ae AttributeEntry) NameOffset() int {
} }
// Type of this attribute (see NodeAttribute.DataType) // Type of this attribute (see NodeAttribute.DataType)
func (ae AttributeEntry) TypeID() lsgo.DataType { func (ae AttributeEntry) TypeID() DataType {
return lsgo.DataType(ae.TypeAndLength & 0x3f) return DataType(ae.TypeAndLength & 0x3f)
} }
// Length of this attribute // Length of this attribute
@ -463,13 +522,14 @@ type AttributeInfo struct {
NameOffset int NameOffset int
// Type of this attribute (see NodeAttribute.DataType) // Type of this attribute (see NodeAttribute.DataType)
TypeID lsgo.DataType TypeID DataType
// Length of this attribute // Length of this attribute
Length uint Length uint
// Absolute position of attribute data in the values section // Absolute position of attribute data in the values section
DataOffset uint DataOffset uint
// summary
// (-1: this is the last attribute) // (-1: this is the last attribute)
NextAttributeIndex int NextAttributeIndex int
@ -486,8 +546,8 @@ func ReadNames(r io.ReadSeeker) ([][]string, error) {
pos int64 pos int64
n int n int
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsf", "part", "names") l = log.With(Logger, "component", "LS converter", "file type", "lsf", "part", "names")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
err = binary.Read(r, binary.LittleEndian, &numHashEntries) err = binary.Read(r, binary.LittleEndian, &numHashEntries)
n = 4 n = 4
@ -499,17 +559,15 @@ func ReadNames(r io.ReadSeeker) ([][]string, error) {
names = make([][]string, int(numHashEntries)) names = make([][]string, int(numHashEntries))
for i := range names { for i := range names {
var numStrings uint16 var numStrings uint16
err = binary.Read(r, binary.LittleEndian, &numStrings) err = binary.Read(r, binary.LittleEndian, &numStrings)
n = 4 n = 4
if err != nil {
return nil, err
}
l.Log("member", "numStrings", "read", n, "start position", pos, "value", numStrings) l.Log("member", "numStrings", "read", n, "start position", pos, "value", numStrings)
pos += int64(n) pos += int64(n)
hash := make([]string, int(numStrings)) var hash = make([]string, int(numStrings))
for x := range hash { for x := range hash {
var ( var (
nameLen uint16 nameLen uint16
@ -533,13 +591,16 @@ func ReadNames(r io.ReadSeeker) ([][]string, error) {
pos += int64(n) pos += int64(n)
hash[x] = string(name) hash[x] = string(name)
} }
names[i] = hash names[i] = hash
} }
return names, nil return names, nil
} }
func readNodeInfo(r io.ReadSeeker, longNodes bool) ([]NodeInfo, error) { func readNodeInfo(r io.ReadSeeker, longNodes bool) ([]NodeInfo, error) {
// Console.WriteLine(" ----- DUMP OF NODE TABLE -----");
var ( var (
nodes []NodeInfo nodes []NodeInfo
err error err error
@ -548,6 +609,7 @@ func readNodeInfo(r io.ReadSeeker, longNodes bool) ([]NodeInfo, error) {
for err == nil { for err == nil {
var node NodeInfo var node NodeInfo
// var pos = lsfr.Position;
item := &NodeEntry{Long: longNodes} item := &NodeEntry{Long: longNodes}
err = item.Read(r) err = item.Read(r)
@ -556,6 +618,11 @@ func readNodeInfo(r io.ReadSeeker, longNodes bool) ([]NodeInfo, error) {
node.NameIndex = item.NameIndex() node.NameIndex = item.NameIndex()
node.NameOffset = item.NameOffset() node.NameOffset = item.NameOffset()
node.ParentIndex = int(item.ParentIndex) node.ParentIndex = int(item.ParentIndex)
// Console.WriteLine(String.Format(
// "{0}: {1} @ {2:X} (parent {3}, firstAttribute {4})",
// index, Names[node.NameIndex][node.NameOffset], pos, node.ParentIndex,
// node.FirstAttributeIndex
// ));
nodes = append(nodes, node) nodes = append(nodes, node)
index++ index++
@ -583,6 +650,7 @@ func readAttributeInfo(r io.ReadSeeker, long bool) []AttributeInfo {
break break
} }
// pretty.Log( attribute)
if long { if long {
dataOffset = uint(attribute.Offset) dataOffset = uint(attribute.Offset)
nextAttrIndex = int(attribute.NextAttributeIndex) nextAttrIndex = int(attribute.NextAttributeIndex)
@ -599,6 +667,7 @@ func readAttributeInfo(r io.ReadSeeker, long bool) []AttributeInfo {
if !long { if !long {
// get index of previous attribute for node // get index of previous attribute for node
if indexOfLastAttr, ok := prevAttributeRefs[int(attribute.NodeIndex)]; ok { // previous attribute exists for current node set the next attribute index for the previous node to this attribute if indexOfLastAttr, ok := prevAttributeRefs[int(attribute.NodeIndex)]; ok { // previous attribute exists for current node set the next attribute index for the previous node to this attribute
attributes[indexOfLastAttr].NextAttributeIndex = index attributes[indexOfLastAttr].NextAttributeIndex = index
} }
@ -633,9 +702,19 @@ func readAttributeInfo(r io.ReadSeeker, long bool) []AttributeInfo {
// ); // );
// Console.WriteLine(debug); // Console.WriteLine(debug);
// } // }
} }
func Read(r io.ReadSeeker) (lsgo.Resource, error) { type HeaderError struct {
Expected []byte
Got []byte
}
func (he HeaderError) Error() string {
return fmt.Sprintf("Invalid LSF signature; expected %v, got %v", he.Expected, he.Got)
}
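A brief sketch of how callers can use this error type, mirroring the errors.As check in the command-line tool earlier in this comparison (v0.0.1-side API; the package and the tryReadLSF wrapper are illustrative only):

package example

import (
	"errors"
	"io"
	"log"

	"git.narnian.us/lordwelch/lsgo"
)

// tryReadLSF distinguishes "this is not an LSF file" from genuine read errors.
func tryReadLSF(r io.ReadSeeker) (lsgo.Resource, bool) {
	res, err := lsgo.ReadLSF(r)
	if err != nil {
		var he lsgo.HeaderError
		if errors.As(err, &he) {
			// Wrong magic bytes; he.Expected and he.Got carry both signatures.
			log.Printf("not an LSF file: %v", he)
		} else {
			log.Printf("read error: %v", err)
		}
		return lsgo.Resource{}, false
	}
	return res, true
}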
func ReadLSF(r io.ReadSeeker) (Resource, error) {
var ( var (
err error err error
@ -649,36 +728,38 @@ func Read(r io.ReadSeeker) (lsgo.Resource, error) {
attributeInfo []AttributeInfo attributeInfo []AttributeInfo
// Node instances // Node instances
nodeInstances []*lsgo.Node nodeInstances []*Node
) )
var ( var (
l log.Logger l log.Logger
pos, npos int64 pos, npos int64
// n int // n int
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsf", "part", "file") l = log.With(Logger, "component", "LS converter", "file type", "lsf", "part", "file")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
l.Log("member", "header", "start position", pos) l.Log("member", "header", "start position", pos)
hdr := &Header{} hdr := &LSFHeader{}
err = hdr.Read(r) err = hdr.Read(r)
if err != nil || (string(hdr.Signature[:]) != Signature) { if err != nil || (hdr.Signature != LSFSignature) {
return lsgo.Resource{}, lsgo.HeaderError{Expected: Signature, Got: hdr.Signature[:]} return Resource{}, HeaderError{LSFSignature[:], hdr.Signature[:]}
} }
if hdr.Version < lsgo.VerInitial || hdr.Version > lsgo.MaxVersion { if hdr.Version < VerInitial || hdr.Version > MaxVersion {
return lsgo.Resource{}, fmt.Errorf("LSF version %v is not supported", hdr.Version) return Resource{}, fmt.Errorf("LSF version %v is not supported", hdr.Version)
} }
isCompressed := lsgo.CompressionFlagsToMethod(hdr.CompressionFlags) != lsgo.CMNone && lsgo.CompressionFlagsToMethod(hdr.CompressionFlags) != lsgo.CMInvalid isCompressed := CompressionFlagsToMethod(hdr.CompressionFlags) != CMNone && CompressionFlagsToMethod(hdr.CompressionFlags) != CMInvalid
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
l.Log("member", "LSF names", "start position", pos) l.Log("member", "LSF names", "start position", pos)
if hdr.StringsSizeOnDisk > 0 || hdr.StringsUncompressedSize > 0 { if hdr.StringsSizeOnDisk > 0 || hdr.StringsUncompressedSize > 0 {
uncompressed := lsgo.LimitReadSeeker(r, int64(hdr.StringsSizeOnDisk)) var (
uncompressed = LimitReadSeeker(r, int64(hdr.StringsSizeOnDisk))
)
if isCompressed { if isCompressed {
uncompressed = lsgo.Decompress(uncompressed, int(hdr.StringsUncompressedSize), hdr.CompressionFlags, false) uncompressed = Decompress(uncompressed, int(hdr.StringsUncompressedSize), hdr.CompressionFlags, false)
} }
// using (var nodesFile = new FileStream("names.bin", FileMode.Create, FileAccess.Write)) // using (var nodesFile = new FileStream("names.bin", FileMode.Create, FileAccess.Write))
@ -687,12 +768,13 @@ func Read(r io.ReadSeeker) (lsgo.Resource, error) {
// } // }
names, err = ReadNames(uncompressed) names, err = ReadNames(uncompressed)
// pretty.Log(len(names), names)
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return lsgo.Resource{}, err return Resource{}, err
} }
} }
npos, _ = r.Seek(0, io.SeekCurrent) npos, err = r.Seek(0, io.SeekCurrent)
l.Log("member", "LSF nodes", "start position", npos) l.Log("member", "LSF nodes", "start position", npos)
if npos != pos+int64(hdr.StringsSizeOnDisk) { if npos != pos+int64(hdr.StringsSizeOnDisk) {
l.Log("member", "LSF nodes", "msg", "seeking to correct offset", "current", npos, "wanted", pos+int64(hdr.StringsSizeOnDisk)) l.Log("member", "LSF nodes", "msg", "seeking to correct offset", "current", npos, "wanted", pos+int64(hdr.StringsSizeOnDisk))
@ -701,9 +783,11 @@ func Read(r io.ReadSeeker) (lsgo.Resource, error) {
pos = npos pos = npos
} }
if hdr.NodesSizeOnDisk > 0 || hdr.NodesUncompressedSize > 0 { if hdr.NodesSizeOnDisk > 0 || hdr.NodesUncompressedSize > 0 {
uncompressed := lsgo.LimitReadSeeker(r, int64(hdr.NodesSizeOnDisk)) var (
uncompressed = LimitReadSeeker(r, int64(hdr.NodesSizeOnDisk))
)
if isCompressed { if isCompressed {
uncompressed = lsgo.Decompress(uncompressed, int(hdr.NodesUncompressedSize), hdr.CompressionFlags, hdr.Version >= lsgo.VerChunkedCompress) uncompressed = Decompress(uncompressed, int(hdr.NodesUncompressedSize), hdr.CompressionFlags, hdr.Version >= VerChunkedCompress)
} }
// using (var nodesFile = new FileStream("nodes.bin", FileMode.Create, FileAccess.Write)) // using (var nodesFile = new FileStream("nodes.bin", FileMode.Create, FileAccess.Write))
@ -711,14 +795,16 @@ func Read(r io.ReadSeeker) (lsgo.Resource, error) {
// nodesFile.Write(uncompressed, 0, uncompressed.Length); // nodesFile.Write(uncompressed, 0, uncompressed.Length);
// } // }
longNodes := hdr.Version >= lsgo.VerExtendedNodes && hdr.Extended == 1 longNodes := hdr.Version >= VerExtendedNodes && hdr.Extended == 1
nodeInfo, err = readNodeInfo(uncompressed, longNodes) nodeInfo, err = readNodeInfo(uncompressed, longNodes)
// pretty.Log(err, nodeInfo)
// logger.Printf("region 1 name: %v", names[nodeInfo[0].NameIndex])
if err != nil && err != io.EOF { if err != nil && err != io.EOF {
return lsgo.Resource{}, err return Resource{}, err
} }
} }
npos, _ = r.Seek(0, io.SeekCurrent) npos, err = r.Seek(0, io.SeekCurrent)
l.Log("member", "LSF attributes", "start position", npos) l.Log("member", "LSF attributes", "start position", npos)
if npos != pos+int64(hdr.NodesSizeOnDisk) { if npos != pos+int64(hdr.NodesSizeOnDisk) {
l.Log("msg", "seeking to correct offset", "current", npos, "wanted", pos+int64(hdr.NodesSizeOnDisk)) l.Log("msg", "seeking to correct offset", "current", npos, "wanted", pos+int64(hdr.NodesSizeOnDisk))
@ -727,9 +813,11 @@ func Read(r io.ReadSeeker) (lsgo.Resource, error) {
pos = npos pos = npos
} }
if hdr.AttributesSizeOnDisk > 0 || hdr.AttributesUncompressedSize > 0 { if hdr.AttributesSizeOnDisk > 0 || hdr.AttributesUncompressedSize > 0 {
var uncompressed io.ReadSeeker = lsgo.LimitReadSeeker(r, int64(hdr.AttributesSizeOnDisk)) var (
uncompressed io.ReadSeeker = LimitReadSeeker(r, int64(hdr.AttributesSizeOnDisk))
)
if isCompressed { if isCompressed {
uncompressed = lsgo.Decompress(uncompressed, int(hdr.AttributesUncompressedSize), hdr.CompressionFlags, hdr.Version >= lsgo.VerChunkedCompress) uncompressed = Decompress(uncompressed, int(hdr.AttributesUncompressedSize), hdr.CompressionFlags, hdr.Version >= VerChunkedCompress)
} }
// using (var attributesFile = new FileStream("attributes.bin", FileMode.Create, FileAccess.Write)) // using (var attributesFile = new FileStream("attributes.bin", FileMode.Create, FileAccess.Write))
@ -737,11 +825,13 @@ func Read(r io.ReadSeeker) (lsgo.Resource, error) {
// attributesFile.Write(uncompressed, 0, uncompressed.Length); // attributesFile.Write(uncompressed, 0, uncompressed.Length);
// } // }
longAttributes := hdr.Version >= lsgo.VerExtendedNodes && hdr.Extended == 1 longAttributes := hdr.Version >= VerExtendedNodes && hdr.Extended == 1
attributeInfo = readAttributeInfo(uncompressed, longAttributes) attributeInfo = readAttributeInfo(uncompressed, longAttributes)
// logger.Printf("attribute 1 name: %v", names[attributeInfo[0].NameIndex])
// pretty.Log(attributeInfo)
} }
npos, _ = r.Seek(0, io.SeekCurrent) npos, err = r.Seek(0, io.SeekCurrent)
l.Log("member", "LSF values", "start position", npos) l.Log("member", "LSF values", "start position", npos)
if npos != pos+int64(hdr.AttributesSizeOnDisk) { if npos != pos+int64(hdr.AttributesSizeOnDisk) {
l.Log("msg", "seeking to correct offset", "current", npos, "wanted", pos+int64(hdr.AttributesSizeOnDisk)) l.Log("msg", "seeking to correct offset", "current", npos, "wanted", pos+int64(hdr.AttributesSizeOnDisk))
@ -749,16 +839,18 @@ func Read(r io.ReadSeeker) (lsgo.Resource, error) {
} else { } else {
pos = npos pos = npos
} }
var uncompressed io.ReadSeeker = lsgo.LimitReadSeeker(r, int64(hdr.ValuesSizeOnDisk)) var (
uncompressed io.ReadSeeker = LimitReadSeeker(r, int64(hdr.ValuesSizeOnDisk))
)
if hdr.ValuesSizeOnDisk > 0 || hdr.ValuesUncompressedSize > 0 { if hdr.ValuesSizeOnDisk > 0 || hdr.ValuesUncompressedSize > 0 {
if isCompressed { if isCompressed {
uncompressed = lsgo.Decompress(r, int(hdr.ValuesUncompressedSize), hdr.CompressionFlags, hdr.Version >= lsgo.VerChunkedCompress) uncompressed = Decompress(r, int(hdr.ValuesUncompressedSize), hdr.CompressionFlags, hdr.Version >= VerChunkedCompress)
} }
} }
res := lsgo.Resource{} res := Resource{}
valueStart, _ := uncompressed.Seek(0, io.SeekCurrent) valueStart, _ = uncompressed.Seek(0, io.SeekCurrent)
nodeInstances, err = ReadRegions(uncompressed, valueStart, names, nodeInfo, attributeInfo, hdr.Version, hdr.EngineVersion) nodeInstances, err = ReadRegions(uncompressed, names, nodeInfo, attributeInfo, hdr.Version, hdr.EngineVersion)
if err != nil { if err != nil {
return res, err return res, err
} }
@ -773,14 +865,20 @@ func Read(r io.ReadSeeker) (lsgo.Resource, error) {
res.Metadata.Revision = (hdr.EngineVersion & 0xff0000) >> 16 res.Metadata.Revision = (hdr.EngineVersion & 0xff0000) >> 16
res.Metadata.Build = (hdr.EngineVersion & 0xffff) res.Metadata.Build = (hdr.EngineVersion & 0xffff)
// pretty.Log(res)
return res, nil return res, nil
} }
func ReadRegions(r io.ReadSeeker, valueStart int64, names [][]string, nodeInfo []NodeInfo, attributeInfo []AttributeInfo, version lsgo.FileVersion, engineVersion uint32) ([]*lsgo.Node, error) { var valueStart int64
NodeInstances := make([]*lsgo.Node, 0, len(nodeInfo))
func ReadRegions(r io.ReadSeeker, names [][]string, nodeInfo []NodeInfo, attributeInfo []AttributeInfo, version FileVersion, engineVersion uint32) ([]*Node, error) {
NodeInstances := make([]*Node, 0, len(nodeInfo))
for _, nodeInfo := range nodeInfo { for _, nodeInfo := range nodeInfo {
if nodeInfo.ParentIndex == -1 { if nodeInfo.ParentIndex == -1 {
region, err := ReadNode(r, valueStart, nodeInfo, names, attributeInfo, version, engineVersion) region, err := ReadNode(r, nodeInfo, names, attributeInfo, version, engineVersion)
// pretty.Log(err, region)
region.RegionName = region.Name region.RegionName = region.Name
NodeInstances = append(NodeInstances, &region) NodeInstances = append(NodeInstances, &region)
@ -789,7 +887,9 @@ func ReadRegions(r io.ReadSeeker, valueStart int64, names [][]string, nodeInfo [
return NodeInstances, err return NodeInstances, err
} }
} else { } else {
node, err := ReadNode(r, valueStart, nodeInfo, names, attributeInfo, version, engineVersion) node, err := ReadNode(r, nodeInfo, names, attributeInfo, version, engineVersion)
// pretty.Log(err, node)
node.Parent = NodeInstances[nodeInfo.ParentIndex] node.Parent = NodeInstances[nodeInfo.ParentIndex]
NodeInstances = append(NodeInstances, &node) NodeInstances = append(NodeInstances, &node)
@ -803,17 +903,17 @@ func ReadRegions(r io.ReadSeeker, valueStart int64, names [][]string, nodeInfo [
return NodeInstances, nil return NodeInstances, nil
} }
func ReadNode(r io.ReadSeeker, valueStart int64, ni NodeInfo, names [][]string, attributeInfo []AttributeInfo, version lsgo.FileVersion, engineVersion uint32) (lsgo.Node, error) { func ReadNode(r io.ReadSeeker, ni NodeInfo, names [][]string, attributeInfo []AttributeInfo, version FileVersion, engineVersion uint32) (Node, error) {
var ( var (
node = lsgo.Node{} node = Node{}
index = ni.FirstAttributeIndex index = ni.FirstAttributeIndex
err error err error
l log.Logger l log.Logger
pos int64 pos int64
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsf", "part", "node") l = log.With(Logger, "component", "LS converter", "file type", "lsf", "part", "node")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
node.Name = names[ni.NameIndex][ni.NameOffset] node.Name = names[ni.NameIndex][ni.NameOffset]
@ -822,7 +922,7 @@ func ReadNode(r io.ReadSeeker, valueStart int64, ni NodeInfo, names [][]string,
for index != -1 { for index != -1 {
var ( var (
attribute = attributeInfo[index] attribute = attributeInfo[index]
v lsgo.NodeAttribute v NodeAttribute
) )
if valueStart+int64(attribute.DataOffset) != pos { if valueStart+int64(attribute.DataOffset) != pos {
@ -837,16 +937,18 @@ func ReadNode(r io.ReadSeeker, valueStart int64, ni NodeInfo, names [][]string,
return node, err return node, err
} }
index = attribute.NextAttributeIndex index = attribute.NextAttributeIndex
// Console.WriteLine(String.Format(" {0:X}: {1} ({2})", attribute.DataOffset, names[attribute.NameIndex][attribute.NameOffset], value));
} }
return node, nil return node, nil
} }
func ReadLSFAttribute(r io.ReadSeeker, name string, dt lsgo.DataType, length uint, version lsgo.FileVersion, engineVersion uint32) (lsgo.NodeAttribute, error) { func ReadLSFAttribute(r io.ReadSeeker, name string, dt DataType, length uint, version FileVersion, engineVersion uint32) (NodeAttribute, error) {
// LSF and LSB serialize the buffer types differently, so specialized // LSF and LSB serialize the buffer types differently, so specialized
// code is added to the LSB and LSF serializers, and the common code is // code is added to the LSB and LSF serializers, and the common code is
// available in BinUtils.ReadAttribute() // available in BinUtils.ReadAttribute()
var ( var (
attr = lsgo.NodeAttribute{ attr = NodeAttribute{
Type: dt, Type: dt,
Name: name, Name: name,
} }
@ -855,13 +957,13 @@ func ReadLSFAttribute(r io.ReadSeeker, name string, dt lsgo.DataType, length uin
l log.Logger l log.Logger
pos int64 pos int64
) )
l = log.With(lsgo.Logger, "component", "LS converter", "file type", "lsf", "part", "attribute") l = log.With(Logger, "component", "LS converter", "file type", "lsf", "part", "attribute")
pos, _ = r.Seek(0, io.SeekCurrent) pos, err = r.Seek(0, io.SeekCurrent)
switch dt { switch dt {
case lsgo.DTString, lsgo.DTPath, lsgo.DTFixedString, lsgo.DTLSString, lsgo.DTWString, lsgo.DTLSWString: case DTString, DTPath, DTFixedString, DTLSString, DTWString, DTLSWString:
var v string var v string
v, err = lsgo.ReadCString(r, int(length)) v, err = ReadCString(r, int(length))
attr.Value = v attr.Value = v
l.Log("member", name, "read", length, "start position", pos, "value", attr.Value) l.Log("member", name, "read", length, "start position", pos, "value", attr.Value)
@ -869,9 +971,9 @@ func ReadLSFAttribute(r io.ReadSeeker, name string, dt lsgo.DataType, length uin
return attr, err return attr, err
case lsgo.DTTranslatedString: case DTTranslatedString:
var v lsgo.TranslatedString var v TranslatedString
v, err = lsgo.ReadTranslatedString(r, version, engineVersion) v, err = ReadTranslatedString(r, version, engineVersion)
attr.Value = v attr.Value = v
l.Log("member", name, "read", length, "start position", pos, "value", attr.Value) l.Log("member", name, "read", length, "start position", pos, "value", attr.Value)
@ -879,9 +981,9 @@ func ReadLSFAttribute(r io.ReadSeeker, name string, dt lsgo.DataType, length uin
return attr, err return attr, err
case lsgo.DTTranslatedFSString: case DTTranslatedFSString:
var v lsgo.TranslatedFSString var v TranslatedFSString
v, err = lsgo.ReadTranslatedFSString(r, version) v, err = ReadTranslatedFSString(r, version)
attr.Value = v attr.Value = v
l.Log("member", name, "read", length, "start position", pos, "value", attr.Value) l.Log("member", name, "read", length, "start position", pos, "value", attr.Value)
@ -889,7 +991,7 @@ func ReadLSFAttribute(r io.ReadSeeker, name string, dt lsgo.DataType, length uin
return attr, err return attr, err
case lsgo.DTScratchBuffer: case DTScratchBuffer:
v := make([]byte, length) v := make([]byte, length)
_, err = r.Read(v) _, err = r.Read(v)
@ -901,10 +1003,147 @@ func ReadLSFAttribute(r io.ReadSeeker, name string, dt lsgo.DataType, length uin
return attr, err return attr, err
default: default:
return lsgo.ReadAttribute(r, name, dt, length, l) return ReadAttribute(r, name, dt, length, l)
} }
} }
func init() { func ReadTranslatedString(r io.ReadSeeker, version FileVersion, engineVersion uint32) (TranslatedString, error) {
lsgo.RegisterFormat("lsf", Signature, Read) var (
str TranslatedString
err error
)
if version >= VerBG3 || engineVersion == 0x4000001d {
// logger.Println("decoding bg3 data")
var version uint16
err = binary.Read(r, binary.LittleEndian, &version)
if err != nil {
return str, err
}
str.Version = version
err = binary.Read(r, binary.LittleEndian, &version)
if err != nil {
return str, err
}
if version == 0 {
str.Value, err = ReadCString(r, int(str.Version))
if err != nil {
return str, err
}
str.Version = 0
} else {
_, err = r.Seek(-2, io.SeekCurrent)
}
} else {
str.Version = 0
var (
vlength int32
v []byte
// n int
)
err = binary.Read(r, binary.LittleEndian, &vlength)
if err != nil {
return str, err
}
v = make([]byte, vlength)
_, err = r.Read(v)
if err != nil {
return str, err
}
str.Value = string(v)
}
var handleLength int32
err = binary.Read(r, binary.LittleEndian, &handleLength)
if err != nil {
return str, err
}
str.Handle, err = ReadCString(r, int(handleLength))
if err != nil {
return str, err
}
// logger.Printf("handle %s; %v", str.Handle, err)
return str, nil
}
func ReadTranslatedFSString(r io.ReadSeeker, version FileVersion) (TranslatedFSString, error) {
var (
str = TranslatedFSString{}
err error
)
if version >= VerBG3 {
var version uint16
err = binary.Read(r, binary.LittleEndian, &version)
if err != nil {
return str, err
}
str.Version = version
} else {
str.Version = 0
var (
length int32
)
err = binary.Read(r, binary.LittleEndian, &length)
if err != nil {
return str, err
}
str.Value, err = ReadCString(r, int(length))
if err != nil {
return str, err
}
}
var handleLength int32
err = binary.Read(r, binary.LittleEndian, &handleLength)
if err != nil {
return str, err
}
str.Handle, err = ReadCString(r, int(handleLength))
if err != nil {
return str, err
}
var arguments int32
err = binary.Read(r, binary.LittleEndian, &arguments)
if err != nil {
return str, err
}
str.Arguments = make([]TranslatedFSStringArgument, 0, arguments)
for i := 0; i < int(arguments); i++ {
arg := TranslatedFSStringArgument{}
var argKeyLength int32
err = binary.Read(r, binary.LittleEndian, &argKeyLength)
if err != nil {
return str, err
}
arg.Key, err = ReadCString(r, int(argKeyLength))
if err != nil {
return str, err
}
arg.String, err = ReadTranslatedFSString(r, version)
if err != nil {
return str, err
}
var argValueLength int32
err = binary.Read(r, binary.LittleEndian, &argValueLength)
if err != nil {
return str, err
}
arg.Value, err = ReadCString(r, int(argValueLength))
if err != nil {
return str, err
}
str.Arguments = append(str.Arguments, arg)
}
return str, nil
} }
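
For orientation, the sketch below shows how the v0.0.1 reader above might be driven end to end. It is illustrative only: it assumes the v0.0.1 layout in which ReadLSF, Resource and the Node XML marshalling all live in a single lsgo package, the import path and the "meta.lsf" filename are guesses, and on the master side the equivalent entry point is the Read function registered via lsgo.RegisterFormat instead.

package main

import (
	"encoding/xml"
	"fmt"
	"log"
	"os"

	"github.com/lordwelch/lsgo" // assumed import path, not confirmed by the diff
)

func main() {
	f, err := os.Open("meta.lsf") // example file; *os.File satisfies the io.ReadSeeker ReadLSF expects
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	res, err := lsgo.ReadLSF(f) // parses header, names, nodes, attributes and values
	if err != nil {
		log.Fatal(err)
	}

	// Regions is []*Node, so each region is written through the custom Node.MarshalXML.
	out, err := xml.MarshalIndent(res, "", "\t")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(out))
}
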

44
lsgo.go
View File

@ -1,44 +0,0 @@
package lsgo
import (
"strings"
"github.com/go-kit/kit/log"
)
var Logger log.Logger = log.NewNopLogger()
// NewFilter wraps l in a log.Logger that only passes through records whose values, for keys listed in f, contain one of the allowed substrings.
func NewFilter(f map[string][]string, l log.Logger) log.Logger {
return filter{
filter: f,
next: l,
}
}
type filter struct {
next log.Logger
filter map[string][]string
}
func (f filter) Log(keyvals ...interface{}) error {
allowed := true // allow everything
for i := 0; i < len(keyvals)-1; i += 2 {
if v, ok := keyvals[i].(string); ok { // key
if fil, ok := f.filter[v]; ok { // key has a filter
if v, ok = keyvals[i+1].(string); ok { // value is a string
allowed = false // this key has a filter; deny everything except what the filter allows
for _, fi := range fil {
if strings.Contains(v, fi) {
allowed = true
}
}
}
}
}
}
if allowed {
return f.next.Log(keyvals...)
}
return nil
}
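
The lsgo.go shown above (present only on the master side of this comparison) exposes a package-level go-kit Logger, a no-op by default, plus the substring filter. A minimal sketch of enabling the otherwise very chatty member/position logging for selected keys, assuming the exported Logger and NewFilter above and a guessed import path:

package main

import (
	"os"

	"github.com/go-kit/kit/log"
	"github.com/lordwelch/lsgo" // assumed import path, not confirmed by the diff
)

func main() {
	// Keys absent from the map are never filtered; for "member", only records whose
	// value contains "header" or "LSF names" are passed through to the logfmt logger.
	lsgo.Logger = lsgo.NewFilter(
		map[string][]string{"member": {"header", "LSF names"}},
		log.NewLogfmtLogger(os.Stderr),
	)
	// Any parsing done after this point logs through the filter.
}
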

View File

@ -1,37 +0,0 @@
{
"folders":
[
{
"path": "."
}
],
"settings":
{
"LSP":
{
"gopls":
{
"settings":
{
"gopls":
{
"usePlaceholders": true,
"buildFlags":
[
"-tags",
"noasm"
],
"directoryFilters":
[
"third_party"
],
"analyses": {
"shadow": true,
"unusedparams": true
}
}
}
}
}
}
}

View File

@ -15,12 +15,18 @@ type LSMetadata struct {
Build uint32 `xml:"build,attr"` Build uint32 `xml:"build,attr"`
} }
type format struct {
name, magic string
read func(io.Reader) (Resource, error)
}
type Resource struct { type Resource struct {
Metadata LSMetadata `xml:"version"` Metadata LSMetadata `xml:"version"`
Regions []*Node `xml:"region"` Regions []*Node `xml:"region"`
} }
func (r *Resource) Read(io.Reader) { func (r *Resource) Read(io.Reader) {
} }
// public Resource() // public Resource()
@ -37,7 +43,7 @@ type Node struct {
RegionName string `xml:"-"` RegionName string `xml:"-"`
} }
func (n *Node) MarshalXML(e *xml.Encoder, start xml.StartElement) error { func (n Node) MarshalXML(e *xml.Encoder, start xml.StartElement) error {
R := xml.Name{ R := xml.Name{
Local: "region", Local: "region",
} }
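
The last hunk flips the MarshalXML receiver from *Node (master) to Node (v0.0.1). Since Resource.Regions holds []*Node, encoding/xml picks up either variant there, but the receiver choice matters when a bare value reaches xml.Marshal. The toy type below (a stand-in, not the real Node) illustrates the rule:

package main

import (
	"encoding/xml"
	"fmt"
)

// elem stands in for Node, with a value receiver as in v0.0.1.
type elem struct{ Name string }

func (e elem) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
	start.Name.Local = "node"
	start.Attr = append(start.Attr, xml.Attr{Name: xml.Name{Local: "id"}, Value: e.Name})
	if err := enc.EncodeToken(start); err != nil {
		return err
	}
	return enc.EncodeToken(start.End())
}

func main() {
	v, _ := xml.Marshal(elem{Name: "region"})  // value receiver: custom marshaler used
	p, _ := xml.Marshal(&elem{Name: "region"}) // pointers always reach it as well
	fmt.Println(string(v), string(p))
	// Had MarshalXML been declared on *elem (the master form), xml.Marshal(elem{...})
	// would skip it and fall back to default struct encoding, because the value
	// obtained from the interface is not addressable.
}
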