mirror of https://github.com/status-im/op-geth.git
cmd/swarm: support exporting, importing chunk db (#14868)
This commit is contained in:
parent 53f3460ab5
commit e9b850805e
@@ -1,38 +0,0 @@
-// Copyright 2017 The go-ethereum Authors
-// This file is part of go-ethereum.
-//
-// go-ethereum is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// go-ethereum is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-//
-// You should have received a copy of the GNU General Public License
-// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
-
-package main
-
-import (
-	"github.com/ethereum/go-ethereum/cmd/utils"
-	"github.com/ethereum/go-ethereum/swarm/storage"
-	"gopkg.in/urfave/cli.v1"
-)
-
-func cleandb(ctx *cli.Context) {
-	args := ctx.Args()
-	if len(args) != 1 {
-		utils.Fatalf("Need path to chunks database as the first and only argument")
-	}
-
-	chunkDbPath := args[0]
-	hash := storage.MakeHashFunc("SHA3")
-	dbStore, err := storage.NewDbStore(chunkDbPath, hash, 10000000, 0)
-	if err != nil {
-		utils.Fatalf("Cannot initialise dbstore: %v", err)
-	}
-	dbStore.Cleanup()
-}
@@ -0,0 +1,116 @@
+// Copyright 2017 The go-ethereum Authors
+// This file is part of go-ethereum.
+//
+// go-ethereum is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+//
+// go-ethereum is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU General Public License
+// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
+
+package main
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/ethereum/go-ethereum/cmd/utils"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/swarm/storage"
+	"gopkg.in/urfave/cli.v1"
+)
+
+func dbExport(ctx *cli.Context) {
+	args := ctx.Args()
+	if len(args) != 2 {
+		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to write the tar archive to, - for stdout)")
+	}
+
+	store, err := openDbStore(args[0])
+	if err != nil {
+		utils.Fatalf("error opening local chunk database: %s", err)
+	}
+	defer store.Close()
+
+	var out io.Writer
+	if args[1] == "-" {
+		out = os.Stdout
+	} else {
+		f, err := os.Create(args[1])
+		if err != nil {
+			utils.Fatalf("error opening output file: %s", err)
+		}
+		defer f.Close()
+		out = f
+	}
+
+	count, err := store.Export(out)
+	if err != nil {
+		utils.Fatalf("error exporting local chunk database: %s", err)
+	}
+
+	log.Info(fmt.Sprintf("successfully exported %d chunks", count))
+}
+
+func dbImport(ctx *cli.Context) {
+	args := ctx.Args()
+	if len(args) != 2 {
+		utils.Fatalf("invalid arguments, please specify both <chunkdb> (path to a local chunk database) and <file> (path to read the tar archive from, - for stdin)")
+	}
+
+	store, err := openDbStore(args[0])
+	if err != nil {
+		utils.Fatalf("error opening local chunk database: %s", err)
+	}
+	defer store.Close()
+
+	var in io.Reader
+	if args[1] == "-" {
+		in = os.Stdin
+	} else {
+		f, err := os.Open(args[1])
+		if err != nil {
+			utils.Fatalf("error opening input file: %s", err)
+		}
+		defer f.Close()
+		in = f
+	}
+
+	count, err := store.Import(in)
+	if err != nil {
+		utils.Fatalf("error importing local chunk database: %s", err)
+	}
+
+	log.Info(fmt.Sprintf("successfully imported %d chunks", count))
+}
+
+func dbClean(ctx *cli.Context) {
+	args := ctx.Args()
+	if len(args) != 1 {
+		utils.Fatalf("invalid arguments, please specify <chunkdb> (path to a local chunk database)")
+	}
+
+	store, err := openDbStore(args[0])
+	if err != nil {
+		utils.Fatalf("error opening local chunk database: %s", err)
+	}
+	defer store.Close()
+
+	store.Cleanup()
+}
+
+func openDbStore(path string) (*storage.DbStore, error) {
+	if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
+		return nil, fmt.Errorf("invalid chunkdb path: %s", err)
+	}
+	hash := storage.MakeHashFunc("SHA3")
+	return storage.NewDbStore(path, hash, 10000000, 0)
+}
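
For reference, the archive format used by 'swarm db export' and 'swarm db import' is defined by DbStore.Export further down in this commit: a plain tar stream with one entry per chunk, named with the hex-encoded chunk key. The standalone sketch below (not part of the commit) lists the entries of such an archive using only the Go standard library; the chunks.tar path is a placeholder.

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"
	"os"
)

func main() {
	// "chunks.tar" is a hypothetical archive previously produced by 'swarm db export'.
	f, err := os.Open("chunks.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		} else if err != nil {
			log.Fatal(err)
		}
		// Each entry is one chunk: the name is the hex-encoded chunk key and
		// the entry body is the raw chunk data.
		fmt.Printf("chunk %s (%d bytes)\n", hdr.Name, hdr.Size)
	}
}
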
@@ -240,12 +240,65 @@ Removes a path from the manifest
 			},
 		},
 		{
-			Action:    cleandb,
+			Name:      "db",
+			Usage:     "manage the local chunk database",
+			ArgsUsage: "db COMMAND",
+			Description: `
+Manage the local chunk database.
+`,
+			Subcommands: []cli.Command{
+				{
+					Action:    dbExport,
+					Name:      "export",
+					Usage:     "export a local chunk database as a tar archive (use - to send to stdout)",
+					ArgsUsage: "<chunkdb> <file>",
+					Description: `
+Export a local chunk database as a tar archive (use - to send to stdout).
+
+    swarm db export ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar
+
+The export may be quite large, consider piping the output through the Unix
+pv(1) tool to get a progress bar:
+
+    swarm db export ~/.ethereum/swarm/bzz-KEY/chunks - | pv > chunks.tar
+`,
+				},
+				{
+					Action:    dbImport,
+					Name:      "import",
+					Usage:     "import chunks from a tar archive into a local chunk database (use - to read from stdin)",
+					ArgsUsage: "<chunkdb> <file>",
+					Description: `
+Import chunks from a tar archive into a local chunk database (use - to read from stdin).
+
+    swarm db import ~/.ethereum/swarm/bzz-KEY/chunks chunks.tar
+
+The import may be quite large, consider piping the input through the Unix
+pv(1) tool to get a progress bar:
+
+    pv chunks.tar | swarm db import ~/.ethereum/swarm/bzz-KEY/chunks -
+`,
+				},
+				{
+					Action:    dbClean,
+					Name:      "clean",
+					Usage:     "remove corrupt entries from a local chunk database",
+					ArgsUsage: "<chunkdb>",
+					Description: `
+Remove corrupt entries from a local chunk database.
+`,
+				},
+			},
+		},
+		{
+			Action: func(ctx *cli.Context) {
+				utils.Fatalf("ERROR: 'swarm cleandb' has been removed, please use 'swarm db clean'.")
+			},
 			Name:      "cleandb",
-			Usage:     "Cleans database of corrupted entries",
+			Usage:     "DEPRECATED: use 'swarm db clean'",
 			ArgsUsage: " ",
 			Description: `
-Cleans database of corrupted entries.
+DEPRECATED: use 'swarm db clean'.
 `,
 		},
 	}
@@ -23,9 +23,13 @@
 package storage

 import (
+	"archive/tar"
 	"bytes"
 	"encoding/binary"
+	"encoding/hex"
 	"fmt"
+	"io"
+	"io/ioutil"
 	"sync"

 	"github.com/ethereum/go-ethereum/log"
@@ -260,6 +264,84 @@ func (s *DbStore) collectGarbage(ratio float32) {
 	s.db.Put(keyGCPos, s.gcPos)
 }

+// Export writes all chunks from the store to a tar archive, returning the
+// number of chunks written.
+func (s *DbStore) Export(out io.Writer) (int64, error) {
+	tw := tar.NewWriter(out)
+	defer tw.Close()
+
+	it := s.db.NewIterator()
+	defer it.Release()
+	var count int64
+	for ok := it.Seek([]byte{kpIndex}); ok; ok = it.Next() {
+		key := it.Key()
+		if (key == nil) || (key[0] != kpIndex) {
+			break
+		}
+
+		var index dpaDBIndex
+		decodeIndex(it.Value(), &index)
+
+		data, err := s.db.Get(getDataKey(index.Idx))
+		if err != nil {
+			log.Warn(fmt.Sprintf("Chunk %x found but could not be accessed: %v", key[:], err))
+			continue
+		}
+
+		hdr := &tar.Header{
+			Name: hex.EncodeToString(key[1:]),
+			Mode: 0644,
+			Size: int64(len(data)),
+		}
+		if err := tw.WriteHeader(hdr); err != nil {
+			return count, err
+		}
+		if _, err := tw.Write(data); err != nil {
+			return count, err
+		}
+		count++
+	}
+
+	return count, nil
+}
+
+// Import reads chunks into the store from a tar archive, returning the number
+// of chunks read.
+func (s *DbStore) Import(in io.Reader) (int64, error) {
+	tr := tar.NewReader(in)
+
+	var count int64
+	for {
+		hdr, err := tr.Next()
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			return count, err
+		}
+
+		if len(hdr.Name) != 64 {
+			log.Warn("ignoring non-chunk file", "name", hdr.Name)
+			continue
+		}
+
+		key, err := hex.DecodeString(hdr.Name)
+		if err != nil {
+			log.Warn("ignoring invalid chunk file", "name", hdr.Name, "err", err)
+			continue
+		}
+
+		data, err := ioutil.ReadAll(tr)
+		if err != nil {
+			return count, err
+		}
+
+		s.Put(&Chunk{Key: key, SData: data})
+		count++
+	}
+
+	return count, nil
+}
+
 func (s *DbStore) Cleanup() {
 	//Iterates over the database and checks that there are no faulty chunks
 	it := s.db.NewIterator()
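
As a rough illustration of the new DbStore API added above (not part of the commit), the sketch below round-trips chunks between two stores through an in-memory tar archive. The directory paths are placeholders, and the hash function, capacity and radius arguments simply mirror the values used by cmd/swarm.

package main

import (
	"bytes"
	"log"

	"github.com/ethereum/go-ethereum/swarm/storage"
)

func main() {
	hash := storage.MakeHashFunc("SHA3")

	// Placeholder paths: each NewDbStore call opens a chunk database in the
	// given directory, with the same capacity and radius as cmd/swarm uses.
	src, err := storage.NewDbStore("/tmp/chunks-src", hash, 10000000, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer src.Close()

	dst, err := storage.NewDbStore("/tmp/chunks-dst", hash, 10000000, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer dst.Close()

	// Export src into an in-memory tar archive, then import that archive into dst.
	var buf bytes.Buffer
	exported, err := src.Export(&buf)
	if err != nil {
		log.Fatal(err)
	}
	imported, err := dst.Import(&buf)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("exported %d chunks, imported %d chunks", exported, imported)
}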