
Refactor permissions and RPC admin API

Also adds a Google Drive/Photos importer, and a tool to convert
data from the old store format to the new one.

Chris Walker, 3 years ago
commit 04f0a1d8d2

.gitignore (+1 -1)

@@ -1,3 +1,3 @@
 node_modules
 .module-cache
-run-with-google.sh
+run-with-*

cmd/opfs-google-drive-importer/main.go (+223 -0)

@@ -0,0 +1,223 @@
+package main
+
+import (
+	"encoding/json"
+	"flag"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"net/rpc"
+	"net/rpc/jsonrpc"
+	"os"
+	"os/user"
+	"path/filepath"
+	"sync"
+	"time"
+
+	"golang.org/x/net/context"
+	"golang.org/x/oauth2"
+	"google.golang.org/api/drive/v3"
+)
+
+var (
+	creds          = flag.String("credentials", "import-data.json", "the unique name of the oauth credential config file for this import")
+	folderId       = flag.String("folder-id", "", "the ID of the Google Drive folder to use; if blank, Google Photos will be used")
+	importDir      = flag.String("import-dir", ".", "the directory to download imports to")
+	gaClientId     = flag.String("ga-client", "", "your google auth client id, from the dev console")
+	gaClientSecret = flag.String("ga-secret", "", "your google auth client secret, from the dev console")
+)
+
+type DataCache struct {
+	Token      *oauth2.Token
+	LastImport time.Time //zero value means we have never imported
+}
+
+var rpcClient *rpc.Client
+
+func main() {
+	flag.Parse()
+	ctx := context.Background()
+
+	config := &oauth2.Config{
+		ClientID:     *gaClientId,
+		ClientSecret: *gaClientSecret,
+		Scopes:       []string{drive.DrivePhotosReadonlyScope, drive.DriveReadonlyScope},
+		Endpoint: oauth2.Endpoint{
+			AuthURL:  "https://accounts.google.com/o/oauth2/auth",
+			TokenURL: "https://accounts.google.com/o/oauth2/token",
+		},
+		RedirectURL: "urn:ietf:wg:oauth:2.0:oob",
+	}
+
+	cachedData, err := getCacheFile(*creds)
+	if err != nil {
+		cachedData = &DataCache{
+			Token:      getTokenFromWeb(config),
+			LastImport: time.Now().Add(-72 * time.Hour).UTC(),
+		}
+		if writeCacheFile(*creds, cachedData) != nil {
+			log.Fatalln("Could not save data to cache file")
+		}
+	} else if cachedData.LastImport.Format(time.RFC3339) == "0001-01-01T00:00:00Z" {
+		//not time.IsZero(), but this string is what we get back from the JSON parse
+		cachedData.LastImport = time.Now().Add(-72 * time.Hour).UTC()
+	}
+
+	client := config.Client(ctx, cachedData.Token)
+
+	srv, err := drive.New(client)
+	if err != nil {
+		log.Fatalln("Unable to retrieve drive Client", err)
+	}
+
+	rpcClient, err = jsonrpc.Dial("tcp", "127.0.0.1:5122")
+	if err != nil {
+		log.Fatalln("Unable to connect to OPFS admin RPC", err)
+	}
+	//now we enter the main loop
+	for {
+		photos, err := getNewPhotosList(ctx, srv, cachedData.LastImport)
+		if err != nil {
+			log.Printf("Error getting new photos: %v\n", err)
+		} else {
+			//we have photos, so import them
+			downloadAndImport(ctx, srv, photos)
+			log.Printf("finished importing %d media items\n", len(photos))
+			cachedData.LastImport = time.Now().UTC()
+			writeCacheFile(*creds, cachedData)
+		}
+
+		//I'd prefer some sort of push, but polling will do
+		time.Sleep(time.Minute)
+	}
+}
+
+const defaultQuery = "mimeType != 'application/vnd.google-apps.folder' and (mimeType contains 'image/' or mimeType contains 'video/')"
+
+func getNewPhotosList(ctx context.Context, srv *drive.Service, lastImport time.Time) ([]*drive.File, error) {
+	call := srv.Files.List()
+	query := ""
+	if *folderId != "" {
+		call.Spaces("drive")
+		query = fmt.Sprintf("'%s' in parents and ", *folderId)
+	} else {
+		call.Spaces("photos")
+	}
+	call.OrderBy("createdTime desc")
+	//call.Fields("files(id,name,webContentLink)")
+	query = fmt.Sprintf("%smodifiedTime >= '%s' and %s", query, lastImport.Format(time.RFC3339), defaultQuery)
+	log.Println(query)
+	call.Q(query)
+	filelist := []*drive.File{}
+	mtx := &sync.Mutex{}
+	err := call.Pages(ctx, func(list *drive.FileList) error {
+		mtx.Lock()
+		defer mtx.Unlock() //in case these calls fire in parallel
+		filelist = append(filelist, list.Files...)
+		return nil
+	})
+	log.Printf("Found %d new media\n", len(filelist))
+	return filelist, err
+}
+
+func downloadAndImport(ctx context.Context, srv *drive.Service, files []*drive.File) {
+	for i := range files {
+		call := srv.Files.Get(files[i].Id)
+		call.Context(ctx)
+		res, err := call.Download()
+		if err != nil {
+			log.Printf("failed to download file: %s (%s): %v", files[i].Name, files[i].Id, err)
+			continue
+		}
+		importFromDownload(files[i], res)
+	}
+}
+
+func importFromDownload(file *drive.File, res *http.Response) {
+	//opfsd will import from a directory, so we can just download to there.
+	//for now we even assume we are in that directory.
+	defer res.Body.Close()
+	name := filepath.Join(*importDir, file.Name)
+	if _, err := os.Stat(name); err == nil {
+		//a file with this name already exists, so prefix with the Drive id
+		name = filepath.Join(*importDir, file.Id+"-"+file.Name)
+	}
+	f, err := os.Create(name)
+	if err != nil {
+		log.Println("Could not create file", name)
+		return
+	}
+	defer f.Close()
+	io.Copy(f, res.Body)
+	//now trigger the import over RPC. the server replies with an empty
+	//object, so decode into an empty struct rather than a bool.
+	var reply struct{}
+	err = rpcClient.Call("opfs.Import", name, &reply)
+	if err != nil {
+		log.Println("Could not import file", err)
+	} else {
+		//import succeeded, so we can remove the original download
+		log.Println("Import successful for file", name)
+		os.Remove(name)
+	}
+}
+
+// getTokenFromWeb uses Config to request a Token.
+// It returns the retrieved Token.
+func getTokenFromWeb(config *oauth2.Config) *oauth2.Token {
+	authURL := config.AuthCodeURL("state-token", oauth2.AccessTypeOffline)
+	fmt.Printf("Go to the following link in your browser then type the "+
+		"authorization code: \n%v\n", authURL)
+
+	var code string
+	if _, err := fmt.Scan(&code); err != nil {
+		log.Fatalf("Unable to read authorization code %v", err)
+	}
+
+	tok, err := config.Exchange(oauth2.NoContext, code)
+	if err != nil {
+		log.Fatalf("Unable to retrieve token from web %v", err)
+	}
+	return tok
+}
+
+// get the data out of the cache file
+func getCacheFile(name string) (*DataCache, error) {
+	filename, err := cacheFileName(name)
+	if err != nil {
+		return nil, err
+	}
+	//try and read from the file
+	f, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+	dc := &DataCache{}
+	err = json.NewDecoder(f).Decode(dc)
+	return dc, err
+}
+
+func writeCacheFile(name string, data *DataCache) error {
+	filename, err := cacheFileName(name)
+	if err != nil {
+		return err
+	}
+	f, err := os.Create(filename)
+	if err != nil {
+		//return rather than log.Fatal, so the caller decides what to do
+		return err
+	}
+	defer f.Close()
+	return json.NewEncoder(f).Encode(data)
+}
+
+// cacheFileName generates the credential file path/filename
+// for the given name.
+func cacheFileName(name string) (string, error) {
+	usr, err := user.Current()
+	if err != nil {
+		return "", err
+	}
+	tokenCacheDir := filepath.Join(usr.HomeDir, ".credentials")
+	if err := os.MkdirAll(tokenCacheDir, 0700); err != nil {
+		return "", err
+	}
+	return filepath.Join(tokenCacheDir, name), nil
+}
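
For reference, a sketch of how the importer might be invoked (flag values here are placeholders; leaving -folder-id blank selects the Google Photos space, as the flag definitions above describe):

	./opfs-google-drive-importer \
		-credentials import-data.json \
		-ga-client "<client-id>.apps.googleusercontent.com" \
		-ga-secret "<client-secret>" \
		-import-dir /path/to/opfs/import \
		-folder-id ""

On first run it prints an OAuth URL and waits for the pasted authorization code; after that it polls Drive once a minute and hands each download to opfsd over the admin RPC socket.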

cmd/opfsd/opfsd.go (+12 -28)

@@ -1,12 +1,11 @@
 package main
 
 import (
-	"bufio"
 	"flag"
 	"log"
 	"net"
+	"net/rpc/jsonrpc"
 	"regexp"
-	"strings"
 	"time"
 
 	"github.com/thechriswalker/opfs-server/opfs"
@@ -22,7 +21,7 @@ var (
 	gaClientId     = flag.String("ga-client", "", "your google auth client id, from the dev console")
 	gaClientSecret = flag.String("ga-secret", "", "your google auth client secret, from the dev console")
 	origin         = flag.String("origin", "http://localhost:8080", "the origin your app is served on (for oauth redirects)")
-	adminAddr      = flag.String("admin", "127.0.0.1:5122", "the admin socket address")
+	adminAddr      = flag.String("admin", "127.0.0.1:5122", "the admin socket address for JSON-RPC")
 )
 
 var idRegex = regexp.MustCompile(`^sha1-[0-9a-f]{40}$`)
@@ -38,6 +37,8 @@ func main() {
 		return false && email == "thechriswalker@gmail.com"
 	}
 
+	adminIdent := opfs.Identity{Name: "thechriswalker@gmail.com", Admin: true}
+
 	//create the API
 	api := opfs.NewAPI(
 		opfs.VersionInfo{},
@@ -58,7 +59,13 @@ func main() {
 	photo.Register(api)
 	video.Register(api)
 
-	//start the admin socket for external db changes, e.g. from the importer
+	//start the admin socket for external db changes, e.g. from the importer
+	rpcServer, err := api.RPCServer(adminIdent)
+	if err != nil {
+		log.Fatal(err)
+	}
+
 	listener, err := net.Listen("tcp", *adminAddr)
 	if err != nil {
 		log.Fatal(err)
@@ -72,30 +79,7 @@ func main() {
 				continue
 			}
 			go func() {
-				defer conn.Close()
-				buf := bufio.NewReader(conn)
-				for {
-					//read lines as items to reindex.
-					//@TODO limit how much is read here, lines should be short.
-					//@TODO we need to handle more commands here, even if it is reindex item and reindex collection
-					str, err := buf.ReadString('\n')
-					str = strings.TrimSpace(str)
-					if idRegex.MatchString(str) {
-						if err := api.ReindexItem(str); err != nil {
-							conn.Write([]byte("failed to reindex: "))
-							conn.Write([]byte(str))
-							conn.Write([]byte{'\n'})
-							log.Println("Failed to reindex:", str)
-						}
-					} else {
-						//bad input
-						conn.Write([]byte("Bad input. goodbye"))
-						return
-					}
-					if err != nil {
-						break
-					}
-				}
+				rpcServer.ServeCodec(jsonrpc.NewServerCodec(conn))
 			}()
 		}
 	}()
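
With the jsonrpc ServerCodec in place, each accepted connection speaks one JSON object per request. An exchange looks roughly like this (a sketch; Go's net/rpc/jsonrpc codec ignores unknown fields such as a "jsonrpc" version tag, and replies with an id/result/error object):

	request:  {"method": "opfs.Recheck", "params": ["sha1-98445134bbda2af1d11ad7258e0037f26d9bca12"], "id": 1}
	response: {"id": 1, "result": {}, "error": null}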

opfs/admin.go (+75 -0)

@@ -0,0 +1,75 @@
+package opfs
+
+// This is where I will implement the JSON RPC over socket for admin
+// purposes. Mainly for import/data-modifying tools to notify OPFS of
+// external changes. It will also be used to trigger import of files
+// from the local filesystem, e.g. a Google Drive tool downloads some
+// files and then tells OPFS to import them.
+
+//  RPC Methods:
+//      Import(path string, res *NoReturnValue) error
+//          This tells OPFS to import the item at the given path
+//
+//      Recheck(id string, res *NoReturnValue) error
+//          This tells OPFS that we did something external and it
+//          should reload info directly from the filesystem, which may
+//          mean deleting, updating or adding an item.
+
+import (
+	"net/rpc"
+	"os"
+	"path/filepath"
+
+	"github.com/thechriswalker/opfs-server/debug"
+)
+
+var rpcDebug = debug.Logger("rpc")
+
+//we don't return anything from these calls, except errors if needed
+type NoReturnValue struct{}
+
+//we wrap the Api in this struct (rather than embedding it) to avoid
+//erroneously exporting unwanted methods over RPC
+type AdminRPC struct {
+	api   *Api
+	ident Identity
+}
+
+//Import a file by path.
+// {"jsonrpc": "2.0", "method": "opfs.Import", "params": ["/srv/personal/opfs-temp/import/IMG_20160406_131439.jpg"], "id": 1}
+func (a *AdminRPC) Import(path string, res *NoReturnValue) error {
+	rpcDebug("rpc.Import(%s) [as %s]", path, a.ident)
+	f, err := os.Open(path)
+	if err != nil {
+		return err
+	}
+	//ImportItem takes ownership of f and closes it when done
+	name := filepath.Base(path)
+	if _, apiErr := a.api.ImportItem(a.ident, f, name, ""); apiErr != nil {
+		return apiErr
+	}
+	return nil
+}
+
+//Recheck an item by id.
+// {"jsonrpc": "2.0", "method": "opfs.Recheck", "params": ["sha1-98445134bbda2af1d11ad7258e0037f26d9bca12"], "id": 1}
+func (a *AdminRPC) Recheck(id string, res *NoReturnValue) error {
+	rpcDebug("rpc.Recheck(%s)", id)
+	if !IsValidId(id) {
+		return errInvalidIdentifier
+	}
+	if err := a.api.ReindexItem(id); err != nil {
+		return err
+	}
+	return nil
+}
+
+//Create the RPC server. The caller still has to set up the
+//listen/accept/serve loop.
+func (a *Api) RPCServer(ident Identity) (*rpc.Server, error) {
+	r := rpc.NewServer()
+	err := r.RegisterName("opfs", &AdminRPC{
+		api:   a,
+		ident: ident,
+	})
+	return r, err
+}

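Programmatic use of these methods from Go mirrors what the importer does. A minimal sketch of an admin client, assuming opfsd is listening on the default -admin address (the file path and item id are placeholders):

	package main

	import (
		"log"
		"net/rpc/jsonrpc"
	)

	func main() {
		client, err := jsonrpc.Dial("tcp", "127.0.0.1:5122")
		if err != nil {
			log.Fatalln("cannot reach OPFS admin RPC:", err)
		}
		defer client.Close()

		//both methods reply with an empty object, so decode into an empty struct
		var reply struct{}
		if err := client.Call("opfs.Import", "/path/to/photo.jpg", &reply); err != nil {
			log.Println("import failed:", err)
		}
		if err := client.Call("opfs.Recheck", "sha1-98445134bbda2af1d11ad7258e0037f26d9bca12", &reply); err != nil {
			log.Println("recheck failed:", err)
		}
	}
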
opfs/api.go (+2 -1)

@@ -267,7 +267,7 @@ func (a *Api) GetThumbnail(ident Identity, meta *ItemInfo, size ThumbSize) (data
 //
 func (a *Api) ImportItem(ident Identity, file ReadSeekCloser, name, mimetype string) (*ItemInfo, *apiError) {
 	defer file.Close()
-	apiDebug("api.ImportItem(%s)", name)
+	apiDebug("api.ImportItem(%s,%s)", ident, name)
 	if !a.ShouldAllow(ident, PolicyActionImport, nil) {
 		apiDebug("api.ImportItem(): Identity not allowed to import: %s", ident)
 		return nil, NewNotAllowedError(ident, PolicyActionImport, "Sorry, you don't have permission to import items.")
@@ -293,6 +293,7 @@ func (a *Api) ImportItem(ident Identity, file ReadSeekCloser, name, mimetype str
 		return nil, NewApiError(err, "Error reading file", http.StatusInternalServerError)
 	}
 	meta.Id = fmt.Sprintf("sha1-%x", hash.Sum(nil))
+	meta.Owner = ident.String()
 
 	//check if we already have this.
 	//OK this could be a Policy error

opfs/api_error.go (+4 -0)

@@ -40,6 +40,10 @@ var (
 		Code:    http.StatusConflict,
 		Message: "Duplicate Item",
 	}
+	errInvalidIdentifier = &apiError{
+		Code:    http.StatusBadRequest,
+		Message: "Invalid Item Id",
+	}
 )
 
 func NewNotAllowedError(ident Identity, action PolicyAction, msg string) *apiError {

opfs/identity.go (+2 -2)

@@ -2,7 +2,7 @@ package opfs
 
 // empty is considered anonymous
 type Identity struct {
-	admin bool
+	Admin bool
 	Name  string
 }
 
@@ -11,7 +11,7 @@ func (i Identity) IsAnonymous() bool {
 }
 
 func (i Identity) IsAdmin() bool {
-	return i.admin
+	return i.Admin
 }
 
 func (i Identity) String() string {

opfs/is_valid_id.go (+11 -0)

@@ -0,0 +1,11 @@
+package opfs
+
+import "regexp"
+
+var idRegex = regexp.MustCompile(`^sha1-[0-9a-f]{40}$`)
+
+//currently we use sha1 hashes as ids (content addressing), so this
+//check does not need to be flexible.
+func IsValidId(input string) bool {
+	return idRegex.MatchString(input)
+}

opfs/permission_layer_allow_all.go (+1 -1)

@@ -15,7 +15,7 @@ var _ PermissionLayer = AllowAllAccess{}
 
 func (aaa AllowAllAccess) Identify(r *http.Request) Identity {
 	return Identity{
-		admin: true,
+		Admin: true,
 		Name:  "",
 	}
 }

opfs/permission_layer_auth_google.go (+1 -1)

@@ -146,7 +146,7 @@ func (ga *GoogleAuth) Identify(r *http.Request) Identity {
 	email, _ := semail.(string)
 	return Identity{
 		Name:  email, //if empty, anonymous
-		admin: ga.IsAdminUser(email),
+		Admin: ga.IsAdminUser(email),
 	}
 }

transform-old-opfs-data-to-new-store.js (+173 -0)

@@ -0,0 +1,173 @@
+#!/usr/bin/env node
+var fs = require("fs");
+var dirname = require("path").dirname;
+
+var hexchars = "0123456789abcdef";
+var allHexPairs = hexchars.split("").reduce(function(acc, char, _, chars) {
+    return acc.concat(chars.map(function(c){
+        return char + c;
+    }));
+}, []);
+var sourceDirectories = allHexPairs.reduce(function(acc, pair) {
+    return acc.concat(allHexPairs.map(function(p){
+        return pair + "/" + p;
+    }));
+}, []);
+
+function findAllItems(baseDir) {
+    var items = sourceDirectories
+        .map(function(dir){ return baseDir + "/" + dir; })
+        .filter(directoryExists)
+        .reduce(function(acc, dir) {
+            return acc.concat(findItemsInDir(dir));
+        }, []);
+    console.log("found items: ", items.length);
+    return items;
+}
+
+function directoryExists(path) {
+    try {
+        fs.accessSync(path);
+        return true;
+    } catch(e) {
+        return false;
+    }
+}
+
+function findItemsInDir(path) {
+    return fs.readdirSync(path)
+        .filter(function(name) {
+            return /\.meta$/.test(name);
+        })
+        .map(function(name) {
+            try {
+                var hash = name.replace(/\.meta$/, "");
+                return {
+                    meta: JSON.parse(fs.readFileSync(path + "/" + name)),
+                    data: path + "/" + hash,
+                    hash: hash
+                };
+            } catch(e) {
+                console.error("error reading JSON file at: " + path + "/" + name);
+                return false;
+            }
+        })
+        .filter(Boolean);
+}
+
+//for reference, the shapes of the old and new metadata:
+var exampleSourceMeta = {
+  "Meta": {
+    "Device": "LGE Nexus 5",
+    "Orientation": 1,
+    "Height": 3264,
+    "Width": 2448,
+    "Size": 1470662
+  },
+  "Tags": [],
+  "Type": "Photo",
+  "Mime": "image/jpeg",
+  "Hash": "sha1-020e3af1270a62085d51f80b829cc5d1a4109311",
+  "Name": "IMG_20141005_164818.jpg",
+  "Added": "2014-10-06T09:53:21Z",
+  "Created": "2014-10-05T16:48:18Z",
+  "Location": "50.827084,3.422443",
+  "Description": ""
+};
+
+var exampleDestinationMeta = {
+  "type": "Photo",
+  "mime": "image/jpeg",
+  "name": "IMG_20141005_164818.jpg",
+  "added": "2014-10-06T09:53:21Z",
+  "created": "2014-10-05T16:48:18Z",
+  "location": "50.827084,3.422443",
+  "description": "",
+  "tags": [],
+  "meta": {
+    "orientation": 1,
+    "device": "LGE Nexus 5"
+  },
+  "id": "sha1-020e3af1270a62085d51f80b829cc5d1a4109311",
+  "size": 1470662,
+  "width": 2448,
+  "height": 3264,
+  "owner": "thechriswalker@gmail.com"
+};
+
+var knownMeta = ["Orientation", "Device", "Size", "Width", "Height"];
+
+function transformMeta(meta) {
+    var transformed = {
+        "id": meta.Hash,
+        "type": meta.Type,
+        "mime": meta.Mime,
+        "name": meta.Name,
+        "added": meta.Added,
+        "created": meta.Created,
+        "location": meta.Location,
+        "description": meta.Description,
+        "tags": meta.Tags.slice(),
+        "meta": {
+            "orientation": meta.Meta.Orientation,
+            "device": meta.Meta.Device
+        },
+        "size": meta.Meta.Size,
+        "width": meta.Meta.Width,
+        "height": meta.Meta.Height,
+        "owner": "thechriswalker@gmail.com"
+    };
+    //add any other meta, lowercasing the keys
+    Object.keys(meta.Meta)
+        .filter(function(key) { return knownMeta.indexOf(key) === -1; })
+        .forEach(function(key) {
+            transformed.meta[key.toLowerCase()] = meta.Meta[key];
+        });
+    return transformed;
+}
+
+function mkdirp(path) {
+    var stackToCreate = [];
+    var current = dirname(path);
+    while(!directoryExists(current)){
+        stackToCreate.unshift(current);
+        current = dirname(current);
+    }
+    stackToCreate.forEach(function(dir) {
+        fs.mkdirSync(dir);
+    });
+}
+
+function moveAndTransform(newStore) {
+    return function(item) {
+        //the new item base is newStore + /data/<2 hex>/hash.(data|meta)
+        //item.hash includes the "sha1-" prefix, so chars 5-6 are the first hex pair
+        var newBaseName = newStore + "/data/" + item.hash.slice(5, 7) + "/" + item.hash;
+        console.log("transforming metadata for:", item.hash);
+        var newMeta = transformMeta(item.meta);
+        mkdirp(newBaseName);
+        console.log("moving data: ", item.data, "=>", newBaseName + ".data");
+        fs.renameSync(item.data, newBaseName + ".data");
+        console.log("writing new metadata file: ", newBaseName + ".meta");
+        fs.writeFileSync(newBaseName + ".meta", JSON.stringify(newMeta), "utf8");
+    };
+}
+
+function theWholeShebang(oldStoreDir, newStoreDir) {
+    console.log("performing migration from", oldStoreDir, "to", newStoreDir);
+    findAllItems(oldStoreDir)
+        .forEach(moveAndTransform(newStoreDir));
+}
+
+var theOldStoreDir = process.argv[2];
+var theNewStoreDir = process.argv[3];
+
+if(!directoryExists(theOldStoreDir)) {
+    console.error("the old store doesn't exist: `" + theOldStoreDir + "`");
+    process.exit(1);
+}
+if(!directoryExists(theNewStoreDir)) {
+    console.error("the new store doesn't exist: `" + theNewStoreDir + "`");
+    process.exit(1);
+}
+
+theWholeShebang(theOldStoreDir, theNewStoreDir);
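
For a concrete picture of the migration, the example metadata above would move like this (a sketch; the old two-level shard directories and the sha1- prefix on old filenames are inferred from how the script scans and slices, and <oldStore>/<newStore> are the two CLI arguments):

	old: <oldStore>/<hh>/<hh>/sha1-020e3af1270a62085d51f80b829cc5d1a4109311.meta   (old-style JSON)
	     <oldStore>/<hh>/<hh>/sha1-020e3af1270a62085d51f80b829cc5d1a4109311        (raw data)
	new: <newStore>/data/02/sha1-020e3af1270a62085d51f80b829cc5d1a4109311.meta     (new-style JSON)
	     <newStore>/data/02/sha1-020e3af1270a62085d51f80b829cc5d1a4109311.data     (raw data)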