
fixup - add so much

 - refactor tests
 - add byte-size helper
 - update http api (for frontend)
 - refactor main API to have multiple `store`s (allowing us to put
   the index on FAST storage and the data on spinning rust; see the
   wiring sketch below)
 - started working on collections
Chris Walker · 2 years ago · parent commit d5011495f0
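
The multi-store refactor is the interesting bit. A minimal sketch of the wiring it enables (the store roots here are hypothetical mount points; the actual diff in cmd/opfsd/opfsd.go below reuses one store for both data and meta, rooted under *storePath):

    // Each concern gets its own BlobStore, so each can live on different media.
    dataStore := &opfs.LocalBlobStore{Root: "/mnt/slow/data"}   // bulk item bytes ("spinning rust")
    indexStore := &opfs.LocalBlobStore{Root: "/mnt/fast/index"} // metadata, scanned at startup
    cacheStore := &opfs.LocalBlobStore{Root: "/mnt/fast/cache"} // regenerable thumbnails

    api := opfs.NewAPI(
        opfs.VersionInfo{UIVersion: "<unknown>"},
        opfs.AllowAllAccess("user@example.com"), // placeholder permission layer
        dataStore,  // data
        indexStore, // meta
        cacheStore, // cache
        maxConcurrentThumbnailCreation,
    )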

build_with_version.sh (+8, -1)

@@ -1,2 +1,9 @@
 #!/bin/sh
-go build -o bin/opfsd -ldflags "-X github.com/thechriswalker/opfs-server/opfs.VERSION $(git describe --tags --always --dirty)" cmd/opfsd/opfsd.go
+PACKAGE="github.com/thechriswalker/opfs-server/opfs"
+VERSION="$(git describe --tags --always --dirty)"
+
+
+go build \
+    -o bin/opfsd \
+    -ldflags "-X ${PACKAGE}.VERSION ${VERSION}" \
+    cmd/opfsd/opfsd.go
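
A hedged side note on the -ldflags line: newer Go toolchains expect the -X value in importpath.name=value form rather than the space-separated form used here, so on a current toolchain the flag would read:

    -ldflags "-X ${PACKAGE}.VERSION=${VERSION}"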

cmd/opfsd/opfsd.go (+30, -10)

@@ -4,11 +4,13 @@ import (
 	"flag"
 	"log"
 	"net"
+	"net/http"
 	"net/rpc/jsonrpc"
 	"regexp"
 	"time"
 
 	"github.com/thechriswalker/opfs-server/opfs"
+	"github.com/thechriswalker/opfs-server/pushstate"
 	"github.com/thechriswalker/opfs-server/types/photo"
 	"github.com/thechriswalker/opfs-server/types/video"
 )
@@ -20,8 +22,9 @@ var (
 	storePath      = flag.String("store", "/srv/personal/opfs-temp/store", "path to the main data store")
 	gaClientId     = flag.String("ga-client", "", "your google auth client id, from the dev console")
 	gaClientSecret = flag.String("ga-secret", "", "your google auth client secret, from the dev console")
-	origin         = flag.String("origin", "http://localhost:8080", "the origin your app is served on (for oauth redirects)")
+	origin         = flag.String("origin", "http://localhost:5123", "the origin your app is served on (for oauth redirects)")
 	adminAddr      = flag.String("admin", "127.0.0.1:5122", "the admin socket address for JSON-RPC")
+	uiDir          = flag.String("ui-dir", "", "the path to the root dir for the UI")
 )
 
 var idRegex = regexp.MustCompile(`^sha1-[0-9a-f]{40}$`)
@@ -29,23 +32,33 @@ var idRegex = regexp.MustCompile(`^sha1-[0-9a-f]{40}$`)
 func main() {
 	flag.Parse()
 
-	validUser := func(email string) bool {
-		return true
-	}
+	// // this function allows a config file with allowed users in it.
+	// validUser := func(email string) bool {
+	// 	return true
+	// }
 
-	adminUser := func(email string) bool {
-		return false && email == "thechriswalker@gmail.com"
-	}
+	// // this function allows a config file with admin users in it.
+	// adminUser := func(email string) bool {
+	// 	return /*false &&*/ email == "thechriswalker@gmail.com"
+	// }
 
+	// this is the identity used by default when the Admin RPC is accessed.
 	adminIdent := opfs.Identity{Name: "thechriswalker@gmail.com", Admin: true}
 
+	cacheStore := &opfs.LocalBlobStore{Root: *storePath + "/cache"}
+	itemStore := &opfs.LocalBlobStore{Root: *storePath + "/data"}
+
 	//create the API
+	//func NewAPI(info VersionInfo, access PermissionLayer, datastore, indexstore, cachestore BlobStore, maxThumbs int) *Api {
 	api := opfs.NewAPI(
 		opfs.VersionInfo{
 			UIVersion: "<unknown>",
 		},
-		opfs.NewGoogleAuth(*gaClientId, *gaClientSecret, *origin, validUser, adminUser),
-		&opfs.LocalBlobStore{Root: *storePath},
+		opfs.AllowAllAccess("thechriswalker@gmail.com"),
+		//opfs.NewGoogleAuth(*gaClientId, *gaClientSecret, *origin, validUser, adminUser),
+		itemStore,  //data
+		itemStore,  //meta
+		cacheStore, //cache
 		maxConcurrentThumbnailCreation,
 	)
 
@@ -86,9 +99,16 @@ func main() {
 		}
 	}()
 
+	var ui http.Handler
+	if *uiDir != "" {
+		ui = pushstate.NewHandler(http.Dir(*uiDir))
+	} else {
+		ui = nil
+	}
+
 	//fire up the http api
 	log.Println("HTTP API Listening at", *listenAddr)
-	if err := opfs.ListenAndServe(api, *listenAddr, true, nil); err != nil {
+	if err := opfs.ListenAndServe(api, *listenAddr, true, ui); err != nil {
 		log.Fatal(err)
 	}
 }

index/filter_stringer_test.go (+21, -84)

@@ -17,90 +17,27 @@ var (
 )
 
 var stringerCases = []sTest{
-	sTest{
-		In:  NewStringTermFilter("field", "term"),
-		Out: "string(field=field, term=term)",
-	},
-	sTest{
-		In:  NewStringPrefixFilter("field", "prefix"),
-		Out: "string(field=field, prefix=prefix*)",
-	},
-	sTest{
-		In:  NewStringRangeFilter("field", "min", "max", true, false),
-		Out: "string(field=field, range=[min,max))",
-	},
-	sTest{
-		In:  NewStringRangeFilter("field", "min", "max", false, true),
-		Out: "string(field=field, range=(min,max])",
-	},
-	sTest{
-		In:  NewInclusiveStringRangeFilter("field", "min", "max"),
-		Out: "string(field=field, range=[min,max])",
-	},
-	sTest{
-		In:  NewExclusiveStringRangeFilter("field", "min", "max"),
-		Out: "string(field=field, range=(min,max))",
-	},
-	sTest{
-		In:  NewMatchAllFilter(),
-		Out: "all()",
-	},
-	sTest{
-		In:  NewLogicalNotFilter(NewMatchAllFilter()),
-		Out: "not(all())",
-	},
-	sTest{
-		In:  NewFieldMissingFilter("field"),
-		Out: "missing(field=field)",
-	},
-	sTest{
-		In:  NewLogicalAndFilter(NewMatchAllFilter(), NewFieldMissingFilter("field")),
-		Out: "and(all() missing(field=field))",
-	},
-	sTest{
-		In:  NewLogicalOrFilter(NewMatchAllFilter(), NewFieldMissingFilter("field")),
-		Out: "or(all() missing(field=field))",
-	},
-	sTest{
-		In:  NewBooleanTermFilter("field", true),
-		Out: "bool(field=field, term=true)",
-	},
-	sTest{
-		In:  NewIntTermFilter("field", 1),
-		Out: "int(field=field, term=1)",
-	},
-	sTest{
-		In:  NewIntRangeFilter("field", 1, 2, true, false),
-		Out: "int(field=field, range=[1,2))",
-	},
-	sTest{
-		In:  NewInclusiveIntRangeFilter("field", 1, 2),
-		Out: "int(field=field, range=[1,2])",
-	},
-	sTest{
-		In:  NewExclusiveIntRangeFilter("field", 1, 2),
-		Out: "int(field=field, range=(1,2))",
-	},
-	sTest{
-		In:  NewTimeTermFilter("field", dayOne),
-		Out: "time(field=field, term=1979-12-09T12:00:00Z)",
-	},
-	sTest{
-		In:  NewTimeRangeFilter("field", dayOne, dayTwo, true, false),
-		Out: "time(field=field, range=[1979-12-09T12:00:00Z,1981-01-13T07:50:00Z))",
-	},
-	sTest{
-		In:  NewInclusiveTimeRangeFilter("field", dayOne, dayTwo),
-		Out: "time(field=field, range=[1979-12-09T12:00:00Z,1981-01-13T07:50:00Z])",
-	},
-	sTest{
-		In:  NewExclusiveTimeRangeFilter("field", dayOne, dayTwo),
-		Out: "time(field=field, range=(1979-12-09T12:00:00Z,1981-01-13T07:50:00Z))",
-	},
-	sTest{
-		In:  NewGeoDistanceFilter("field", -1.23, 2.34, 1001.1001),
-		Out: "geo(field=field, point=-1.23,2.34, radius=1001.10m)",
-	},
+	{NewStringTermFilter("field", "term"), "string(field=field, term=term)"},
+	{NewStringPrefixFilter("field", "prefix"), "string(field=field, prefix=prefix*)"},
+	{NewStringRangeFilter("field", "min", "max", true, false), "string(field=field, range=[min,max))"},
+	{NewStringRangeFilter("field", "min", "max", false, true), "string(field=field, range=(min,max])"},
+	{NewInclusiveStringRangeFilter("field", "min", "max"), "string(field=field, range=[min,max])"},
+	{NewExclusiveStringRangeFilter("field", "min", "max"), "string(field=field, range=(min,max))"},
+	{NewMatchAllFilter(), "all()"},
+	{NewLogicalNotFilter(NewMatchAllFilter()), "not(all())"},
+	{NewFieldMissingFilter("field"), "missing(field=field)"},
+	{NewLogicalAndFilter(NewMatchAllFilter(), NewFieldMissingFilter("field")), "and(all() missing(field=field))"},
+	{NewLogicalOrFilter(NewMatchAllFilter(), NewFieldMissingFilter("field")), "or(all() missing(field=field))"},
+	{NewBooleanTermFilter("field", true), "bool(field=field, term=true)"},
+	{NewIntTermFilter("field", 1), "int(field=field, term=1)"},
+	{NewIntRangeFilter("field", 1, 2, true, false), "int(field=field, range=[1,2))"},
+	{NewInclusiveIntRangeFilter("field", 1, 2), "int(field=field, range=[1,2])"},
+	{NewExclusiveIntRangeFilter("field", 1, 2), "int(field=field, range=(1,2))"},
+	{NewTimeTermFilter("field", dayOne), "time(field=field, term=1979-12-09T12:00:00Z)"},
+	{NewTimeRangeFilter("field", dayOne, dayTwo, true, false), "time(field=field, range=[1979-12-09T12:00:00Z,1981-01-13T07:50:00Z))"},
+	{NewInclusiveTimeRangeFilter("field", dayOne, dayTwo), "time(field=field, range=[1979-12-09T12:00:00Z,1981-01-13T07:50:00Z])"},
+	{NewExclusiveTimeRangeFilter("field", dayOne, dayTwo), "time(field=field, range=(1979-12-09T12:00:00Z,1981-01-13T07:50:00Z))"},
+	{NewGeoDistanceFilter("field", -1.23, 2.34, 1001.1001), "geo(field=field, point=-1.23,2.34, radius=1001.10m)"},
 }
 
 func TestFilterStringer(t *testing.T) {

index/filter_test.go (+2, -0)

@@ -36,11 +36,13 @@ func TestStringFilters(t *testing.T) {
 			&doc{id: "b", fields: tf("id:b", "s:b")},
 			&doc{id: "c", fields: tf("id:c", "s:c")},
 			&doc{id: "d", fields: tf("id:d")},
+			&doc{id: "e", fields: tf("id:e", "s:foo")},
 		},
 		cases: []*filterCase{
 			fc(NewStringTermFilter("id", "a"), "a"),
 			fc(NewStringPrefixFilter("id", "b"), "b"),
 			fc(NewStringPrefixFilter("string", "test"), "a", "b", "c"),
+			fc(NewStringPrefixFilter("string", "f"), "e"),
 			fc(NewStringRangeFilter("string", "testa", "testc", true, false), "a", "b"),
 			fc(NewInclusiveStringRangeFilter("string", "testa", "testc"), "a", "b", "c"),
 			fc(NewExclusiveStringRangeFilter("string", "testa", "testc"), "b"),

index/index_test.go (+6, -6)

@@ -91,19 +91,19 @@
 	testTomorrow = time.Date(2015, 11, 10, 14, 13, 12, 0, time.UTC).Truncate(time.Second)
 )
 
-var ()
-
 var testFields = map[string]*Field{
 	"id:a": NewStringField("id", "a"),
 	"id:b": NewStringField("id", "b"),
 	"id:c": NewStringField("id", "c"),
 	"id:d": NewStringField("id", "d"),
+	"id:e": NewStringField("id", "e"),
 
 	//strings
-	"s:a": NewStringField("string", "testa"),
-	"s:b": NewStringField("string", "testb"),
-	"s:c": NewStringField("string", "testc"),
-	"s:d": NewStringField("string", "testd"),
+	"s:a":   NewStringField("string", "testa"),
+	"s:b":   NewStringField("string", "testb"),
+	"s:c":   NewStringField("string", "testc"),
+	"s:d":   NewStringField("string", "testd"),
+	"s:foo": NewStringField("string", "foo"),
 
 	//int
 	"i:0": NewIntField("int", 0),

index/query_parser_test.go (+43, -172)

@@ -11,181 +11,52 @@ type Case struct {
 }
 
 var goodCases = []Case{
-	Case{
-		Input:  ``,
-		Output: `all()`,
-	},
-	Case{
-		Input:  `tag:/star`,
-		Output: `string(field=tag, term=/star)`,
-	},
-	Case{
-		Input:  `   -"tag":/star`,
-		Output: `not(string(field=tag, term=/star))`,
-	},
-	Case{
-		Input:  `tag:album/*   `,
-		Output: `string(field=tag, prefix=album/*)`,
-	},
-	Case{
-		Input:  `meta.device:"LGE Nexus 5"`,
-		Output: `string(field=meta.device, term=LGE Nexus 5)`,
-	},
-	Case{
-		Input:  `size:[1000 to 1500]`,
-		Output: `int(field=size, range=[1000,1500])`,
-	},
-	Case{
-		Input:  `size:("1000" to "1500")`,
-		Output: `string(field=size, range=(1000,1500))`,
-	},
-	Case{
-		Input:  `created:[2014-01-01T00:00:00Z to 2015-01-01T00:00:00Z]`,
-		Output: `time(field=created, range=[2014-01-01T00:00:00Z,2015-01-01T00:00:00Z])`,
-	},
-	Case{
-		Input:  `location:near(0.00,0.00,5000)`,
-		Output: `geo(field=location, point=0.00,0.00, radius=5000.00m)`,
-	},
-	Case{
-		Input:  `location:neAr(-1.00,1.234,5000)`, //mixed case allowed
-		Output: `geo(field=location, point=-1.00,1.23, radius=5000.00m)`,
-	},
-	Case{
-		Input:  `meta.device:"LGE Nexus 5" -size:[1000 to 1500]`,
-		Output: `and(string(field=meta.device, term=LGE Nexus 5) not(int(field=size, range=[1000,1500])))`,
-	},
+	{``, `all()`},
+	{`tag:/star`, `string(field=tag, term=/star)`},
+	{`   -"tag":/star`, `not(string(field=tag, term=/star))`},
+	{`tag:album/*   `, `string(field=tag, prefix=album/*)`},
+	{`meta.device:"LGE Nexus 5"`, `string(field=meta.device, term=LGE Nexus 5)`},
+	{`size:[1000 to 1500]`, `int(field=size, range=[1000,1500])`},
+	{`size:("1000" to "1500")`, `string(field=size, range=(1000,1500))`},
+	{`created:[2014-01-01T00:00:00Z to 2015-01-01T00:00:00Z]`, `time(field=created, range=[2014-01-01T00:00:00Z,2015-01-01T00:00:00Z])`},
+	{`location:near(0.00,0.00,5000)`, `geo(field=location, point=0.00,0.00, radius=5000.00m)`},
+	{`location:neAr(-1.00,1.234,5000)`, `geo(field=location, point=-1.00,1.23, radius=5000.00m)`}, //mixed case allowed
+	{`meta.device:"LGE Nexus 5" -size:[1000 to 1500]`, `and(string(field=meta.device, term=LGE Nexus 5) not(int(field=size, range=[1000,1500])))`},
 }
 
 var badCases = []Case{
-	Case{
-		Input:  `test:`, //no term definition
-		Output: `expected 'term definition'`,
-	},
-	Case{
-		Input:  `test`, //no colon
-		Output: `token EOF`,
-	},
-	Case{
-		Input:  `:`, //bad start
-		Output: `token COLON`,
-	},
-	Case{
-		Input:  `&`, // illegal,
-		Output: `token ILLEGAL`,
-	},
-	Case{
-		Input:  `test:"unexpected eof`, //unexpected eof
-		Output: `token EOF`,
-	},
-	Case{
-		Input:  `field:1,23`, //comma not allowed
-		Output: `token COMMA`,
-	},
-	Case{
-		Input:  `"field:1.23":[21342 234]`, //bad range (no " to ")
-		Output: `expected 'to' at`,
-	},
-	Case{
-		Input:  `field:near(1,1,string)`, //string where float expected
-		Output: `expected 'floating point number'`,
-	},
-	Case{
-		Input:  `1-23:field`, //bad identifier
-		Output: `token NEGATION`,
-	},
-	Case{
-		Input:  `"field":[string to 2014-01-01T12:12:12Z]`, //bad range mixed types
-		Output: `'range types must match'`,
-	},
-	Case{
-		Input:  `"field":[string,2014-01-01T12:12:12Z]`, //bad range comma
-		Output: `token COMMA`,
-	},
-	Case{
-		Input:  `"field":[:string,2014-01-01T12:12:12Z]`, //bad range comma
-		Output: `token COLON`,
-	},
-	Case{
-		Input:  `"field":[2014-01-01T12:12:12Z to :]`, //bad range comma
-		Output: `token COLON`,
-	},
-	Case{
-		Input:  `"field":[string to,2014-01-01T12:12:12Z]`, //bad range comma
-		Output: `token COMMA`,
-	},
-	Case{
-		Input:  `"field":near[1,2,3]`, //bad near wrong braces
-		Output: `token OPEN_SQUARE`,
-	},
-	Case{
-		Input:  `"field":neaR(,,)`, //bad near. unexpected comma
-		Output: `token COMMA`,
-	},
-	Case{
-		Input:  `"field":neaR(1:1,0)`, //bad near. unexpected colon
-		Output: `token COLON`,
-	},
-	Case{
-		Input:  `"field":neaR(1,1,0]`, //bad near. unexpected square close
-		Output: `token CLOSE_SQUARE`,
-	},
-	Case{
-		Input:  `"field":neaR(1,1,0:`, //bad near. unexpected square close
-		Output: `token COLON`,
-	},
-	Case{
-		Input:  `"field\n"`, //bad quoted string
-		Output: `token ILLEGAL`,
-	},
-	Case{
-		Input:  `"field\n"`, //bad quoted string
-		Output: `token ILLEGAL`,
-	},
-	Case{
-		Input:  `"field\":test`, //bad quoted string, unexpected EOF
-		Output: `token EOF`,
-	},
-	Case{
-		Input:  `"field":1234-56-78T90:12:34Z`, //bad time
-		Output: `valid ISO8601`,
-	},
-	Case{
-		Input:  `"field":2015-`, //bad time
-		Output: `2 digit month`,
-	},
-	Case{
-		Input:  `"field":2015-11`, //bad time
-		Output: `expected '-'`,
-	},
-	Case{
-		Input:  `"field":2015-11-01`, //bad time
-		Output: `2 digit day`,
-	},
-	Case{
-		Input:  `"field":2015-11-01T`, //bad time
-		Output: `literal 'T'`,
-	},
-	Case{
-		Input:  `"field":2015-11-01T12`, //bad time
-		Output: `':'`,
-	},
-	Case{
-		Input:  `"field":2015-11-01T12:`, //bad time
-		Output: `2 digit minute`,
-	},
-	Case{
-		Input:  `"field":2015-11-01T12:12`, //bad time
-		Output: `':'`,
-	},
-	Case{
-		Input:  `"field":2015-11-01T12:12:12`, //bad time
-		Output: `2 digit second`,
-	},
-	Case{
-		Input:  `"field":2015-11-01T12:12:12X`, //bad time
-		Output: `literal 'Z'`,
-	},
+	{`test:`, `expected 'term definition'`},                                  //no term definition
+	{`test`, `token EOF`},                                                    //no colon
+	{`:`, `token COLON`},                                                     //bad start
+	{`&`, `token ILLEGAL`},                                                   // illegal
+	{`test:"unexpected eof`, `token EOF`},                                    //unexpected eof
+	{`field:1,23`, `token COMMA`},                                            //comma not allowed
+	{`"field:1.23":[21342 234]`, `expected 'to' at`},                         //bad range (no " to ")
+	{`field:near(1,1,string)`, `expected 'floating point number'`},           //string where float expected
+	{`1-23:field`, `token NEGATION`},                                         //bad identifier
+	{`"field":[string to 2014-01-01T12:12:12Z]`, `'range types must match'`}, //bad range mixed types
+	{`"field":[string,2014-01-01T12:12:12Z]`, `token COMMA`},                 //bad range comma
+	{`"field":[:string,2014-01-01T12:12:12Z]`, `token COLON`},                //bad range comma
+	{`"field":[2014-01-01T12:12:12Z to :]`, `token COLON`},                   //bad range comma
+	{`"field":[string to,2014-01-01T12:12:12Z]`, `token COMMA`},              //bad range comma
+	{`"field":near[1,2,3]`, `token OPEN_SQUARE`},                             //bad near wrong braces
+	{`"field":neaR(,,)`, `token COMMA`},                                      //bad near. unexpected comma
+	{`"field":neaR(1:1,0)`, `token COLON`},                                   //bad near. unexpected colon
+	{`"field":neaR(1,1,0]`, `token CLOSE_SQUARE`},                            //bad near. unexpected square close
+	{`"field":neaR(1,1,0:`, `token COLON`},                                   //bad near. unexpected square close
+	{`"field\n"`, `token ILLEGAL`},                                           //bad quoted string
+	{`"field\n"`, `token ILLEGAL`},                                           //bad quoted string
+	{`"field\":test`, `token EOF`},                                           //bad quoted string, unexpected EOF
+	{`"field":1234-56-78T90:12:34Z`, `valid ISO8601`},                        //bad time
+	{`"field":2015-`, `2 digit month`},                                       //bad time
+	{`"field":2015-11`, `expected '-'`},                                      //bad time
+	{`"field":2015-11-01`, `2 digit day`},                                    //bad time
+	{`"field":2015-11-01T`, `literal 'T'`},                                   //bad time
+	{`"field":2015-11-01T12`, `':'`},                                         //bad time
+	{`"field":2015-11-01T12:`, `2 digit minute`},                             //bad time
+	{`"field":2015-11-01T12:12`, `':'`},                                      //bad time
+	{`"field":2015-11-01T12:12:12`, `2 digit second`},                        //bad time
+	{`"field":2015-11-01T12:12:12X`, `literal 'Z'`},                          //bad time
 }
 
 func TestQueryParser(t *testing.T) {

opfs/api.go (+17, -13)

@@ -22,7 +22,9 @@ var apiDebug = debug.Logger("api")
 // It's methods are the core API of OPFS
 type Api struct {
 	Permissions PermissionLayer      //the authorisation/authentication module, if nil, the Allow access controller is used.
-	Store       BlobStore            //the data store for persistence of item data
+	Data        BlobStore            //the data store for persistence of item data
+	Index       BlobStore            //the data store for persistence of meta data
+	Cache       BlobStore            //the data store for persistence of cache data
 	Inspectors  map[string]Inspector //the mime-type => Inspector map.
 	Info        VersionInfo          //the internal version string, UI version, other meta data
 	Items       ItemIndexer          //This is our item indexer
@@ -31,17 +33,19 @@ type Api struct {
 }
 
 // this is getting to be a crazy intializer. lets make an init insteads...
-func NewAPI(info VersionInfo, access PermissionLayer, store BlobStore, maxThumbs int) *Api {
+func NewAPI(info VersionInfo, access PermissionLayer, datastore, indexstore, cachestore BlobStore, maxThumbs int) *Api {
 	a := &Api{
 		Permissions: access,
-		Store:       store,
+		Data:        datastore,
+		Index:       indexstore,
+		Cache:       cachestore,
 		Info:        info,
 		Items:       NewItemIndex(),
 		Collections: NewCollectionIndex(),
 		thumbs:      NewRateLimiter(maxThumbs),
 	}
 	if a.Permissions == nil {
-		a.Permissions = AllowAllAccess{}
+		a.Permissions = AllowAllAccess("")
 	}
 	return a
 }
@@ -71,7 +75,7 @@ func (a *Api) RegisterInspector(inspector Inspector, mimetypes ...string) {
 
 func (a *Api) IndexTheStore() error {
 	//this is the only initialisation needed.
-	iterator := getMetaIterator(a.Store)
+	iterator := getMetaIterator(a)
 	for file := range iterator.Files() {
 		if item, err := readMetaFromFile(file); err != nil {
 			iterator.Abort()
@@ -85,7 +89,7 @@ func (a *Api) IndexTheStore() error {
 		return err
 	}
 	//now collections.
-	iterator = getCollectionIterator(a.Store)
+	iterator = getCollectionIterator(a)
 	for file := range iterator.Files() {
 		if col, err := readCollectionFromFile(file); err != nil {
 			iterator.Abort()
@@ -101,7 +105,7 @@ func (a *Api) IndexTheStore() error {
 // this is when one of the other tools updates the store
 // then we (re)index the document.
 func (a *Api) ReindexItem(id string) error {
-	if meta, err := getMeta(a.Store, id); err != nil && !os.IsNotExist(err) {
+	if meta, err := getMeta(a, id); err != nil && !os.IsNotExist(err) {
 		return err
 	} else {
 		if err != nil {
@@ -170,7 +174,7 @@ func (a *Api) GetTags(ident Identity, prefix string) (map[string]int, *apiError)
 			index.NewStringPrefixFilter("tags", prefix),
 		)
 	}
-	return a.Items.Tags(filter), nil
+	return a.Items.Tags(filter, prefix), nil
 }
 
 //
@@ -203,7 +207,7 @@ func (a *Api) GetData(ident Identity, id string) (ReadSeekCloser, *apiError) {
 		//this will handle the Access Control Policy and the not found case.
 		return nil, err
 	}
-	file, err := getData(a.Store, id)
+	file, err := getData(a, id)
 	if err != nil {
 		if os.IsNotExist(err) {
 			return nil, errItemNotFound
@@ -227,7 +231,7 @@ func (a *Api) GetThumbnail(ident Identity, meta *ItemInfo, size ThumbSize) (data
 	}
 	// ok, all set. check cache, otherwise create
 	key := meta.Id + "." + size.String() + ".jpg"
-	if r, err := getCache(a.Store, key); err != nil {
+	if r, err := getCache(a, key); err != nil {
 		if os.IsNotExist(err) {
 			//we need to generate the thumbnail
 			v, err := a.thumbs.Do(key, func() (interface{}, error) {
@@ -246,7 +250,7 @@ func (a *Api) GetThumbnail(ident Identity, meta *ItemInfo, size ThumbSize) (data
 						return nil, err
 					} else {
 						//Store thumbnail in cache
-						return thumb, putCache(a.Store, key, bytes.NewReader(thumb))
+						return thumb, putCache(a, key, bytes.NewReader(thumb))
 					}
 
 				}
@@ -315,7 +319,7 @@ func (a *Api) ImportItem(ident Identity, file ReadSeekCloser, name, mimetype str
 
 	//rewind again. the store interface expects only a Reader, not a Seeker
 	file.Seek(0, 0)
-	if err := putItem(a.Store, meta.Id, file, meta); err != nil {
+	if err := putItem(a, meta.Id, file, meta); err != nil {
 		return nil, NewApiError(err, "Error storing file", http.StatusInternalServerError)
 	}
 	//now index it.
@@ -342,7 +346,7 @@ func (a *Api) UpdateMeta(ident Identity, id string, update *ItemUpdate) (*ItemIn
 		return nil, NewNotAllowedError(ident, PolicyActionUpdateMeta, "Sorry, you don't have permission to update that item")
 	}
 	updated := meta.ApplyUpdate(update)
-	if err := putMeta(a.Store, id, updated); err != nil {
+	if err := putMeta(a, id, updated); err != nil {
 		return nil, NewApiError(err, "Could not update metadata", http.StatusInternalServerError)
 	}
 	if err := a.Items.Update(updated); err != nil {

opfs/byte_size.go (+74, -0)

@@ -0,0 +1,74 @@
+package opfs
+
+import "fmt"
+
+type ByteSize uint64
+
+const (
+	B  ByteSize = 1
+	KB          = B << 10
+	MB          = KB << 10
+	GB          = MB << 10
+	TB          = GB << 10
+	PB          = TB << 10
+	EB          = PB << 10
+)
+
+func (b ByteSize) String() string {
+	switch {
+	case b > EB:
+		return fmt.Sprintf("%.1f EB", b.EBytes())
+	case b > PB:
+		return fmt.Sprintf("%.1f PB", b.PBytes())
+	case b > TB:
+		return fmt.Sprintf("%.1f TB", b.TBytes())
+	case b > GB:
+		return fmt.Sprintf("%.1f GB", b.GBytes())
+	case b > MB:
+		return fmt.Sprintf("%.1f MB", b.MBytes())
+	case b > KB:
+		return fmt.Sprintf("%.1f KB", b.KBytes())
+	default:
+		return fmt.Sprintf("%d B", b)
+	}
+}
+
+func (b ByteSize) Bytes() uint64 {
+	return uint64(b)
+}
+
+func (b ByteSize) KBytes() float64 {
+	v := b / KB
+	r := b % KB
+	return float64(v) + float64(r)/float64(KB)
+}
+
+func (b ByteSize) MBytes() float64 {
+	v := b / MB
+	r := b % MB
+	return float64(v) + float64(r)/float64(MB)
+}
+
+func (b ByteSize) GBytes() float64 {
+	v := b / GB
+	r := b % GB
+	return float64(v) + float64(r)/float64(GB)
+}
+
+func (b ByteSize) TBytes() float64 {
+	v := b / TB
+	r := b % TB
+	return float64(v) + float64(r)/float64(TB)
+}
+
+func (b ByteSize) PBytes() float64 {
+	v := b / PB
+	r := b % PB
+	return float64(v) + float64(r)/float64(PB)
+}
+
+func (b ByteSize) EBytes() float64 {
+	v := b / EB
+	r := b % EB
+	return float64(v) + float64(r)/float64(EB)
+}
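
A quick usage sketch for the new helper (values chosen for illustration; fmt.Println invokes the String method):

    fmt.Println(opfs.ByteSize(1536))    // "1.5 KB"
    fmt.Println(opfs.ByteSize(3 << 30)) // "3.0 GB"
    fmt.Println(opfs.ByteSize(512))     // "512 B"

Note the switch uses strict comparisons (case b > KB), so exactly 1024 bytes still prints as "1024 B".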

opfs/collections.go (+8, -3)

@@ -1,11 +1,16 @@
 package opfs
 
 import (
+	"crypto/sha1"
+	"fmt"
+
 	"github.com/thechriswalker/opfs-server/index"
 )
 
 // These will be stored in an index, and persisted in the BlobStore
 type Collection struct {
+	Id          string   `json:"id"` // sha1 hash of name, used for storage
+	Title       string   `json:"title"`
 	Description string   `json:"description"`
 	Owner       string   `json:"owner"`
 	Tag         string   `json:"tag"`
@@ -13,11 +18,11 @@ type Collection struct {
 	Public      bool     `json:"public"`
 }
 
-func (c Collection) GetID() string {
-	return c.Owner + "/" + c.Tag
+func (c *Collection) GetID() string {
+	return fmt.Sprintf("sha1-%x", sha1.Sum([]byte(c.Owner+c.Tag)))
 }
 
-func (c Collection) GetFields() []*index.Field {
+func (c *Collection) GetFields() []*index.Field {
 	//we have tag, owner, public and shared.
 	//we do not index description.
 	numFields := 3
opfs/http.go (+29, -3)

@@ -7,7 +7,9 @@ import (
 	"net/http"
 	"os"
 	"path"
+	"runtime"
 	"strconv"
+	"strings"
 
 	"github.com/thechriswalker/opfs-server/debug"
 
@@ -19,6 +21,10 @@ const DEFAULT_SEARCH_RESULT_SIZE = 45
 const UI_PREFIX = "/ui"
 const API_PREFIX = "/api"
 
+//this is automagically replaced in HTTP api search contexts.
+//it means you cannot search for this value...
+const QS_MAGIC_IDENTITY = `__IDENTITY__`
+
 var httpDebug = debug.Logger("http")
 
 type httpApi struct {
@@ -27,12 +33,12 @@ type httpApi struct {
 }
 
 // This starts the HTTP service at the given address.
-func ListenAndServe(api *Api, address string, allowImportOverHTTP bool, ui http.FileSystem) error {
+func ListenAndServe(api *Api, address string, allowImportOverHTTP bool, ui http.Handler) error {
 	return http.ListenAndServe(address, &httpApi{api: api, mux: createMux(api, allowImportOverHTTP, ui)})
 }
 
 //build our routes
-func createMux(api *Api, allowImportOverHTTP bool, ui http.FileSystem) http.Handler {
+func createMux(api *Api, allowImportOverHTTP bool, ui http.Handler) http.Handler {
 	/*
 			Mux to provide the HTTP API calls.
 
@@ -55,7 +61,8 @@ func createMux(api *Api, allowImportOverHTTP bool, ui http.FileSystem) http.Hand
 	router := mux.NewRouter()
 	if ui != nil {
 		router.Handle("/", http.RedirectHandler(UI_PREFIX, http.StatusMovedPermanently)).Methods("HEAD", "GET")
-		router.Handle(UI_PREFIX, http.StripPrefix(UI_PREFIX, http.FileServer(ui))).Methods("HEAD", "GET")
+		router.PathPrefix(UI_PREFIX).Handler(http.StripPrefix(UI_PREFIX, ui)).Methods("HEAD", "GET")
+
 	} else {
 		//if no UI, then lets put the info on "/" as well.
 		router.Handle("/", http.RedirectHandler(API_PREFIX+"/info", http.StatusMovedPermanently)).Methods("GET", "HEAD")
@@ -232,6 +239,8 @@ func getPagination(r *http.Request) *Pagination {
 
 func apiGetInfo(a *Api, r *http.Request, ident Identity) ApiResponse {
 	httpDebug("ApiCall: apiGetInfo")
+	memstats := &runtime.MemStats{}
+	runtime.ReadMemStats(memstats)
 	data := map[string]interface{}{
 		"opfs": a.GetInfo(),
 		"user": map[string]interface{}{
@@ -239,6 +248,20 @@ func apiGetInfo(a *Api, r *http.Request, ident Identity) ApiResponse {
 			"admin":     ident.IsAdmin(),
 			"anonymous": ident.IsAnonymous(),
 		},
+		"memory": map[string]string{
+			"alloc":        ByteSize(memstats.Alloc).String(),
+			"totalAlloc":   ByteSize(memstats.TotalAlloc).String(),
+			"sys":          ByteSize(memstats.Sys).String(),
+			"lookups":      ByteSize(memstats.Lookups).String(),
+			"mallocs":      ByteSize(memstats.Mallocs).String(),
+			"frees":        ByteSize(memstats.Frees).String(),
+			"heapAlloc":    ByteSize(memstats.HeapAlloc).String(),
+			"heapSys":      ByteSize(memstats.HeapSys).String(),
+			"heapIdle":     ByteSize(memstats.HeapIdle).String(),
+			"heapInuse":    ByteSize(memstats.HeapInuse).String(),
+			"heapReleased": ByteSize(memstats.HeapReleased).String(),
+			"heapObjects":  ByteSize(memstats.HeapObjects).String(),
+		},
 	}
 	return &jsonApiResponse{
 		code: http.StatusOK,
@@ -286,6 +309,9 @@ func apiSearch(a *Api, r *http.Request, ident Identity) ApiResponse {
 			}
 		}
 	}
+	// before we parse, we perform the one bit of magic in the HTTP api.
+	// in the querystring we transform "__IDENTITY__" -> "\"identity\""
+	query.QueryString = strings.Replace(query.QueryString, QS_MAGIC_IDENTITY, strconv.Quote(ident.String()), -1)
 	httpDebug("query.Parse")
 	err := query.Parse()
 	if err != nil {
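
A worked example of the __IDENTITY__ magic above, assuming the authenticated identity stringifies to user@example.com and a hypothetical indexed field "owner": a search for

    owner:__IDENTITY__

is rewritten before parsing to

    owner:"user@example.com"

since strconv.Quote supplies the surrounding double quotes, and the query parser then treats it as an ordinary quoted term.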

opfs/item_index.go (+19, -12)

@@ -2,6 +2,7 @@ package opfs
 
 import (
 	"sort"
+	"strings"
 	"sync"
 	"time"
 
@@ -13,14 +14,14 @@ var searchDebug = debug.Logger("search")
 
 // The ItemIndexer is where meta-data is stored and queried.
 type ItemIndexer interface {
-	Tags(filter index.Filter) map[string]int  //get all tags with a given filter, with counts
-	Query(query *Query) (*QueryResult, error) //Search for items matching the query
-	Insert(item *ItemInfo) error              //store an item in the index.
-	Update(item *ItemInfo) error              //update info in the index
-	Delete(id string) error                   //delete (permanent) from the index
-	Size() int                                //number of items indexed
-	Has(id string) bool                       //does the index know about this id?
-	Get(id string) *ItemInfo                  //returns a single item (or nil if not indexed)
+	Tags(filter index.Filter, prefix string) map[string]int //get all tags with a given filter, with counts, limit to those matching prefix (more efficient to filter as well)
+	Query(query *Query) (*QueryResult, error)               //Search for items matching the query
+	Insert(item *ItemInfo) error                            //store an item in the index.
+	Update(item *ItemInfo) error                            //update info in the index
+	Delete(id string) error                                 //delete (permanent) from the index
+	Size() int                                              //number of items indexed
+	Has(id string) bool                                     //does the index know about this id?
+	Get(id string) *ItemInfo                                //returns a single item (or nil if not indexed)
 }
 
 //simplest indexer possible. but we must load it from the store each time...
@@ -60,14 +61,16 @@ func (ii *ItemIndex) Get(id string) *ItemInfo {
 	return item
 }
 
-func (ii *ItemIndex) Tags(filter index.Filter) map[string]int {
+func (ii *ItemIndex) Tags(filter index.Filter, prefix string) map[string]int {
 	//now get unique tags. with counts.
 	tagCounts := map[string]int{}
 	ii.mtx.RLock()
 	defer ii.mtx.RUnlock()
 	for _, id := range ii.index.Search(filter) {
 		for _, tag := range ii.store[id].Tags {
-			tagCounts[tag] += 1
+			if strings.HasPrefix(tag, prefix) {
+				tagCounts[tag] += 1
+			}
 		}
 	}
 	return tagCounts
@@ -132,8 +135,10 @@ func (ii *ItemIndex) Query(query *Query) (*QueryResult, error) {
 
 	//prepare the result struct
 	res := &QueryResult{
-		Hits:  int64(len(hits)),
-		Items: make([]*ItemInfo, limit),
+		Hits:   len(hits),
+		Limit:  int(query.Page.Limit), //always the original limit.
+		Offset: offset,
+		Items:  make([]*ItemInfo, limit),
 	}
 
 	//now we need the mtx.
@@ -150,10 +155,12 @@ func (ii *ItemIndex) Query(query *Query) (*QueryResult, error) {
 	//now they are sorted, slice out the ones we want
 	for i := 0; i < limit; i++ {
 		res.Items[i] = ii.store[hits[offset+i]]
+		res.Count = i + 1
 	}
 	duration := time.Now().Sub(before)
 	res.Time = duration.String()
 	res.TimeNano = duration.Nanoseconds()
+
 	searchDebug("Query:duration %v", duration)
 	return res, nil
 }
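
With the new signature, callers pass the prefix twice: once wrapped in a filter (to narrow candidate items in the index) and once to Tags itself, which applies the authoritative strings.HasPrefix check per tag. A sketch mirroring what GetTags in opfs/api.go now does (prefix value illustrative):

    filter := index.NewStringPrefixFilter("tags", "album/") // narrow the candidate items
    counts := items.Tags(filter, "album/")                  // exact per-tag prefix check
    // counts maps each matching tag to its count, e.g. "album/holiday" -> 3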

opfs/permission_layer_allow_all.go (+3, -3)

@@ -9,14 +9,14 @@ import (
 
 //this restricts nothing, bypassing the auth system completely.
 //hopefully I won't need to use it.
-type AllowAllAccess struct{}
+type AllowAllAccess string
 
-var _ PermissionLayer = AllowAllAccess{}
+var _ PermissionLayer = AllowAllAccess("")
 
 func (aaa AllowAllAccess) Identify(r *http.Request) Identity {
 	return Identity{
 		Admin: true,
-		Name:  "",
+		Name:  string(aaa),
 	}
 }
 

opfs/query.go (+5, -2)

@@ -19,10 +19,13 @@ type Query struct {
 
 // A QueryResult contains metadata about total hits as well as the results themelves
 type QueryResult struct {
-	Hits     int64       `json:"hits"`
-	Items    []*ItemInfo `json:"items"`
+	Hits     int         `json:"hits"`      //the total number of hits in the result set
+	Offset   int         `json:"from"`      //the query offset
+	Limit    int         `json:"size"`      //the query limit
+	Count    int         `json:"count"`     //the actual number of results returned in items
 	Time     string      `json:"time"`      //for informational purposes, search time as time.Duration.String()
 	TimeNano int64       `json:"time_nano"` //as above, but raw nanoseconds
+	Items    []*ItemInfo `json:"items"`     //the actual data
 }
 
 //we only fill in the bits that won't zero correctly.
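
Given the new struct tags, a search response body would look roughly like this (values illustrative):

    {
      "hits": 123,
      "from": 45,
      "size": 45,
      "count": 45,
      "time": "1.2ms",
      "time_nano": 1200000,
      "items": [ ... ]
    }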

opfs/storage.go (+40, -36)

@@ -15,12 +15,12 @@ import (
 var fsdebug = debug.Logger("store")
 
 const (
-	METAFILE_EXT            = ".meta"
-	DATAFILE_EXT            = ".data"
-	METAFILE_MIME           = "application/vnd.opfs.meta+json"
-	ITEMS_STORE_PREFIX      = "data"
-	COLLECTION_STORE_PREFIX = "collections"
-	CACHE_PREFIX            = "thumbs"
+	METAFILE_EXT    = ".meta"
+	COLLECTION_EXT  = ".coll"
+	DATAFILE_EXT    = ".data"
+	CACHE_EXT       = ".cache"
+	METAFILE_MIME   = "application/vnd.opfs.meta+json"
+	COLLECTION_MIME = "application/vnd.opfs.collection+json"
 )
 
 func init() {
@@ -44,93 +44,93 @@ func getPath(id string) string {
 }
 
 func metaKey(id string) string {
-	return path.Join(ITEMS_STORE_PREFIX, getPath(id), id) + METAFILE_EXT
+	return path.Join(getPath(id), id) + METAFILE_EXT
 }
 
 func itemKey(id string) string {
-	return path.Join(ITEMS_STORE_PREFIX, getPath(id), id) + DATAFILE_EXT
+	return path.Join(getPath(id), id) + DATAFILE_EXT
 }
 
 func cacheKey(id string) string {
-	return path.Join(CACHE_PREFIX, id)
+	return path.Join(getPath(id), id) + CACHE_EXT
 }
 
 func collectionKey(id string) string {
-	return path.Join(COLLECTION_STORE_PREFIX, id) + METAFILE_EXT
+	return path.Join(getPath(id), id) + COLLECTION_EXT
 }
 
 //retrieve the metadata for an item by hash
-func getMeta(b BlobStore, id string) (*ItemInfo, error) {
+func getMeta(a *Api, id string) (*ItemInfo, error) {
 	fsdebug("getMeta(%s)", id)
-	if file, err := b.Get(metaKey(id)); err != nil {
+	if file, err := a.Index.Get(metaKey(id)); err != nil {
 		return nil, err
 	} else {
 		return readMetaFromFile(file)
 	}
 }
 
-func putItem(b BlobStore, id string, data io.Reader, meta *ItemInfo) error {
+func putItem(a *Api, id string, data io.Reader, meta *ItemInfo) error {
 	fsdebug("putItem(%s, data, meta)", id)
 	//check if this exists. if it does, then we don't need to do anything.
 	//or maybe we should merge the metadata. Currently I'm not going to
 	//overwrite (it may clear tags we have set previously)
 	key := itemKey(id)
 	//write file and meta
-	if err := b.Set(key, data); err != nil {
+	if err := a.Data.Set(key, data); err != nil {
 		return err
 	}
-	if err := putMeta(b, id, meta); err != nil {
-		b.Del(key) //remove the data.
+	if err := putMeta(a, id, meta); err != nil {
+		a.Data.Del(key) //remove the data.
 		return err
 	}
 	return nil
 }
 
-func putCache(b BlobStore, id string, data io.Reader) error {
+func putCache(a *Api, id string, data io.Reader) error {
 	fsdebug("putCache(%s)", id)
-	return b.Set(cacheKey(id), data)
+	return a.Cache.Set(cacheKey(id), data)
 }
 
-func getCache(b BlobStore, id string) (*os.File, error) {
+func getCache(a *Api, id string) (*os.File, error) {
 	fsdebug("getCache(%s)", id)
-	return b.Get(cacheKey(id))
+	return a.Cache.Get(cacheKey(id))
 }
 
-func getData(b BlobStore, id string) (*os.File, error) {
+func getData(a *Api, id string) (*os.File, error) {
 	fsdebug("getData(%s)", id)
-	return b.Get(itemKey(id))
+	return a.Data.Get(itemKey(id))
 }
 
-func hasItem(b BlobStore, id string) bool {
+func hasItem(a *Api, id string) bool {
 	fsdebug("hasItem(%s)", id)
-	return b.Has(itemKey(id))
+	return a.Data.Has(itemKey(id))
 }
 
-func hasMeta(b BlobStore, id string) bool {
+func hasMeta(a *Api, id string) bool {
 	fsdebug("hasMeta(%s)", id)
-	return b.Has(metaKey(id))
+	return a.Index.Has(metaKey(id))
 }
 
-func hasCollection(b BlobStore, id string) bool {
+func hasCollection(a *Api, id string) bool {
 	fsdebug("hasCollection(%s)", id)
-	return b.Has(collectionKey(id))
+	return a.Index.Has(collectionKey(id))
 }
 
-func putMeta(b BlobStore, id string, meta *ItemInfo) error {
+func putMeta(a *Api, id string, meta *ItemInfo) error {
 	fsdebug("putMeta(%s)", id)
 	if buf, err := json.Marshal(meta); err != nil {
 		return err
 	} else {
-		return b.Set(metaKey(id), bytes.NewReader(buf))
+		return a.Index.Set(metaKey(id), bytes.NewReader(buf))
 	}
 }
 
-func putCollection(b BlobStore, id string, c *Collection) error {
+func putCollection(a *Api, id string, c *Collection) error {
 	fsdebug("putCollection(%s)", id)
 	if buf, err := json.Marshal(c); err != nil {
 		return err
 	} else {
-		return b.Set(collectionKey(id), bytes.NewReader(buf))
+		return a.Index.Set(collectionKey(id), bytes.NewReader(buf))
 	}
 }
 
@@ -138,6 +138,10 @@ func metadataFilenamesOnly(filename string) bool {
 	return strings.HasSuffix(filename, METAFILE_EXT)
 }
 
+func collectionFilenamesOnly(filename string) bool {
+	return strings.HasSuffix(filename, COLLECTION_EXT)
+}
+
 type ItemIterator struct {
 	Items chan *ItemInfo
 	Err   error
@@ -151,14 +155,14 @@ func (i *ItemIterator) Abort() {
 
 //Load Every Metadata and return the *ItemInfo
 //this is used on startup to hydrate the Index
-func getMetaIterator(b BlobStore) Scanner {
+func getMetaIterator(a *Api) Scanner {
 	fsdebug("getMetaIterator()")
-	return b.Scan(ITEMS_STORE_PREFIX, metadataFilenamesOnly)
+	return a.Index.Scan("/", metadataFilenamesOnly)
 }
 
-func getCollectionIterator(b BlobStore) Scanner {
+func getCollectionIterator(a *Api) Scanner {
 	fsdebug("getCollectionIterator()")
-	return b.Scan(COLLECTION_STORE_PREFIX, metadataFilenamesOnly)
+	return a.Index.Scan("/", collectionFilenamesOnly)
 }
 
 //useful in conjunction with the Iterator
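
Design note: the per-type path prefixes (data/, collections/, thumbs/) are gone; every blob type now shares the same getPath(id) fan-out and is distinguished by extension and by which store it is routed to, which is what lets the index and cache sit on different disks from the item data. Assuming getPath fans out by hash prefix (its body is not shown in this diff), the layout looks something like:

    // index store: <root>/<fan-out>/sha1-<hash>.meta  (item metadata)
    // index store: <root>/<fan-out>/sha1-<hash>.coll  (collections)
    // data store:  <root>/<fan-out>/sha1-<hash>.data  (item bytes)
    // cache store: <root>/<fan-out>/sha1-<hash>.cache (thumbnails)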

pushstate/pushstate.go (+33, -0)

@@ -0,0 +1,33 @@
+package pushstate
+
+import (
+	"net/http"
+	"path"
+	"strings"
+)
+
+type PushState struct {
+	fs http.FileSystem
+	hh http.Handler
+}
+
+func NewHandler(fs http.FileSystem) http.Handler {
+	return &PushState{
+		fs: fs,
+		hh: http.FileServer(fs),
+	}
+}
+
+func (ps *PushState) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	upath := r.URL.Path
+	if !strings.HasPrefix(upath, "/") {
+		upath = "/" + upath
+	}
+	file, err := ps.fs.Open(path.Clean(upath))
+	if err != nil {
+		r.URL.Path = "/"
+	} else {
+		file.Close()
+	}
+	ps.hh.ServeHTTP(w, r)
+}
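
The pushstate package is a standard single-page-app fallback: any path that does not resolve to a real file in the FileSystem is rewritten to "/" before being handed to http.FileServer, so the index page loads and the client-side (HTML5 pushState) router handles the original URL. Wiring it up, as opfsd now does (directory path hypothetical):

    ui := pushstate.NewHandler(http.Dir("/srv/opfs-ui"))
    // pass `ui` to opfs.ListenAndServe, which mounts it under the /ui prefix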