index: pre-allocate size of ngram map

We know the number of entries we will add to the ngram maps. By
specifying the size up front, the Go runtime can avoid over-allocating
as the maps grow. Locally, on a small corpus, this reduced the map byte
size by 15%.

Co-authored-by: Stefan Hengl <stefan@sourcegraph.com>
Change-Id: I060e7aa12db726b093e7971ec6bc54ce193ec405
diff --git a/read.go b/read.go
index ec54223..841b52f 100644
--- a/read.go
+++ b/read.go
@@ -260,7 +260,7 @@
 	}
 	postingsIndex := toc.postings.relativeIndex()
 
-	ngrams := map[ngram]simpleSection{}
+	ngrams := make(map[ngram]simpleSection, len(textContent)/ngramEncoding)
 	for i := 0; i < len(textContent); i += ngramEncoding {
 		j := i / ngramEncoding
 		ng := ngram(binary.BigEndian.Uint64(textContent[i : i+ngramEncoding]))
@@ -286,7 +286,7 @@
 
 	fileNamePostingsIndex := toc.namePostings.relativeIndex()
 
-	fileNameNgrams := map[ngram][]uint32{}
+	fileNameNgrams := make(map[ngram][]uint32, len(nameNgramText)/ngramEncoding)
 	for i := 0; i < len(nameNgramText); i += ngramEncoding {
 		j := i / ngramEncoding
 		off := fileNamePostingsIndex[j]