Diffstat (limited to 'vendor/golang.org/x/text')
-rw-r--r--  vendor/golang.org/x/text/internal/gen/code.go             375
-rw-r--r--  vendor/golang.org/x/text/internal/gen/gen.go              354
-rw-r--r--  vendor/golang.org/x/text/message/pipeline/extract.go      821
-rw-r--r--  vendor/golang.org/x/text/message/pipeline/generate.go     329
-rw-r--r--  vendor/golang.org/x/text/message/pipeline/message.go      241
-rw-r--r--  vendor/golang.org/x/text/message/pipeline/pipeline.go     422
-rw-r--r--  vendor/golang.org/x/text/message/pipeline/rewrite.go      268
-rw-r--r--  vendor/golang.org/x/text/runes/cond.go                    187
-rw-r--r--  vendor/golang.org/x/text/runes/runes.go                   355
-rw-r--r--  vendor/golang.org/x/text/unicode/cldr/base.go             105
-rw-r--r--  vendor/golang.org/x/text/unicode/cldr/cldr.go             137
-rw-r--r--  vendor/golang.org/x/text/unicode/cldr/collate.go          363
-rw-r--r--  vendor/golang.org/x/text/unicode/cldr/decode.go           171
-rw-r--r--  vendor/golang.org/x/text/unicode/cldr/resolve.go          602
-rw-r--r--  vendor/golang.org/x/text/unicode/cldr/slice.go            144
-rw-r--r--  vendor/golang.org/x/text/unicode/cldr/xml.go             1494
16 files changed, 6368 insertions, 0 deletions
diff --git a/vendor/golang.org/x/text/internal/gen/code.go b/vendor/golang.org/x/text/internal/gen/code.go
new file mode 100644
index 0000000..75435c9
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/gen/code.go
@@ -0,0 +1,375 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gen
+
+import (
+ "bytes"
+ "encoding/gob"
+ "fmt"
+ "hash"
+ "hash/fnv"
+ "io"
+ "log"
+ "os"
+ "reflect"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// This file contains utilities for generating code.
+
+// TODO: other write methods like:
+// - slices, maps, types, etc.
+
+// CodeWriter is a utility for writing structured code. It computes the content
+// hash and size of written content. It ensures there are newlines between
+// written code blocks.
+type CodeWriter struct {
+ buf bytes.Buffer
+ Size int
+ Hash hash.Hash32 // content hash
+ gob *gob.Encoder
+ // For comments we skip the usual one-line separator if they are followed by
+ // a code block.
+ skipSep bool
+}
+
+func (w *CodeWriter) Write(p []byte) (n int, err error) {
+ return w.buf.Write(p)
+}
+
+// NewCodeWriter returns a new CodeWriter.
+func NewCodeWriter() *CodeWriter {
+ h := fnv.New32()
+ return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)}
+}
+
+// WriteGoFile appends the buffer with the total size of all created structures
+// and writes it as a Go file to the given file with the given package name.
+func (w *CodeWriter) WriteGoFile(filename, pkg string) {
+ f, err := os.Create(filename)
+ if err != nil {
+ log.Fatalf("Could not create file %s: %v", filename, err)
+ }
+ defer f.Close()
+ if _, err = w.WriteGo(f, pkg, ""); err != nil {
+ log.Fatalf("Error writing file %s: %v", filename, err)
+ }
+}
+
+// WriteVersionedGoFile appends the buffer with the total size of all created
+// structures and writes it as a Go file to the given file with the given
+// package name and build tags for the current Unicode version.
+func (w *CodeWriter) WriteVersionedGoFile(filename, pkg string) {
+ tags := buildTags()
+ if tags != "" {
+ pattern := fileToPattern(filename)
+ updateBuildTags(pattern)
+ filename = fmt.Sprintf(pattern, UnicodeVersion())
+ }
+ f, err := os.Create(filename)
+ if err != nil {
+ log.Fatalf("Could not create file %s: %v", filename, err)
+ }
+ defer f.Close()
+ if _, err = w.WriteGo(f, pkg, tags); err != nil {
+ log.Fatalf("Error writing file %s: %v", filename, err)
+ }
+}
+
+// WriteGo appends the buffer with the total size of all created structures and
+// writes it as a Go file to the given writer with the given package name.
+func (w *CodeWriter) WriteGo(out io.Writer, pkg, tags string) (n int, err error) {
+ sz := w.Size
+ if sz > 0 {
+ w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32())
+ }
+ defer w.buf.Reset()
+ return WriteGo(out, pkg, tags, w.buf.Bytes())
+}
+
+func (w *CodeWriter) printf(f string, x ...interface{}) {
+ fmt.Fprintf(w, f, x...)
+}
+
+func (w *CodeWriter) insertSep() {
+ if w.skipSep {
+ w.skipSep = false
+ return
+ }
+ // Use at least two newlines to ensure a blank space between the previous
+ // block. WriteGoFile will remove extraneous newlines.
+ w.printf("\n\n")
+}
+
+// WriteComment writes a comment block. All line starts are prefixed with "//".
+// Initial empty lines are gobbled. The indentation for the first line is
+// stripped from consecutive lines.
+func (w *CodeWriter) WriteComment(comment string, args ...interface{}) {
+ s := fmt.Sprintf(comment, args...)
+ s = strings.Trim(s, "\n")
+
+ // Use at least two newlines to ensure a blank space between the previous
+ // block. WriteGoFile will remove extraneous newlines.
+ w.printf("\n\n// ")
+ w.skipSep = true
+
+ // strip first indent level.
+ sep := "\n"
+ for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] {
+ sep += s[:1]
+ }
+
+ strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s)
+
+ w.printf("\n")
+}
+
+func (w *CodeWriter) writeSizeInfo(size int) {
+ w.printf("// Size: %d bytes\n", size)
+}
+
+// WriteConst writes a constant of the given name and value.
+func (w *CodeWriter) WriteConst(name string, x interface{}) {
+ w.insertSep()
+ v := reflect.ValueOf(x)
+
+ switch v.Type().Kind() {
+ case reflect.String:
+ w.printf("const %s %s = ", name, typeName(x))
+ w.WriteString(v.String())
+ w.printf("\n")
+ default:
+ w.printf("const %s = %#v\n", name, x)
+ }
+}
+
+// WriteVar writes a variable of the given name and value.
+func (w *CodeWriter) WriteVar(name string, x interface{}) {
+ w.insertSep()
+ v := reflect.ValueOf(x)
+ oldSize := w.Size
+ sz := int(v.Type().Size())
+ w.Size += sz
+
+ switch v.Type().Kind() {
+ case reflect.String:
+ w.printf("var %s %s = ", name, typeName(x))
+ w.WriteString(v.String())
+ case reflect.Struct:
+ w.gob.Encode(x)
+ fallthrough
+ case reflect.Slice, reflect.Array:
+ w.printf("var %s = ", name)
+ w.writeValue(v)
+ w.writeSizeInfo(w.Size - oldSize)
+ default:
+ w.printf("var %s %s = ", name, typeName(x))
+ w.gob.Encode(x)
+ w.writeValue(v)
+ w.writeSizeInfo(w.Size - oldSize)
+ }
+ w.printf("\n")
+}
+
+func (w *CodeWriter) writeValue(v reflect.Value) {
+ x := v.Interface()
+ switch v.Kind() {
+ case reflect.String:
+ w.WriteString(v.String())
+ case reflect.Array:
+ // Don't double count: callers of WriteArray count on the size being
+ // added, so we need to discount it here.
+ w.Size -= int(v.Type().Size())
+ w.writeSlice(x, true)
+ case reflect.Slice:
+ w.writeSlice(x, false)
+ case reflect.Struct:
+ w.printf("%s{\n", typeName(v.Interface()))
+ t := v.Type()
+ for i := 0; i < v.NumField(); i++ {
+ w.printf("%s: ", t.Field(i).Name)
+ w.writeValue(v.Field(i))
+ w.printf(",\n")
+ }
+ w.printf("}")
+ default:
+ w.printf("%#v", x)
+ }
+}
+
+// WriteString writes a string literal.
+func (w *CodeWriter) WriteString(s string) {
+ io.WriteString(w.Hash, s) // content hash
+ w.Size += len(s)
+
+ const maxInline = 40
+ if len(s) <= maxInline {
+ w.printf("%q", s)
+ return
+ }
+
+ // We will render the string as a multi-line string.
+ const maxWidth = 80 - 4 - len(`"`) - len(`" +`)
+
+ // When starting on its own line, go fmt indents line 2+ an extra level.
+ n, max := maxWidth, maxWidth-4
+
+ // As per https://golang.org/issue/18078, the compiler has trouble
+ // compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN,
+ // for large N. We insert redundant, explicit parentheses to work around
+ // that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 +
+ // ... + s127) + etc + (etc + ... + sN).
+ explicitParens, extraComment := len(s) > 128*1024, ""
+ if explicitParens {
+ w.printf(`(`)
+ extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078"
+ }
+
+ // Print "" +\n, if a string does not start on its own line.
+ b := w.buf.Bytes()
+ if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' {
+ w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment)
+ n, max = maxWidth, maxWidth
+ }
+
+ w.printf(`"`)
+
+ for sz, p, nLines := 0, 0, 0; p < len(s); {
+ var r rune
+ r, sz = utf8.DecodeRuneInString(s[p:])
+ out := s[p : p+sz]
+ chars := 1
+ if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' {
+ switch sz {
+ case 1:
+ out = fmt.Sprintf("\\x%02x", s[p])
+ case 2, 3:
+ out = fmt.Sprintf("\\u%04x", r)
+ case 4:
+ out = fmt.Sprintf("\\U%08x", r)
+ }
+ chars = len(out)
+ } else if r == '\\' {
+ out = "\\" + string(r)
+ chars = 2
+ }
+ if n -= chars; n < 0 {
+ nLines++
+ if explicitParens && nLines&63 == 63 {
+ w.printf("\") + (\"")
+ }
+ w.printf("\" +\n\"")
+ n = max - len(out)
+ }
+ w.printf("%s", out)
+ p += sz
+ }
+ w.printf(`"`)
+ if explicitParens {
+ w.printf(`)`)
+ }
+}
+
+// WriteSlice writes a slice value.
+func (w *CodeWriter) WriteSlice(x interface{}) {
+ w.writeSlice(x, false)
+}
+
+// WriteArray writes an array value.
+func (w *CodeWriter) WriteArray(x interface{}) {
+ w.writeSlice(x, true)
+}
+
+func (w *CodeWriter) writeSlice(x interface{}, isArray bool) {
+ v := reflect.ValueOf(x)
+ w.gob.Encode(v.Len())
+ w.Size += v.Len() * int(v.Type().Elem().Size())
+ name := typeName(x)
+ if isArray {
+ name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:])
+ }
+ if isArray {
+ w.printf("%s{\n", name)
+ } else {
+ w.printf("%s{ // %d elements\n", name, v.Len())
+ }
+
+ switch kind := v.Type().Elem().Kind(); kind {
+ case reflect.String:
+ for _, s := range x.([]string) {
+ w.WriteString(s)
+ w.printf(",\n")
+ }
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ // nLine and nBlock are the number of elements per line and block.
+ nLine, nBlock, format := 8, 64, "%d,"
+ switch kind {
+ case reflect.Uint8:
+ format = "%#02x,"
+ case reflect.Uint16:
+ format = "%#04x,"
+ case reflect.Uint32:
+ nLine, nBlock, format = 4, 32, "%#08x,"
+ case reflect.Uint, reflect.Uint64:
+ nLine, nBlock, format = 4, 32, "%#016x,"
+ case reflect.Int8:
+ nLine = 16
+ }
+ n := nLine
+ for i := 0; i < v.Len(); i++ {
+ if i%nBlock == 0 && v.Len() > nBlock {
+ w.printf("// Entry %X - %X\n", i, i+nBlock-1)
+ }
+ x := v.Index(i).Interface()
+ w.gob.Encode(x)
+ w.printf(format, x)
+ if n--; n == 0 {
+ n = nLine
+ w.printf("\n")
+ }
+ }
+ w.printf("\n")
+ case reflect.Struct:
+ zero := reflect.Zero(v.Type().Elem()).Interface()
+ for i := 0; i < v.Len(); i++ {
+ x := v.Index(i).Interface()
+ w.gob.EncodeValue(v)
+ if !reflect.DeepEqual(zero, x) {
+ line := fmt.Sprintf("%#v,\n", x)
+ line = line[strings.IndexByte(line, '{'):]
+ w.printf("%d: ", i)
+ w.printf(line)
+ }
+ }
+ case reflect.Array:
+ for i := 0; i < v.Len(); i++ {
+ w.printf("%d: %#v,\n", i, v.Index(i).Interface())
+ }
+ default:
+ panic("gen: slice elem type not supported")
+ }
+ w.printf("}")
+}
+
+// WriteType writes a definition of the type of the given value and returns the
+// type name.
+func (w *CodeWriter) WriteType(x interface{}) string {
+ t := reflect.TypeOf(x)
+ w.printf("type %s struct {\n", t.Name())
+ for i := 0; i < t.NumField(); i++ {
+ w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type)
+ }
+ w.printf("}\n")
+ return t.Name()
+}
+
+// typeName returns the name of the go type of x.
+func typeName(x interface{}) string {
+ t := reflect.ValueOf(x).Type()
+ return strings.Replace(fmt.Sprint(t), "main.", "", 1)
+}
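
As a usage sketch, a hypothetical generator inside the x/text repository could drive the CodeWriter above like this (the file and package names are assumptions; internal/gen is only importable from within that repository):

package main

import "golang.org/x/text/internal/gen"

func main() {
	gen.Init() // parse the common flags and set up logging (see gen.go below)

	cw := gen.NewCodeWriter()
	cw.WriteComment("lookup maps hypothetical input bytes to class values.")
	cw.WriteConst("numClasses", 3)
	cw.WriteVar("lookup", []uint16{0x0001, 0x0010, 0x0100})

	// Appends the total table size and checksum, prepends the standard
	// header and package clause, applies gofmt, and writes tables.go.
	cw.WriteGoFile("tables.go", "hypotheticalpkg")
}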
diff --git a/vendor/golang.org/x/text/internal/gen/gen.go b/vendor/golang.org/x/text/internal/gen/gen.go
new file mode 100644
index 0000000..78bfef6
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/gen/gen.go
@@ -0,0 +1,354 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package gen contains common code for the various code generation tools in the
+// text repository. Its usage ensures consistency between tools.
+//
+// This package defines command line flags that are common to most generation
+// tools. The flags allow for specifying specific Unicode and CLDR versions
+// in the public Unicode data repository (https://www.unicode.org/Public).
+//
+// A local Unicode data mirror can be set through the flag -local or the
+// environment variable UNICODE_DIR. The former takes precedence. The local
+// directory should follow the same structure as the public repository.
+//
+// IANA data can also optionally be mirrored by putting it in the iana directory
+// rooted at the top of the local mirror. Beware, though, that IANA data is not
+// versioned. So it is up to the developer to use the right version.
+package gen // import "golang.org/x/text/internal/gen"
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/build"
+ "go/format"
+ "io"
+ "log"
+ "net/http"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "sync"
+ "unicode"
+
+ "golang.org/x/text/unicode/cldr"
+)
+
+var (
+ url = flag.String("url",
+ "https://www.unicode.org/Public",
+ "URL of Unicode database directory")
+ iana = flag.String("iana",
+ "http://www.iana.org",
+ "URL of the IANA repository")
+ unicodeVersion = flag.String("unicode",
+ getEnv("UNICODE_VERSION", unicode.Version),
+ "unicode version to use")
+ cldrVersion = flag.String("cldr",
+ getEnv("CLDR_VERSION", cldr.Version),
+ "cldr version to use")
+)
+
+func getEnv(name, def string) string {
+ if v := os.Getenv(name); v != "" {
+ return v
+ }
+ return def
+}
+
+// Init performs common initialization for a gen command. It parses the flags
+// and sets up the standard logging parameters.
+func Init() {
+ log.SetPrefix("")
+ log.SetFlags(log.Lshortfile)
+ flag.Parse()
+}
+
+const header = `// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+`
+
+// UnicodeVersion reports the requested Unicode version.
+func UnicodeVersion() string {
+ return *unicodeVersion
+}
+
+// CLDRVersion reports the requested CLDR version.
+func CLDRVersion() string {
+ return *cldrVersion
+}
+
+var tags = []struct{ version, buildTags string }{
+ {"9.0.0", "!go1.10"},
+ {"10.0.0", "go1.10,!go1.13"},
+ {"11.0.0", "go1.13,!go1.14"},
+ {"12.0.0", "go1.14,!go1.16"},
+ {"13.0.0", "go1.16,!go1.21"},
+ {"15.0.0", "go1.21"},
+}
+
+// buildTags reports the build tags used for the current Unicode version.
+func buildTags() string {
+ v := UnicodeVersion()
+ for _, e := range tags {
+ if e.version == v {
+ return e.buildTags
+ }
+ }
+ log.Fatalf("Unknown build tags for Unicode version %q.", v)
+ return ""
+}
+
+// IsLocal reports whether data files are available locally.
+func IsLocal() bool {
+ dir, err := localReadmeFile()
+ if err != nil {
+ return false
+ }
+ if _, err = os.Stat(dir); err != nil {
+ return false
+ }
+ return true
+}
+
+// OpenUCDFile opens the requested UCD file. The file is specified relative to
+// the public Unicode root directory. It will call log.Fatal if there are any
+// errors.
+func OpenUCDFile(file string) io.ReadCloser {
+ return openUnicode(path.Join(*unicodeVersion, "ucd", file))
+}
+
+// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there
+// are any errors.
+func OpenCLDRCoreZip() io.ReadCloser {
+ return OpenUnicodeFile("cldr", *cldrVersion, "core.zip")
+}
+
+// OpenUnicodeFile opens the requested file of the requested category from the
+// root of the Unicode data archive. The file is specified relative to the
+// public Unicode root directory. If version is "", it will use the default
+// Unicode version. It will call log.Fatal if there are any errors.
+func OpenUnicodeFile(category, version, file string) io.ReadCloser {
+ if version == "" {
+ version = UnicodeVersion()
+ }
+ return openUnicode(path.Join(category, version, file))
+}
+
+// OpenIANAFile opens the requested IANA file. The file is specified relative
+// to the IANA root, which is typically either http://www.iana.org or the
+// iana directory in the local mirror. It will call log.Fatal if there are any
+// errors.
+func OpenIANAFile(path string) io.ReadCloser {
+ return Open(*iana, "iana", path)
+}
+
+var (
+ dirMutex sync.Mutex
+ localDir string
+)
+
+const permissions = 0755
+
+func localReadmeFile() (string, error) {
+ p, err := build.Import("golang.org/x/text", "", build.FindOnly)
+ if err != nil {
+ return "", fmt.Errorf("Could not locate package: %v", err)
+ }
+ return filepath.Join(p.Dir, "DATA", "README"), nil
+}
+
+func getLocalDir() string {
+ dirMutex.Lock()
+ defer dirMutex.Unlock()
+
+ readme, err := localReadmeFile()
+ if err != nil {
+ log.Fatal(err)
+ }
+ dir := filepath.Dir(readme)
+ if _, err := os.Stat(readme); err != nil {
+ if err := os.MkdirAll(dir, permissions); err != nil {
+ log.Fatalf("Could not create directory: %v", err)
+ }
+ os.WriteFile(readme, []byte(readmeTxt), permissions)
+ }
+ return dir
+}
+
+const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT.
+
+This directory contains downloaded files used to generate the various tables
+in the golang.org/x/text subrepo.
+
+Note that the language subtag repo (iana/assignments/language-subtag-registry)
+and all other files in the iana subdirectory are not versioned and will need
+to be periodically manually updated. The easiest way to do this is to remove
+the entire iana directory. This is mostly of concern when updating the language
+package.
+`
+
+// Open opens subdir/path if a local directory is specified and the file exists,
+// where subdir is a directory relative to the local root, or fetches it from
+// urlRoot/path otherwise. It will call log.Fatal if there are any errors.
+func Open(urlRoot, subdir, path string) io.ReadCloser {
+ file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path))
+ return open(file, urlRoot, path)
+}
+
+func openUnicode(path string) io.ReadCloser {
+ file := filepath.Join(getLocalDir(), filepath.FromSlash(path))
+ return open(file, *url, path)
+}
+
+// TODO: automatically periodically update non-versioned files.
+
+func open(file, urlRoot, path string) io.ReadCloser {
+ if f, err := os.Open(file); err == nil {
+ return f
+ }
+ r := get(urlRoot, path)
+ defer r.Close()
+ b, err := io.ReadAll(r)
+ if err != nil {
+ log.Fatalf("Could not download file: %v", err)
+ }
+ os.MkdirAll(filepath.Dir(file), permissions)
+ if err := os.WriteFile(file, b, permissions); err != nil {
+ log.Fatalf("Could not create file: %v", err)
+ }
+ return io.NopCloser(bytes.NewReader(b))
+}
+
+func get(root, path string) io.ReadCloser {
+ url := root + "/" + path
+ fmt.Printf("Fetching %s...", url)
+ defer fmt.Println(" done.")
+ resp, err := http.Get(url)
+ if err != nil {
+ log.Fatalf("HTTP GET: %v", err)
+ }
+ if resp.StatusCode != 200 {
+ log.Fatalf("Bad GET status for %q: %q", url, resp.Status)
+ }
+ return resp.Body
+}
+
+// TODO: use Write*Version in all applicable packages.
+
+// WriteUnicodeVersion writes a constant for the Unicode version from which the
+// tables are generated.
+func WriteUnicodeVersion(w io.Writer) {
+ fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n")
+ fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion())
+}
+
+// WriteCLDRVersion writes a constant for the CLDR version from which the
+// tables are generated.
+func WriteCLDRVersion(w io.Writer) {
+ fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n")
+ fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion())
+}
+
+// WriteGoFile prepends a standard file comment and package statement to the
+// given bytes, applies gofmt, and writes them to a file with the given name.
+// It will call log.Fatal if there are any errors.
+func WriteGoFile(filename, pkg string, b []byte) {
+ w, err := os.Create(filename)
+ if err != nil {
+ log.Fatalf("Could not create file %s: %v", filename, err)
+ }
+ defer w.Close()
+ if _, err = WriteGo(w, pkg, "", b); err != nil {
+ log.Fatalf("Error writing file %s: %v", filename, err)
+ }
+}
+
+func fileToPattern(filename string) string {
+ suffix := ".go"
+ if strings.HasSuffix(filename, "_test.go") {
+ suffix = "_test.go"
+ }
+ prefix := filename[:len(filename)-len(suffix)]
+ return fmt.Sprint(prefix, "%s", suffix)
+}
+
+// tagLines returns the //go:build lines to add to the file.
+func tagLines(tags string) string {
+ return "//go:build " + strings.ReplaceAll(tags, ",", " && ") + "\n"
+}
+
+func updateBuildTags(pattern string) {
+ for _, t := range tags {
+ oldFile := fmt.Sprintf(pattern, t.version)
+ b, err := os.ReadFile(oldFile)
+ if err != nil {
+ continue
+ }
+ b = regexp.MustCompile(`//go:build.*\n`).ReplaceAll(b, []byte(tagLines(t.buildTags)))
+ err = os.WriteFile(oldFile, b, 0644)
+ if err != nil {
+ log.Fatal(err)
+ }
+ }
+}
+
+// WriteVersionedGoFile prepends a standard file comment, adds build tags to
+// version the file for the current Unicode version, and package statement to
+// the given bytes, applies gofmt, and writes them to a file with the given
+// name. It will call log.Fatal if there are any errors.
+func WriteVersionedGoFile(filename, pkg string, b []byte) {
+ pattern := fileToPattern(filename)
+ updateBuildTags(pattern)
+ filename = fmt.Sprintf(pattern, UnicodeVersion())
+
+ w, err := os.Create(filename)
+ if err != nil {
+ log.Fatalf("Could not create file %s: %v", filename, err)
+ }
+ defer w.Close()
+ if _, err = WriteGo(w, pkg, buildTags(), b); err != nil {
+ log.Fatalf("Error writing file %s: %v", filename, err)
+ }
+}
+
+// WriteGo prepends a standard file comment and package statement to the given
+// bytes, applies gofmt, and writes them to w.
+func WriteGo(w io.Writer, pkg, tags string, b []byte) (n int, err error) {
+ src := []byte(header)
+ if tags != "" {
+ src = append(src, tagLines(tags)...)
+ src = append(src, '\n')
+ }
+ src = append(src, fmt.Sprintf("package %s\n\n", pkg)...)
+ src = append(src, b...)
+ formatted, err := format.Source(src)
+ if err != nil {
+ // Print the generated code even in case of an error so that the
+ // returned error can be meaningfully interpreted.
+ n, _ = w.Write(src)
+ return n, err
+ }
+ return w.Write(formatted)
+}
+
+// Repackage rewrites a Go file from belonging to package main to belonging to
+// the given package.
+func Repackage(inFile, outFile, pkg string) {
+ src, err := os.ReadFile(inFile)
+ if err != nil {
+ log.Fatalf("reading %s: %v", inFile, err)
+ }
+ const toDelete = "package main\n\n"
+ i := bytes.Index(src, []byte(toDelete))
+ if i < 0 {
+ log.Fatalf("Could not find %q in %s.", toDelete, inFile)
+ }
+ w := &bytes.Buffer{}
+ w.Write(src[i+len(toDelete):])
+ WriteGoFile(outFile, pkg, w.Bytes())
+}
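
A sketch of a hypothetical "go generate" program built on the helpers above, reading a UCD file from the local mirror (or downloading it) and emitting a small table file; the counted quantity and the output names are assumptions:

package main

import (
	"bufio"
	"bytes"
	"fmt"
	"log"

	"golang.org/x/text/internal/gen"
)

func main() {
	gen.Init() // parses -url, -iana, -unicode and -cldr

	r := gen.OpenUCDFile("UnicodeData.txt") // local mirror or download
	defer r.Close()

	lines := 0
	s := bufio.NewScanner(r)
	for s.Scan() {
		lines++
	}
	if err := s.Err(); err != nil {
		log.Fatal(err)
	}

	var buf bytes.Buffer
	gen.WriteUnicodeVersion(&buf)
	fmt.Fprintf(&buf, "// numUCDLines is the number of lines in UnicodeData.txt.\nconst numUCDLines = %d\n", lines)

	// Prepends the standard header and package clause, applies gofmt.
	gen.WriteGoFile("tables.go", "hypotheticalpkg", buf.Bytes())
}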
diff --git a/vendor/golang.org/x/text/message/pipeline/extract.go b/vendor/golang.org/x/text/message/pipeline/extract.go
new file mode 100644
index 0000000..a15a7f9
--- /dev/null
+++ b/vendor/golang.org/x/text/message/pipeline/extract.go
@@ -0,0 +1,821 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pipeline
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/format"
+ "go/token"
+ "go/types"
+ "path/filepath"
+ "sort"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ fmtparser "golang.org/x/text/internal/format"
+ "golang.org/x/tools/go/callgraph"
+ "golang.org/x/tools/go/callgraph/cha"
+ "golang.org/x/tools/go/loader"
+ "golang.org/x/tools/go/ssa"
+ "golang.org/x/tools/go/ssa/ssautil"
+)
+
+const debug = false
+
+// TODO:
+// - merge information into existing files
+// - handle different file formats (PO, XLIFF)
+// - handle features (gender, plural)
+// - message rewriting
+
+// - `msg:"etc"` tags
+
+// Extract extracts all strings from the package defined in Config.
+func Extract(c *Config) (*State, error) {
+ x, err := newExtracter(c)
+ if err != nil {
+ return nil, wrap(err, "")
+ }
+
+ if err := x.seedEndpoints(); err != nil {
+ return nil, err
+ }
+ x.extractMessages()
+
+ return &State{
+ Config: *c,
+ program: x.iprog,
+ Extracted: Messages{
+ Language: c.SourceLanguage,
+ Messages: x.messages,
+ },
+ }, nil
+}
+
+type extracter struct {
+ conf loader.Config
+ iprog *loader.Program
+ prog *ssa.Program
+ callGraph *callgraph.Graph
+
+ // Calls and other expressions to collect.
+ globals map[token.Pos]*constData
+ funcs map[token.Pos]*callData
+ messages []Message
+}
+
+func newExtracter(c *Config) (x *extracter, err error) {
+ x = &extracter{
+ conf: loader.Config{},
+ globals: map[token.Pos]*constData{},
+ funcs: map[token.Pos]*callData{},
+ }
+
+ x.iprog, err = loadPackages(&x.conf, c.Packages)
+ if err != nil {
+ return nil, wrap(err, "")
+ }
+
+ x.prog = ssautil.CreateProgram(x.iprog, ssa.GlobalDebug|ssa.BareInits)
+ x.prog.Build()
+
+ x.callGraph = cha.CallGraph(x.prog)
+
+ return x, nil
+}
+
+func (x *extracter) globalData(pos token.Pos) *constData {
+ cd := x.globals[pos]
+ if cd == nil {
+ cd = &constData{}
+ x.globals[pos] = cd
+ }
+ return cd
+}
+
+func (x *extracter) seedEndpoints() error {
+ pkgInfo := x.iprog.Package("golang.org/x/text/message")
+ if pkgInfo == nil {
+ return errors.New("pipeline: golang.org/x/text/message is not imported")
+ }
+ pkg := x.prog.Package(pkgInfo.Pkg)
+ typ := types.NewPointer(pkg.Type("Printer").Type())
+
+ x.processGlobalVars()
+
+ x.handleFunc(x.prog.LookupMethod(typ, pkg.Pkg, "Printf"), &callData{
+ formatPos: 1,
+ argPos: 2,
+ isMethod: true,
+ })
+ x.handleFunc(x.prog.LookupMethod(typ, pkg.Pkg, "Sprintf"), &callData{
+ formatPos: 1,
+ argPos: 2,
+ isMethod: true,
+ })
+ x.handleFunc(x.prog.LookupMethod(typ, pkg.Pkg, "Fprintf"), &callData{
+ formatPos: 2,
+ argPos: 3,
+ isMethod: true,
+ })
+ return nil
+}
+
+// processGlobalVars finds string constants that are assigned to global
+// variables.
+func (x *extracter) processGlobalVars() {
+ for _, p := range x.prog.AllPackages() {
+ m, ok := p.Members["init"]
+ if !ok {
+ continue
+ }
+ for _, b := range m.(*ssa.Function).Blocks {
+ for _, i := range b.Instrs {
+ s, ok := i.(*ssa.Store)
+ if !ok {
+ continue
+ }
+ a, ok := s.Addr.(*ssa.Global)
+ if !ok {
+ continue
+ }
+ t := a.Type()
+ for {
+ p, ok := t.(*types.Pointer)
+ if !ok {
+ break
+ }
+ t = p.Elem()
+ }
+ if b, ok := t.(*types.Basic); !ok || b.Kind() != types.String {
+ continue
+ }
+ x.visitInit(a, s.Val)
+ }
+ }
+ }
+}
+
+type constData struct {
+ call *callData // to provide a signature for the constants
+ values []constVal
+ others []token.Pos // Assigned to other global data.
+}
+
+func (d *constData) visit(x *extracter, f func(c constant.Value)) {
+ for _, v := range d.values {
+ f(v.value)
+ }
+ for _, p := range d.others {
+ if od, ok := x.globals[p]; ok {
+ od.visit(x, f)
+ }
+ }
+}
+
+type constVal struct {
+ value constant.Value
+ pos token.Pos
+}
+
+type callData struct {
+ call ssa.CallInstruction
+ expr *ast.CallExpr
+ formats []constant.Value
+
+ callee *callData
+ isMethod bool
+ formatPos int
+ argPos int // varargs at this position in the call
+ argTypes []int // arguments extractable from this position
+}
+
+func (c *callData) callFormatPos() int {
+ c = c.callee
+ if c.isMethod {
+ return c.formatPos - 1
+ }
+ return c.formatPos
+}
+
+func (c *callData) callArgsStart() int {
+ c = c.callee
+ if c.isMethod {
+ return c.argPos - 1
+ }
+ return c.argPos
+}
+
+func (c *callData) Pos() token.Pos { return c.call.Pos() }
+func (c *callData) Pkg() *types.Package { return c.call.Parent().Pkg.Pkg }
+
+func (x *extracter) handleFunc(f *ssa.Function, fd *callData) {
+ for _, e := range x.callGraph.Nodes[f].In {
+ if e.Pos() == 0 {
+ continue
+ }
+
+ call := e.Site
+ caller := x.funcs[call.Pos()]
+ if caller != nil {
+ // TODO: theoretically a format string could be passed to multiple
+ // arguments of a function. Support this eventually.
+ continue
+ }
+ x.debug(call, "CALL", f.String())
+
+ caller = &callData{
+ call: call,
+ callee: fd,
+ formatPos: -1,
+ argPos: -1,
+ }
+ // Offset by one if we are invoking an interface method.
+ offset := 0
+ if call.Common().IsInvoke() {
+ offset = -1
+ }
+ x.funcs[call.Pos()] = caller
+ if fd.argPos >= 0 {
+ x.visitArgs(caller, call.Common().Args[fd.argPos+offset])
+ }
+ x.visitFormats(caller, call.Common().Args[fd.formatPos+offset])
+ }
+}
+
+type posser interface {
+ Pos() token.Pos
+ Parent() *ssa.Function
+}
+
+func (x *extracter) debug(v posser, header string, args ...interface{}) {
+ if debug {
+ pos := ""
+ if p := v.Parent(); p != nil {
+ pos = posString(&x.conf, p.Package().Pkg, v.Pos())
+ }
+ if header != "CALL" && header != "INSERT" {
+ header = " " + header
+ }
+ fmt.Printf("%-32s%-10s%-15T ", pos+fmt.Sprintf("@%d", v.Pos()), header, v)
+ for _, a := range args {
+ fmt.Printf(" %v", a)
+ }
+ fmt.Println()
+ }
+}
+
+// visitInit evaluates and collects values assigned to global variables in an
+// init function.
+func (x *extracter) visitInit(global *ssa.Global, v ssa.Value) {
+ if v == nil {
+ return
+ }
+ x.debug(v, "GLOBAL", v)
+
+ switch v := v.(type) {
+ case *ssa.Phi:
+ for _, e := range v.Edges {
+ x.visitInit(global, e)
+ }
+
+ case *ssa.Const:
+ // Only record strings with letters.
+ if str := constant.StringVal(v.Value); isMsg(str) {
+ cd := x.globalData(global.Pos())
+ cd.values = append(cd.values, constVal{v.Value, v.Pos()})
+ }
+ // TODO: handle %m-directive.
+
+ case *ssa.Global:
+ cd := x.globalData(global.Pos())
+ cd.others = append(cd.others, v.Pos())
+
+ case *ssa.FieldAddr, *ssa.Field:
+ // TODO: mark field index v.Field of v.X.Type() for extraction. Extract
+ // example args to provide parameters for the translator.
+
+ case *ssa.Slice:
+ if v.Low == nil && v.High == nil && v.Max == nil {
+ x.visitInit(global, v.X)
+ }
+
+ case *ssa.Alloc:
+ if ref := v.Referrers(); ref != nil {
+ for _, r := range *ref {
+ values := []ssa.Value{}
+ for _, o := range r.Operands(nil) {
+ if o == nil || *o == v {
+ continue
+ }
+ values = append(values, *o)
+ }
+ // TODO: return something different if we care about multiple
+ // values as well.
+ if len(values) == 1 {
+ x.visitInit(global, values[0])
+ }
+ }
+ }
+
+ case ssa.Instruction:
+ rands := v.Operands(nil)
+ if len(rands) == 1 && rands[0] != nil {
+ x.visitInit(global, *rands[0])
+ }
+ }
+ return
+}
+
+// visitFormats traces v back to its original sources, recording string
+// constants in call.formats and, if the value originates from a function
+// parameter, the parameter's position in call.formatPos.
+func (x *extracter) visitFormats(call *callData, v ssa.Value) {
+ if v == nil {
+ return
+ }
+ x.debug(v, "VALUE", v)
+
+ switch v := v.(type) {
+ case *ssa.Phi:
+ for _, e := range v.Edges {
+ x.visitFormats(call, e)
+ }
+
+ case *ssa.Const:
+ // Only record strings with letters.
+ if isMsg(constant.StringVal(v.Value)) {
+ x.debug(call.call, "FORMAT", v.Value.ExactString())
+ call.formats = append(call.formats, v.Value)
+ }
+ // TODO: handle %m-directive.
+
+ case *ssa.Global:
+ x.globalData(v.Pos()).call = call
+
+ case *ssa.FieldAddr, *ssa.Field:
+ // TODO: mark field index v.Field of v.X.Type() for extraction. Extract
+ // example args to provide parameters for the translator.
+
+ case *ssa.Slice:
+ if v.Low == nil && v.High == nil && v.Max == nil {
+ x.visitFormats(call, v.X)
+ }
+
+ case *ssa.Parameter:
+ // TODO: handle the function for the index parameter.
+ f := v.Parent()
+ for i, p := range f.Params {
+ if p == v {
+ if call.formatPos < 0 {
+ call.formatPos = i
+ // TODO: is there a better way to detect this is calling
+ // a method rather than a function?
+ call.isMethod = len(f.Params) > f.Signature.Params().Len()
+ x.handleFunc(v.Parent(), call)
+ } else if debug && i != call.formatPos {
+ // TODO: support this.
+ fmt.Printf("WARNING:%s: format string passed to arg %d and %d\n",
+ posString(&x.conf, call.Pkg(), call.Pos()),
+ call.formatPos, i)
+ }
+ }
+ }
+
+ case *ssa.Alloc:
+ if ref := v.Referrers(); ref != nil {
+ for _, r := range *ref {
+ values := []ssa.Value{}
+ for _, o := range r.Operands(nil) {
+ if o == nil || *o == v {
+ continue
+ }
+ values = append(values, *o)
+ }
+ // TODO: return something different if we care about multiple
+ // values as well.
+ if len(values) == 1 {
+ x.visitFormats(call, values[0])
+ }
+ }
+ }
+
+ // TODO:
+ // case *ssa.Index:
+ // // Get all values in the array if applicable
+ // case *ssa.IndexAddr:
+ // // Get all values in the slice or *array if applicable.
+ // case *ssa.Lookup:
+ // // Get all values in the map if applicable.
+
+ case *ssa.FreeVar:
+ // TODO: find the link between free variables and parameters:
+ //
+ // func freeVar(p *message.Printer, str string) {
+ // fn := func(p *message.Printer) {
+ // p.Printf(str)
+ // }
+ // fn(p)
+ // }
+
+ case *ssa.Call:
+
+ case ssa.Instruction:
+ rands := v.Operands(nil)
+ if len(rands) == 1 && rands[0] != nil {
+ x.visitFormats(call, *rands[0])
+ }
+ }
+}
+
+// Note: a function may have an argument marked as both format and passthrough.
+
+// visitArgs collects information on arguments. For wrapped functions it will
+// just determine the position of the variable args slice.
+func (x *extracter) visitArgs(fd *callData, v ssa.Value) {
+ if v == nil {
+ return
+ }
+ x.debug(v, "ARGV", v)
+ switch v := v.(type) {
+
+ case *ssa.Slice:
+ if v.Low == nil && v.High == nil && v.Max == nil {
+ x.visitArgs(fd, v.X)
+ }
+
+ case *ssa.Parameter:
+ // TODO: handle the function for the index parameter.
+ f := v.Parent()
+ for i, p := range f.Params {
+ if p == v {
+ fd.argPos = i
+ }
+ }
+
+ case *ssa.Alloc:
+ if ref := v.Referrers(); ref != nil {
+ for _, r := range *ref {
+ values := []ssa.Value{}
+ for _, o := range r.Operands(nil) {
+ if o == nil || *o == v {
+ continue
+ }
+ values = append(values, *o)
+ }
+ // TODO: return something different if we care about
+ // multiple values as well.
+ if len(values) == 1 {
+ x.visitArgs(fd, values[0])
+ }
+ }
+ }
+
+ case ssa.Instruction:
+ rands := v.Operands(nil)
+ if len(rands) == 1 && rands[0] != nil {
+ x.visitArgs(fd, *rands[0])
+ }
+ }
+}
+
+// print returns Go syntax for the specified node.
+func (x *extracter) print(n ast.Node) string {
+ var buf bytes.Buffer
+ format.Node(&buf, x.conf.Fset, n)
+ return buf.String()
+}
+
+type packageExtracter struct {
+ f *ast.File
+ x *extracter
+ info *loader.PackageInfo
+ cmap ast.CommentMap
+}
+
+func (px packageExtracter) getComment(n ast.Node) string {
+ cs := px.cmap.Filter(n).Comments()
+ if len(cs) > 0 {
+ return strings.TrimSpace(cs[0].Text())
+ }
+ return ""
+}
+
+func (x *extracter) extractMessages() {
+ prog := x.iprog
+ keys := make([]*types.Package, 0, len(x.iprog.AllPackages))
+ for k := range x.iprog.AllPackages {
+ keys = append(keys, k)
+ }
+ sort.Slice(keys, func(i, j int) bool { return keys[i].Path() < keys[j].Path() })
+ files := []packageExtracter{}
+ for _, k := range keys {
+ info := x.iprog.AllPackages[k]
+ for _, f := range info.Files {
+ // Associate comments with nodes.
+ px := packageExtracter{
+ f, x, info,
+ ast.NewCommentMap(prog.Fset, f, f.Comments),
+ }
+ files = append(files, px)
+ }
+ }
+ for _, px := range files {
+ ast.Inspect(px.f, func(n ast.Node) bool {
+ switch v := n.(type) {
+ case *ast.CallExpr:
+ if d := x.funcs[v.Lparen]; d != nil {
+ d.expr = v
+ }
+ }
+ return true
+ })
+ }
+ for _, px := range files {
+ ast.Inspect(px.f, func(n ast.Node) bool {
+ switch v := n.(type) {
+ case *ast.CallExpr:
+ return px.handleCall(v)
+ case *ast.ValueSpec:
+ return px.handleGlobal(v)
+ }
+ return true
+ })
+ }
+}
+
+func (px packageExtracter) handleGlobal(spec *ast.ValueSpec) bool {
+ comment := px.getComment(spec)
+
+ for _, ident := range spec.Names {
+ data, ok := px.x.globals[ident.Pos()]
+ if !ok {
+ continue
+ }
+ name := ident.Name
+ var arguments []argument
+ if data.call != nil {
+ arguments = px.getArguments(data.call)
+ } else if !strings.HasPrefix(name, "msg") && !strings.HasPrefix(name, "Msg") {
+ continue
+ }
+ data.visit(px.x, func(c constant.Value) {
+ px.addMessage(spec.Pos(), []string{name}, c, comment, arguments)
+ })
+ }
+
+ return true
+}
+
+func (px packageExtracter) handleCall(call *ast.CallExpr) bool {
+ x := px.x
+ data := x.funcs[call.Lparen]
+ if data == nil || len(data.formats) == 0 {
+ return true
+ }
+ if data.expr != call {
+ panic("invariant `data.call != call` failed")
+ }
+ x.debug(data.call, "INSERT", data.formats)
+
+ argn := data.callFormatPos()
+ if argn >= len(call.Args) {
+ return true
+ }
+ format := call.Args[argn]
+
+ arguments := px.getArguments(data)
+
+ comment := ""
+ key := []string{}
+ if ident, ok := format.(*ast.Ident); ok {
+ key = append(key, ident.Name)
+ if v, ok := ident.Obj.Decl.(*ast.ValueSpec); ok && v.Comment != nil {
+ // TODO: get comment above ValueSpec as well
+ comment = v.Comment.Text()
+ }
+ }
+ if c := px.getComment(call.Args[0]); c != "" {
+ comment = c
+ }
+
+ formats := data.formats
+ for _, c := range formats {
+ px.addMessage(call.Lparen, key, c, comment, arguments)
+ }
+ return true
+}
+
+func (px packageExtracter) getArguments(data *callData) []argument {
+ arguments := []argument{}
+ x := px.x
+ info := px.info
+ if data.callArgsStart() >= 0 {
+ args := data.expr.Args[data.callArgsStart():]
+ for i, arg := range args {
+ expr := x.print(arg)
+ val := ""
+ if v := info.Types[arg].Value; v != nil {
+ val = v.ExactString()
+ switch arg.(type) {
+ case *ast.BinaryExpr, *ast.UnaryExpr:
+ expr = val
+ }
+ }
+ arguments = append(arguments, argument{
+ ArgNum: i + 1,
+ Type: info.Types[arg].Type.String(),
+ UnderlyingType: info.Types[arg].Type.Underlying().String(),
+ Expr: expr,
+ Value: val,
+ Comment: px.getComment(arg),
+ Position: posString(&x.conf, info.Pkg, arg.Pos()),
+ // TODO report whether it implements
+ // interfaces plural.Interface,
+ // gender.Interface.
+ })
+ }
+ }
+ return arguments
+}
+
+func (px packageExtracter) addMessage(
+ pos token.Pos,
+ key []string,
+ c constant.Value,
+ comment string,
+ arguments []argument) {
+ x := px.x
+ fmtMsg := constant.StringVal(c)
+
+ ph := placeholders{index: map[string]string{}}
+
+ trimmed, _, _ := trimWS(fmtMsg)
+
+ p := fmtparser.Parser{}
+ simArgs := make([]interface{}, len(arguments))
+ for i, v := range arguments {
+ simArgs[i] = v
+ }
+ msg := ""
+ p.Reset(simArgs)
+ for p.SetFormat(trimmed); p.Scan(); {
+ name := ""
+ var arg *argument
+ switch p.Status {
+ case fmtparser.StatusText:
+ msg += p.Text()
+ continue
+ case fmtparser.StatusSubstitution,
+ fmtparser.StatusBadWidthSubstitution,
+ fmtparser.StatusBadPrecSubstitution:
+ arguments[p.ArgNum-1].used = true
+ arg = &arguments[p.ArgNum-1]
+ name = getID(arg)
+ case fmtparser.StatusBadArgNum, fmtparser.StatusMissingArg:
+ arg = &argument{
+ ArgNum: p.ArgNum,
+ Position: posString(&x.conf, px.info.Pkg, pos),
+ }
+ name, arg.UnderlyingType = verbToPlaceholder(p.Text(), p.ArgNum)
+ }
+ sub := p.Text()
+ if !p.HasIndex {
+ r, sz := utf8.DecodeLastRuneInString(sub)
+ sub = fmt.Sprintf("%s[%d]%c", sub[:len(sub)-sz], p.ArgNum, r)
+ }
+ msg += fmt.Sprintf("{%s}", ph.addArg(arg, name, sub))
+ }
+ key = append(key, msg)
+
+ // Add additional Placeholders that can be used in translations
+ // that are not present in the string.
+ for _, arg := range arguments {
+ if arg.used {
+ continue
+ }
+ ph.addArg(&arg, getID(&arg), fmt.Sprintf("%%[%d]v", arg.ArgNum))
+ }
+
+ x.messages = append(x.messages, Message{
+ ID: key,
+ Key: fmtMsg,
+ Message: Text{Msg: msg},
+ // TODO(fix): this doesn't get the before comment.
+ Comment: comment,
+ Placeholders: ph.slice,
+ Position: posString(&x.conf, px.info.Pkg, pos),
+ })
+}
+
+func posString(conf *loader.Config, pkg *types.Package, pos token.Pos) string {
+ p := conf.Fset.Position(pos)
+ file := fmt.Sprintf("%s:%d:%d", filepath.Base(p.Filename), p.Line, p.Column)
+ return filepath.Join(pkg.Path(), file)
+}
+
+func getID(arg *argument) string {
+ s := getLastComponent(arg.Expr)
+ s = strip(s)
+ s = strings.Replace(s, " ", "", -1)
+ // For small variable names, use user-defined types for more info.
+ if len(s) <= 2 && arg.UnderlyingType != arg.Type {
+ s = getLastComponent(arg.Type)
+ }
+ return strings.Title(s)
+}
+
+// strip is a dirty hack to convert function calls to placeholder IDs.
+func strip(s string) string {
+ s = strings.Map(func(r rune) rune {
+ if unicode.IsSpace(r) || r == '-' {
+ return '_'
+ }
+ if !unicode.In(r, unicode.Letter, unicode.Mark, unicode.Number) {
+ return -1
+ }
+ return r
+ }, s)
+ // Strip "Get" from getter functions.
+ if strings.HasPrefix(s, "Get") || strings.HasPrefix(s, "get") {
+ if len(s) > len("get") {
+ r, _ := utf8.DecodeRuneInString(s)
+ if !unicode.In(r, unicode.Ll, unicode.M) { // not lower or mark
+ s = s[len("get"):]
+ }
+ }
+ }
+ return s
+}
+
+// verbToPlaceholder gives a name for a placeholder based on the substitution
+// verb. It is only to be used if no other type information is available.
+func verbToPlaceholder(sub string, pos int) (name, underlying string) {
+ r, _ := utf8.DecodeLastRuneInString(sub)
+ name = fmt.Sprintf("Arg_%d", pos)
+ switch r {
+ case 's', 'q':
+ underlying = "string"
+ case 'd':
+ name = "Integer"
+ underlying = "int"
+ case 'e', 'f', 'g':
+ name = "Number"
+ underlying = "float64"
+ case 'm':
+ name = "Message"
+ underlying = "string"
+ default:
+ underlying = "interface{}"
+ }
+ return name, underlying
+}
+
+type placeholders struct {
+ index map[string]string
+ slice []Placeholder
+}
+
+func (p *placeholders) addArg(arg *argument, name, sub string) (id string) {
+ id = name
+ alt, ok := p.index[id]
+ for i := 1; ok && alt != sub; i++ {
+ id = fmt.Sprintf("%s_%d", name, i)
+ alt, ok = p.index[id]
+ }
+ p.index[id] = sub
+ p.slice = append(p.slice, Placeholder{
+ ID: id,
+ String: sub,
+ Type: arg.Type,
+ UnderlyingType: arg.UnderlyingType,
+ ArgNum: arg.ArgNum,
+ Expr: arg.Expr,
+ Comment: arg.Comment,
+ })
+ return id
+}
+
+func getLastComponent(s string) string {
+ return s[1+strings.LastIndexByte(s, '.'):]
+}
+
+// isMsg returns whether s should be translated.
+func isMsg(s string) bool {
+ // TODO: parse as format string and omit strings that contain letters
+ // coming from format verbs.
+ for _, r := range s {
+ if unicode.In(r, unicode.L) {
+ return true
+ }
+ }
+ return false
+}
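
A sketch of driving the extraction step programmatically, roughly what the gotext command's extract step does; the package pattern below is a hypothetical example:

package main

import (
	"fmt"
	"log"

	"golang.org/x/text/language"
	"golang.org/x/text/message/pipeline"
)

func main() {
	cfg := &pipeline.Config{
		SourceLanguage: language.AmericanEnglish,
		Packages:       []string{"example.com/myapp/..."}, // hypothetical module path
	}
	state, err := pipeline.Extract(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// Print the key and source position of every extracted message.
	for _, m := range state.Extracted.Messages {
		fmt.Println(m.Key, m.Position)
	}
}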
diff --git a/vendor/golang.org/x/text/message/pipeline/generate.go b/vendor/golang.org/x/text/message/pipeline/generate.go
new file mode 100644
index 0000000..f747c37
--- /dev/null
+++ b/vendor/golang.org/x/text/message/pipeline/generate.go
@@ -0,0 +1,329 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pipeline
+
+import (
+ "fmt"
+ "go/build"
+ "io"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "text/template"
+
+ "golang.org/x/text/collate"
+ "golang.org/x/text/feature/plural"
+ "golang.org/x/text/internal"
+ "golang.org/x/text/internal/catmsg"
+ "golang.org/x/text/internal/gen"
+ "golang.org/x/text/language"
+ "golang.org/x/tools/go/loader"
+)
+
+var transRe = regexp.MustCompile(`messages\.(.*)\.json`)
+
+// Generate writes a Go file that defines a Catalog with translated messages.
+// Translations are retrieved from s.Messages, not s.Translations, so it
+// is assumed Merge has been called.
+func (s *State) Generate() error {
+ path := s.Config.GenPackage
+ if path == "" {
+ path = "."
+ }
+ isDir := path[0] == '.'
+ prog, err := loadPackages(&loader.Config{}, []string{path})
+ if err != nil {
+ return wrap(err, "could not load package")
+ }
+ pkgs := prog.InitialPackages()
+ if len(pkgs) != 1 {
+ return errorf("more than one package selected: %v", pkgs)
+ }
+ pkg := pkgs[0].Pkg.Name()
+
+ cw, err := s.generate()
+ if err != nil {
+ return err
+ }
+ if !isDir {
+ gopath := filepath.SplitList(build.Default.GOPATH)[0]
+ path = filepath.Join(gopath, "src", filepath.FromSlash(pkgs[0].Pkg.Path()))
+ }
+ if len(s.Config.GenFile) == 0 {
+ cw.WriteGo(os.Stdout, pkg, "")
+ return nil
+ }
+ if filepath.IsAbs(s.Config.GenFile) {
+ path = s.Config.GenFile
+ } else {
+ path = filepath.Join(path, s.Config.GenFile)
+ }
+ cw.WriteGoFile(path, pkg) // TODO: WriteGoFile should return error.
+ return err
+}
+
+// WriteGen writes a Go file with the given package name to w that defines a
+// Catalog with translated messages. Translations are retrieved from s.Messages,
+// not s.Translations, so it is assumed Merge has been called.
+func (s *State) WriteGen(w io.Writer, pkg string) error {
+ cw, err := s.generate()
+ if err != nil {
+ return err
+ }
+ _, err = cw.WriteGo(w, pkg, "")
+ return err
+}
+
+// Generate is deprecated; use (*State).Generate().
+func Generate(w io.Writer, pkg string, extracted *Messages, trans ...Messages) (n int, err error) {
+ s := State{
+ Extracted: *extracted,
+ Translations: trans,
+ }
+ cw, err := s.generate()
+ if err != nil {
+ return 0, err
+ }
+ return cw.WriteGo(w, pkg, "")
+}
+
+func (s *State) generate() (*gen.CodeWriter, error) {
+ // Build up index of translations and original messages.
+ translations := map[language.Tag]map[string]Message{}
+ languages := []language.Tag{}
+ usedKeys := map[string]int{}
+
+ for _, loc := range s.Messages {
+ tag := loc.Language
+ if _, ok := translations[tag]; !ok {
+ translations[tag] = map[string]Message{}
+ languages = append(languages, tag)
+ }
+ for _, m := range loc.Messages {
+ if !m.Translation.IsEmpty() {
+ for _, id := range m.ID {
+ if _, ok := translations[tag][id]; ok {
+ warnf("Duplicate translation in locale %q for message %q", tag, id)
+ }
+ translations[tag][id] = m
+ }
+ }
+ }
+ }
+
+ // Verify completeness and register keys.
+ internal.SortTags(languages)
+
+ langVars := []string{}
+ for _, tag := range languages {
+ langVars = append(langVars, strings.Replace(tag.String(), "-", "_", -1))
+ dict := translations[tag]
+ for _, msg := range s.Extracted.Messages {
+ for _, id := range msg.ID {
+ if trans, ok := dict[id]; ok && !trans.Translation.IsEmpty() {
+ if _, ok := usedKeys[msg.Key]; !ok {
+ usedKeys[msg.Key] = len(usedKeys)
+ }
+ break
+ }
+ // TODO: log missing entry.
+ warnf("%s: Missing entry for %q.", tag, id)
+ }
+ }
+ }
+
+ cw := gen.NewCodeWriter()
+
+ x := &struct {
+ Fallback language.Tag
+ Languages []string
+ }{
+ Fallback: s.Extracted.Language,
+ Languages: langVars,
+ }
+
+ if err := lookup.Execute(cw, x); err != nil {
+ return nil, wrap(err, "error")
+ }
+
+ keyToIndex := []string{}
+ for k := range usedKeys {
+ keyToIndex = append(keyToIndex, k)
+ }
+ sort.Strings(keyToIndex)
+ fmt.Fprint(cw, "var messageKeyToIndex = map[string]int{\n")
+ for _, k := range keyToIndex {
+ fmt.Fprintf(cw, "%q: %d,\n", k, usedKeys[k])
+ }
+ fmt.Fprint(cw, "}\n\n")
+
+ for i, tag := range languages {
+ dict := translations[tag]
+ a := make([]string, len(usedKeys))
+ for _, msg := range s.Extracted.Messages {
+ for _, id := range msg.ID {
+ if trans, ok := dict[id]; ok && !trans.Translation.IsEmpty() {
+ m, err := assemble(&msg, &trans.Translation)
+ if err != nil {
+ return nil, wrap(err, "error")
+ }
+ _, leadWS, trailWS := trimWS(msg.Key)
+ if leadWS != "" || trailWS != "" {
+ m = catmsg.Affix{
+ Message: m,
+ Prefix: leadWS,
+ Suffix: trailWS,
+ }
+ }
+ // TODO: support macros.
+ data, err := catmsg.Compile(tag, nil, m)
+ if err != nil {
+ return nil, wrap(err, "error")
+ }
+ key := usedKeys[msg.Key]
+ if d := a[key]; d != "" && d != data {
+ warnf("Duplicate non-consistent translation for key %q, picking the one for message %q", msg.Key, id)
+ }
+ a[key] = string(data)
+ break
+ }
+ }
+ }
+ index := []uint32{0}
+ p := 0
+ for _, s := range a {
+ p += len(s)
+ index = append(index, uint32(p))
+ }
+
+ cw.WriteVar(langVars[i]+"Index", index)
+ cw.WriteConst(langVars[i]+"Data", strings.Join(a, ""))
+ }
+ return cw, nil
+}
+
+func assemble(m *Message, t *Text) (msg catmsg.Message, err error) {
+ keys := []string{}
+ for k := range t.Var {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ var a []catmsg.Message
+ for _, k := range keys {
+ t := t.Var[k]
+ m, err := assemble(m, &t)
+ if err != nil {
+ return nil, err
+ }
+ a = append(a, &catmsg.Var{Name: k, Message: m})
+ }
+ if t.Select != nil {
+ s, err := assembleSelect(m, t.Select)
+ if err != nil {
+ return nil, err
+ }
+ a = append(a, s)
+ }
+ if t.Msg != "" {
+ sub, err := m.Substitute(t.Msg)
+ if err != nil {
+ return nil, err
+ }
+ a = append(a, catmsg.String(sub))
+ }
+ switch len(a) {
+ case 0:
+ return nil, errorf("generate: empty message")
+ case 1:
+ return a[0], nil
+ default:
+ return catmsg.FirstOf(a), nil
+
+ }
+}
+
+func assembleSelect(m *Message, s *Select) (msg catmsg.Message, err error) {
+ cases := []string{}
+ for c := range s.Cases {
+ cases = append(cases, c)
+ }
+ sortCases(cases)
+
+ caseMsg := []interface{}{}
+ for _, c := range cases {
+ cm := s.Cases[c]
+ m, err := assemble(m, &cm)
+ if err != nil {
+ return nil, err
+ }
+ caseMsg = append(caseMsg, c, m)
+ }
+
+ ph := m.Placeholder(s.Arg)
+
+ switch s.Feature {
+ case "plural":
+ // TODO: only printf-style selects are supported as of yet.
+ return plural.Selectf(ph.ArgNum, ph.String, caseMsg...), nil
+ }
+ return nil, errorf("unknown feature type %q", s.Feature)
+}
+
+func sortCases(cases []string) {
+ // TODO: implement full interface.
+ sort.Slice(cases, func(i, j int) bool {
+ switch {
+ case cases[i] != "other" && cases[j] == "other":
+ return true
+ case cases[i] == "other" && cases[j] != "other":
+ return false
+ }
+ // the following code relies on '<' < '=' < any letter.
+ return cmpNumeric(cases[i], cases[j]) == -1
+ })
+}
+
+var cmpNumeric = collate.New(language.Und, collate.Numeric).CompareString
+
+var lookup = template.Must(template.New("gen").Parse(`
+import (
+ "golang.org/x/text/language"
+ "golang.org/x/text/message"
+ "golang.org/x/text/message/catalog"
+)
+
+type dictionary struct {
+ index []uint32
+ data string
+}
+
+func (d *dictionary) Lookup(key string) (data string, ok bool) {
+ p, ok := messageKeyToIndex[key]
+ if !ok {
+ return "", false
+ }
+ start, end := d.index[p], d.index[p+1]
+ if start == end {
+ return "", false
+ }
+ return d.data[start:end], true
+}
+
+func init() {
+ dict := map[string]catalog.Dictionary{
+ {{range .Languages}}"{{.}}": &dictionary{index: {{.}}Index, data: {{.}}Data },
+ {{end}}
+ }
+ fallback := language.MustParse("{{.Fallback}}")
+ cat, err := catalog.NewFromMap(dict, catalog.Fallback(fallback))
+ if err != nil {
+ panic(err)
+ }
+ message.DefaultCatalog = cat
+}
+
+`))
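
The template above generates an init function that installs the compiled translations into message.DefaultCatalog, so a consuming program only needs to import the generated package for its side effect. A sketch with a hypothetical import path:

package main

import (
	"golang.org/x/text/language"
	"golang.org/x/text/message"

	// Hypothetical package containing the file written by (*State).Generate.
	_ "example.com/myapp/internal/translations"
)

func main() {
	p := message.NewPrinter(language.German)
	// Printed using the German translation from the generated catalog,
	// falling back to the source language if no translation is present.
	p.Printf("%d file(s) remaining", 3)
}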
diff --git a/vendor/golang.org/x/text/message/pipeline/message.go b/vendor/golang.org/x/text/message/pipeline/message.go
new file mode 100644
index 0000000..c83a8fd
--- /dev/null
+++ b/vendor/golang.org/x/text/message/pipeline/message.go
@@ -0,0 +1,241 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pipeline
+
+import (
+ "encoding/json"
+ "errors"
+ "strings"
+
+ "golang.org/x/text/language"
+)
+
+// TODO: these definitions should be moved to a package so that they can be
+// used by other tools.
+
+// This file contains the structures used to define translations for a set of
+// messages.
+//
+// A translation may have multiple translation strings, or messages, depending
+// on the feature values of the various arguments. For instance, consider
+// a hypothetical translation from English to English, where the source defines
+// the format string "%d file(s) remaining".
+// See the examples directory for examples of extracted messages.
+
+// Messages is used to store translations for a single language.
+type Messages struct {
+ Language language.Tag `json:"language"`
+ Messages []Message `json:"messages"`
+ Macros map[string]Text `json:"macros,omitempty"`
+}
+
+// A Message describes a message to be translated.
+type Message struct {
+ // ID contains a list of identifiers for the message.
+ ID IDList `json:"id"`
+ // Key is the string that is used to look up the message at runtime.
+ Key string `json:"key,omitempty"`
+ Meaning string `json:"meaning,omitempty"`
+ Message Text `json:"message"`
+ Translation Text `json:"translation"`
+
+ Comment string `json:"comment,omitempty"`
+ TranslatorComment string `json:"translatorComment,omitempty"`
+
+ Placeholders []Placeholder `json:"placeholders,omitempty"`
+
+ // Fuzzy indicates that the provided translation needs review by a
+ // translator, for instance because it was derived from automated
+ // translation.
+ Fuzzy bool `json:"fuzzy,omitempty"`
+
+ // TODO: default placeholder syntax is {foo}. Allow alternative escaping
+ // like `foo`.
+
+ // Extraction information.
+ Position string `json:"position,omitempty"` // filePosition:line
+}
+
+// Placeholder reports the placeholder for the given ID if it is defined or nil
+// otherwise.
+func (m *Message) Placeholder(id string) *Placeholder {
+ for _, p := range m.Placeholders {
+ if p.ID == id {
+ return &p
+ }
+ }
+ return nil
+}
+
+// Substitute replaces placeholders in msg with their original value.
+func (m *Message) Substitute(msg string) (sub string, err error) {
+ last := 0
+ for i := 0; i < len(msg); {
+ pLeft := strings.IndexByte(msg[i:], '{')
+ if pLeft == -1 {
+ break
+ }
+ pLeft += i
+ pRight := strings.IndexByte(msg[pLeft:], '}')
+ if pRight == -1 {
+ return "", errorf("unmatched '}'")
+ }
+ pRight += pLeft
+ id := strings.TrimSpace(msg[pLeft+1 : pRight])
+ i = pRight + 1
+ if id != "" && id[0] == '$' {
+ continue
+ }
+ sub += msg[last:pLeft]
+ last = i
+ ph := m.Placeholder(id)
+ if ph == nil {
+ return "", errorf("unknown placeholder %q in message %q", id, msg)
+ }
+ sub += ph.String
+ }
+ sub += msg[last:]
+ return sub, err
+}
+
+var errIncompatibleMessage = errors.New("messages incompatible")
+
+func checkEquivalence(a, b *Message) error {
+ for _, v := range a.ID {
+ for _, w := range b.ID {
+ if v == w {
+ return nil
+ }
+ }
+ }
+ // TODO: canonicalize placeholders and check for type equivalence.
+ return errIncompatibleMessage
+}
+
+// A Placeholder is a part of the message that should not be changed by a
+// translator. It can be used to hide or prettify format strings (e.g. %d or
+// {{.Count}}), hide HTML, or mark common names that should not be translated.
+type Placeholder struct {
+ // ID is the placeholder identifier without the curly braces.
+ ID string `json:"id"`
+
+ // String is the string with which to replace the placeholder. This may be a
+ // formatting string (for instance "%d" or "{{.Count}}") or a literal string
+ // (<div>).
+ String string `json:"string"`
+
+ Type string `json:"type"`
+ UnderlyingType string `json:"underlyingType"`
+ // ArgNum and Expr are set if the placeholder is a substitution of an
+ // argument.
+ ArgNum int `json:"argNum,omitempty"`
+ Expr string `json:"expr,omitempty"`
+
+ Comment string `json:"comment,omitempty"`
+ Example string `json:"example,omitempty"`
+
+ // Features contains the features that are available for the implementation
+ // of this argument.
+ Features []Feature `json:"features,omitempty"`
+}
+
+// An argument contains information about the arguments passed to a message.
+type argument struct {
+ // ArgNum corresponds to the number that should be used for explicit argument indexes (e.g.
+ // "%[1]d").
+ ArgNum int `json:"argNum,omitempty"`
+
+ used bool // Used by Placeholder
+ Type string `json:"type"`
+ UnderlyingType string `json:"underlyingType"`
+ Expr string `json:"expr"`
+ Value string `json:"value,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Position string `json:"position,omitempty"`
+}
+
+// Feature holds information about a feature that can be implemented by
+// an Argument.
+type Feature struct {
+ Type string `json:"type"` // Right now this is only gender and plural.
+
+ // TODO: possible values and examples for the language under consideration.
+
+}
+
+// Text defines a message to be displayed.
+type Text struct {
+ // Msg and Select contain the message to be displayed. Msg may be used as
+ // a fallback value if none of the select cases match.
+ Msg string `json:"msg,omitempty"`
+ Select *Select `json:"select,omitempty"`
+
+ // Var defines a map of variables that may be substituted in the selected
+ // message.
+ Var map[string]Text `json:"var,omitempty"`
+
+ // Example contains an example message formatted with default values.
+ Example string `json:"example,omitempty"`
+}
+
+// IsEmpty reports whether this Text can generate anything.
+func (t *Text) IsEmpty() bool {
+ return t.Msg == "" && t.Select == nil && t.Var == nil
+}
+
+// rawText erases the UnmarshalJSON method.
+type rawText Text
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (t *Text) UnmarshalJSON(b []byte) error {
+ if b[0] == '"' {
+ return json.Unmarshal(b, &t.Msg)
+ }
+ return json.Unmarshal(b, (*rawText)(t))
+}
+
+// MarshalJSON implements json.Marshaler.
+func (t *Text) MarshalJSON() ([]byte, error) {
+ if t.Select == nil && t.Var == nil && t.Example == "" {
+ return json.Marshal(t.Msg)
+ }
+ return json.Marshal((*rawText)(t))
+}
+
+// IDList is a set of identifiers, each of which may refer to a possibly
+// different version of the same message. When looking up a message, the first
+// identifier in the list takes precedence.
+type IDList []string
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (id *IDList) UnmarshalJSON(b []byte) error {
+ if b[0] == '"' {
+ *id = []string{""}
+ return json.Unmarshal(b, &((*id)[0]))
+ }
+ return json.Unmarshal(b, (*[]string)(id))
+}
+
+// MarshalJSON implements json.Marshaler.
+func (id *IDList) MarshalJSON() ([]byte, error) {
+ if len(*id) == 1 {
+ return json.Marshal((*id)[0])
+ }
+ return json.Marshal((*[]string)(id))
+}
+
+// Select selects a Text based on the value of a feature associated with a
+// certain argument.
+type Select struct {
+ Feature string `json:"feature"` // Name of Feature type (e.g. plural)
+ Arg string `json:"arg"` // The placeholder ID
+ Cases map[string]Text `json:"cases"`
+}
+
+// TODO: order matters, but can we derive the ordering from the case keys?
+// type Case struct {
+// Key string `json:"key"`
+// Value Text `json:"value"`
+// }
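+
+// Editor's note (illustrative sketch, not part of the upstream file): using
+// only the types and JSON tags defined above, a plural-aware translation for a
+// hypothetical "Count" placeholder could be constructed and serialized as:
+//
+//    t := pipeline.Text{
+//        Select: &pipeline.Select{
+//            Feature: "plural",
+//            Arg:     "Count",
+//            Cases: map[string]pipeline.Text{
+//                "one":   {Msg: "You have one task."},
+//                "other": {Msg: "You have {Count} tasks."},
+//            },
+//        },
+//    }
+//    b, _ := json.MarshalIndent(t, "", "    ")
+//
+// A Text whose Select and Var are nil and whose Example is empty marshals to a
+// plain JSON string instead, as implemented by MarshalJSON above.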
diff --git a/vendor/golang.org/x/text/message/pipeline/pipeline.go b/vendor/golang.org/x/text/message/pipeline/pipeline.go
new file mode 100644
index 0000000..34f15f8
--- /dev/null
+++ b/vendor/golang.org/x/text/message/pipeline/pipeline.go
@@ -0,0 +1,422 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pipeline provides tools for creating translation pipelines.
+//
+// NOTE: UNDER DEVELOPMENT. API MAY CHANGE.
+package pipeline
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "go/build"
+ "go/parser"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "text/template"
+ "unicode"
+
+ "golang.org/x/text/internal"
+ "golang.org/x/text/language"
+ "golang.org/x/text/runes"
+ "golang.org/x/tools/go/loader"
+)
+
+const (
+ extractFile = "extracted.gotext.json"
+ outFile = "out.gotext.json"
+ gotextSuffix = "gotext.json"
+)
+
+// Config contains configuration for the translation pipeline.
+type Config struct {
+ // Supported indicates the languages for which data should be generated.
+ // The default is to support all locales for which there are matching
+ // translation files.
+ Supported []language.Tag
+
+ // --- Extraction
+
+ SourceLanguage language.Tag
+
+ Packages []string
+
+ // --- File structure
+
+ // Dir is the root dir for all operations.
+ Dir string
+
+ // TranslationsPattern is a regular expression to match incoming translation
+ // files. These files may appear in any directory rooted at Dir.
+ // The language for the translation files is determined as follows:
+ // 1. From the Language field in the file.
+ // 2. If not present, from a valid language tag in the filename, separated
+ // by dots (e.g. "en-US.json" or "incoming.pt_PT.xmb").
+ // 3. If not present, from the closest subdirectory in which the file
+ // is contained that parses as a valid language tag.
+ TranslationsPattern string
+
+ // OutPattern defines the location for translation files for a certain
+ // language. The default is "{{.Dir}}/{{.Language}}/out.{{.Ext}}"
+ OutPattern string
+
+ // Format defines the file format for generated translation files.
+ // The default is XMB. Alternatives are GetText, XLIFF, L20n, GoText.
+ Format string
+
+ Ext string
+
+ // TODO:
+ // Actions are additional actions to be performed after the initial extract
+ // and merge.
+ // Actions []struct {
+ // Name string
+ // Options map[string]string
+ // }
+
+ // --- Generation
+
+ // GenFile may be in a different package. If it is not defined, the generated
+ // code will be written to stdout.
+ GenFile string
+
+ // GenPackage is the package or relative path into which to generate the
+ // file. If not specified it is relative to the current directory.
+ GenPackage string
+
+ // DeclareVar defines a variable to which to assign the generated Catalog.
+ DeclareVar string
+
+ // SetDefault determines whether to assign the generated Catalog to
+ // message.DefaultCatalog. The default for this is true if DeclareVar is
+ // not defined, false otherwise.
+ SetDefault bool
+
+ // TODO:
+ // - Printf-style configuration
+ // - Template-style configuration
+ // - Extraction options
+ // - Rewrite options
+ // - Generation options
+}
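+
+// Editor's note (usage sketch, not part of the upstream file): a minimal run of
+// the pipeline ties the Config above to the State methods defined below. The
+// Extract helper is assumed to be the one declared in extract.go of this
+// package; the package paths and file names are hypothetical.
+//
+//    config := pipeline.Config{
+//        SourceLanguage: language.AmericanEnglish,
+//        Packages:       []string{"./..."},
+//        Dir:            "locales",
+//        GenFile:        "catalog.go",
+//    }
+//    state, err := pipeline.Extract(&config)
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    if err := state.Import(); err != nil { // load existing translation files
+//        log.Fatal(err)
+//    }
+//    if err := state.Merge(); err != nil { // pair messages with translations
+//        log.Fatal(err)
+//    }
+//    if err := state.Export(); err != nil { // write per-language out files
+//        log.Fatal(err)
+//    }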
+
+// Operations:
+// - extract: get the strings
+// - disambiguate: find messages with the same key, but possibly different meanings.
+// - create out: create a list of messages that need translations
+// - load trans: load the list of current translations
+// - merge: assign list of translations as done
+// - (action)expand: analyze features and create example sentences for each version.
+// - (action)googletrans: pre-populate messages with automatic translations.
+// - (action)export: send out messages somewhere non-standard
+// - (action)import: load messages from somewhere non-standard
+// - vet program: don't pass "foo" + var + "bar" strings. Not using funcs for translated strings.
+// - vet trans: coverage: all translations/ all features.
+// - generate: generate Go code
+
+// State holds all accumulated information on translations during processing.
+type State struct {
+ Config Config
+
+ Package string
+ program *loader.Program
+
+ Extracted Messages `json:"messages"`
+
+ // Messages includes all messages for which translations are needed.
+ // Duplicates may be eliminated. Generation will be done from these messages
+ // (usually after merging).
+ Messages []Messages
+
+ // Translations are incoming translations for the application messages.
+ Translations []Messages
+}
+
+func (s *State) dir() string {
+ if d := s.Config.Dir; d != "" {
+ return d
+ }
+ return "./locales"
+}
+
+func outPattern(s *State) (string, error) {
+ c := s.Config
+ pat := c.OutPattern
+ if pat == "" {
+ pat = "{{.Dir}}/{{.Language}}/out.{{.Ext}}"
+ }
+
+ ext := c.Ext
+ if ext == "" {
+ ext = c.Format
+ }
+ if ext == "" {
+ ext = gotextSuffix
+ }
+ t, err := template.New("").Parse(pat)
+ if err != nil {
+ return "", wrap(err, "error parsing template")
+ }
+ buf := bytes.Buffer{}
+ err = t.Execute(&buf, map[string]string{
+ "Dir": s.dir(),
+ "Language": "%s",
+ "Ext": ext,
+ })
+ return filepath.FromSlash(buf.String()), wrap(err, "incorrect OutPattern")
+}
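+
+// Editor's note (sketch): with an otherwise zero Config, outPattern yields a
+// printf-style pattern with a %s slot for the language tag:
+//
+//    pat, _ := outPattern(&State{}) // "./locales/%s/out.gotext.json" (OS-specific slashes)
+//    file := fmt.Sprintf(pat, language.Dutch) // "./locales/nl/out.gotext.json"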
+
+var transRE = regexp.MustCompile(`.*\.` + gotextSuffix)
+
+// Import loads existing translation files.
+func (s *State) Import() error {
+ outPattern, err := outPattern(s)
+ if err != nil {
+ return err
+ }
+ re := transRE
+ if pat := s.Config.TranslationsPattern; pat != "" {
+ if re, err = regexp.Compile(pat); err != nil {
+ return wrapf(err, "error parsing regexp %q", s.Config.TranslationsPattern)
+ }
+ }
+ x := importer{s, outPattern, re}
+ return x.walkImport(s.dir(), s.Config.SourceLanguage)
+}
+
+type importer struct {
+ state *State
+ outPattern string
+ transFile *regexp.Regexp
+}
+
+func (i *importer) walkImport(path string, tag language.Tag) error {
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ return nil
+ }
+ for _, f := range files {
+ name := f.Name()
+ tag := tag
+ if f.IsDir() {
+ if t, err := language.Parse(name); err == nil {
+ tag = t
+ }
+ // We ignore errors
+ if err := i.walkImport(filepath.Join(path, name), tag); err != nil {
+ return err
+ }
+ continue
+ }
+ for _, l := range strings.Split(name, ".") {
+ if t, err := language.Parse(l); err == nil {
+ tag = t
+ }
+ }
+ file := filepath.Join(path, name)
+ // TODO: Should we skip files that match output files?
+ if fmt.Sprintf(i.outPattern, tag) == file {
+ continue
+ }
+ // TODO: handle different file formats.
+ if !i.transFile.MatchString(name) {
+ continue
+ }
+ b, err := ioutil.ReadFile(file)
+ if err != nil {
+ return wrap(err, "read file failed")
+ }
+ var translations Messages
+ if err := json.Unmarshal(b, &translations); err != nil {
+ return wrap(err, "parsing translation file failed")
+ }
+ i.state.Translations = append(i.state.Translations, translations)
+ }
+ return nil
+}
+
+// Merge merges the extracted messages with the existing translations.
+func (s *State) Merge() error {
+ if s.Messages != nil {
+ panic("already merged")
+ }
+ // Create an index for each unique message.
+ // Duplicates are okay as long as the substitution arguments are compatible.
+ // Top-level messages may appear at multiple substitution points.
+
+ // Collect key equivalence.
+ msgs := []*Message{}
+ keyToIDs := map[string]*Message{}
+ for _, m := range s.Extracted.Messages {
+ m := m
+ if prev, ok := keyToIDs[m.Key]; ok {
+ if err := checkEquivalence(&m, prev); err != nil {
+ warnf("Key %q matches conflicting messages: %v and %v", m.Key, prev.ID, m.ID)
+ // TODO: track enough information so that the rewriter can
+ // suggest/disambiguate messages.
+ }
+ // TODO: add position to message.
+ continue
+ }
+ i := len(msgs)
+ msgs = append(msgs, &m)
+ keyToIDs[m.Key] = msgs[i]
+ }
+
+ // Messages with different keys may still refer to the same translated
+ // message (e.g. different whitespace). Filter these.
+ idMap := map[string]bool{}
+ filtered := []*Message{}
+ for _, m := range msgs {
+ found := false
+ for _, id := range m.ID {
+ found = found || idMap[id]
+ }
+ if !found {
+ filtered = append(filtered, m)
+ }
+ for _, id := range m.ID {
+ idMap[id] = true
+ }
+ }
+
+ // Build index of translations.
+ translations := map[language.Tag]map[string]Message{}
+ languages := append([]language.Tag{}, s.Config.Supported...)
+
+ for _, t := range s.Translations {
+ tag := t.Language
+ if _, ok := translations[tag]; !ok {
+ translations[tag] = map[string]Message{}
+ languages = append(languages, tag)
+ }
+ for _, m := range t.Messages {
+ if !m.Translation.IsEmpty() {
+ for _, id := range m.ID {
+ if _, ok := translations[tag][id]; ok {
+ warnf("Duplicate translation in locale %q for message %q", tag, id)
+ }
+ translations[tag][id] = m
+ }
+ }
+ }
+ }
+ languages = internal.UniqueTags(languages)
+
+ for _, tag := range languages {
+ ms := Messages{Language: tag}
+ for _, orig := range filtered {
+ m := *orig
+ m.Key = ""
+ m.Position = ""
+
+ for _, id := range m.ID {
+ if t, ok := translations[tag][id]; ok {
+ m.Translation = t.Translation
+ if t.TranslatorComment != "" {
+ m.TranslatorComment = t.TranslatorComment
+ m.Fuzzy = t.Fuzzy
+ }
+ break
+ }
+ }
+ if tag == s.Config.SourceLanguage && m.Translation.IsEmpty() {
+ m.Translation = m.Message
+ if m.TranslatorComment == "" {
+ m.TranslatorComment = "Copied from source."
+ m.Fuzzy = true
+ }
+ }
+ // TODO: if translation is empty: pre-expand based on available
+ // linguistic features. This may also be done as a plugin.
+ ms.Messages = append(ms.Messages, m)
+ }
+ s.Messages = append(s.Messages, ms)
+ }
+ return nil
+}
+
+// Export writes out the messages to translation out files.
+func (s *State) Export() error {
+ path, err := outPattern(s)
+ if err != nil {
+ return wrap(err, "export failed")
+ }
+ for _, out := range s.Messages {
+ // TODO: inject translations from existing files to avoid retranslation.
+ data, err := json.MarshalIndent(out, "", " ")
+ if err != nil {
+ return wrap(err, "JSON marshal failed")
+ }
+ file := fmt.Sprintf(path, out.Language)
+ if err := os.MkdirAll(filepath.Dir(file), 0755); err != nil {
+ return wrap(err, "dir create failed")
+ }
+ if err := ioutil.WriteFile(file, data, 0644); err != nil {
+ return wrap(err, "write failed")
+ }
+ }
+ return nil
+}
+
+var (
+ ws = runes.In(unicode.White_Space).Contains
+ notWS = runes.NotIn(unicode.White_Space).Contains
+)
+
+func trimWS(s string) (trimmed, leadWS, trailWS string) {
+ trimmed = strings.TrimRightFunc(s, ws)
+ trailWS = s[len(trimmed):]
+ if i := strings.IndexFunc(trimmed, notWS); i > 0 {
+ leadWS = trimmed[:i]
+ trimmed = trimmed[i:]
+ }
+ return trimmed, leadWS, trailWS
+}
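+
+// Editor's note (sketch): trimWS separates a string from its surrounding
+// whitespace so that only the core text needs to be translated:
+//
+//    trimmed, lead, trail := trimWS("  Hello, world!\n")
+//    // trimmed == "Hello, world!", lead == "  ", trail == "\n"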
+
+// NOTE: The command line tool already prefixes with "gotext:".
+var (
+ wrap = func(err error, msg string) error {
+ if err == nil {
+ return nil
+ }
+ return fmt.Errorf("%s: %v", msg, err)
+ }
+ wrapf = func(err error, msg string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ return wrap(err, fmt.Sprintf(msg, args...))
+ }
+ errorf = fmt.Errorf
+)
+
+func warnf(format string, args ...interface{}) {
+ // TODO: don't log.
+ log.Printf(format, args...)
+}
+
+func loadPackages(conf *loader.Config, args []string) (*loader.Program, error) {
+ if len(args) == 0 {
+ args = []string{"."}
+ }
+
+ conf.Build = &build.Default
+ conf.ParserMode = parser.ParseComments
+
+ // Use the initial packages from the command line.
+ args, err := conf.FromArgs(args, false)
+ if err != nil {
+ return nil, wrap(err, "loading packages failed")
+ }
+
+ // Load, parse and type-check the whole program.
+ return conf.Load()
+}
diff --git a/vendor/golang.org/x/text/message/pipeline/rewrite.go b/vendor/golang.org/x/text/message/pipeline/rewrite.go
new file mode 100644
index 0000000..cf1511f
--- /dev/null
+++ b/vendor/golang.org/x/text/message/pipeline/rewrite.go
@@ -0,0 +1,268 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pipeline
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/constant"
+ "go/format"
+ "go/token"
+ "io"
+ "os"
+ "strings"
+
+ "golang.org/x/tools/go/loader"
+)
+
+const printerType = "golang.org/x/text/message.Printer"
+
+// Rewrite rewrites the Go files in a single package to use the localization
+// machinery and rewrites strings to adopt best practices when possible.
+// If w is not nil, the generated files are written to it, each file with a
+// "--- <filename>" header. Otherwise the files are overwritten.
+func Rewrite(w io.Writer, args ...string) error {
+ conf := &loader.Config{
+ AllowErrors: true, // Allow unused instances of message.Printer.
+ }
+ prog, err := loadPackages(conf, args)
+ if err != nil {
+ return wrap(err, "")
+ }
+
+ for _, info := range prog.InitialPackages() {
+ for _, f := range info.Files {
+ // Associate comments with nodes.
+
+ // Pick up initialized Printers at the package level.
+ r := rewriter{info: info, conf: conf}
+ for _, n := range info.InitOrder {
+ if t := r.info.Types[n.Rhs].Type.String(); strings.HasSuffix(t, printerType) {
+ r.printerVar = n.Lhs[0].Name()
+ }
+ }
+
+ ast.Walk(&r, f)
+
+ w := w
+ if w == nil {
+ var err error
+ if w, err = os.Create(conf.Fset.File(f.Pos()).Name()); err != nil {
+ return wrap(err, "open failed")
+ }
+ } else {
+ fmt.Fprintln(w, "---", conf.Fset.File(f.Pos()).Name())
+ }
+
+ if err := format.Node(w, conf.Fset, f); err != nil {
+ return wrap(err, "go format failed")
+ }
+ }
+ }
+
+ return nil
+}
+
+type rewriter struct {
+ info *loader.PackageInfo
+ conf *loader.Config
+ printerVar string
+}
+
+// print returns Go syntax for the specified node.
+func (r *rewriter) print(n ast.Node) string {
+ var buf bytes.Buffer
+ format.Node(&buf, r.conf.Fset, n)
+ return buf.String()
+}
+
+func (r *rewriter) Visit(n ast.Node) ast.Visitor {
+ // Save the state by scope.
+ if _, ok := n.(*ast.BlockStmt); ok {
+ r := *r
+ return &r
+ }
+ // Find Printers created by assignment.
+ stmt, ok := n.(*ast.AssignStmt)
+ if ok {
+ for _, v := range stmt.Lhs {
+ if r.printerVar == r.print(v) {
+ r.printerVar = ""
+ }
+ }
+ for i, v := range stmt.Rhs {
+ if t := r.info.Types[v].Type.String(); strings.HasSuffix(t, printerType) {
+ r.printerVar = r.print(stmt.Lhs[i])
+ return r
+ }
+ }
+ }
+ // Find Printers created by variable declaration.
+ spec, ok := n.(*ast.ValueSpec)
+ if ok {
+ for _, v := range spec.Names {
+ if r.printerVar == r.print(v) {
+ r.printerVar = ""
+ }
+ }
+ for i, v := range spec.Values {
+ if t := r.info.Types[v].Type.String(); strings.HasSuffix(t, printerType) {
+ r.printerVar = r.print(spec.Names[i])
+ return r
+ }
+ }
+ }
+ if r.printerVar == "" {
+ return r
+ }
+ call, ok := n.(*ast.CallExpr)
+ if !ok {
+ return r
+ }
+
+ // TODO: Handle literal values?
+ sel, ok := call.Fun.(*ast.SelectorExpr)
+ if !ok {
+ return r
+ }
+ meth := r.info.Selections[sel]
+
+ source := r.print(sel.X)
+ fun := r.print(sel.Sel)
+ if meth != nil {
+ source = meth.Recv().String()
+ fun = meth.Obj().Name()
+ }
+
+ // TODO: remove cheap hack and check if the type either
+ // implements some interface or is specifically of type
+ // "golang.org/x/text/message".Printer.
+ m, ok := rewriteFuncs[source]
+ if !ok {
+ return r
+ }
+
+ rewriteType, ok := m[fun]
+ if !ok {
+ return r
+ }
+ ident := ast.NewIdent(r.printerVar)
+ ident.NamePos = sel.X.Pos()
+ sel.X = ident
+ if rewriteType.method != "" {
+ sel.Sel.Name = rewriteType.method
+ }
+
+ // Analyze arguments.
+ argn := rewriteType.arg
+ if rewriteType.format || argn >= len(call.Args) {
+ return r
+ }
+ hasConst := false
+ for _, a := range call.Args[argn:] {
+ if v := r.info.Types[a].Value; v != nil && v.Kind() == constant.String {
+ hasConst = true
+ break
+ }
+ }
+ if !hasConst {
+ return r
+ }
+ sel.Sel.Name = rewriteType.methodf
+
+ // We are done if there is only a single string that does not need to be
+ // escaped.
+ if len(call.Args) == 1 {
+ s, ok := constStr(r.info, call.Args[0])
+ if ok && !strings.Contains(s, "%") && !rewriteType.newLine {
+ return r
+ }
+ }
+
+ // Rewrite arguments as format string.
+ expr := &ast.BasicLit{
+ ValuePos: call.Lparen,
+ Kind: token.STRING,
+ }
+ newArgs := append(call.Args[:argn:argn], expr)
+ newStr := []string{}
+ for i, a := range call.Args[argn:] {
+ if s, ok := constStr(r.info, a); ok {
+ newStr = append(newStr, strings.Replace(s, "%", "%%", -1))
+ } else {
+ newStr = append(newStr, "%v")
+ newArgs = append(newArgs, call.Args[argn+i])
+ }
+ }
+ s := strings.Join(newStr, rewriteType.sep)
+ if rewriteType.newLine {
+ s += "\n"
+ }
+ expr.Value = fmt.Sprintf("%q", s)
+
+ call.Args = newArgs
+
+ // TODO: consider creating an expression instead of a constant string and
+ // then wrapping it in an escape function or so:
+ // call.Args[argn+i] = &ast.CallExpr{
+ // Fun: &ast.SelectorExpr{
+ // X: ast.NewIdent("message"),
+ // Sel: ast.NewIdent("Lookup"),
+ // },
+ // Args: []ast.Expr{a},
+ // }
+ // }
+
+ return r
+}
+
+type rewriteType struct {
+ // method is the name of the equivalent method on a printer, or "" if it is
+ // the same.
+ method string
+
+ // methodf is the method to use if the arguments can be rewritten as
+ // arguments to a printf-style call.
+ methodf string
+
+ // format is true if the method takes a formatting string followed by
+ // substitution arguments.
+ format bool
+
+ // arg indicates the position of the argument to extract. If it is
+ // positive, all arguments from this position onwards need to be extracted.
+ arg int
+
+ sep string
+ newLine bool
+}
+
+// rewriteFuncs lists functions that can be directly mapped to the printer
+// functions of the message package.
+var rewriteFuncs = map[string]map[string]rewriteType{
+ // TODO: Printer -> *golang.org/x/text/message.Printer
+ "fmt": {
+ "Print": rewriteType{methodf: "Printf"},
+ "Sprint": rewriteType{methodf: "Sprintf"},
+ "Fprint": rewriteType{methodf: "Fprintf"},
+
+ "Println": rewriteType{methodf: "Printf", sep: " ", newLine: true},
+ "Sprintln": rewriteType{methodf: "Sprintf", sep: " ", newLine: true},
+ "Fprintln": rewriteType{methodf: "Fprintf", sep: " ", newLine: true},
+
+ "Printf": rewriteType{method: "Printf", format: true},
+ "Sprintf": rewriteType{method: "Sprintf", format: true},
+ "Fprintf": rewriteType{method: "Fprintf", format: true},
+ },
+}
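+
+// Editor's note (sketch, not part of the upstream file): with a *message.Printer
+// in scope named p, the table above makes the rewriter turn, for example,
+//
+//    fmt.Println("Hello", name)
+//
+// into
+//
+//    p.Printf("Hello %v\n", name)
+//
+// since Println maps to Printf with a space separator and a trailing newline,
+// and non-constant arguments are replaced by %v. A typical invocation that
+// prints the rewritten files instead of overwriting them (the package path is
+// hypothetical) is:
+//
+//    if err := pipeline.Rewrite(os.Stdout, "./cmd/frontend"); err != nil {
+//        log.Fatal(err)
+//    }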
+
+func constStr(info *loader.PackageInfo, e ast.Expr) (s string, ok bool) {
+ v := info.Types[e].Value
+ if v == nil || v.Kind() != constant.String {
+ return "", false
+ }
+ return constant.StringVal(v), true
+}
diff --git a/vendor/golang.org/x/text/runes/cond.go b/vendor/golang.org/x/text/runes/cond.go
new file mode 100644
index 0000000..df7aa02
--- /dev/null
+++ b/vendor/golang.org/x/text/runes/cond.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runes
+
+import (
+ "unicode/utf8"
+
+ "golang.org/x/text/transform"
+)
+
+// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is.
+// This is done for various reasons:
+// - To retain the semantics of the Nop transformer: if input is passed to a Nop
+// one would expect it to be unchanged.
+// - It would be very expensive to pass a converted RuneError to a transformer:
+// a transformer might need more source bytes after RuneError, meaning that
+// the only way to pass it safely is to create a new buffer and manage the
+// intermingling of RuneErrors and normal input.
+// - Many transformers leave ill-formed UTF-8 as is, so this is not
+// inconsistent. Generally ill-formed UTF-8 is only replaced if it is a
+// logical consequence of the operation (as for Map) or if it otherwise would
+// pose security concerns (as for Remove).
+// - An alternative would be to return an error on ill-formed UTF-8, but this
+// would be inconsistent with other operations.
+
+// If returns a transformer that applies tIn to consecutive runes for which
+// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset
+// is called on tIn and tNotIn at the start of each run. A nil value passed for
+// tIn or tNotIn is substituted with a Nop transformer. Invalid UTF-8 is translated
+// to RuneError to determine which transformer to apply, but is passed as is to
+// the respective transformer.
+func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
+ if tIn == nil && tNotIn == nil {
+ return Transformer{transform.Nop}
+ }
+ if tIn == nil {
+ tIn = transform.Nop
+ }
+ if tNotIn == nil {
+ tNotIn = transform.Nop
+ }
+ sIn, ok := tIn.(transform.SpanningTransformer)
+ if !ok {
+ sIn = dummySpan{tIn}
+ }
+ sNotIn, ok := tNotIn.(transform.SpanningTransformer)
+ if !ok {
+ sNotIn = dummySpan{tNotIn}
+ }
+
+ a := &cond{
+ tIn: sIn,
+ tNotIn: sNotIn,
+ f: s.Contains,
+ }
+ a.Reset()
+ return Transformer{a}
+}
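+
+// Editor's note (usage sketch): If splits the input into runs inside and
+// outside the set and feeds each run to the corresponding transformer. For
+// example, upper-casing only Latin letters and leaving everything else alone:
+//
+//    t := runes.If(runes.In(unicode.Latin), runes.Map(unicode.ToUpper), nil)
+//    fmt.Println(t.String("abc αβγ 123")) // ABC αβγ 123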
+
+type dummySpan struct{ transform.Transformer }
+
+func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
+ return 0, transform.ErrEndOfSpan
+}
+
+type cond struct {
+ tIn, tNotIn transform.SpanningTransformer
+ f func(rune) bool
+ check func(rune) bool // current check to perform
+ t transform.SpanningTransformer // current transformer to use
+}
+
+// Reset implements transform.Transformer.
+func (t *cond) Reset() {
+ t.check = t.is
+ t.t = t.tIn
+ t.t.Reset() // notIn will be reset on first usage.
+}
+
+func (t *cond) is(r rune) bool {
+ if t.f(r) {
+ return true
+ }
+ t.check = t.isNot
+ t.t = t.tNotIn
+ t.tNotIn.Reset()
+ return false
+}
+
+func (t *cond) isNot(r rune) bool {
+ if !t.f(r) {
+ return true
+ }
+ t.check = t.is
+ t.t = t.tIn
+ t.tIn.Reset()
+ return false
+}
+
+// This implementation of Span doesn't help all too much, but it needs to be
+// there to satisfy this package's Transformer interface.
+// TODO: there is certainly room for improvement, though. For example, if
+// t.t == transform.Nop (which will be a common occurrence) it would save a
+// bundle to special-case that loop.
+func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
+ p := 0
+ for n < len(src) && err == nil {
+ // Don't process too much at a time as the Spanner that will be
+ // called on this block may terminate early.
+ const maxChunk = 4096
+ max := len(src)
+ if v := n + maxChunk; v < max {
+ max = v
+ }
+ atEnd := false
+ size := 0
+ current := t.t
+ for ; p < max; p += size {
+ r := rune(src[p])
+ if r < utf8.RuneSelf {
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
+ if !atEOF && !utf8.FullRune(src[p:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+ }
+ if !t.check(r) {
+ // The next rune will be the start of a new run.
+ atEnd = true
+ break
+ }
+ }
+ n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
+ n += n2
+ if err2 != nil {
+ return n, err2
+ }
+ // At this point either err != nil or t.check will pass for the rune at p.
+ p = n + size
+ }
+ return n, err
+}
+
+func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ p := 0
+ for nSrc < len(src) && err == nil {
+ // Don't process too much at a time, as the work might be wasted if the
+ // destination buffer isn't large enough to hold the result or a
+ // transform returns an error early.
+ const maxChunk = 4096
+ max := len(src)
+ if n := nSrc + maxChunk; n < len(src) {
+ max = n
+ }
+ atEnd := false
+ size := 0
+ current := t.t
+ for ; p < max; p += size {
+ r := rune(src[p])
+ if r < utf8.RuneSelf {
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
+ if !atEOF && !utf8.FullRune(src[p:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+ }
+ if !t.check(r) {
+ // The next rune will be the start of a new run.
+ atEnd = true
+ break
+ }
+ }
+ nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src)))
+ nDst += nDst2
+ nSrc += nSrc2
+ if err2 != nil {
+ return nDst, nSrc, err2
+ }
+ // At this point either err != nil or t.check will pass for the rune at p.
+ p = nSrc + size
+ }
+ return nDst, nSrc, err
+}
diff --git a/vendor/golang.org/x/text/runes/runes.go b/vendor/golang.org/x/text/runes/runes.go
new file mode 100644
index 0000000..930e87f
--- /dev/null
+++ b/vendor/golang.org/x/text/runes/runes.go
@@ -0,0 +1,355 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package runes provides transforms for UTF-8 encoded text.
+package runes // import "golang.org/x/text/runes"
+
+import (
+ "unicode"
+ "unicode/utf8"
+
+ "golang.org/x/text/transform"
+)
+
+// A Set is a collection of runes.
+type Set interface {
+ // Contains returns true if r is contained in the set.
+ Contains(r rune) bool
+}
+
+type setFunc func(rune) bool
+
+func (s setFunc) Contains(r rune) bool {
+ return s(r)
+}
+
+// Note: using funcs here instead of wrapping types results in cleaner
+// documentation and a smaller API.
+
+// In creates a Set with a Contains method that returns true for all runes in
+// the given RangeTable.
+func In(rt *unicode.RangeTable) Set {
+ return setFunc(func(r rune) bool { return unicode.Is(rt, r) })
+}
+
+// NotIn creates a Set with a Contains method that returns true for all runes not
+// in the given RangeTable.
+func NotIn(rt *unicode.RangeTable) Set {
+ return setFunc(func(r rune) bool { return !unicode.Is(rt, r) })
+}
+
+// Predicate creates a Set with a Contains method that returns f(r).
+func Predicate(f func(rune) bool) Set {
+ return setFunc(f)
+}
+
+// Transformer implements the transform.Transformer interface.
+type Transformer struct {
+ t transform.SpanningTransformer
+}
+
+func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ return t.t.Transform(dst, src, atEOF)
+}
+
+func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
+ return t.t.Span(b, atEOF)
+}
+
+func (t Transformer) Reset() { t.t.Reset() }
+
+// Bytes returns a new byte slice with the result of converting b using t. It
+// calls Reset on t. It returns nil if any error was found. This can only happen
+// if an error-producing Transformer is passed to If.
+func (t Transformer) Bytes(b []byte) []byte {
+ b, _, err := transform.Bytes(t, b)
+ if err != nil {
+ return nil
+ }
+ return b
+}
+
+// String returns a string with the result of converting s using t. It calls
+// Reset on t. It returns the empty string if any error was found. This can only
+// happen if an error-producing Transformer is passed to If.
+func (t Transformer) String(s string) string {
+ s, _, err := transform.String(t, s)
+ if err != nil {
+ return ""
+ }
+ return s
+}
+
+// TODO:
+// - Copy: copying strings and bytes in whole-rune units.
+// - Validation (maybe)
+// - Well-formed-ness (maybe)
+
+const runeErrorString = string(utf8.RuneError)
+
+// Remove returns a Transformer that removes runes r for which s.Contains(r).
+// Illegal input bytes are replaced by RuneError before being passed to s.Contains.
+func Remove(s Set) Transformer {
+ if f, ok := s.(setFunc); ok {
+ // This little trick cuts the running time of BenchmarkRemove for sets
+ // created by Predicate roughly in half.
+ // TODO: special-case RangeTables as well.
+ return Transformer{remove(f)}
+ }
+ return Transformer{remove(s.Contains)}
+}
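+
+// Editor's note (usage sketch): Remove drops every rune contained in the set,
+// for example stripping all whitespace from a string:
+//
+//    t := runes.Remove(runes.Predicate(unicode.IsSpace))
+//    fmt.Println(t.String(" a b\tc\n")) // abc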
+
+// TODO: remove transform.RemoveFunc.
+
+type remove func(r rune) bool
+
+func (remove) Reset() {}
+
+// Span implements transform.Spanner.
+func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
+ for r, size := rune(0), 0; n < len(src); {
+ if r = rune(src[n]); r < utf8.RuneSelf {
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
+ // Invalid rune.
+ if !atEOF && !utf8.FullRune(src[n:]) {
+ err = transform.ErrShortSrc
+ } else {
+ err = transform.ErrEndOfSpan
+ }
+ break
+ }
+ if t(r) {
+ err = transform.ErrEndOfSpan
+ break
+ }
+ n += size
+ }
+ return
+}
+
+// Transform implements transform.Transformer.
+func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ for r, size := rune(0), 0; nSrc < len(src); {
+ if r = rune(src[nSrc]); r < utf8.RuneSelf {
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
+ // Invalid rune.
+ if !atEOF && !utf8.FullRune(src[nSrc:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+ // We replace illegal bytes with RuneError. Not doing so might
+ // otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
+ // The resulting byte sequence may subsequently contain runes
+ // for which t(r) is true that were passed unnoticed.
+ if !t(utf8.RuneError) {
+ if nDst+3 > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst+0] = runeErrorString[0]
+ dst[nDst+1] = runeErrorString[1]
+ dst[nDst+2] = runeErrorString[2]
+ nDst += 3
+ }
+ nSrc++
+ continue
+ }
+ if t(r) {
+ nSrc += size
+ continue
+ }
+ if nDst+size > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ for i := 0; i < size; i++ {
+ dst[nDst] = src[nSrc]
+ nDst++
+ nSrc++
+ }
+ }
+ return
+}
+
+// Map returns a Transformer that maps the runes in the input using the given
+// mapping. Illegal bytes in the input are converted to utf8.RuneError before
+// being passed to the mapping func.
+func Map(mapping func(rune) rune) Transformer {
+ return Transformer{mapper(mapping)}
+}
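+
+// Editor's note (usage sketch): Map rewrites runes one at a time, for example
+// replacing every rune outside printable ASCII with a question mark:
+//
+//    t := runes.Map(func(r rune) rune {
+//        if r < ' ' || r > '~' {
+//            return '?'
+//        }
+//        return r
+//    })
+//    fmt.Println(t.String("héllo")) // h?llo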
+
+type mapper func(rune) rune
+
+func (mapper) Reset() {}
+
+// Span implements transform.Spanner.
+func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
+ for r, size := rune(0), 0; n < len(src); n += size {
+ if r = rune(src[n]); r < utf8.RuneSelf {
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
+ // Invalid rune.
+ if !atEOF && !utf8.FullRune(src[n:]) {
+ err = transform.ErrShortSrc
+ } else {
+ err = transform.ErrEndOfSpan
+ }
+ break
+ }
+ if t(r) != r {
+ err = transform.ErrEndOfSpan
+ break
+ }
+ }
+ return n, err
+}
+
+// Transform implements transform.Transformer.
+func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ var replacement rune
+ var b [utf8.UTFMax]byte
+
+ for r, size := rune(0), 0; nSrc < len(src); {
+ if r = rune(src[nSrc]); r < utf8.RuneSelf {
+ if replacement = t(r); replacement < utf8.RuneSelf {
+ if nDst == len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst] = byte(replacement)
+ nDst++
+ nSrc++
+ continue
+ }
+ size = 1
+ } else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
+ // Invalid rune.
+ if !atEOF && !utf8.FullRune(src[nSrc:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+
+ if replacement = t(utf8.RuneError); replacement == utf8.RuneError {
+ if nDst+3 > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst+0] = runeErrorString[0]
+ dst[nDst+1] = runeErrorString[1]
+ dst[nDst+2] = runeErrorString[2]
+ nDst += 3
+ nSrc++
+ continue
+ }
+ } else if replacement = t(r); replacement == r {
+ if nDst+size > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ for i := 0; i < size; i++ {
+ dst[nDst] = src[nSrc]
+ nDst++
+ nSrc++
+ }
+ continue
+ }
+
+ n := utf8.EncodeRune(b[:], replacement)
+
+ if nDst+n > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ for i := 0; i < n; i++ {
+ dst[nDst] = b[i]
+ nDst++
+ }
+ nSrc += size
+ }
+ return
+}
+
+// ReplaceIllFormed returns a transformer that replaces all input bytes that are
+// not part of a well-formed UTF-8 code sequence with utf8.RuneError.
+func ReplaceIllFormed() Transformer {
+ return Transformer{&replaceIllFormed{}}
+}
+
+type replaceIllFormed struct{ transform.NopResetter }
+
+func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) {
+ for n < len(src) {
+ // ASCII fast path.
+ if src[n] < utf8.RuneSelf {
+ n++
+ continue
+ }
+
+ r, size := utf8.DecodeRune(src[n:])
+
+ // Look for a valid non-ASCII rune.
+ if r != utf8.RuneError || size != 1 {
+ n += size
+ continue
+ }
+
+ // Look for short source data.
+ if !atEOF && !utf8.FullRune(src[n:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+
+ // We have an invalid rune.
+ err = transform.ErrEndOfSpan
+ break
+ }
+ return n, err
+}
+
+func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
+ for nSrc < len(src) {
+ // ASCII fast path.
+ if r := src[nSrc]; r < utf8.RuneSelf {
+ if nDst == len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst] = r
+ nDst++
+ nSrc++
+ continue
+ }
+
+ // Look for a valid non-ASCII rune.
+ if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 {
+ if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
+ err = transform.ErrShortDst
+ break
+ }
+ nDst += size
+ nSrc += size
+ continue
+ }
+
+ // Look for short source data.
+ if !atEOF && !utf8.FullRune(src[nSrc:]) {
+ err = transform.ErrShortSrc
+ break
+ }
+
+ // We have an invalid rune.
+ if nDst+3 > len(dst) {
+ err = transform.ErrShortDst
+ break
+ }
+ dst[nDst+0] = runeErrorString[0]
+ dst[nDst+1] = runeErrorString[1]
+ dst[nDst+2] = runeErrorString[2]
+ nDst += 3
+ nSrc++
+ }
+ return nDst, nSrc, err
+}
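+
+// Editor's note (usage sketch): ReplaceIllFormed leaves valid UTF-8 intact and
+// replaces each ill-formed byte with U+FFFD:
+//
+//    t := runes.ReplaceIllFormed()
+//    fmt.Println(t.String("ok\xffbad") == "ok\uFFFDbad") // true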
diff --git a/vendor/golang.org/x/text/unicode/cldr/base.go b/vendor/golang.org/x/text/unicode/cldr/base.go
new file mode 100644
index 0000000..b71420c
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/base.go
@@ -0,0 +1,105 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cldr
+
+import (
+ "encoding/xml"
+ "regexp"
+ "strconv"
+)
+
+// Elem is implemented by every XML element.
+type Elem interface {
+ setEnclosing(Elem)
+ setName(string)
+ enclosing() Elem
+
+ GetCommon() *Common
+}
+
+type hidden struct {
+ CharData string `xml:",chardata"`
+ Alias *struct {
+ Common
+ Source string `xml:"source,attr"`
+ Path string `xml:"path,attr"`
+ } `xml:"alias"`
+ Def *struct {
+ Common
+ Choice string `xml:"choice,attr,omitempty"`
+ Type string `xml:"type,attr,omitempty"`
+ } `xml:"default"`
+}
+
+// Common holds several of the most common attributes and sub elements
+// of an XML element.
+type Common struct {
+ XMLName xml.Name
+ name string
+ enclElem Elem
+ Type string `xml:"type,attr,omitempty"`
+ Reference string `xml:"reference,attr,omitempty"`
+ Alt string `xml:"alt,attr,omitempty"`
+ ValidSubLocales string `xml:"validSubLocales,attr,omitempty"`
+ Draft string `xml:"draft,attr,omitempty"`
+ hidden
+}
+
+// Default returns the default type to select from the enclosed list
+// or "" if no default value is specified.
+func (e *Common) Default() string {
+ if e.Def == nil {
+ return ""
+ }
+ if e.Def.Choice != "" {
+ return e.Def.Choice
+ } else if e.Def.Type != "" {
+ // Type is still used by the default element in collation.
+ return e.Def.Type
+ }
+ return ""
+}
+
+// Element returns the XML element name.
+func (e *Common) Element() string {
+ return e.name
+}
+
+// GetCommon returns e. It is provided such that Common implements Elem.
+func (e *Common) GetCommon() *Common {
+ return e
+}
+
+// Data returns the character data accumulated for this element.
+func (e *Common) Data() string {
+ e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode)
+ return e.CharData
+}
+
+func (e *Common) setName(s string) {
+ e.name = s
+}
+
+func (e *Common) enclosing() Elem {
+ return e.enclElem
+}
+
+func (e *Common) setEnclosing(en Elem) {
+ e.enclElem = en
+}
+
+// charRe matches the escape notations (XML character references and backslash
+// escapes) that replaceUnicode below converts to literal runes.
+var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`)
+
+// replaceUnicode converts hexadecimal Unicode codepoint notations to a one-rune string.
+// It assumes the input string is correctly formatted.
+func replaceUnicode(s string) string {
+ if s[1] == '#' {
+ r, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
+ return string(rune(r))
+ }
+ r, _, _, _ := strconv.UnquoteChar(s, 0)
+ return string(r)
+}
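+
+// Editor's note (sketch): charRe and replaceUnicode together turn the escape
+// notations used in CLDR data into literal runes, for example:
+//
+//    replaceUnicode("&#x61;") // "a"
+//    replaceUnicode(`\u00E9`) // "é"
+//    replaceUnicode(`\n`)     // "\n"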
diff --git a/vendor/golang.org/x/text/unicode/cldr/cldr.go b/vendor/golang.org/x/text/unicode/cldr/cldr.go
new file mode 100644
index 0000000..f39b2e3
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/cldr.go
@@ -0,0 +1,137 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run makexml.go -output xml.go
+
+// Package cldr provides a parser for LDML and related XML formats.
+//
+// This package is intended to be used by the table generation tools for the
+// various packages in x/text and is not internal for historical reasons.
+//
+// As the XML types are generated from the CLDR DTD, and as the CLDR standard is
+// periodically amended, this package may change considerably over time. This
+// mostly means that data may appear and disappear between versions. That is,
+// old code should keep compiling for newer versions, but data may have moved or
+// changed. CLDR version 22 is the first version supported by this package.
+// Older versions may not work.
+package cldr // import "golang.org/x/text/unicode/cldr"
+
+import (
+ "fmt"
+ "sort"
+)
+
+// CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
+type CLDR struct {
+ parent map[string][]string
+ locale map[string]*LDML
+ resolved map[string]*LDML
+ bcp47 *LDMLBCP47
+ supp *SupplementalData
+}
+
+func makeCLDR() *CLDR {
+ return &CLDR{
+ parent: make(map[string][]string),
+ locale: make(map[string]*LDML),
+ resolved: make(map[string]*LDML),
+ bcp47: &LDMLBCP47{},
+ supp: &SupplementalData{},
+ }
+}
+
+// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned.
+func (cldr *CLDR) BCP47() *LDMLBCP47 {
+ return nil
+}
+
+// Draft indicates the draft level of an element.
+type Draft int
+
+const (
+ Approved Draft = iota
+ Contributed
+ Provisional
+ Unconfirmed
+)
+
+var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""}
+
+// ParseDraft returns the Draft value corresponding to the given string. The
+// empty string corresponds to Approved.
+func ParseDraft(level string) (Draft, error) {
+ if level == "" {
+ return Approved, nil
+ }
+ for i, s := range drafts {
+ if level == s {
+ return Unconfirmed - Draft(i), nil
+ }
+ }
+ return Approved, fmt.Errorf("cldr: unknown draft level %q", level)
+}
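+
+// Editor's note (sketch): draft levels are ordered from most to least approved,
+// so they can be compared directly:
+//
+//    d, _ := cldr.ParseDraft("contributed") // d == cldr.Contributed
+//    fmt.Println(d <= cldr.Provisional)     // true: at least as approved as provisional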
+
+func (d Draft) String() string {
+ return drafts[len(drafts)-1-int(d)]
+}
+
+// SetDraftLevel sets which draft levels to include in the evaluated LDML.
+// Any draft element for which the draft level is higher than lev will be excluded.
+// If multiple draft levels are available for a single element, the one with the
+// lowest draft level will be selected, unless preferDraft is true, in which case
+// the highest draft will be chosen.
+// It is assumed that the underlying LDML is canonicalized.
+func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) {
+ // TODO: implement
+ cldr.resolved = make(map[string]*LDML)
+}
+
+// RawLDML returns the LDML XML for id in unresolved form.
+// id must be one of the strings returned by Locales.
+func (cldr *CLDR) RawLDML(loc string) *LDML {
+ return cldr.locale[loc]
+}
+
+// LDML returns the fully resolved LDML XML for loc, which must be one of
+// the strings returned by Locales.
+//
+// Deprecated: Use RawLDML and implement inheritance manually or using the
+// internal cldrtree package.
+// Inheritance has changed quite a bit since the onset of this package and in
+// practice data is often represented in a way where knowledge of how it was
+// inherited is relevant.
+func (cldr *CLDR) LDML(loc string) (*LDML, error) {
+ return cldr.resolve(loc)
+}
+
+// Supplemental returns the parsed supplemental data. If no such data was parsed,
+// nil is returned.
+func (cldr *CLDR) Supplemental() *SupplementalData {
+ return cldr.supp
+}
+
+// Locales returns the locales for which there exist files.
+// Valid sublocales for which there is no file are not included.
+// The root locale is always sorted first.
+func (cldr *CLDR) Locales() []string {
+ loc := []string{"root"}
+ hasRoot := false
+ for l := range cldr.locale {
+ if l == "root" {
+ hasRoot = true
+ continue
+ }
+ loc = append(loc, l)
+ }
+ sort.Strings(loc[1:])
+ if !hasRoot {
+ return loc[1:]
+ }
+ return loc
+}
+
+// Get fills in the fields of x based on the XPath path.
+func Get(e Elem, path string) (res Elem, err error) {
+ return walkXPath(e, path)
+}
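+
+// Editor's note (usage sketch): Get navigates the generated XML types using the
+// restricted XPath-like syntax implemented by walkXPath in resolve.go. The path
+// below is hypothetical and depends on the data present in the locale:
+//
+//    ldml := data.RawLDML("en") // data is a *CLDR returned by a Decoder
+//    elem, err := cldr.Get(ldml, "characters/exemplarCharacters[@type='auxiliary']")
+//    if err == nil {
+//        fmt.Println(elem.GetCommon().Data())
+//    }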
diff --git a/vendor/golang.org/x/text/unicode/cldr/collate.go b/vendor/golang.org/x/text/unicode/cldr/collate.go
new file mode 100644
index 0000000..5794ae4
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/collate.go
@@ -0,0 +1,363 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cldr
+
+import (
+ "bufio"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+)
+
+// RuleProcessor can be passed to Collation's Process method, which
+// parses the rules and calls the respective method for each rule found.
+type RuleProcessor interface {
+ Reset(anchor string, before int) error
+ Insert(level int, str, context, extend string) error
+ Index(id string)
+}
+
+const (
+ // cldrIndex is a Unicode-reserved sentinel value used to mark the start
+ // of a grouping within an index.
+ // We ignore any rule that starts with this rune.
+ // See https://unicode.org/reports/tr35/#Collation_Elements for details.
+ cldrIndex = "\uFDD0"
+
+ // specialAnchor is the format in which to represent logical reset positions,
+ // such as "first tertiary ignorable".
+ specialAnchor = "<%s/>"
+)
+
+// Process parses the rules for the tailorings of this collation
+// and calls the respective methods of p for each rule found.
+func (c Collation) Process(p RuleProcessor) (err error) {
+ if len(c.Cr) > 0 {
+ if len(c.Cr) > 1 {
+ return fmt.Errorf("multiple cr elements, want 0 or 1")
+ }
+ return processRules(p, c.Cr[0].Data())
+ }
+ if c.Rules.Any != nil {
+ return c.processXML(p)
+ }
+ return errors.New("no tailoring data")
+}
+
+// processRules parses rules in the Collation Rule Syntax defined in
+// https://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
+func processRules(p RuleProcessor, s string) (err error) {
+ chk := func(s string, e error) string {
+ if err == nil {
+ err = e
+ }
+ return s
+ }
+ i := 0 // Save the line number for use after the loop.
+ scanner := bufio.NewScanner(strings.NewReader(s))
+ for ; scanner.Scan() && err == nil; i++ {
+ for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
+ level := 5
+ var ch byte
+ switch ch, s = s[0], s[1:]; ch {
+ case '&': // followed by <anchor> or '[' <key> ']'
+ if s = skipSpace(s); consume(&s, '[') {
+ s = chk(parseSpecialAnchor(p, s))
+ } else {
+ s = chk(parseAnchor(p, 0, s))
+ }
+ case '<': // sort relation '<'{1,4}, optionally followed by '*'.
+ for level = 1; consume(&s, '<'); level++ {
+ }
+ if level > 4 {
+ err = fmt.Errorf("level %d > 4", level)
+ }
+ fallthrough
+ case '=': // identity relation, optionally followed by *.
+ if consume(&s, '*') {
+ s = chk(parseSequence(p, level, s))
+ } else {
+ s = chk(parseOrder(p, level, s))
+ }
+ default:
+ chk("", fmt.Errorf("illegal operator %q", ch))
+ break
+ }
+ }
+ }
+ if chk("", scanner.Err()); err != nil {
+ return fmt.Errorf("%d: %v", i, err)
+ }
+ return nil
+}
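+
+// Editor's note (sketch): for a tailoring string such as
+//
+//    & a < b << c <<< d = e
+//
+// processRules calls p.Reset("a", 0) followed by p.Insert with level 1 for "b",
+// level 2 for "c", level 3 for "d" and level 5 for "e", corresponding to the
+// primary, secondary, tertiary and identity relations of the rule syntax. A
+// Collation value c obtained from parsed LDML drives this via c.Process(p).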
+
+// parseSpecialAnchor parses the anchor syntax which is either of the form
+//
+// ['before' <level>] <anchor>
+//
+// or
+//
+// [<label>]
+//
+// The starting '[' should already be consumed.
+func parseSpecialAnchor(p RuleProcessor, s string) (tail string, err error) {
+ i := strings.IndexByte(s, ']')
+ if i == -1 {
+ return "", errors.New("unmatched bracket")
+ }
+ a := strings.TrimSpace(s[:i])
+ s = s[i+1:]
+ if strings.HasPrefix(a, "before ") {
+ l, err := strconv.ParseUint(skipSpace(a[len("before "):]), 10, 3)
+ if err != nil {
+ return s, err
+ }
+ return parseAnchor(p, int(l), s)
+ }
+ return s, p.Reset(fmt.Sprintf(specialAnchor, a), 0)
+}
+
+func parseAnchor(p RuleProcessor, level int, s string) (tail string, err error) {
+ anchor, s, err := scanString(s)
+ if err != nil {
+ return s, err
+ }
+ return s, p.Reset(anchor, level)
+}
+
+func parseOrder(p RuleProcessor, level int, s string) (tail string, err error) {
+ var value, context, extend string
+ if value, s, err = scanString(s); err != nil {
+ return s, err
+ }
+ if strings.HasPrefix(value, cldrIndex) {
+ p.Index(value[len(cldrIndex):])
+ return
+ }
+ if consume(&s, '|') {
+ if context, s, err = scanString(s); err != nil {
+ return s, errors.New("missing string after context")
+ }
+ }
+ if consume(&s, '/') {
+ if extend, s, err = scanString(s); err != nil {
+ return s, errors.New("missing string after extension")
+ }
+ }
+ return s, p.Insert(level, value, context, extend)
+}
+
+// scanString scans a single input string.
+func scanString(s string) (str, tail string, err error) {
+ if s = skipSpace(s); s == "" {
+ return s, s, errors.New("missing string")
+ }
+ buf := [16]byte{} // small but enough to hold most cases.
+ value := buf[:0]
+ for s != "" {
+ if consume(&s, '\'') {
+ i := strings.IndexByte(s, '\'')
+ if i == -1 {
+ return "", "", errors.New(`unmatched single quote`)
+ }
+ if i == 0 {
+ value = append(value, '\'')
+ } else {
+ value = append(value, s[:i]...)
+ }
+ s = s[i+1:]
+ continue
+ }
+ r, sz := utf8.DecodeRuneInString(s)
+ if unicode.IsSpace(r) || strings.ContainsRune("&<=#", r) {
+ break
+ }
+ value = append(value, s[:sz]...)
+ s = s[sz:]
+ }
+ return string(value), skipSpace(s), nil
+}
+
+func parseSequence(p RuleProcessor, level int, s string) (tail string, err error) {
+ if s = skipSpace(s); s == "" {
+ return s, errors.New("empty sequence")
+ }
+ last := rune(0)
+ for s != "" {
+ r, sz := utf8.DecodeRuneInString(s)
+ s = s[sz:]
+
+ if r == '-' {
+ // We have a range. The first element was already written.
+ if last == 0 {
+ return s, errors.New("range without starter value")
+ }
+ r, sz = utf8.DecodeRuneInString(s)
+ s = s[sz:]
+ if r == utf8.RuneError || r < last {
+ return s, fmt.Errorf("invalid range %q-%q", last, r)
+ }
+ for i := last + 1; i <= r; i++ {
+ if err := p.Insert(level, string(i), "", ""); err != nil {
+ return s, err
+ }
+ }
+ last = 0
+ continue
+ }
+
+ if unicode.IsSpace(r) || unicode.IsPunct(r) {
+ break
+ }
+
+ // normal case
+ if err := p.Insert(level, string(r), "", ""); err != nil {
+ return s, err
+ }
+ last = r
+ }
+ return s, nil
+}
+
+func skipSpace(s string) string {
+ return strings.TrimLeftFunc(s, unicode.IsSpace)
+}
+
+// consume returns whether the next byte is ch. If so, it gobbles it by
+// updating s.
+func consume(s *string, ch byte) (ok bool) {
+ if *s == "" || (*s)[0] != ch {
+ return false
+ }
+ *s = (*s)[1:]
+ return true
+}
+
+// The following code parses Collation rules of CLDR version 24 and before.
+
+var lmap = map[byte]int{
+ 'p': 1,
+ 's': 2,
+ 't': 3,
+ 'i': 5,
+}
+
+type rulesElem struct {
+ Rules struct {
+ Common
+ Any []*struct {
+ XMLName xml.Name
+ rule
+ } `xml:",any"`
+ } `xml:"rules"`
+}
+
+type rule struct {
+ Value string `xml:",chardata"`
+ Before string `xml:"before,attr"`
+ Any []*struct {
+ XMLName xml.Name
+ rule
+ } `xml:",any"`
+}
+
+var emptyValueError = errors.New("cldr: empty rule value")
+
+func (r *rule) value() (string, error) {
+ // Convert hexadecimal Unicode codepoint notation to a string.
+ s := charRe.ReplaceAllStringFunc(r.Value, replaceUnicode)
+ r.Value = s
+ if s == "" {
+ if len(r.Any) != 1 {
+ return "", emptyValueError
+ }
+ r.Value = fmt.Sprintf(specialAnchor, r.Any[0].XMLName.Local)
+ r.Any = nil
+ } else if len(r.Any) != 0 {
+ return "", fmt.Errorf("cldr: XML elements found in collation rule: %v", r.Any)
+ }
+ return r.Value, nil
+}
+
+func (r rule) process(p RuleProcessor, name, context, extend string) error {
+ v, err := r.value()
+ if err != nil {
+ return err
+ }
+ switch name {
+ case "p", "s", "t", "i":
+ if strings.HasPrefix(v, cldrIndex) {
+ p.Index(v[len(cldrIndex):])
+ return nil
+ }
+ if err := p.Insert(lmap[name[0]], v, context, extend); err != nil {
+ return err
+ }
+ case "pc", "sc", "tc", "ic":
+ level := lmap[name[0]]
+ for _, s := range v {
+ if err := p.Insert(level, string(s), context, extend); err != nil {
+ return err
+ }
+ }
+ default:
+ return fmt.Errorf("cldr: unsupported tag: %q", name)
+ }
+ return nil
+}
+
+// processXML parses the format of CLDR versions 24 and older.
+func (c Collation) processXML(p RuleProcessor) (err error) {
+ // Collation is generated and defined in xml.go.
+ var v string
+ for _, r := range c.Rules.Any {
+ switch r.XMLName.Local {
+ case "reset":
+ level := 0
+ switch r.Before {
+ case "primary", "1":
+ level = 1
+ case "secondary", "2":
+ level = 2
+ case "tertiary", "3":
+ level = 3
+ case "":
+ default:
+ return fmt.Errorf("cldr: unknown level %q", r.Before)
+ }
+ v, err = r.value()
+ if err == nil {
+ err = p.Reset(v, level)
+ }
+ case "x":
+ var context, extend string
+ for _, r1 := range r.Any {
+ v, err = r1.value()
+ switch r1.XMLName.Local {
+ case "context":
+ context = v
+ case "extend":
+ extend = v
+ }
+ }
+ for _, r1 := range r.Any {
+ if t := r1.XMLName.Local; t == "context" || t == "extend" {
+ continue
+ }
+ r1.rule.process(p, r1.XMLName.Local, context, extend)
+ }
+ default:
+ err = r.rule.process(p, r.XMLName.Local, "", "")
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
diff --git a/vendor/golang.org/x/text/unicode/cldr/decode.go b/vendor/golang.org/x/text/unicode/cldr/decode.go
new file mode 100644
index 0000000..7a8fb5a
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/decode.go
@@ -0,0 +1,171 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cldr
+
+import (
+ "archive/zip"
+ "bytes"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "log"
+ "os"
+ "path/filepath"
+ "regexp"
+)
+
+// A Decoder loads an archive of CLDR data.
+type Decoder struct {
+ dirFilter []string
+ sectionFilter []string
+ loader Loader
+ cldr *CLDR
+ curLocale string
+}
+
+// SetSectionFilter takes a list of top-level LDML element names to which
+// evaluation of LDML should be limited. It automatically calls SetDirFilter.
+func (d *Decoder) SetSectionFilter(filter ...string) {
+ d.sectionFilter = filter
+ // TODO: automatically set dir filter
+}
+
+// SetDirFilter limits the loading of LDML XML files to the specified directories.
+// Note that sections may be split across directories differently for different CLDR versions.
+// For more robust code, use SetSectionFilter.
+func (d *Decoder) SetDirFilter(dir ...string) {
+ d.dirFilter = dir
+}
+
+// A Loader provides access to the files of a CLDR archive.
+type Loader interface {
+ Len() int
+ Path(i int) string
+ Reader(i int) (io.ReadCloser, error)
+}
+
+var fileRe = regexp.MustCompile(`.*[/\\](.*)[/\\](.*)\.xml`)
+
+// Decode loads and decodes the files represented by l.
+func (d *Decoder) Decode(l Loader) (cldr *CLDR, err error) {
+ d.cldr = makeCLDR()
+ for i := 0; i < l.Len(); i++ {
+ fname := l.Path(i)
+ if m := fileRe.FindStringSubmatch(fname); m != nil {
+ if len(d.dirFilter) > 0 && !in(d.dirFilter, m[1]) {
+ continue
+ }
+ var r io.ReadCloser
+ if r, err = l.Reader(i); err == nil {
+ err = d.decode(m[1], m[2], r)
+ r.Close()
+ }
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+ d.cldr.finalize(d.sectionFilter)
+ return d.cldr, nil
+}
+
+func (d *Decoder) decode(dir, id string, r io.Reader) error {
+ var v interface{}
+ var l *LDML
+ cldr := d.cldr
+ switch {
+ case dir == "supplemental":
+ v = cldr.supp
+ case dir == "transforms":
+ return nil
+ case dir == "bcp47":
+ v = cldr.bcp47
+ case dir == "validity":
+ return nil
+ default:
+ ok := false
+ if v, ok = cldr.locale[id]; !ok {
+ l = &LDML{}
+ v, cldr.locale[id] = l, l
+ }
+ }
+ x := xml.NewDecoder(r)
+ if err := x.Decode(v); err != nil {
+ log.Printf("%s/%s: %v", dir, id, err)
+ return err
+ }
+ if l != nil {
+ if l.Identity == nil {
+ return fmt.Errorf("%s/%s: missing identity element", dir, id)
+ }
+ // TODO: verify when CLDR bug https://unicode.org/cldr/trac/ticket/8970
+ // is resolved.
+ // path := strings.Split(id, "_")
+ // if lang := l.Identity.Language.Type; lang != path[0] {
+ // return fmt.Errorf("%s/%s: language was %s; want %s", dir, id, lang, path[0])
+ // }
+ }
+ return nil
+}
+
+type pathLoader []string
+
+func makePathLoader(path string) (pl pathLoader, err error) {
+ err = filepath.Walk(path, func(path string, _ os.FileInfo, err error) error {
+ pl = append(pl, path)
+ return err
+ })
+ return pl, err
+}
+
+func (pl pathLoader) Len() int {
+ return len(pl)
+}
+
+func (pl pathLoader) Path(i int) string {
+ return pl[i]
+}
+
+func (pl pathLoader) Reader(i int) (io.ReadCloser, error) {
+ return os.Open(pl[i])
+}
+
+// DecodePath loads CLDR data from the given path.
+func (d *Decoder) DecodePath(path string) (cldr *CLDR, err error) {
+ loader, err := makePathLoader(path)
+ if err != nil {
+ return nil, err
+ }
+ return d.Decode(loader)
+}
+
+type zipLoader struct {
+ r *zip.Reader
+}
+
+func (zl zipLoader) Len() int {
+ return len(zl.r.File)
+}
+
+func (zl zipLoader) Path(i int) string {
+ return zl.r.File[i].Name
+}
+
+func (zl zipLoader) Reader(i int) (io.ReadCloser, error) {
+ return zl.r.File[i].Open()
+}
+
+// DecodeZip loads CLDR data from the zip archive for which r is the source.
+func (d *Decoder) DecodeZip(r io.Reader) (cldr *CLDR, err error) {
+ buffer, err := io.ReadAll(r)
+ if err != nil {
+ return nil, err
+ }
+ archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
+ if err != nil {
+ return nil, err
+ }
+ return d.Decode(zipLoader{archive})
+}
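+
+// Editor's note (usage sketch): a typical way to load CLDR data from the
+// official core.zip archive (the file path is hypothetical):
+//
+//    f, err := os.Open("core.zip")
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    defer f.Close()
+//
+//    var d cldr.Decoder
+//    d.SetDirFilter("main", "supplemental")
+//    data, err := d.DecodeZip(f)
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    fmt.Println(data.Locales())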
diff --git a/vendor/golang.org/x/text/unicode/cldr/resolve.go b/vendor/golang.org/x/text/unicode/cldr/resolve.go
new file mode 100644
index 0000000..31cc7be
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/resolve.go
@@ -0,0 +1,602 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cldr
+
+// This file implements the various inheritance constructs defined by LDML.
+// See https://www.unicode.org/reports/tr35/#Inheritance_and_Validity
+// for more details.
+
+import (
+ "fmt"
+ "log"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+// fieldIter iterates over fields in a struct. It includes
+// fields of embedded structs.
+type fieldIter struct {
+ v reflect.Value
+ index, n []int
+}
+
+func iter(v reflect.Value) fieldIter {
+ if v.Kind() != reflect.Struct {
+ log.Panicf("value %v must be a struct", v)
+ }
+ i := fieldIter{
+ v: v,
+ index: []int{0},
+ n: []int{v.NumField()},
+ }
+ i.descent()
+ return i
+}
+
+func (i *fieldIter) descent() {
+ for f := i.field(); f.Anonymous && f.Type.NumField() > 0; f = i.field() {
+ i.index = append(i.index, 0)
+ i.n = append(i.n, f.Type.NumField())
+ }
+}
+
+func (i *fieldIter) done() bool {
+ return len(i.index) == 1 && i.index[0] >= i.n[0]
+}
+
+func skip(f reflect.StructField) bool {
+ return !f.Anonymous && (f.Name[0] < 'A' || f.Name[0] > 'Z')
+}
+
+func (i *fieldIter) next() {
+ for {
+ k := len(i.index) - 1
+ i.index[k]++
+ if i.index[k] < i.n[k] {
+ if !skip(i.field()) {
+ break
+ }
+ } else {
+ if k == 0 {
+ return
+ }
+ i.index = i.index[:k]
+ i.n = i.n[:k]
+ }
+ }
+ i.descent()
+}
+
+func (i *fieldIter) value() reflect.Value {
+ return i.v.FieldByIndex(i.index)
+}
+
+func (i *fieldIter) field() reflect.StructField {
+ return i.v.Type().FieldByIndex(i.index)
+}
+
+type visitor func(v reflect.Value) error
+
+var stopDescent = fmt.Errorf("do not recurse")
+
+func (f visitor) visit(x interface{}) error {
+ return f.visitRec(reflect.ValueOf(x))
+}
+
+// visitRec recursively calls f on all nodes in v.
+func (f visitor) visitRec(v reflect.Value) error {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return nil
+ }
+ return f.visitRec(v.Elem())
+ }
+ if err := f(v); err != nil {
+ if err == stopDescent {
+ return nil
+ }
+ return err
+ }
+ switch v.Kind() {
+ case reflect.Struct:
+ for i := iter(v); !i.done(); i.next() {
+ if err := f.visitRec(i.value()); err != nil {
+ return err
+ }
+ }
+ case reflect.Slice:
+ for i := 0; i < v.Len(); i++ {
+ if err := f.visitRec(v.Index(i)); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// getPath is used for error reporting purposes only.
+func getPath(e Elem) string {
+ if e == nil {
+ return "<nil>"
+ }
+ if e.enclosing() == nil {
+ return e.GetCommon().name
+ }
+ if e.GetCommon().Type == "" {
+ return fmt.Sprintf("%s.%s", getPath(e.enclosing()), e.GetCommon().name)
+ }
+ return fmt.Sprintf("%s.%s[type=%s]", getPath(e.enclosing()), e.GetCommon().name, e.GetCommon().Type)
+}
+
+// xmlName returns the XML name of the struct field and reports whether the
+// field represents an attribute.
+func xmlName(f reflect.StructField) (name string, attr bool) {
+ tags := strings.Split(f.Tag.Get("xml"), ",")
+ for _, s := range tags {
+ attr = attr || s == "attr"
+ }
+ return tags[0], attr
+}
+
+func findField(v reflect.Value, key string) (reflect.Value, error) {
+ v = reflect.Indirect(v)
+ for i := iter(v); !i.done(); i.next() {
+ if n, _ := xmlName(i.field()); n == key {
+ return i.value(), nil
+ }
+ }
+ return reflect.Value{}, fmt.Errorf("cldr: no field %q in element %#v", key, v.Interface())
+}
+
+var xpathPart = regexp.MustCompile(`(\pL+)(?:\[@(\pL+)='([\w-]+)'\])?`)
+
+func walkXPath(e Elem, path string) (res Elem, err error) {
+ for _, c := range strings.Split(path, "/") {
+ if c == ".." {
+ if e = e.enclosing(); e == nil {
+ panic("path ..")
+ return nil, fmt.Errorf(`cldr: ".." moves past root in path %q`, path)
+ }
+ continue
+ } else if c == "" {
+ continue
+ }
+ m := xpathPart.FindStringSubmatch(c)
+ if len(m) == 0 || len(m[0]) != len(c) {
+ return nil, fmt.Errorf("cldr: syntax error in path component %q", c)
+ }
+ v, err := findField(reflect.ValueOf(e), m[1])
+ if err != nil {
+ return nil, err
+ }
+ switch v.Kind() {
+ case reflect.Slice:
+ i := 0
+ if m[2] != "" || v.Len() > 1 {
+ if m[2] == "" {
+ m[2] = "type"
+ if m[3] = e.GetCommon().Default(); m[3] == "" {
+ return nil, fmt.Errorf("cldr: type selector or default value needed for element %s", m[1])
+ }
+ }
+ for ; i < v.Len(); i++ {
+ vi := v.Index(i)
+ key, err := findField(vi.Elem(), m[2])
+ if err != nil {
+ return nil, err
+ }
+ key = reflect.Indirect(key)
+ if key.Kind() == reflect.String && key.String() == m[3] {
+ break
+ }
+ }
+ }
+ if i == v.Len() || v.Index(i).IsNil() {
+ return nil, fmt.Errorf("no %s found with %s==%s", m[1], m[2], m[3])
+ }
+ e = v.Index(i).Interface().(Elem)
+ case reflect.Ptr:
+ if v.IsNil() {
+ return nil, fmt.Errorf("cldr: element %q not found within element %q", m[1], e.GetCommon().name)
+ }
+ var ok bool
+ if e, ok = v.Interface().(Elem); !ok {
+ return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
+ } else if m[2] != "" || m[3] != "" {
+ return nil, fmt.Errorf("cldr: no type selector allowed for element %s", m[1])
+ }
+ default:
+ return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
+ }
+ }
+ return e, nil
+}
+
+const absPrefix = "//ldml/"
+
+func (cldr *CLDR) resolveAlias(e Elem, src, path string) (res Elem, err error) {
+ if src != "locale" {
+ if !strings.HasPrefix(path, absPrefix) {
+ return nil, fmt.Errorf("cldr: expected absolute path, found %q", path)
+ }
+ path = path[len(absPrefix):]
+ if e, err = cldr.resolve(src); err != nil {
+ return nil, err
+ }
+ }
+ return walkXPath(e, path)
+}
+
+func (cldr *CLDR) resolveAndMergeAlias(e Elem) error {
+ alias := e.GetCommon().Alias
+ if alias == nil {
+ return nil
+ }
+ a, err := cldr.resolveAlias(e, alias.Source, alias.Path)
+ if err != nil {
+ return fmt.Errorf("%v: error evaluating path %q: %v", getPath(e), alias.Path, err)
+ }
+ // Ensure alias node was already evaluated. TODO: avoid double evaluation.
+ err = cldr.resolveAndMergeAlias(a)
+ v := reflect.ValueOf(e).Elem()
+ for i := iter(reflect.ValueOf(a).Elem()); !i.done(); i.next() {
+ if vv := i.value(); vv.Kind() != reflect.Ptr || !vv.IsNil() {
+ if _, attr := xmlName(i.field()); !attr {
+ v.FieldByIndex(i.index).Set(vv)
+ }
+ }
+ }
+ return err
+}
+
+func (cldr *CLDR) aliasResolver() visitor {
+ return func(v reflect.Value) (err error) {
+ if e, ok := v.Addr().Interface().(Elem); ok {
+ err = cldr.resolveAndMergeAlias(e)
+ if err == nil && blocking[e.GetCommon().name] {
+ return stopDescent
+ }
+ }
+ return err
+ }
+}
+
+// elements within blocking elements do not inherit.
+// Taken from CLDR's supplementalMetaData.xml.
+var blocking = map[string]bool{
+ "identity": true,
+ "supplementalData": true,
+ "cldrTest": true,
+ "collation": true,
+ "transform": true,
+}
+
+// Distinguishing attributes affect inheritance; two elements with different
+// distinguishing attributes are treated as different for purposes of inheritance,
+// except when such attributes occur in the indicated elements.
+// Taken from CLDR's supplementalMetaData.xml.
+var distinguishing = map[string][]string{
+ "key": nil,
+ "request_id": nil,
+ "id": nil,
+ "registry": nil,
+ "alt": nil,
+ "iso4217": nil,
+ "iso3166": nil,
+ "mzone": nil,
+ "from": nil,
+ "to": nil,
+ "type": []string{
+ "abbreviationFallback",
+ "default",
+ "mapping",
+ "measurementSystem",
+ "preferenceOrdering",
+ },
+ "numberSystem": nil,
+}
+
+func in(set []string, s string) bool {
+ for _, v := range set {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
+
+// attrKey computes a key based on the distinguishing attributes of
+// an element and their values.
+func attrKey(v reflect.Value, exclude ...string) string {
+ parts := []string{}
+ ename := v.Interface().(Elem).GetCommon().name
+ v = v.Elem()
+ for i := iter(v); !i.done(); i.next() {
+ if name, attr := xmlName(i.field()); attr {
+ if except, ok := distinguishing[name]; ok && !in(exclude, name) && !in(except, ename) {
+ v := i.value()
+ if v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+ if v.IsValid() {
+ parts = append(parts, fmt.Sprintf("%s=%s", name, v.String()))
+ }
+ }
+ }
+ }
+ sort.Strings(parts)
+ return strings.Join(parts, ";")
+}
+
+// Key returns a key for e derived from all distinguishing attributes
+// except those specified by exclude.
+func Key(e Elem, exclude ...string) string {
+ return attrKey(reflect.ValueOf(e), exclude...)
+}
+
+// linkEnclosing sets the enclosing element as well as the name
+// for all sub-elements of child, recursively.
+func linkEnclosing(parent, child Elem) {
+ child.setEnclosing(parent)
+ v := reflect.ValueOf(child).Elem()
+ for i := iter(v); !i.done(); i.next() {
+ vf := i.value()
+ if vf.Kind() == reflect.Slice {
+ for j := 0; j < vf.Len(); j++ {
+ linkEnclosing(child, vf.Index(j).Interface().(Elem))
+ }
+ } else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
+ linkEnclosing(child, vf.Interface().(Elem))
+ }
+ }
+}
+
+func setNames(e Elem, name string) {
+ e.setName(name)
+ v := reflect.ValueOf(e).Elem()
+ for i := iter(v); !i.done(); i.next() {
+ vf := i.value()
+ name, _ = xmlName(i.field())
+ if vf.Kind() == reflect.Slice {
+ for j := 0; j < vf.Len(); j++ {
+ setNames(vf.Index(j).Interface().(Elem), name)
+ }
+ } else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
+ setNames(vf.Interface().(Elem), name)
+ }
+ }
+}
+
+// deepCopy copies elements of v recursively. All elements of v that may
+// be modified by inheritance are explicitly copied.
+func deepCopy(v reflect.Value) reflect.Value {
+ switch v.Kind() {
+ case reflect.Ptr:
+ if v.IsNil() || v.Elem().Kind() != reflect.Struct {
+ return v
+ }
+ nv := reflect.New(v.Elem().Type())
+ nv.Elem().Set(v.Elem())
+ deepCopyRec(nv.Elem(), v.Elem())
+ return nv
+ case reflect.Slice:
+ nv := reflect.MakeSlice(v.Type(), v.Len(), v.Len())
+ for i := 0; i < v.Len(); i++ {
+ deepCopyRec(nv.Index(i), v.Index(i))
+ }
+ return nv
+ }
+ panic("deepCopy: must be called with pointer or slice")
+}
+
+// deepCopyRec is only called by deepCopy.
+func deepCopyRec(nv, v reflect.Value) {
+ if v.Kind() == reflect.Struct {
+ t := v.Type()
+ for i := 0; i < v.NumField(); i++ {
+ if name, attr := xmlName(t.Field(i)); name != "" && !attr {
+ deepCopyRec(nv.Field(i), v.Field(i))
+ }
+ }
+ } else {
+ nv.Set(deepCopy(v))
+ }
+}
+
+// newNode is used to insert a missing node during inheritance.
+func (cldr *CLDR) newNode(v, enc reflect.Value) reflect.Value {
+ n := reflect.New(v.Type())
+ for i := iter(v); !i.done(); i.next() {
+ if name, attr := xmlName(i.field()); name == "" || attr {
+ n.Elem().FieldByIndex(i.index).Set(i.value())
+ }
+ }
+ n.Interface().(Elem).GetCommon().setEnclosing(enc.Addr().Interface().(Elem))
+ return n
+}
+
+// v and parent must be pointers to structs.
+func (cldr *CLDR) inheritFields(v, parent reflect.Value) (res reflect.Value, err error) {
+ t := v.Type()
+ nv := reflect.New(t)
+ nv.Elem().Set(v)
+ for i := iter(v); !i.done(); i.next() {
+ vf := i.value()
+ f := i.field()
+ name, attr := xmlName(f)
+ if name == "" || attr {
+ continue
+ }
+ pf := parent.FieldByIndex(i.index)
+ if blocking[name] {
+ if vf.IsNil() {
+ vf = pf
+ }
+ nv.Elem().FieldByIndex(i.index).Set(deepCopy(vf))
+ continue
+ }
+ switch f.Type.Kind() {
+ case reflect.Ptr:
+ if f.Type.Elem().Kind() == reflect.Struct {
+ if !vf.IsNil() {
+ if vf, err = cldr.inheritStructPtr(vf, pf); err != nil {
+ return reflect.Value{}, err
+ }
+ vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
+ nv.Elem().FieldByIndex(i.index).Set(vf)
+ } else if !pf.IsNil() {
+ n := cldr.newNode(pf.Elem(), v)
+ if vf, err = cldr.inheritStructPtr(n, pf); err != nil {
+ return reflect.Value{}, err
+ }
+ vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
+ nv.Elem().FieldByIndex(i.index).Set(vf)
+ }
+ }
+ case reflect.Slice:
+ vf, err := cldr.inheritSlice(nv.Elem(), vf, pf)
+ if err != nil {
+ return reflect.Zero(t), err
+ }
+ nv.Elem().FieldByIndex(i.index).Set(vf)
+ }
+ }
+ return nv, nil
+}
+
+func root(e Elem) *LDML {
+ for ; e.enclosing() != nil; e = e.enclosing() {
+ }
+ return e.(*LDML)
+}
+
+// inheritStructPtr first merges any aliases into v and then inherits
+// any underspecified elements from parent.
+func (cldr *CLDR) inheritStructPtr(v, parent reflect.Value) (r reflect.Value, err error) {
+ if !v.IsNil() {
+ e := v.Interface().(Elem).GetCommon()
+ alias := e.Alias
+ if alias == nil && !parent.IsNil() {
+ alias = parent.Interface().(Elem).GetCommon().Alias
+ }
+ if alias != nil {
+ a, err := cldr.resolveAlias(v.Interface().(Elem), alias.Source, alias.Path)
+ if a != nil {
+ if v, err = cldr.inheritFields(v.Elem(), reflect.ValueOf(a).Elem()); err != nil {
+ return reflect.Value{}, err
+ }
+ }
+ }
+ if !parent.IsNil() {
+ return cldr.inheritFields(v.Elem(), parent.Elem())
+ }
+ } else if parent.IsNil() {
+ panic("should not reach here")
+ }
+ return v, nil
+}
+
+// Must be slice of struct pointers.
+func (cldr *CLDR) inheritSlice(enc, v, parent reflect.Value) (res reflect.Value, err error) {
+ t := v.Type()
+ index := make(map[string]reflect.Value)
+ if !v.IsNil() {
+ for i := 0; i < v.Len(); i++ {
+ vi := v.Index(i)
+ key := attrKey(vi)
+ index[key] = vi
+ }
+ }
+ if !parent.IsNil() {
+ for i := 0; i < parent.Len(); i++ {
+ vi := parent.Index(i)
+ key := attrKey(vi)
+ if w, ok := index[key]; ok {
+ index[key], err = cldr.inheritStructPtr(w, vi)
+ } else {
+ n := cldr.newNode(vi.Elem(), enc)
+ index[key], err = cldr.inheritStructPtr(n, vi)
+ }
+ index[key].Interface().(Elem).setEnclosing(enc.Addr().Interface().(Elem))
+ if err != nil {
+ return v, err
+ }
+ }
+ }
+ keys := make([]string, 0, len(index))
+ for k := range index {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ sl := reflect.MakeSlice(t, len(index), len(index))
+ for i, k := range keys {
+ sl.Index(i).Set(index[k])
+ }
+ return sl, nil
+}
+
+func parentLocale(loc string) string {
+ parts := strings.Split(loc, "_")
+ if len(parts) == 1 {
+ return "root"
+ }
+ parts = parts[:len(parts)-1]
+ key := strings.Join(parts, "_")
+ return key
+}
+
+func (cldr *CLDR) resolve(loc string) (res *LDML, err error) {
+ if r := cldr.resolved[loc]; r != nil {
+ return r, nil
+ }
+ x := cldr.RawLDML(loc)
+ if x == nil {
+ return nil, fmt.Errorf("cldr: unknown locale %q", loc)
+ }
+ var v reflect.Value
+ if loc == "root" {
+ x = deepCopy(reflect.ValueOf(x)).Interface().(*LDML)
+ linkEnclosing(nil, x)
+ err = cldr.aliasResolver().visit(x)
+ } else {
+ key := parentLocale(loc)
+ var parent *LDML
+ for ; cldr.locale[key] == nil; key = parentLocale(key) {
+ }
+ if parent, err = cldr.resolve(key); err != nil {
+ return nil, err
+ }
+ v, err = cldr.inheritFields(reflect.ValueOf(x).Elem(), reflect.ValueOf(parent).Elem())
+ x = v.Interface().(*LDML)
+ linkEnclosing(nil, x)
+ }
+ if err != nil {
+ return nil, err
+ }
+ cldr.resolved[loc] = x
+ return x, err
+}
+
+// finalize finalizes the initialization of the raw LDML structs. It also
+// removes unwanted fields, as specified by filter, so that they will not
+// be unnecessarily evaluated.
+func (cldr *CLDR) finalize(filter []string) {
+ for _, x := range cldr.locale {
+ if filter != nil {
+ v := reflect.ValueOf(x).Elem()
+ t := v.Type()
+ for i := 0; i < v.NumField(); i++ {
+ f := t.Field(i)
+ name, _ := xmlName(f)
+ if name != "" && name != "identity" && !in(filter, name) {
+ v.Field(i).Set(reflect.Zero(f.Type))
+ }
+ }
+ }
+ linkEnclosing(nil, x) // for resolving aliases and paths
+ setNames(x, "ldml")
+ }
+}
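Illustrative usage (not part of the patch): a sketch of how the inheritance machinery above is reached through the package API, assuming, as in upstream, that CLDR.LDML resolves a locale against its parents while CLDR.RawLDML returns it as decoded.

package main

import (
	"fmt"
	"log"

	"golang.org/x/text/unicode/cldr"
)

// printIdentity contrasts resolved and raw data for one locale.
// It assumes the identity element is present, which Decode enforces.
func printIdentity(c *cldr.CLDR, loc string) {
	resolved, err := c.LDML(loc) // applies the inheritance rules in resolve.go
	if err != nil {
		log.Fatal(err)
	}
	raw := c.RawLDML(loc) // as decoded from XML, no inheritance applied
	fmt.Printf("%s: resolved language %q, raw language %q\n",
		loc, resolved.Identity.Language.Type, raw.Identity.Language.Type)
}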
diff --git a/vendor/golang.org/x/text/unicode/cldr/slice.go b/vendor/golang.org/x/text/unicode/cldr/slice.go
new file mode 100644
index 0000000..388c983
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/slice.go
@@ -0,0 +1,144 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cldr
+
+import (
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// Slice provides utilities for modifying slices of elements.
+// It can be wrapped around any slice of which the element type implements
+// interface Elem.
+type Slice struct {
+ ptr reflect.Value
+ typ reflect.Type
+}
+
+// Value returns the reflect.Value of the underlying slice.
+func (s *Slice) Value() reflect.Value {
+ return s.ptr.Elem()
+}
+
+// MakeSlice wraps a pointer to a slice of Elems.
+// It replaces the array pointed to by the slice so that subsequent modifications
+// do not alter the data in a CLDR type.
+// It panics if an incorrect type is passed.
+func MakeSlice(slicePtr interface{}) Slice {
+ ptr := reflect.ValueOf(slicePtr)
+ if ptr.Kind() != reflect.Ptr {
+ panic(fmt.Sprintf("MakeSlice: argument must be pointer to slice, found %v", ptr.Type()))
+ }
+ sl := ptr.Elem()
+ if sl.Kind() != reflect.Slice {
+ panic(fmt.Sprintf("MakeSlice: argument must point to a slice, found %v", sl.Type()))
+ }
+ intf := reflect.TypeOf((*Elem)(nil)).Elem()
+ if !sl.Type().Elem().Implements(intf) {
+ panic(fmt.Sprintf("MakeSlice: element type of slice (%v) does not implement Elem", sl.Type().Elem()))
+ }
+ nsl := reflect.MakeSlice(sl.Type(), sl.Len(), sl.Len())
+ reflect.Copy(nsl, sl)
+ sl.Set(nsl)
+ return Slice{
+ ptr: ptr,
+ typ: sl.Type().Elem().Elem(),
+ }
+}
+
+func (s Slice) indexForAttr(a string) []int {
+ for i := iter(reflect.Zero(s.typ)); !i.done(); i.next() {
+ if n, _ := xmlName(i.field()); n == a {
+ return i.index
+ }
+ }
+ panic(fmt.Sprintf("MakeSlice: no attribute %q for type %v", a, s.typ))
+}
+
+// Filter filters s to only include elements for which fn returns true.
+func (s Slice) Filter(fn func(e Elem) bool) {
+ k := 0
+ sl := s.Value()
+ for i := 0; i < sl.Len(); i++ {
+ vi := sl.Index(i)
+ if fn(vi.Interface().(Elem)) {
+ sl.Index(k).Set(vi)
+ k++
+ }
+ }
+ sl.Set(sl.Slice(0, k))
+}
+
+// Group finds elements in s for which fn returns the same value and groups
+// them in a new Slice.
+func (s Slice) Group(fn func(e Elem) string) []Slice {
+ m := make(map[string][]reflect.Value)
+ sl := s.Value()
+ for i := 0; i < sl.Len(); i++ {
+ vi := sl.Index(i)
+ key := fn(vi.Interface().(Elem))
+ m[key] = append(m[key], vi)
+ }
+ keys := []string{}
+ for k := range m {
+ keys = append(keys, k)
+ }
+ sort.Strings(keys)
+ res := []Slice{}
+ for _, k := range keys {
+ nsl := reflect.New(sl.Type())
+ nsl.Elem().Set(reflect.Append(nsl.Elem(), m[k]...))
+ res = append(res, MakeSlice(nsl.Interface()))
+ }
+ return res
+}
+
+// SelectAnyOf filters s to contain only elements for which attr matches
+// any of the values.
+func (s Slice) SelectAnyOf(attr string, values ...string) {
+ index := s.indexForAttr(attr)
+ s.Filter(func(e Elem) bool {
+ vf := reflect.ValueOf(e).Elem().FieldByIndex(index)
+ return in(values, vf.String())
+ })
+}
+
+// SelectOnePerGroup filters s to include at most one element e per group of
+// elements matching Key(e, a), where e has an attribute a that matches any
+// of the values in v.
+// If more than one element in a group matches a value in v, preference
+// is given to the element that matches the first value in v.
+func (s Slice) SelectOnePerGroup(a string, v []string) {
+ index := s.indexForAttr(a)
+ grouped := s.Group(func(e Elem) string { return Key(e, a) })
+ sl := s.Value()
+ sl.Set(sl.Slice(0, 0))
+ for _, g := range grouped {
+ e := reflect.Value{}
+ found := len(v)
+ gsl := g.Value()
+ for i := 0; i < gsl.Len(); i++ {
+ vi := gsl.Index(i).Elem().FieldByIndex(index)
+ j := 0
+ for ; j < len(v) && v[j] != vi.String(); j++ {
+ }
+ if j < found {
+ found = j
+ e = gsl.Index(i)
+ }
+ }
+ if found < len(v) {
+ sl.Set(reflect.Append(sl, e))
+ }
+ }
+}
+
+// SelectDraft drops all elements from the list with a draft level smaller than d
+// and selects the highest draft level of the remaining.
+// This method assumes that the input CLDR is canonicalized.
+func (s Slice) SelectDraft(d Draft) {
+ s.SelectOnePerGroup("draft", drafts[len(drafts)-2-int(d):])
+}
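Illustrative usage (not part of the patch): a sketch of the Slice helpers defined above, applied to a []*Common field of a resolved locale. The field chosen here (Characters.ExemplarCharacters) is just an example and may be nil if that section was filtered out during decoding.

package main

import (
	"fmt"

	"golang.org/x/text/unicode/cldr"
)

// approvedExemplars keeps only approved exemplarCharacters entries of the
// default and "auxiliary" types.
func approvedExemplars(ldml *cldr.LDML) {
	if ldml.Characters == nil {
		return // section not decoded
	}
	chars := ldml.Characters.ExemplarCharacters
	s := cldr.MakeSlice(&chars)            // copies the backing array, see MakeSlice above
	s.SelectDraft(cldr.Approved)           // drop entries below the approved draft level
	s.SelectAnyOf("type", "", "auxiliary") // match on the distinguishing "type" attribute
	for _, e := range chars {
		fmt.Println(e.Type, e.Data())
	}
}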
diff --git a/vendor/golang.org/x/text/unicode/cldr/xml.go b/vendor/golang.org/x/text/unicode/cldr/xml.go
new file mode 100644
index 0000000..bbae53b
--- /dev/null
+++ b/vendor/golang.org/x/text/unicode/cldr/xml.go
@@ -0,0 +1,1494 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+package cldr
+
+// LDMLBCP47 holds information on allowable values for various variables in LDML.
+type LDMLBCP47 struct {
+ Common
+ Version *struct {
+ Common
+ Number string `xml:"number,attr"`
+ } `xml:"version"`
+ Generation *struct {
+ Common
+ Date string `xml:"date,attr"`
+ } `xml:"generation"`
+ Keyword []*struct {
+ Common
+ Key []*struct {
+ Common
+ Extension string `xml:"extension,attr"`
+ Name string `xml:"name,attr"`
+ Description string `xml:"description,attr"`
+ Deprecated string `xml:"deprecated,attr"`
+ Preferred string `xml:"preferred,attr"`
+ Alias string `xml:"alias,attr"`
+ ValueType string `xml:"valueType,attr"`
+ Since string `xml:"since,attr"`
+ Type []*struct {
+ Common
+ Name string `xml:"name,attr"`
+ Description string `xml:"description,attr"`
+ Deprecated string `xml:"deprecated,attr"`
+ Preferred string `xml:"preferred,attr"`
+ Alias string `xml:"alias,attr"`
+ Since string `xml:"since,attr"`
+ } `xml:"type"`
+ } `xml:"key"`
+ } `xml:"keyword"`
+ Attribute []*struct {
+ Common
+ Name string `xml:"name,attr"`
+ Description string `xml:"description,attr"`
+ Deprecated string `xml:"deprecated,attr"`
+ Preferred string `xml:"preferred,attr"`
+ Since string `xml:"since,attr"`
+ } `xml:"attribute"`
+}
+
+// SupplementalData holds information relevant for internationalization
+// and proper use of CLDR, but that is not contained in the locale hierarchy.
+type SupplementalData struct {
+ Common
+ Version *struct {
+ Common
+ Number string `xml:"number,attr"`
+ } `xml:"version"`
+ Generation *struct {
+ Common
+ Date string `xml:"date,attr"`
+ } `xml:"generation"`
+ CurrencyData *struct {
+ Common
+ Fractions []*struct {
+ Common
+ Info []*struct {
+ Common
+ Iso4217 string `xml:"iso4217,attr"`
+ Digits string `xml:"digits,attr"`
+ Rounding string `xml:"rounding,attr"`
+ CashDigits string `xml:"cashDigits,attr"`
+ CashRounding string `xml:"cashRounding,attr"`
+ } `xml:"info"`
+ } `xml:"fractions"`
+ Region []*struct {
+ Common
+ Iso3166 string `xml:"iso3166,attr"`
+ Currency []*struct {
+ Common
+ Before string `xml:"before,attr"`
+ From string `xml:"from,attr"`
+ To string `xml:"to,attr"`
+ Iso4217 string `xml:"iso4217,attr"`
+ Digits string `xml:"digits,attr"`
+ Rounding string `xml:"rounding,attr"`
+ CashRounding string `xml:"cashRounding,attr"`
+ Tender string `xml:"tender,attr"`
+ Alternate []*struct {
+ Common
+ Iso4217 string `xml:"iso4217,attr"`
+ } `xml:"alternate"`
+ } `xml:"currency"`
+ } `xml:"region"`
+ } `xml:"currencyData"`
+ TerritoryContainment *struct {
+ Common
+ Group []*struct {
+ Common
+ Contains string `xml:"contains,attr"`
+ Grouping string `xml:"grouping,attr"`
+ Status string `xml:"status,attr"`
+ } `xml:"group"`
+ } `xml:"territoryContainment"`
+ SubdivisionContainment *struct {
+ Common
+ Subgroup []*struct {
+ Common
+ Subtype string `xml:"subtype,attr"`
+ Contains string `xml:"contains,attr"`
+ } `xml:"subgroup"`
+ } `xml:"subdivisionContainment"`
+ LanguageData *struct {
+ Common
+ Language []*struct {
+ Common
+ Scripts string `xml:"scripts,attr"`
+ Territories string `xml:"territories,attr"`
+ Variants string `xml:"variants,attr"`
+ } `xml:"language"`
+ } `xml:"languageData"`
+ TerritoryInfo *struct {
+ Common
+ Territory []*struct {
+ Common
+ Gdp string `xml:"gdp,attr"`
+ LiteracyPercent string `xml:"literacyPercent,attr"`
+ Population string `xml:"population,attr"`
+ LanguagePopulation []*struct {
+ Common
+ LiteracyPercent string `xml:"literacyPercent,attr"`
+ WritingPercent string `xml:"writingPercent,attr"`
+ PopulationPercent string `xml:"populationPercent,attr"`
+ OfficialStatus string `xml:"officialStatus,attr"`
+ } `xml:"languagePopulation"`
+ } `xml:"territory"`
+ } `xml:"territoryInfo"`
+ PostalCodeData *struct {
+ Common
+ PostCodeRegex []*struct {
+ Common
+ TerritoryId string `xml:"territoryId,attr"`
+ } `xml:"postCodeRegex"`
+ } `xml:"postalCodeData"`
+ CalendarData *struct {
+ Common
+ Calendar []*struct {
+ Common
+ Territories string `xml:"territories,attr"`
+ CalendarSystem *Common `xml:"calendarSystem"`
+ Eras *struct {
+ Common
+ Era []*struct {
+ Common
+ Start string `xml:"start,attr"`
+ End string `xml:"end,attr"`
+ } `xml:"era"`
+ } `xml:"eras"`
+ } `xml:"calendar"`
+ } `xml:"calendarData"`
+ CalendarPreferenceData *struct {
+ Common
+ CalendarPreference []*struct {
+ Common
+ Territories string `xml:"territories,attr"`
+ Ordering string `xml:"ordering,attr"`
+ } `xml:"calendarPreference"`
+ } `xml:"calendarPreferenceData"`
+ WeekData *struct {
+ Common
+ MinDays []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ Territories string `xml:"territories,attr"`
+ } `xml:"minDays"`
+ FirstDay []*struct {
+ Common
+ Day string `xml:"day,attr"`
+ Territories string `xml:"territories,attr"`
+ } `xml:"firstDay"`
+ WeekendStart []*struct {
+ Common
+ Day string `xml:"day,attr"`
+ Territories string `xml:"territories,attr"`
+ } `xml:"weekendStart"`
+ WeekendEnd []*struct {
+ Common
+ Day string `xml:"day,attr"`
+ Territories string `xml:"territories,attr"`
+ } `xml:"weekendEnd"`
+ WeekOfPreference []*struct {
+ Common
+ Locales string `xml:"locales,attr"`
+ Ordering string `xml:"ordering,attr"`
+ } `xml:"weekOfPreference"`
+ } `xml:"weekData"`
+ TimeData *struct {
+ Common
+ Hours []*struct {
+ Common
+ Allowed string `xml:"allowed,attr"`
+ Preferred string `xml:"preferred,attr"`
+ Regions string `xml:"regions,attr"`
+ } `xml:"hours"`
+ } `xml:"timeData"`
+ MeasurementData *struct {
+ Common
+ MeasurementSystem []*struct {
+ Common
+ Category string `xml:"category,attr"`
+ Territories string `xml:"territories,attr"`
+ } `xml:"measurementSystem"`
+ PaperSize []*struct {
+ Common
+ Territories string `xml:"territories,attr"`
+ } `xml:"paperSize"`
+ } `xml:"measurementData"`
+ UnitPreferenceData *struct {
+ Common
+ UnitPreferences []*struct {
+ Common
+ Category string `xml:"category,attr"`
+ Usage string `xml:"usage,attr"`
+ Scope string `xml:"scope,attr"`
+ UnitPreference []*struct {
+ Common
+ Regions string `xml:"regions,attr"`
+ } `xml:"unitPreference"`
+ } `xml:"unitPreferences"`
+ } `xml:"unitPreferenceData"`
+ TimezoneData *struct {
+ Common
+ MapTimezones []*struct {
+ Common
+ OtherVersion string `xml:"otherVersion,attr"`
+ TypeVersion string `xml:"typeVersion,attr"`
+ MapZone []*struct {
+ Common
+ Other string `xml:"other,attr"`
+ Territory string `xml:"territory,attr"`
+ } `xml:"mapZone"`
+ } `xml:"mapTimezones"`
+ ZoneFormatting []*struct {
+ Common
+ Multizone string `xml:"multizone,attr"`
+ TzidVersion string `xml:"tzidVersion,attr"`
+ ZoneItem []*struct {
+ Common
+ Territory string `xml:"territory,attr"`
+ Aliases string `xml:"aliases,attr"`
+ } `xml:"zoneItem"`
+ } `xml:"zoneFormatting"`
+ } `xml:"timezoneData"`
+ Characters *struct {
+ Common
+ CharacterFallback []*struct {
+ Common
+ Character []*struct {
+ Common
+ Value string `xml:"value,attr"`
+ Substitute []*Common `xml:"substitute"`
+ } `xml:"character"`
+ } `xml:"character-fallback"`
+ } `xml:"characters"`
+ Transforms *struct {
+ Common
+ Transform []*struct {
+ Common
+ Source string `xml:"source,attr"`
+ Target string `xml:"target,attr"`
+ Variant string `xml:"variant,attr"`
+ Direction string `xml:"direction,attr"`
+ Alias string `xml:"alias,attr"`
+ BackwardAlias string `xml:"backwardAlias,attr"`
+ Visibility string `xml:"visibility,attr"`
+ Comment []*Common `xml:"comment"`
+ TRule []*Common `xml:"tRule"`
+ } `xml:"transform"`
+ } `xml:"transforms"`
+ Metadata *struct {
+ Common
+ AttributeOrder *Common `xml:"attributeOrder"`
+ ElementOrder *Common `xml:"elementOrder"`
+ SerialElements *Common `xml:"serialElements"`
+ Suppress *struct {
+ Common
+ Attributes []*struct {
+ Common
+ Element string `xml:"element,attr"`
+ Attribute string `xml:"attribute,attr"`
+ AttributeValue string `xml:"attributeValue,attr"`
+ } `xml:"attributes"`
+ } `xml:"suppress"`
+ Validity *struct {
+ Common
+ Variable []*struct {
+ Common
+ Id string `xml:"id,attr"`
+ } `xml:"variable"`
+ AttributeValues []*struct {
+ Common
+ Dtds string `xml:"dtds,attr"`
+ Elements string `xml:"elements,attr"`
+ Attributes string `xml:"attributes,attr"`
+ Order string `xml:"order,attr"`
+ } `xml:"attributeValues"`
+ } `xml:"validity"`
+ Alias *struct {
+ Common
+ LanguageAlias []*struct {
+ Common
+ Replacement string `xml:"replacement,attr"`
+ Reason string `xml:"reason,attr"`
+ } `xml:"languageAlias"`
+ ScriptAlias []*struct {
+ Common
+ Replacement string `xml:"replacement,attr"`
+ Reason string `xml:"reason,attr"`
+ } `xml:"scriptAlias"`
+ TerritoryAlias []*struct {
+ Common
+ Replacement string `xml:"replacement,attr"`
+ Reason string `xml:"reason,attr"`
+ } `xml:"territoryAlias"`
+ SubdivisionAlias []*struct {
+ Common
+ Replacement string `xml:"replacement,attr"`
+ Reason string `xml:"reason,attr"`
+ } `xml:"subdivisionAlias"`
+ VariantAlias []*struct {
+ Common
+ Replacement string `xml:"replacement,attr"`
+ Reason string `xml:"reason,attr"`
+ } `xml:"variantAlias"`
+ ZoneAlias []*struct {
+ Common
+ Replacement string `xml:"replacement,attr"`
+ Reason string `xml:"reason,attr"`
+ } `xml:"zoneAlias"`
+ } `xml:"alias"`
+ Deprecated *struct {
+ Common
+ DeprecatedItems []*struct {
+ Common
+ Elements string `xml:"elements,attr"`
+ Attributes string `xml:"attributes,attr"`
+ Values string `xml:"values,attr"`
+ } `xml:"deprecatedItems"`
+ } `xml:"deprecated"`
+ Distinguishing *struct {
+ Common
+ DistinguishingItems []*struct {
+ Common
+ Exclude string `xml:"exclude,attr"`
+ Elements string `xml:"elements,attr"`
+ Attributes string `xml:"attributes,attr"`
+ } `xml:"distinguishingItems"`
+ } `xml:"distinguishing"`
+ Blocking *struct {
+ Common
+ BlockingItems []*struct {
+ Common
+ Elements string `xml:"elements,attr"`
+ } `xml:"blockingItems"`
+ } `xml:"blocking"`
+ CoverageAdditions *struct {
+ Common
+ LanguageCoverage []*struct {
+ Common
+ Values string `xml:"values,attr"`
+ } `xml:"languageCoverage"`
+ ScriptCoverage []*struct {
+ Common
+ Values string `xml:"values,attr"`
+ } `xml:"scriptCoverage"`
+ TerritoryCoverage []*struct {
+ Common
+ Values string `xml:"values,attr"`
+ } `xml:"territoryCoverage"`
+ CurrencyCoverage []*struct {
+ Common
+ Values string `xml:"values,attr"`
+ } `xml:"currencyCoverage"`
+ TimezoneCoverage []*struct {
+ Common
+ Values string `xml:"values,attr"`
+ } `xml:"timezoneCoverage"`
+ } `xml:"coverageAdditions"`
+ SkipDefaultLocale *struct {
+ Common
+ Services string `xml:"services,attr"`
+ } `xml:"skipDefaultLocale"`
+ DefaultContent *struct {
+ Common
+ Locales string `xml:"locales,attr"`
+ } `xml:"defaultContent"`
+ } `xml:"metadata"`
+ CodeMappings *struct {
+ Common
+ LanguageCodes []*struct {
+ Common
+ Alpha3 string `xml:"alpha3,attr"`
+ } `xml:"languageCodes"`
+ TerritoryCodes []*struct {
+ Common
+ Numeric string `xml:"numeric,attr"`
+ Alpha3 string `xml:"alpha3,attr"`
+ Fips10 string `xml:"fips10,attr"`
+ Internet string `xml:"internet,attr"`
+ } `xml:"territoryCodes"`
+ CurrencyCodes []*struct {
+ Common
+ Numeric string `xml:"numeric,attr"`
+ } `xml:"currencyCodes"`
+ } `xml:"codeMappings"`
+ ParentLocales *struct {
+ Common
+ ParentLocale []*struct {
+ Common
+ Parent string `xml:"parent,attr"`
+ Locales string `xml:"locales,attr"`
+ } `xml:"parentLocale"`
+ } `xml:"parentLocales"`
+ LikelySubtags *struct {
+ Common
+ LikelySubtag []*struct {
+ Common
+ From string `xml:"from,attr"`
+ To string `xml:"to,attr"`
+ } `xml:"likelySubtag"`
+ } `xml:"likelySubtags"`
+ MetazoneInfo *struct {
+ Common
+ Timezone []*struct {
+ Common
+ UsesMetazone []*struct {
+ Common
+ From string `xml:"from,attr"`
+ To string `xml:"to,attr"`
+ Mzone string `xml:"mzone,attr"`
+ } `xml:"usesMetazone"`
+ } `xml:"timezone"`
+ } `xml:"metazoneInfo"`
+ Plurals []*struct {
+ Common
+ PluralRules []*struct {
+ Common
+ Locales string `xml:"locales,attr"`
+ PluralRule []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"pluralRule"`
+ } `xml:"pluralRules"`
+ PluralRanges []*struct {
+ Common
+ Locales string `xml:"locales,attr"`
+ PluralRange []*struct {
+ Common
+ Start string `xml:"start,attr"`
+ End string `xml:"end,attr"`
+ Result string `xml:"result,attr"`
+ } `xml:"pluralRange"`
+ } `xml:"pluralRanges"`
+ } `xml:"plurals"`
+ TelephoneCodeData *struct {
+ Common
+ CodesByTerritory []*struct {
+ Common
+ Territory string `xml:"territory,attr"`
+ TelephoneCountryCode []*struct {
+ Common
+ Code string `xml:"code,attr"`
+ From string `xml:"from,attr"`
+ To string `xml:"to,attr"`
+ } `xml:"telephoneCountryCode"`
+ } `xml:"codesByTerritory"`
+ } `xml:"telephoneCodeData"`
+ NumberingSystems *struct {
+ Common
+ NumberingSystem []*struct {
+ Common
+ Id string `xml:"id,attr"`
+ Radix string `xml:"radix,attr"`
+ Digits string `xml:"digits,attr"`
+ Rules string `xml:"rules,attr"`
+ } `xml:"numberingSystem"`
+ } `xml:"numberingSystems"`
+ Bcp47KeywordMappings *struct {
+ Common
+ MapKeys *struct {
+ Common
+ KeyMap []*struct {
+ Common
+ Bcp47 string `xml:"bcp47,attr"`
+ } `xml:"keyMap"`
+ } `xml:"mapKeys"`
+ MapTypes []*struct {
+ Common
+ TypeMap []*struct {
+ Common
+ Bcp47 string `xml:"bcp47,attr"`
+ } `xml:"typeMap"`
+ } `xml:"mapTypes"`
+ } `xml:"bcp47KeywordMappings"`
+ Gender *struct {
+ Common
+ PersonList []*struct {
+ Common
+ Locales string `xml:"locales,attr"`
+ } `xml:"personList"`
+ } `xml:"gender"`
+ References *struct {
+ Common
+ Reference []*struct {
+ Common
+ Uri string `xml:"uri,attr"`
+ } `xml:"reference"`
+ } `xml:"references"`
+ LanguageMatching *struct {
+ Common
+ LanguageMatches []*struct {
+ Common
+ ParadigmLocales []*struct {
+ Common
+ Locales string `xml:"locales,attr"`
+ } `xml:"paradigmLocales"`
+ MatchVariable []*struct {
+ Common
+ Id string `xml:"id,attr"`
+ Value string `xml:"value,attr"`
+ } `xml:"matchVariable"`
+ LanguageMatch []*struct {
+ Common
+ Desired string `xml:"desired,attr"`
+ Supported string `xml:"supported,attr"`
+ Percent string `xml:"percent,attr"`
+ Distance string `xml:"distance,attr"`
+ Oneway string `xml:"oneway,attr"`
+ } `xml:"languageMatch"`
+ } `xml:"languageMatches"`
+ } `xml:"languageMatching"`
+ DayPeriodRuleSet []*struct {
+ Common
+ DayPeriodRules []*struct {
+ Common
+ Locales string `xml:"locales,attr"`
+ DayPeriodRule []*struct {
+ Common
+ At string `xml:"at,attr"`
+ After string `xml:"after,attr"`
+ Before string `xml:"before,attr"`
+ From string `xml:"from,attr"`
+ To string `xml:"to,attr"`
+ } `xml:"dayPeriodRule"`
+ } `xml:"dayPeriodRules"`
+ } `xml:"dayPeriodRuleSet"`
+ MetaZones *struct {
+ Common
+ MetazoneInfo *struct {
+ Common
+ Timezone []*struct {
+ Common
+ UsesMetazone []*struct {
+ Common
+ From string `xml:"from,attr"`
+ To string `xml:"to,attr"`
+ Mzone string `xml:"mzone,attr"`
+ } `xml:"usesMetazone"`
+ } `xml:"timezone"`
+ } `xml:"metazoneInfo"`
+ MapTimezones *struct {
+ Common
+ OtherVersion string `xml:"otherVersion,attr"`
+ TypeVersion string `xml:"typeVersion,attr"`
+ MapZone []*struct {
+ Common
+ Other string `xml:"other,attr"`
+ Territory string `xml:"territory,attr"`
+ } `xml:"mapZone"`
+ } `xml:"mapTimezones"`
+ } `xml:"metaZones"`
+ PrimaryZones *struct {
+ Common
+ PrimaryZone []*struct {
+ Common
+ Iso3166 string `xml:"iso3166,attr"`
+ } `xml:"primaryZone"`
+ } `xml:"primaryZones"`
+ WindowsZones *struct {
+ Common
+ MapTimezones *struct {
+ Common
+ OtherVersion string `xml:"otherVersion,attr"`
+ TypeVersion string `xml:"typeVersion,attr"`
+ MapZone []*struct {
+ Common
+ Other string `xml:"other,attr"`
+ Territory string `xml:"territory,attr"`
+ } `xml:"mapZone"`
+ } `xml:"mapTimezones"`
+ } `xml:"windowsZones"`
+ CoverageLevels *struct {
+ Common
+ ApprovalRequirements *struct {
+ Common
+ ApprovalRequirement []*struct {
+ Common
+ Votes string `xml:"votes,attr"`
+ Locales string `xml:"locales,attr"`
+ Paths string `xml:"paths,attr"`
+ } `xml:"approvalRequirement"`
+ } `xml:"approvalRequirements"`
+ CoverageVariable []*struct {
+ Common
+ Key string `xml:"key,attr"`
+ Value string `xml:"value,attr"`
+ } `xml:"coverageVariable"`
+ CoverageLevel []*struct {
+ Common
+ InLanguage string `xml:"inLanguage,attr"`
+ InScript string `xml:"inScript,attr"`
+ InTerritory string `xml:"inTerritory,attr"`
+ Value string `xml:"value,attr"`
+ Match string `xml:"match,attr"`
+ } `xml:"coverageLevel"`
+ } `xml:"coverageLevels"`
+ IdValidity *struct {
+ Common
+ Id []*struct {
+ Common
+ IdStatus string `xml:"idStatus,attr"`
+ } `xml:"id"`
+ } `xml:"idValidity"`
+ RgScope *struct {
+ Common
+ RgPath []*struct {
+ Common
+ Path string `xml:"path,attr"`
+ } `xml:"rgPath"`
+ } `xml:"rgScope"`
+ LanguageGroups *struct {
+ Common
+ LanguageGroup []*struct {
+ Common
+ Parent string `xml:"parent,attr"`
+ } `xml:"languageGroup"`
+ } `xml:"languageGroups"`
+}
+
+// LDML is the top-level type for locale-specific data.
+type LDML struct {
+ Common
+ Version string `xml:"version,attr"`
+ Identity *struct {
+ Common
+ Version *struct {
+ Common
+ Number string `xml:"number,attr"`
+ } `xml:"version"`
+ Generation *struct {
+ Common
+ Date string `xml:"date,attr"`
+ } `xml:"generation"`
+ Language *Common `xml:"language"`
+ Script *Common `xml:"script"`
+ Territory *Common `xml:"territory"`
+ Variant *Common `xml:"variant"`
+ } `xml:"identity"`
+ LocaleDisplayNames *LocaleDisplayNames `xml:"localeDisplayNames"`
+ Layout *struct {
+ Common
+ Orientation []*struct {
+ Common
+ Characters string `xml:"characters,attr"`
+ Lines string `xml:"lines,attr"`
+ CharacterOrder []*Common `xml:"characterOrder"`
+ LineOrder []*Common `xml:"lineOrder"`
+ } `xml:"orientation"`
+ InList []*struct {
+ Common
+ Casing string `xml:"casing,attr"`
+ } `xml:"inList"`
+ InText []*Common `xml:"inText"`
+ } `xml:"layout"`
+ ContextTransforms *struct {
+ Common
+ ContextTransformUsage []*struct {
+ Common
+ ContextTransform []*Common `xml:"contextTransform"`
+ } `xml:"contextTransformUsage"`
+ } `xml:"contextTransforms"`
+ Characters *struct {
+ Common
+ ExemplarCharacters []*Common `xml:"exemplarCharacters"`
+ Ellipsis []*Common `xml:"ellipsis"`
+ MoreInformation []*Common `xml:"moreInformation"`
+ Stopwords []*struct {
+ Common
+ StopwordList []*Common `xml:"stopwordList"`
+ } `xml:"stopwords"`
+ IndexLabels []*struct {
+ Common
+ IndexSeparator []*Common `xml:"indexSeparator"`
+ CompressedIndexSeparator []*Common `xml:"compressedIndexSeparator"`
+ IndexRangePattern []*Common `xml:"indexRangePattern"`
+ IndexLabelBefore []*Common `xml:"indexLabelBefore"`
+ IndexLabelAfter []*Common `xml:"indexLabelAfter"`
+ IndexLabel []*struct {
+ Common
+ IndexSource string `xml:"indexSource,attr"`
+ Priority string `xml:"priority,attr"`
+ } `xml:"indexLabel"`
+ } `xml:"indexLabels"`
+ Mapping []*struct {
+ Common
+ Registry string `xml:"registry,attr"`
+ } `xml:"mapping"`
+ ParseLenients []*struct {
+ Common
+ Scope string `xml:"scope,attr"`
+ Level string `xml:"level,attr"`
+ ParseLenient []*struct {
+ Common
+ Sample string `xml:"sample,attr"`
+ } `xml:"parseLenient"`
+ } `xml:"parseLenients"`
+ } `xml:"characters"`
+ Delimiters *struct {
+ Common
+ QuotationStart []*Common `xml:"quotationStart"`
+ QuotationEnd []*Common `xml:"quotationEnd"`
+ AlternateQuotationStart []*Common `xml:"alternateQuotationStart"`
+ AlternateQuotationEnd []*Common `xml:"alternateQuotationEnd"`
+ } `xml:"delimiters"`
+ Measurement *struct {
+ Common
+ MeasurementSystem []*Common `xml:"measurementSystem"`
+ PaperSize []*struct {
+ Common
+ Height []*Common `xml:"height"`
+ Width []*Common `xml:"width"`
+ } `xml:"paperSize"`
+ } `xml:"measurement"`
+ Dates *struct {
+ Common
+ LocalizedPatternChars []*Common `xml:"localizedPatternChars"`
+ DateRangePattern []*Common `xml:"dateRangePattern"`
+ Calendars *struct {
+ Common
+ Calendar []*Calendar `xml:"calendar"`
+ } `xml:"calendars"`
+ Fields *struct {
+ Common
+ Field []*struct {
+ Common
+ DisplayName []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"displayName"`
+ Relative []*Common `xml:"relative"`
+ RelativeTime []*struct {
+ Common
+ RelativeTimePattern []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"relativeTimePattern"`
+ } `xml:"relativeTime"`
+ RelativePeriod []*Common `xml:"relativePeriod"`
+ } `xml:"field"`
+ } `xml:"fields"`
+ TimeZoneNames *TimeZoneNames `xml:"timeZoneNames"`
+ } `xml:"dates"`
+ Numbers *Numbers `xml:"numbers"`
+ Units *struct {
+ Common
+ Unit []*struct {
+ Common
+ DisplayName []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"displayName"`
+ UnitPattern []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"unitPattern"`
+ PerUnitPattern []*Common `xml:"perUnitPattern"`
+ } `xml:"unit"`
+ UnitLength []*struct {
+ Common
+ CompoundUnit []*struct {
+ Common
+ CompoundUnitPattern []*Common `xml:"compoundUnitPattern"`
+ } `xml:"compoundUnit"`
+ Unit []*struct {
+ Common
+ DisplayName []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"displayName"`
+ UnitPattern []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"unitPattern"`
+ PerUnitPattern []*Common `xml:"perUnitPattern"`
+ } `xml:"unit"`
+ CoordinateUnit []*struct {
+ Common
+ CoordinateUnitPattern []*Common `xml:"coordinateUnitPattern"`
+ } `xml:"coordinateUnit"`
+ } `xml:"unitLength"`
+ DurationUnit []*struct {
+ Common
+ DurationUnitPattern []*Common `xml:"durationUnitPattern"`
+ } `xml:"durationUnit"`
+ } `xml:"units"`
+ ListPatterns *struct {
+ Common
+ ListPattern []*struct {
+ Common
+ ListPatternPart []*Common `xml:"listPatternPart"`
+ } `xml:"listPattern"`
+ } `xml:"listPatterns"`
+ Collations *struct {
+ Common
+ Version string `xml:"version,attr"`
+ DefaultCollation *Common `xml:"defaultCollation"`
+ Collation []*Collation `xml:"collation"`
+ } `xml:"collations"`
+ Posix *struct {
+ Common
+ Messages []*struct {
+ Common
+ Yesstr []*Common `xml:"yesstr"`
+ Nostr []*Common `xml:"nostr"`
+ Yesexpr []*Common `xml:"yesexpr"`
+ Noexpr []*Common `xml:"noexpr"`
+ } `xml:"messages"`
+ } `xml:"posix"`
+ CharacterLabels *struct {
+ Common
+ CharacterLabelPattern []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"characterLabelPattern"`
+ CharacterLabel []*Common `xml:"characterLabel"`
+ } `xml:"characterLabels"`
+ Segmentations *struct {
+ Common
+ Segmentation []*struct {
+ Common
+ Variables *struct {
+ Common
+ Variable []*struct {
+ Common
+ Id string `xml:"id,attr"`
+ } `xml:"variable"`
+ } `xml:"variables"`
+ SegmentRules *struct {
+ Common
+ Rule []*struct {
+ Common
+ Id string `xml:"id,attr"`
+ } `xml:"rule"`
+ } `xml:"segmentRules"`
+ Exceptions *struct {
+ Common
+ Exception []*Common `xml:"exception"`
+ } `xml:"exceptions"`
+ Suppressions *struct {
+ Common
+ Suppression []*Common `xml:"suppression"`
+ } `xml:"suppressions"`
+ } `xml:"segmentation"`
+ } `xml:"segmentations"`
+ Rbnf *struct {
+ Common
+ RulesetGrouping []*struct {
+ Common
+ Ruleset []*struct {
+ Common
+ Access string `xml:"access,attr"`
+ AllowsParsing string `xml:"allowsParsing,attr"`
+ Rbnfrule []*struct {
+ Common
+ Value string `xml:"value,attr"`
+ Radix string `xml:"radix,attr"`
+ Decexp string `xml:"decexp,attr"`
+ } `xml:"rbnfrule"`
+ } `xml:"ruleset"`
+ } `xml:"rulesetGrouping"`
+ } `xml:"rbnf"`
+ Annotations *struct {
+ Common
+ Annotation []*struct {
+ Common
+ Cp string `xml:"cp,attr"`
+ Tts string `xml:"tts,attr"`
+ } `xml:"annotation"`
+ } `xml:"annotations"`
+ Metadata *struct {
+ Common
+ CasingData *struct {
+ Common
+ CasingItem []*struct {
+ Common
+ Override string `xml:"override,attr"`
+ ForceError string `xml:"forceError,attr"`
+ } `xml:"casingItem"`
+ } `xml:"casingData"`
+ } `xml:"metadata"`
+ References *struct {
+ Common
+ Reference []*struct {
+ Common
+ Uri string `xml:"uri,attr"`
+ } `xml:"reference"`
+ } `xml:"references"`
+}
+
+// Collation contains rules that specify a certain sort-order,
+// as a tailoring of the root order.
+// The parsed rules are obtained by passing a RuleProcessor to Collation's
+// Process method.
+type Collation struct {
+ Common
+ Visibility string `xml:"visibility,attr"`
+ Base *Common `xml:"base"`
+ Import []*struct {
+ Common
+ Source string `xml:"source,attr"`
+ } `xml:"import"`
+ Settings *struct {
+ Common
+ Strength string `xml:"strength,attr"`
+ Alternate string `xml:"alternate,attr"`
+ Backwards string `xml:"backwards,attr"`
+ Normalization string `xml:"normalization,attr"`
+ CaseLevel string `xml:"caseLevel,attr"`
+ CaseFirst string `xml:"caseFirst,attr"`
+ HiraganaQuaternary string `xml:"hiraganaQuaternary,attr"`
+ MaxVariable string `xml:"maxVariable,attr"`
+ Numeric string `xml:"numeric,attr"`
+ Private string `xml:"private,attr"`
+ VariableTop string `xml:"variableTop,attr"`
+ Reorder string `xml:"reorder,attr"`
+ } `xml:"settings"`
+ SuppressContractions *Common `xml:"suppress_contractions"`
+ Optimize *Common `xml:"optimize"`
+ Cr []*Common `xml:"cr"`
+ rulesElem
+}
+
+// Calendar specifies the fields used for formatting and parsing dates and times.
+// The month and quarter names are identified numerically, starting at 1.
+// The day (of the week) names are identified with short strings, since there is
+// no universally-accepted numeric designation.
+type Calendar struct {
+ Common
+ Months *struct {
+ Common
+ MonthContext []*struct {
+ Common
+ MonthWidth []*struct {
+ Common
+ Month []*struct {
+ Common
+ Yeartype string `xml:"yeartype,attr"`
+ } `xml:"month"`
+ } `xml:"monthWidth"`
+ } `xml:"monthContext"`
+ } `xml:"months"`
+ MonthNames *struct {
+ Common
+ Month []*struct {
+ Common
+ Yeartype string `xml:"yeartype,attr"`
+ } `xml:"month"`
+ } `xml:"monthNames"`
+ MonthAbbr *struct {
+ Common
+ Month []*struct {
+ Common
+ Yeartype string `xml:"yeartype,attr"`
+ } `xml:"month"`
+ } `xml:"monthAbbr"`
+ MonthPatterns *struct {
+ Common
+ MonthPatternContext []*struct {
+ Common
+ MonthPatternWidth []*struct {
+ Common
+ MonthPattern []*Common `xml:"monthPattern"`
+ } `xml:"monthPatternWidth"`
+ } `xml:"monthPatternContext"`
+ } `xml:"monthPatterns"`
+ Days *struct {
+ Common
+ DayContext []*struct {
+ Common
+ DayWidth []*struct {
+ Common
+ Day []*Common `xml:"day"`
+ } `xml:"dayWidth"`
+ } `xml:"dayContext"`
+ } `xml:"days"`
+ DayNames *struct {
+ Common
+ Day []*Common `xml:"day"`
+ } `xml:"dayNames"`
+ DayAbbr *struct {
+ Common
+ Day []*Common `xml:"day"`
+ } `xml:"dayAbbr"`
+ Quarters *struct {
+ Common
+ QuarterContext []*struct {
+ Common
+ QuarterWidth []*struct {
+ Common
+ Quarter []*Common `xml:"quarter"`
+ } `xml:"quarterWidth"`
+ } `xml:"quarterContext"`
+ } `xml:"quarters"`
+ Week *struct {
+ Common
+ MinDays []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"minDays"`
+ FirstDay []*struct {
+ Common
+ Day string `xml:"day,attr"`
+ } `xml:"firstDay"`
+ WeekendStart []*struct {
+ Common
+ Day string `xml:"day,attr"`
+ Time string `xml:"time,attr"`
+ } `xml:"weekendStart"`
+ WeekendEnd []*struct {
+ Common
+ Day string `xml:"day,attr"`
+ Time string `xml:"time,attr"`
+ } `xml:"weekendEnd"`
+ } `xml:"week"`
+ Am []*Common `xml:"am"`
+ Pm []*Common `xml:"pm"`
+ DayPeriods *struct {
+ Common
+ DayPeriodContext []*struct {
+ Common
+ DayPeriodWidth []*struct {
+ Common
+ DayPeriod []*Common `xml:"dayPeriod"`
+ } `xml:"dayPeriodWidth"`
+ } `xml:"dayPeriodContext"`
+ } `xml:"dayPeriods"`
+ Eras *struct {
+ Common
+ EraNames *struct {
+ Common
+ Era []*Common `xml:"era"`
+ } `xml:"eraNames"`
+ EraAbbr *struct {
+ Common
+ Era []*Common `xml:"era"`
+ } `xml:"eraAbbr"`
+ EraNarrow *struct {
+ Common
+ Era []*Common `xml:"era"`
+ } `xml:"eraNarrow"`
+ } `xml:"eras"`
+ CyclicNameSets *struct {
+ Common
+ CyclicNameSet []*struct {
+ Common
+ CyclicNameContext []*struct {
+ Common
+ CyclicNameWidth []*struct {
+ Common
+ CyclicName []*Common `xml:"cyclicName"`
+ } `xml:"cyclicNameWidth"`
+ } `xml:"cyclicNameContext"`
+ } `xml:"cyclicNameSet"`
+ } `xml:"cyclicNameSets"`
+ DateFormats *struct {
+ Common
+ DateFormatLength []*struct {
+ Common
+ DateFormat []*struct {
+ Common
+ Pattern []*struct {
+ Common
+ Numbers string `xml:"numbers,attr"`
+ Count string `xml:"count,attr"`
+ } `xml:"pattern"`
+ DisplayName []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"displayName"`
+ } `xml:"dateFormat"`
+ } `xml:"dateFormatLength"`
+ } `xml:"dateFormats"`
+ TimeFormats *struct {
+ Common
+ TimeFormatLength []*struct {
+ Common
+ TimeFormat []*struct {
+ Common
+ Pattern []*struct {
+ Common
+ Numbers string `xml:"numbers,attr"`
+ Count string `xml:"count,attr"`
+ } `xml:"pattern"`
+ DisplayName []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"displayName"`
+ } `xml:"timeFormat"`
+ } `xml:"timeFormatLength"`
+ } `xml:"timeFormats"`
+ DateTimeFormats *struct {
+ Common
+ DateTimeFormatLength []*struct {
+ Common
+ DateTimeFormat []*struct {
+ Common
+ Pattern []*struct {
+ Common
+ Numbers string `xml:"numbers,attr"`
+ Count string `xml:"count,attr"`
+ } `xml:"pattern"`
+ DisplayName []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"displayName"`
+ } `xml:"dateTimeFormat"`
+ } `xml:"dateTimeFormatLength"`
+ AvailableFormats []*struct {
+ Common
+ DateFormatItem []*struct {
+ Common
+ Id string `xml:"id,attr"`
+ Count string `xml:"count,attr"`
+ } `xml:"dateFormatItem"`
+ } `xml:"availableFormats"`
+ AppendItems []*struct {
+ Common
+ AppendItem []*struct {
+ Common
+ Request string `xml:"request,attr"`
+ } `xml:"appendItem"`
+ } `xml:"appendItems"`
+ IntervalFormats []*struct {
+ Common
+ IntervalFormatFallback []*Common `xml:"intervalFormatFallback"`
+ IntervalFormatItem []*struct {
+ Common
+ Id string `xml:"id,attr"`
+ GreatestDifference []*struct {
+ Common
+ Id string `xml:"id,attr"`
+ } `xml:"greatestDifference"`
+ } `xml:"intervalFormatItem"`
+ } `xml:"intervalFormats"`
+ } `xml:"dateTimeFormats"`
+ Fields []*struct {
+ Common
+ Field []*struct {
+ Common
+ DisplayName []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"displayName"`
+ Relative []*Common `xml:"relative"`
+ RelativeTime []*struct {
+ Common
+ RelativeTimePattern []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"relativeTimePattern"`
+ } `xml:"relativeTime"`
+ RelativePeriod []*Common `xml:"relativePeriod"`
+ } `xml:"field"`
+ } `xml:"fields"`
+}
+type TimeZoneNames struct {
+ Common
+ HourFormat []*Common `xml:"hourFormat"`
+ HoursFormat []*Common `xml:"hoursFormat"`
+ GmtFormat []*Common `xml:"gmtFormat"`
+ GmtZeroFormat []*Common `xml:"gmtZeroFormat"`
+ RegionFormat []*Common `xml:"regionFormat"`
+ FallbackFormat []*Common `xml:"fallbackFormat"`
+ FallbackRegionFormat []*Common `xml:"fallbackRegionFormat"`
+ AbbreviationFallback []*Common `xml:"abbreviationFallback"`
+ PreferenceOrdering []*Common `xml:"preferenceOrdering"`
+ SingleCountries []*struct {
+ Common
+ List string `xml:"list,attr"`
+ } `xml:"singleCountries"`
+ Zone []*struct {
+ Common
+ Long []*struct {
+ Common
+ Generic []*Common `xml:"generic"`
+ Standard []*Common `xml:"standard"`
+ Daylight []*Common `xml:"daylight"`
+ } `xml:"long"`
+ Short []*struct {
+ Common
+ Generic []*Common `xml:"generic"`
+ Standard []*Common `xml:"standard"`
+ Daylight []*Common `xml:"daylight"`
+ } `xml:"short"`
+ CommonlyUsed []*struct {
+ Common
+ Used string `xml:"used,attr"`
+ } `xml:"commonlyUsed"`
+ ExemplarCity []*Common `xml:"exemplarCity"`
+ } `xml:"zone"`
+ Metazone []*struct {
+ Common
+ Long []*struct {
+ Common
+ Generic []*Common `xml:"generic"`
+ Standard []*Common `xml:"standard"`
+ Daylight []*Common `xml:"daylight"`
+ } `xml:"long"`
+ Short []*struct {
+ Common
+ Generic []*Common `xml:"generic"`
+ Standard []*Common `xml:"standard"`
+ Daylight []*Common `xml:"daylight"`
+ } `xml:"short"`
+ CommonlyUsed []*struct {
+ Common
+ Used string `xml:"used,attr"`
+ } `xml:"commonlyUsed"`
+ } `xml:"metazone"`
+}
+
+// LocaleDisplayNames specifies localized display names for scripts, languages,
+// countries, currencies, and variants.
+type LocaleDisplayNames struct {
+ Common
+ LocaleDisplayPattern *struct {
+ Common
+ LocalePattern []*Common `xml:"localePattern"`
+ LocaleSeparator []*Common `xml:"localeSeparator"`
+ LocaleKeyTypePattern []*Common `xml:"localeKeyTypePattern"`
+ } `xml:"localeDisplayPattern"`
+ Languages *struct {
+ Common
+ Language []*Common `xml:"language"`
+ } `xml:"languages"`
+ Scripts *struct {
+ Common
+ Script []*Common `xml:"script"`
+ } `xml:"scripts"`
+ Territories *struct {
+ Common
+ Territory []*Common `xml:"territory"`
+ } `xml:"territories"`
+ Subdivisions *struct {
+ Common
+ Subdivision []*Common `xml:"subdivision"`
+ } `xml:"subdivisions"`
+ Variants *struct {
+ Common
+ Variant []*Common `xml:"variant"`
+ } `xml:"variants"`
+ Keys *struct {
+ Common
+ Key []*Common `xml:"key"`
+ } `xml:"keys"`
+ Types *struct {
+ Common
+ Type []*struct {
+ Common
+ Key string `xml:"key,attr"`
+ } `xml:"type"`
+ } `xml:"types"`
+ TransformNames *struct {
+ Common
+ TransformName []*Common `xml:"transformName"`
+ } `xml:"transformNames"`
+ MeasurementSystemNames *struct {
+ Common
+ MeasurementSystemName []*Common `xml:"measurementSystemName"`
+ } `xml:"measurementSystemNames"`
+ CodePatterns *struct {
+ Common
+ CodePattern []*Common `xml:"codePattern"`
+ } `xml:"codePatterns"`
+}
+
+// Numbers supplies information for formatting and parsing numbers and currencies.
+type Numbers struct {
+ Common
+ DefaultNumberingSystem []*Common `xml:"defaultNumberingSystem"`
+ OtherNumberingSystems []*struct {
+ Common
+ Native []*Common `xml:"native"`
+ Traditional []*Common `xml:"traditional"`
+ Finance []*Common `xml:"finance"`
+ } `xml:"otherNumberingSystems"`
+ MinimumGroupingDigits []*Common `xml:"minimumGroupingDigits"`
+ Symbols []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ Decimal []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"decimal"`
+ Group []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"group"`
+ List []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"list"`
+ PercentSign []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"percentSign"`
+ NativeZeroDigit []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"nativeZeroDigit"`
+ PatternDigit []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"patternDigit"`
+ PlusSign []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"plusSign"`
+ MinusSign []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"minusSign"`
+ Exponential []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"exponential"`
+ SuperscriptingExponent []*Common `xml:"superscriptingExponent"`
+ PerMille []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"perMille"`
+ Infinity []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"infinity"`
+ Nan []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"nan"`
+ CurrencyDecimal []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"currencyDecimal"`
+ CurrencyGroup []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"currencyGroup"`
+ TimeSeparator []*Common `xml:"timeSeparator"`
+ } `xml:"symbols"`
+ DecimalFormats []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ DecimalFormatLength []*struct {
+ Common
+ DecimalFormat []*struct {
+ Common
+ Pattern []*struct {
+ Common
+ Numbers string `xml:"numbers,attr"`
+ Count string `xml:"count,attr"`
+ } `xml:"pattern"`
+ } `xml:"decimalFormat"`
+ } `xml:"decimalFormatLength"`
+ } `xml:"decimalFormats"`
+ ScientificFormats []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ ScientificFormatLength []*struct {
+ Common
+ ScientificFormat []*struct {
+ Common
+ Pattern []*struct {
+ Common
+ Numbers string `xml:"numbers,attr"`
+ Count string `xml:"count,attr"`
+ } `xml:"pattern"`
+ } `xml:"scientificFormat"`
+ } `xml:"scientificFormatLength"`
+ } `xml:"scientificFormats"`
+ PercentFormats []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ PercentFormatLength []*struct {
+ Common
+ PercentFormat []*struct {
+ Common
+ Pattern []*struct {
+ Common
+ Numbers string `xml:"numbers,attr"`
+ Count string `xml:"count,attr"`
+ } `xml:"pattern"`
+ } `xml:"percentFormat"`
+ } `xml:"percentFormatLength"`
+ } `xml:"percentFormats"`
+ CurrencyFormats []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ CurrencySpacing []*struct {
+ Common
+ BeforeCurrency []*struct {
+ Common
+ CurrencyMatch []*Common `xml:"currencyMatch"`
+ SurroundingMatch []*Common `xml:"surroundingMatch"`
+ InsertBetween []*Common `xml:"insertBetween"`
+ } `xml:"beforeCurrency"`
+ AfterCurrency []*struct {
+ Common
+ CurrencyMatch []*Common `xml:"currencyMatch"`
+ SurroundingMatch []*Common `xml:"surroundingMatch"`
+ InsertBetween []*Common `xml:"insertBetween"`
+ } `xml:"afterCurrency"`
+ } `xml:"currencySpacing"`
+ CurrencyFormatLength []*struct {
+ Common
+ CurrencyFormat []*struct {
+ Common
+ Pattern []*struct {
+ Common
+ Numbers string `xml:"numbers,attr"`
+ Count string `xml:"count,attr"`
+ } `xml:"pattern"`
+ } `xml:"currencyFormat"`
+ } `xml:"currencyFormatLength"`
+ UnitPattern []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"unitPattern"`
+ } `xml:"currencyFormats"`
+ Currencies *struct {
+ Common
+ Currency []*struct {
+ Common
+ Pattern []*struct {
+ Common
+ Numbers string `xml:"numbers,attr"`
+ Count string `xml:"count,attr"`
+ } `xml:"pattern"`
+ DisplayName []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"displayName"`
+ Symbol []*Common `xml:"symbol"`
+ Decimal []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"decimal"`
+ Group []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ } `xml:"group"`
+ } `xml:"currency"`
+ } `xml:"currencies"`
+ MiscPatterns []*struct {
+ Common
+ NumberSystem string `xml:"numberSystem,attr"`
+ Pattern []*struct {
+ Common
+ Numbers string `xml:"numbers,attr"`
+ Count string `xml:"count,attr"`
+ } `xml:"pattern"`
+ } `xml:"miscPatterns"`
+ MinimalPairs []*struct {
+ Common
+ PluralMinimalPairs []*struct {
+ Common
+ Count string `xml:"count,attr"`
+ } `xml:"pluralMinimalPairs"`
+ OrdinalMinimalPairs []*struct {
+ Common
+ Ordinal string `xml:"ordinal,attr"`
+ } `xml:"ordinalMinimalPairs"`
+ } `xml:"minimalPairs"`
+}
+
+// Version is the version of CLDR from which the XML definitions are generated.
+const Version = "32"
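Illustrative usage (not part of the patch): a sketch of reading the generated SupplementalData structs above, assuming CLDR.Supplemental returns the decoded supplemental data as in upstream and that the supplemental section was not filtered out when decoding.

package main

import (
	"fmt"

	"golang.org/x/text/unicode/cldr"
)

// listLanguageScripts prints the scripts recorded for each language in
// supplemental languageData.
func listLanguageScripts(c *cldr.CLDR) {
	supp := c.Supplemental()
	if supp == nil || supp.LanguageData == nil {
		return
	}
	for _, lang := range supp.LanguageData.Language {
		// Type is promoted from the embedded Common; Scripts is the
		// space-separated list of script codes from languageData.
		fmt.Println(lang.Type, "->", lang.Scripts)
	}
}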