Involved Source Files: builder.go, clone.go, compare.go, reader.go, replace.go, search.go

Package strings implements simple functions to manipulate UTF-8 encoded strings.
For information about UTF-8 strings in Go, see https://blog.golang.org/strings.
Code Examples
package main
import (
"fmt"
"strings"
)
func main() {
var b strings.Builder
for i := 3; i >= 1; i-- {
fmt.Fprintf(&b, "%d...", i)
}
b.WriteString("ignition")
fmt.Println(b.String())
}
package main
import (
"fmt"
"strings"
"unsafe"
)
func main() {
s := "abc"
clone := strings.Clone(s)
fmt.Println(s == clone)
fmt.Println(unsafe.StringData(s) == unsafe.StringData(clone))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.Compare("a", "b"))
fmt.Println(strings.Compare("a", "a"))
fmt.Println(strings.Compare("b", "a"))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.Contains("seafood", "foo"))
fmt.Println(strings.Contains("seafood", "bar"))
fmt.Println(strings.Contains("seafood", ""))
fmt.Println(strings.Contains("", ""))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.ContainsAny("team", "i"))
fmt.Println(strings.ContainsAny("fail", "ui"))
fmt.Println(strings.ContainsAny("ure", "ui"))
fmt.Println(strings.ContainsAny("failure", "ui"))
fmt.Println(strings.ContainsAny("foo", ""))
fmt.Println(strings.ContainsAny("", ""))
}
package main
import (
"fmt"
"strings"
)
func main() {
// Finds whether a string contains a particular Unicode code point.
// The code point for the lowercase letter "a", for example, is 97.
fmt.Println(strings.ContainsRune("aardvark", 97))
fmt.Println(strings.ContainsRune("timeout", 97))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.Count("cheese", "e"))
fmt.Println(strings.Count("five", "")) // before & after each rune
}
package main
import (
"fmt"
"strings"
)
func main() {
show := func(s, sep string) {
before, after, found := strings.Cut(s, sep)
fmt.Printf("Cut(%q, %q) = %q, %q, %v\n", s, sep, before, after, found)
}
show("Gopher", "Go")
show("Gopher", "ph")
show("Gopher", "er")
show("Gopher", "Badger")
}
package main
import (
"fmt"
"strings"
)
func main() {
show := func(s, sep string) {
after, found := strings.CutPrefix(s, sep)
fmt.Printf("CutPrefix(%q, %q) = %q, %v\n", s, sep, after, found)
}
show("Gopher", "Go")
show("Gopher", "ph")
}
package main
import (
"fmt"
"strings"
)
func main() {
show := func(s, sep string) {
before, found := strings.CutSuffix(s, sep)
fmt.Printf("CutSuffix(%q, %q) = %q, %v\n", s, sep, before, found)
}
show("Gopher", "Go")
show("Gopher", "er")
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.EqualFold("Go", "go"))
fmt.Println(strings.EqualFold("AB", "ab")) // true because comparison uses simple case-folding
fmt.Println(strings.EqualFold("ß", "ss")) // false because comparison does not use full case-folding
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Printf("Fields are: %q", strings.Fields(" foo bar baz "))
}
package main
import (
"fmt"
"strings"
"unicode"
)
func main() {
f := func(c rune) bool {
return !unicode.IsLetter(c) && !unicode.IsNumber(c)
}
fmt.Printf("Fields are: %q", strings.FieldsFunc(" foo1;bar2,baz3...", f))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.HasPrefix("Gopher", "Go"))
fmt.Println(strings.HasPrefix("Gopher", "C"))
fmt.Println(strings.HasPrefix("Gopher", ""))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.HasSuffix("Amigo", "go"))
fmt.Println(strings.HasSuffix("Amigo", "O"))
fmt.Println(strings.HasSuffix("Amigo", "Ami"))
fmt.Println(strings.HasSuffix("Amigo", ""))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.Index("chicken", "ken"))
fmt.Println(strings.Index("chicken", "dmr"))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.IndexAny("chicken", "aeiouy"))
fmt.Println(strings.IndexAny("crwth", "aeiouy"))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.IndexByte("golang", 'g'))
fmt.Println(strings.IndexByte("gophers", 'h'))
fmt.Println(strings.IndexByte("golang", 'x'))
}
package main
import (
"fmt"
"strings"
"unicode"
)
func main() {
f := func(c rune) bool {
return unicode.Is(unicode.Han, c)
}
fmt.Println(strings.IndexFunc("Hello, 世界", f))
fmt.Println(strings.IndexFunc("Hello, world", f))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.IndexRune("chicken", 'k'))
fmt.Println(strings.IndexRune("chicken", 'd'))
}
package main
import (
"fmt"
"strings"
)
func main() {
s := []string{"foo", "bar", "baz"}
fmt.Println(strings.Join(s, ", "))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.Index("go gopher", "go"))
fmt.Println(strings.LastIndex("go gopher", "go"))
fmt.Println(strings.LastIndex("go gopher", "rodent"))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.LastIndexAny("go gopher", "go"))
fmt.Println(strings.LastIndexAny("go gopher", "rodent"))
fmt.Println(strings.LastIndexAny("go gopher", "fail"))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.LastIndexByte("Hello, world", 'l'))
fmt.Println(strings.LastIndexByte("Hello, world", 'o'))
fmt.Println(strings.LastIndexByte("Hello, world", 'x'))
}
package main
import (
"fmt"
"strings"
"unicode"
)
func main() {
fmt.Println(strings.LastIndexFunc("go 123", unicode.IsNumber))
fmt.Println(strings.LastIndexFunc("123 go", unicode.IsNumber))
fmt.Println(strings.LastIndexFunc("go", unicode.IsNumber))
}
package main
import (
"fmt"
"strings"
)
func main() {
rot13 := func(r rune) rune {
switch {
case r >= 'A' && r <= 'Z':
return 'A' + (r-'A'+13)%26
case r >= 'a' && r <= 'z':
return 'a' + (r-'a'+13)%26
}
return r
}
fmt.Println(strings.Map(rot13, "'Twas brillig and the slithy gopher..."))
}
package main
import (
"fmt"
"strings"
)
func main() {
r := strings.NewReplacer("<", "&lt;", ">", "&gt;")
fmt.Println(r.Replace("This is <b>HTML</b>!"))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println("ba" + strings.Repeat("na", 2))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.Replace("oink oink oink", "k", "ky", 2))
fmt.Println(strings.Replace("oink oink oink", "oink", "moo", -1))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.ReplaceAll("oink oink oink", "oink", "moo"))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Printf("%q\n", strings.Split("a,b,c", ","))
fmt.Printf("%q\n", strings.Split("a man a plan a canal panama", "a "))
fmt.Printf("%q\n", strings.Split(" xyz ", ""))
fmt.Printf("%q\n", strings.Split("", "Bernardo O'Higgins"))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Printf("%q\n", strings.SplitAfter("a,b,c", ","))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Printf("%q\n", strings.SplitAfterN("a,b,c", ",", 2))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Printf("%q\n", strings.SplitN("a,b,c", ",", 2))
z := strings.SplitN("a,b,c", ",", 0)
fmt.Printf("%q (nil = %v)\n", z, z == nil)
}
package main
import (
"fmt"
"strings"
)
func main() {
// Compare this example to the ToTitle example.
fmt.Println(strings.Title("her royal highness"))
fmt.Println(strings.Title("loud noises"))
fmt.Println(strings.Title("хлеб"))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.ToLower("Gopher"))
}
package main
import (
"fmt"
"strings"
"unicode"
)
func main() {
fmt.Println(strings.ToLowerSpecial(unicode.TurkishCase, "Önnek İş"))
}
package main
import (
"fmt"
"strings"
)
func main() {
// Compare this example to the Title example.
fmt.Println(strings.ToTitle("her royal highness"))
fmt.Println(strings.ToTitle("loud noises"))
fmt.Println(strings.ToTitle("хлеб"))
}
package main
import (
"fmt"
"strings"
"unicode"
)
func main() {
fmt.Println(strings.ToTitleSpecial(unicode.TurkishCase, "dünyanın ilk borsa yapısı Aizonai kabul edilir"))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.ToUpper("Gopher"))
}
package main
import (
"fmt"
"strings"
"unicode"
)
func main() {
fmt.Println(strings.ToUpperSpecial(unicode.TurkishCase, "örnek iş"))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Printf("%s\n", strings.ToValidUTF8("abc", "\uFFFD"))
fmt.Printf("%s\n", strings.ToValidUTF8("a\xffb\xC0\xAFc\xff", ""))
fmt.Printf("%s\n", strings.ToValidUTF8("\xed\xa0\x80", "abc"))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Print(strings.Trim("¡¡¡Hello, Gophers!!!", "!¡"))
}
package main
import (
"fmt"
"strings"
"unicode"
)
func main() {
fmt.Print(strings.TrimFunc("¡¡¡Hello, Gophers!!!", func(r rune) bool {
return !unicode.IsLetter(r) && !unicode.IsNumber(r)
}))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Print(strings.TrimLeft("¡¡¡Hello, Gophers!!!", "!¡"))
}
package main
import (
"fmt"
"strings"
"unicode"
)
func main() {
fmt.Print(strings.TrimLeftFunc("¡¡¡Hello, Gophers!!!", func(r rune) bool {
return !unicode.IsLetter(r) && !unicode.IsNumber(r)
}))
}
package main
import (
"fmt"
"strings"
)
func main() {
var s = "¡¡¡Hello, Gophers!!!"
s = strings.TrimPrefix(s, "¡¡¡Hello, ")
s = strings.TrimPrefix(s, "¡¡¡Howdy, ")
fmt.Print(s)
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Print(strings.TrimRight("¡¡¡Hello, Gophers!!!", "!¡"))
}
package main
import (
"fmt"
"strings"
"unicode"
)
func main() {
fmt.Print(strings.TrimRightFunc("¡¡¡Hello, Gophers!!!", func(r rune) bool {
return !unicode.IsLetter(r) && !unicode.IsNumber(r)
}))
}
package main
import (
"fmt"
"strings"
)
func main() {
fmt.Println(strings.TrimSpace(" \t\n Hello, Gophers \n\t\r\n"))
}
package main
import (
"fmt"
"strings"
)
func main() {
var s = "¡¡¡Hello, Gophers!!!"
s = strings.TrimSuffix(s, ", Gophers!!!")
s = strings.TrimSuffix(s, ", Marmots!!!")
fmt.Print(s)
}
Package-Level Type Names (total 13, in which 3 are exported)
A Builder is used to efficiently build a string using Write methods.
It minimizes memory copying. The zero value is ready to use.
Do not copy a non-zero Builder.

addr *Builder // of receiver, to detect copies by value
buf  []byte

Cap returns the capacity of the builder's underlying byte slice. It is the
total space allocated for the string being built and includes any bytes
already written.
Grow grows b's capacity, if necessary, to guarantee space for
another n bytes. After Grow(n), at least n bytes can be written to b
without another allocation. If n is negative, Grow panics.
Len returns the number of accumulated bytes; b.Len() == len(b.String()).
Reset resets the Builder to be empty.
String returns the accumulated string.
Write appends the contents of p to b's buffer.
Write always returns len(p), nil.
WriteByte appends the byte c to b's buffer.
The returned error is always nil.
WriteRune appends the UTF-8 encoding of Unicode code point r to b's buffer.
It returns the length of r and a nil error.
WriteString appends the contents of s to b's buffer.
It returns the length of s and a nil error.
(*Builder) copyCheck()
grow copies the buffer to a new, larger buffer so that there are at least n
bytes of capacity beyond len(b.buf).
*Builder : fmt.Stringer
*Builder : internal/bisect.Writer
*Builder : io.ByteWriter
*Builder : io.StringWriter
*Builder : io.Writer
*Builder : context.stringer
*Builder : crypto/tls.transcriptHash
*Builder : net/http.http2stringWriter
*Builder : runtime.stringer
func github.com/gotd/td/tdp.format(b *Builder, prefix string, opt tdp.options, obj tdp.Object)
func github.com/gotd/td/tdp.formatValue(b *Builder, prefix, fieldName string, opt tdp.options, v reflect.Value)
func mime.closeWord(buf *Builder)
func mime.writeQString(buf *Builder, s string)
func mime.(*WordDecoder).convert(buf *Builder, charset string, content []byte) error
func mime.WordEncoder.bEncode(buf *Builder, charset, s string)
func mime.WordEncoder.openWord(buf *Builder, charset string)
func mime.WordEncoder.qEncode(buf *Builder, charset, s string)
func mime.WordEncoder.splitWord(buf *Builder, charset string)
func regexp/syntax.bw(b *Builder, args ...string)
func regexp/syntax.dumpInst(b *Builder, i *syntax.Inst)
func regexp/syntax.dumpProg(b *Builder, p *syntax.Prog)
func regexp/syntax.escape(b *Builder, r rune, force bool)
func regexp/syntax.writeRegexp(b *Builder, re *syntax.Regexp)
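The Cap, Grow, and Len methods documented above work together when preallocating; the following short example is added here as an illustrative sketch (it is not one of the package's own examples) and uses only the standard Builder API.
package main
import (
"fmt"
"strings"
)
func main() {
var b strings.Builder
// Grow preallocates, so the writes below should not reallocate.
b.Grow(32)
fmt.Println(b.Cap() >= 32) // true: at least 32 bytes are available
for i := 0; i < 3; i++ {
b.WriteString("go")
}
fmt.Println(b.Len(), b.String()) // 6 gogogo
}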
A Reader implements the io.Reader, io.ReaderAt, io.ByteReader, io.ByteScanner,
io.RuneReader, io.RuneScanner, io.Seeker, and io.WriterTo interfaces by reading
from a string.
The zero value for Reader operates like a Reader of an empty string.

s        string
i        int64 // current reading index
prevRune int   // index of previous rune; or < 0

Len returns the number of bytes of the unread portion of the string.
Read implements the io.Reader interface.
ReadAt implements the io.ReaderAt interface.
ReadByte implements the io.ByteReader interface.
ReadRune implements the io.RuneReader interface.
Reset resets the Reader to be reading from s.
Seek implements the io.Seeker interface.
Size returns the original length of the underlying string.
Size is the number of bytes available for reading via ReadAt.
The returned value is always the same and is not affected by calls
to any other method.
UnreadByte implements the io.ByteScanner interface.
UnreadRune implements the io.RuneScanner interface.
WriteTo implements the io.WriterTo interface.
*Reader : compress/flate.Reader
*Reader : github.com/klauspost/compress/flate.Reader
*Reader : io.ByteReader
*Reader : io.ByteScanner
*Reader : io.Reader
*Reader : io.ReaderAt
*Reader : io.ReadSeeker
*Reader : io.RuneReader
*Reader : io.RuneScanner
*Reader : io.Seeker
*Reader : io.WriterTo
func NewReader(s string) *Reader
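Reader's Len reports the unread portion while Size stays fixed; this added sketch (not part of the original example set) shows that, plus WriteTo via io.Copy.
package main
import (
"fmt"
"io"
"os"
"strings"
)
func main() {
r := strings.NewReader("Hello, Reader!")
fmt.Println(r.Size(), r.Len()) // 14 14
b, _ := r.ReadByte()
fmt.Printf("%c %d %d\n", b, r.Size(), r.Len()) // H 14 13
// WriteTo drains the unread remainder into any io.Writer.
io.Copy(os.Stdout, r) // prints: ello, Reader!
}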
Replacer replaces a list of strings with replacements.
It is safe for concurrent use by multiple goroutines.

oldnew []string
once   sync.Once // guards buildOnce method
r      replacer

Replace returns a copy of s with all replacements performed.
WriteString writes s to w with all replacements performed.
(*Replacer) build() replacer
(*Replacer) buildOnce()
*Replacer : replacer
func NewReplacer(oldnew ...string) *Replacer
var mime/multipart.quoteEscaper *Replacer
var net/http.cookieNameSanitizer *Replacer
var net/http.headerNewlineToSpace *Replacer
var net/http.htmlReplacer *Replacer
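Besides Replace, a Replacer can stream its output with WriteString; this brief sketch is added for illustration and is not taken from the package's examples.
package main
import (
"os"
"strings"
)
func main() {
r := strings.NewReplacer(",", " |", ".", " .")
// WriteString applies the same replacements as Replace but writes the
// result to an io.Writer instead of building a new string.
r.WriteString(os.Stdout, "a,b,c.") // prints: a |b |c .
}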
appendSliceWriter
Write writes to the buffer to satisfy io.Writer.
WriteString writes to the buffer without string->[]byte->string allocations.
*appendSliceWriter : internal/bisect.Writer
*appendSliceWriter : io.StringWriter
*appendSliceWriter : io.Writer
*appendSliceWriter : crypto/tls.transcriptHash
*appendSliceWriter : net/http.http2stringWriter
asciiSet is a 32-byte value, where each bit represents the presence of a
given ASCII character in the set. The 128-bits of the lower 16 bytes,
starting with the least-significant bit of the lowest word to the
most-significant bit of the highest word, map to the full range of all
128 ASCII characters. The 128-bits of the upper 16 bytes will be zeroed,
ensuring that any non-ASCII character will be reported as not in the set.
This allocates a total of 32 bytes even though the upper half
is unused to avoid bounds checks in asciiSet.contains.
contains reports whether c is inside the set.
func makeASCIISet(chars string) (as asciiSet, ok bool)
func trimLeftASCII(s string, as *asciiSet) string
func trimRightASCII(s string, as *asciiSet) string
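To make the bit-set layout described above concrete, here is a small illustrative sketch of the same idea; the type and helpers below are invented for this page and are not the package's unexported asciiSet.
package main
import "fmt"
// asciiBitSet mimics the idea of asciiSet: one bit per ASCII character.
type asciiBitSet [8]uint32
// add records an ASCII character; non-ASCII bytes are rejected.
func (as *asciiBitSet) add(c byte) bool {
if c >= 128 {
return false
}
as[c/32] |= 1 << (c % 32)
return true
}
// contains reports whether c is inside the set.
func (as *asciiBitSet) contains(c byte) bool {
return as[c/32]&(1<<(c%32)) != 0
}
func main() {
var set asciiBitSet
set.add('!')
set.add('?')
fmt.Println(set.contains('!'), set.contains('a')) // true false
}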
byteReplacer is the implementation that's used when all the "old"
and "new" values are single ASCII bytes.
The array contains replacement bytes indexed by old byte.
(*byteReplacer) Replace(s string) string
(*byteReplacer) WriteString(w io.Writer, s string) (n int, err error)
*byteReplacer : replacer
byteStringReplacer is the implementation that's used when all the
"old" values are single ASCII bytes but the "new" values vary in size. replacements contains replacement byte slices indexed by old byte.
A nil []byte means that the old byte should not be replaced. toReplace keeps a list of bytes to replace. Depending on length of toReplace
and length of target string it may be faster to use Count, or a plain loop.
We store single byte as a string, because Count takes a string.(*byteStringReplacer) Replace(s string) string(*byteStringReplacer) WriteString(w io.Writer, s string) (n int, err error)
*byteStringReplacer : replacer
genericReplacer is the fully generic algorithm.
It's used as a fallback when nothing faster can be used.
mapping maps from key bytes to a dense index for trieNode.table.
root trieNode
tableSize is the size of a trie node's lookup table. It is the number
of unique key bytes.
(*genericReplacer) Replace(s string) string
(*genericReplacer) WriteString(w io.Writer, s string) (n int, err error)
(*genericReplacer) lookup(s string, ignoreRoot bool) (val string, keylen int, found bool)
*genericReplacer : replacer
func makeGenericReplacer(oldnew []string) *genericReplacer
singleStringReplacer is the implementation that's used when there is only
one string to replace (and that string has more than one byte).
finder *stringFinder
value is the new string that replaces that pattern when it's found.
(*singleStringReplacer) Replace(s string) string
(*singleStringReplacer) WriteString(w io.Writer, s string) (n int, err error)
*singleStringReplacer : replacer
func makeSingleStringReplacer(pattern string, value string) *singleStringReplacer
stringFinder efficiently finds strings in a source text. It's implemented
using the Boyer-Moore string search algorithm:
https://en.wikipedia.org/wiki/Boyer-Moore_string_search_algorithm
https://www.cs.utexas.edu/~moore/publications/fstrpos.pdf (note: this aged
document uses 1-based indexing)
badCharSkip[b] contains the distance between the last byte of pattern
and the rightmost occurrence of b in pattern. If b is not in pattern,
badCharSkip[b] is len(pattern).
Whenever a mismatch is found with byte b in the text, we can safely
shift the matching frame at least badCharSkip[b] until the next time
the matching char could be in alignment.
goodSuffixSkip[i] defines how far we can shift the matching frame given
that the suffix pattern[i+1:] matches, but the byte pattern[i] does
not. There are two cases to consider:
1. The matched suffix occurs elsewhere in pattern (with a different
byte preceding it that we might possibly match). In this case, we can
shift the matching frame to align with the next suffix chunk. For
example, the pattern "mississi" has the suffix "issi" next occurring
(in right-to-left order) at index 1, so goodSuffixSkip[3] ==
shift+len(suffix) == 3+4 == 7.
2. If the matched suffix does not occur elsewhere in pattern, then the
matching frame may share part of its prefix with the end of the
matching suffix. In this case, goodSuffixSkip[i] will contain how far
to shift the frame to align this portion of the prefix to the
suffix. For example, in the pattern "abcxxxabc", when the first
mismatch from the back is found to be in position 3, the matching
suffix "xxabc" is not found elsewhere in the pattern. However, its
rightmost "abc" (at position 6) is a prefix of the whole pattern, so
goodSuffixSkip[3] == shift+len(suffix) == 6+5 == 11.
pattern is the string that we are searching for in the text.
next returns the index in text of the first occurrence of the pattern. If
the pattern is not found, it returns -1.
func makeStringFinder(pattern string) *stringFinder
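The badCharSkip table described above can be made concrete with a short sketch; buildBadCharSkip below is written for this page and is not the package's unexported implementation.
package main
import "fmt"
// buildBadCharSkip computes, for every byte b, the distance between the last
// byte of pattern and the rightmost occurrence of b in pattern (excluding the
// last position), or len(pattern) if b does not occur.
func buildBadCharSkip(pattern string) [256]int {
var skip [256]int
last := len(pattern) - 1
for i := range skip {
skip[i] = len(pattern)
}
for i := 0; i < last; i++ {
skip[pattern[i]] = last - i
}
return skip
}
func main() {
skip := buildBadCharSkip("mississi")
fmt.Println(skip['s'], skip['m'], skip['x']) // 1 7 8
}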
trieNode is a node in a lookup trie for prioritized key/value pairs. Keys
and values may be empty. For example, the trie containing keys "ax", "ay",
"bcbc", "x" and "xy" could have eight nodes:
n0 -
n1 a-
n2 .x+
n3 .y+
n4 b-
n5 .cbc+
n6 x+
n7 .y+
n0 is the root node, and its children are n1, n4 and n6; n1's children are
n2 and n3; n4's child is n5; n6's child is n7. Nodes n0, n1 and n4 (marked
with a trailing "-") are partial keys, and nodes n2, n3, n5, n6 and n7
(marked with a trailing "+") are complete keys.next*trieNode prefix is the difference in keys between this trie node and the next.
In the example above, node n4 has prefix "cbc" and n4's next node is n5.
Node n5 has no children and so has zero prefix, next and table fields.
priority is the priority (higher is more important) of the trie node's
key/value pair; keys are not necessarily matched shortest- or longest-
first. Priority is positive if this node is a complete key, and zero
otherwise. In the example above, positive/zero priorities are marked
with a trailing "+" or "-". table is a lookup table indexed by the next byte in the key, after
remapping that byte through genericReplacer.mapping to create a dense
index. In the example above, the keys only use 'a', 'b', 'c', 'x' and
'y', which remap to 0, 1, 2, 3 and 4. All other bytes remap to 5, and
genericReplacer.tableSize will be 5. Node n0's table will be
[]*trieNode{ 0:n1, 1:n4, 3:n6 }, where the 0, 1 and 3 are the remapped
'a', 'b' and 'x'.
value is the value of the trie node's key/value pair. It is empty if
this node is not a complete key.
(*trieNode) add(key, val string, priority int, r *genericReplacer)
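The priority recorded on each trieNode is what makes argument order matter in NewReplacer; the short example below is added here to make that behaviour visible and uses only the exported API.
package main
import (
"fmt"
"strings"
)
func main() {
// When several keys could match at the same position, the key listed
// earlier in the NewReplacer arguments wins.
first := strings.NewReplacer("a", "1", "ab", "2")
second := strings.NewReplacer("ab", "2", "a", "1")
fmt.Println(first.Replace("abab"))  // 1b1b
fmt.Println(second.Replace("abab")) // 22
}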
Package-Level Functions (total 71, in which 52 are exported)
Clone returns a fresh copy of s.
It guarantees to make a copy of s into a new allocation,
which can be important when retaining only a small substring
of a much larger string. Using Clone can help such programs
use less memory. Of course, since using Clone makes a copy,
overuse of Clone can make programs use more memory.
Clone should typically be used only rarely, and only when
profiling indicates that it is needed.
For strings of length zero the string "" will be returned
and no allocation is made.
Compare returns an integer comparing two strings lexicographically.
The result will be 0 if a == b, -1 if a < b, and +1 if a > b.
Compare is included only for symmetry with package bytes.
It is usually clearer and always faster to use the built-in
string comparison operators ==, <, >, and so on.
Contains reports whether substr is within s.
ContainsAny reports whether any Unicode code points in chars are within s.
ContainsFunc reports whether any Unicode code points r within s satisfy f(r).
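ContainsFunc has no entry in the Code Examples section above; a brief added sketch:
package main
import (
"fmt"
"strings"
)
func main() {
isVowel := func(r rune) bool {
return strings.ContainsRune("aeiouy", r)
}
fmt.Println(strings.ContainsFunc("hello", isVowel)) // true
fmt.Println(strings.ContainsFunc("crwth", isVowel)) // false
}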
ContainsRune reports whether the Unicode code point r is within s.
Count counts the number of non-overlapping instances of substr in s.
If substr is an empty string, Count returns 1 + the number of Unicode code points in s.
Cut slices s around the first instance of sep,
returning the text before and after sep.
The found result reports whether sep appears in s.
If sep does not appear in s, cut returns s, "", false.
CutPrefix returns s without the provided leading prefix string
and reports whether it found the prefix.
If s doesn't start with prefix, CutPrefix returns s, false.
If prefix is the empty string, CutPrefix returns s, true.
CutSuffix returns s without the provided ending suffix string
and reports whether it found the suffix.
If s doesn't end with suffix, CutSuffix returns s, false.
If suffix is the empty string, CutSuffix returns s, true.
EqualFold reports whether s and t, interpreted as UTF-8 strings,
are equal under simple Unicode case-folding, which is a more general
form of case-insensitivity.
Fields splits the string s around each instance of one or more consecutive white space
characters, as defined by unicode.IsSpace, returning a slice of substrings of s or an
empty slice if s contains only white space.
FieldsFunc splits the string s at each run of Unicode code points c satisfying f(c)
and returns an array of slices of s. If all code points in s satisfy f(c) or the
string is empty, an empty slice is returned.
FieldsFunc makes no guarantees about the order in which it calls f(c)
and assumes that f always returns the same value for a given c.
HasPrefix tests whether the string s begins with prefix.
HasSuffix tests whether the string s ends with suffix.
Index returns the index of the first instance of substr in s, or -1 if substr is not present in s.
IndexAny returns the index of the first instance of any Unicode code point
from chars in s, or -1 if no Unicode code point from chars is present in s.
IndexByte returns the index of the first instance of c in s, or -1 if c is not present in s.
IndexFunc returns the index into s of the first Unicode
code point satisfying f(c), or -1 if none do.
IndexRune returns the index of the first instance of the Unicode code point
r, or -1 if rune is not present in s.
If r is utf8.RuneError, it returns the first instance of any
invalid UTF-8 byte sequence.
Join concatenates the elements of its first argument to create a single string. The separator
string sep is placed between elements in the resulting string.
LastIndex returns the index of the last instance of substr in s, or -1 if substr is not present in s.
LastIndexAny returns the index of the last instance of any Unicode code
point from chars in s, or -1 if no Unicode code point from chars is
present in s.
LastIndexByte returns the index of the last instance of c in s, or -1 if c is not present in s.
LastIndexFunc returns the index into s of the last
Unicode code point satisfying f(c), or -1 if none do.
Map returns a copy of the string s with all its characters modified
according to the mapping function. If mapping returns a negative value, the character is
dropped from the string with no replacement.
NewReader returns a new Reader reading from s.
It is similar to bytes.NewBufferString but more efficient and non-writable.
NewReplacer returns a new Replacer from a list of old, new string
pairs. Replacements are performed in the order they appear in the
target string, without overlapping matches. The old string
comparisons are done in argument order.
NewReplacer panics if given an odd number of arguments.
Repeat returns a new string consisting of count copies of the string s.
It panics if count is negative or if the result of (len(s) * count)
overflows.
Replace returns a copy of the string s with the first n
non-overlapping instances of old replaced by new.
If old is empty, it matches at the beginning of the string
and after each UTF-8 sequence, yielding up to k+1 replacements
for a k-rune string.
If n < 0, there is no limit on the number of replacements.
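The k+1 behaviour for an empty old string can be seen in this short added sketch:
package main
import (
"fmt"
"strings"
)
func main() {
// An empty old string matches before each rune and at the end,
// so a 3-rune string yields up to 4 replacements.
fmt.Println(strings.Replace("abc", "", "-", -1)) // -a-b-c-
fmt.Println(strings.Replace("abc", "", "-", 2))  // -a-bc
}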
ReplaceAll returns a copy of the string s with all
non-overlapping instances of old replaced by new.
If old is empty, it matches at the beginning of the string
and after each UTF-8 sequence, yielding up to k+1 replacements
for a k-rune string.
Split slices s into all substrings separated by sep and returns a slice of
the substrings between those separators.
If s does not contain sep and sep is not empty, Split returns a
slice of length 1 whose only element is s.
If sep is empty, Split splits after each UTF-8 sequence. If both s
and sep are empty, Split returns an empty slice.
It is equivalent to SplitN with a count of -1.
To split around the first instance of a separator, see Cut.
SplitAfter slices s into all substrings after each instance of sep and
returns a slice of those substrings.
If s does not contain sep and sep is not empty, SplitAfter returns
a slice of length 1 whose only element is s.
If sep is empty, SplitAfter splits after each UTF-8 sequence. If
both s and sep are empty, SplitAfter returns an empty slice.
It is equivalent to SplitAfterN with a count of -1.
SplitAfterN slices s into substrings after each instance of sep and
returns a slice of those substrings.
The count determines the number of substrings to return:
n > 0: at most n substrings; the last substring will be the unsplit remainder.
n == 0: the result is nil (zero substrings)
n < 0: all substrings
Edge cases for s and sep (for example, empty strings) are handled
as described in the documentation for SplitAfter.
SplitN slices s into substrings separated by sep and returns a slice of
the substrings between those separators.
The count determines the number of substrings to return:
n > 0: at most n substrings; the last substring will be the unsplit remainder.
n == 0: the result is nil (zero substrings)
n < 0: all substrings
Edge cases for s and sep (for example, empty strings) are handled
as described in the documentation for Split.
To split around the first instance of a separator, see Cut.
Title returns a copy of the string s with all Unicode letters that begin words
mapped to their Unicode title case.
Deprecated: The rule Title uses for word boundaries does not handle Unicode
punctuation properly. Use golang.org/x/text/cases instead.
ToLower returns s with all Unicode letters mapped to their lower case.
ToLowerSpecial returns a copy of the string s with all Unicode letters mapped to their
lower case using the case mapping specified by c.
ToTitle returns a copy of the string s with all Unicode letters mapped to
their Unicode title case.
ToTitleSpecial returns a copy of the string s with all Unicode letters mapped to their
Unicode title case, giving priority to the special casing rules.
ToUpper returns s with all Unicode letters mapped to their upper case.
ToUpperSpecial returns a copy of the string s with all Unicode letters mapped to their
upper case using the case mapping specified by c.
ToValidUTF8 returns a copy of the string s with each run of invalid UTF-8 byte sequences
replaced by the replacement string, which may be empty.
Trim returns a slice of the string s with all leading and
trailing Unicode code points contained in cutset removed.
TrimFunc returns a slice of the string s with all leading
and trailing Unicode code points c satisfying f(c) removed.
TrimLeft returns a slice of the string s with all leading
Unicode code points contained in cutset removed.
To remove a prefix, use TrimPrefix instead.
TrimLeftFunc returns a slice of the string s with all leading
Unicode code points c satisfying f(c) removed.
TrimPrefix returns s without the provided leading prefix string.
If s doesn't start with prefix, s is returned unchanged.
TrimRight returns a slice of the string s, with all trailing
Unicode code points contained in cutset removed.
To remove a suffix, use TrimSuffix instead.
TrimRightFunc returns a slice of the string s with all trailing
Unicode code points c satisfying f(c) removed.
TrimSpace returns a slice of the string s, with all leading
and trailing white space removed, as defined by Unicode.
TrimSuffix returns s without the provided trailing suffix string.
If s doesn't end with suffix, s is returned unchanged.
explode splits s into a slice of UTF-8 strings,
one string per Unicode character up to a maximum of n (n < 0 means no limit).
Invalid UTF-8 bytes are sliced individually.
Generic split: splits after each instance of sep,
including sepSave bytes of sep in the subarrays.
noescape hides a pointer from escape analysis. It is the identity function
but escape analysis doesn't think the output depends on the input.
noescape is inlined and currently compiles down to zero instructions.
USE CAREFULLY!
This was copied from the runtime; see issues 23382 and 7921.
Package-Level Constants (total 2, neither is exported)
countCutOff controls the ratio of a string length to a number of replacements
at which (*byteStringReplacer).Replace switches algorithms.
For strings with a higher ratio of length to replacements than that value,
we call Count for each replacement from toReplace.
For strings with a lower ratio we use a simple loop, because of Count's overhead.
countCutOff is an empirically determined overhead multiplier.
TODO(tocarip) revisit once we have register-based abi/mid-stack inlining.