From 56d5d0a900197fb2de46120a0eda649f2c17448f Mon Sep 17 00:00:00 2001 From: Tyler Bui-Palsulich <26876514+tbpg@users.noreply.github.com> Date: Mon, 15 Mar 2021 12:24:03 -0400 Subject: [PATCH] feat(internal/godocfx): handle Markdown content (#3816) The first commit copied the go/doc package into `third_party`. Then, I added ToMarkdown to the doc package. I got the idea from @dmitshur, who pointed me to http://golang.org/issues/34875. Once the comment is converted to Markdown, it gets converted to HTML by `goldmark`. Future changes will: * Add syntax highlighting for code blocks. * Maybe add \` tags around code elements in the comment. * Maybe support Markdown in other doc comments. --- header_test.go | 15 +- internal/godocfx/go.mod | 1 + internal/godocfx/go.sum | 2 + internal/godocfx/parse.go | 17 +- internal/godocfx/testdata/golden/index.yml | 152 ++-- internal/kokoro/vet.sh | 6 +- third_party/go/doc/Makefile | 7 + third_party/go/doc/comment.go | 571 +++++++++++++ third_party/go/doc/comment_test.go | 249 ++++++ third_party/go/doc/doc.go | 224 +++++ third_party/go/doc/example.go | 549 ++++++++++++ third_party/go/doc/example_test.go | 720 ++++++++++++++++ third_party/go/doc/exports.go | 311 +++++++ third_party/go/doc/filter.go | 107 +++ third_party/go/doc/headscan.go | 115 +++ third_party/go/doc/reader.go | 919 +++++++++++++++++++++ third_party/go/doc/synopsis.go | 85 ++ third_party/go/doc/synopsis_test.go | 54 ++ third_party/pkgsite/print_type.go | 3 +- 19 files changed, 4021 insertions(+), 86 deletions(-) create mode 100644 third_party/go/doc/Makefile create mode 100644 third_party/go/doc/comment.go create mode 100644 third_party/go/doc/comment_test.go create mode 100644 third_party/go/doc/doc.go create mode 100644 third_party/go/doc/example.go create mode 100644 third_party/go/doc/example_test.go create mode 100644 third_party/go/doc/exports.go create mode 100644 third_party/go/doc/filter.go create mode 100644 third_party/go/doc/headscan.go create mode 100644 third_party/go/doc/reader.go create mode 100644 third_party/go/doc/synopsis.go create mode 100644 third_party/go/doc/synopsis_test.go diff --git a/header_test.go b/header_test.go index 359242e1d2f..96b28544b84 100644 --- a/header_test.go +++ b/header_test.go @@ -39,8 +39,19 @@ func TestLicense(t *testing.T) { "cmd/go-cloud-debug-agent/internal/debug/elf/elf.go": true, // From https://github.com/golang/pkgsite. 
- "third_party/pkgsite/print_type.go": true, - "third_party/pkgsite/synopsis.go": true, + "third_party/pkgsite/print_type.go": true, + "third_party/pkgsite/synopsis.go": true, + "third_party/go/doc/comment.go": true, + "third_party/go/doc/comment_test.go": true, + "third_party/go/doc/doc.go": true, + "third_party/go/doc/example.go": true, + "third_party/go/doc/example_test.go": true, + "third_party/go/doc/exports.go": true, + "third_party/go/doc/filter.go": true, + "third_party/go/doc/headscan.go": true, + "third_party/go/doc/reader.go": true, + "third_party/go/doc/synopsis.go": true, + "third_party/go/doc/synopsis_test.go": true, } err := filepath.Walk(".", func(path string, fi os.FileInfo, err error) error { if err != nil { diff --git a/internal/godocfx/go.mod b/internal/godocfx/go.mod index 32976d3d48f..ed8a1e4d1d6 100644 --- a/internal/godocfx/go.mod +++ b/internal/godocfx/go.mod @@ -8,6 +8,7 @@ require ( cloud.google.com/go/datastore v1.1.0 cloud.google.com/go/storage v1.11.0 github.com/kr/pretty v0.2.1 // indirect + github.com/yuin/goldmark v1.3.2 golang.org/x/mod v0.4.1 // indirect golang.org/x/tools v0.1.0 gopkg.in/yaml.v2 v2.4.0 diff --git a/internal/godocfx/go.sum b/internal/godocfx/go.sum index 0e5b26dcc98..d4ff0462945 100644 --- a/internal/godocfx/go.sum +++ b/internal/godocfx/go.sum @@ -96,6 +96,8 @@ github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.2 h1:YjHC5TgyMmHpicTgEqDN0Q96Xo8K6tLXPnmNOHXCgs0= +github.com/yuin/goldmark v1.3.2/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= diff --git a/internal/godocfx/parse.go b/internal/godocfx/parse.go index cabe8ceb834..c5f4d4d4574 100644 --- a/internal/godocfx/parse.go +++ b/internal/godocfx/parse.go @@ -26,7 +26,6 @@ import ( "bytes" "fmt" "go/ast" - "go/doc" "go/format" "go/parser" "go/printer" @@ -39,7 +38,10 @@ import ( "strconv" "strings" + "cloud.google.com/go/third_party/go/doc" "cloud.google.com/go/third_party/pkgsite" + "github.com/yuin/goldmark" + "github.com/yuin/goldmark/renderer/html" "golang.org/x/tools/go/packages" ) @@ -575,8 +577,17 @@ func buildTOC(mod string, pis []pkgInfo, extraFiles []extraFile) tableOfContents func toHTML(s string) string { buf := &bytes.Buffer{} - doc.ToHTML(buf, s, nil) - return buf.String() + // First, convert to Markdown. + doc.ToMarkdown(buf, s, nil) + + // Then, handle Markdown stuff, like lists and links. + md := goldmark.New(goldmark.WithRendererOptions(html.WithUnsafe())) + mdBuf := &bytes.Buffer{} + if err := md.Convert(buf.Bytes(), mdBuf); err != nil { + panic(err) + } + + return mdBuf.String() } type pkgInfo struct { diff --git a/internal/godocfx/testdata/golden/index.yml b/internal/godocfx/testdata/golden/index.yml index d27f1b544d0..6dc8c6cb3d6 100644 --- a/internal/godocfx/testdata/golden/index.yml +++ b/internal/godocfx/testdata/golden/index.yml @@ -3,95 +3,91 @@ items: - uid: cloud.google.com/go/storage name: cloud.google.com/go/storage id: storage - summary: "
<p>\nPackage storage provides an easy way to work with Google Cloud Storage.\nGoogle
-    Cloud Storage stores data in named objects, which are grouped into buckets.\n</p>\n<p>\nMore
-    information about Google Cloud Storage is available at\n<a href=\"https://cloud.google.com/storage/docs\">https://cloud.google.com/storage/docs</a>.\n</p>\n<p>\nSee
-    <a href=\"https://godoc.org/cloud.google.com/go\">https://godoc.org/cloud.google.com/go</a>
-    for authentication, timeouts,\nconnection pooling and similar aspects of this
-    package.\n</p>\n<p>\nAll of the methods of this package use exponential backoff
-    to retry calls that fail\nwith certain errors, as described in\n<a href=\"https://cloud.google.com/storage/docs/exponential-backoff\">https://cloud.google.com/storage/docs/exponential-backoff</a>.
-    Retrying continues\nindefinitely unless the controlling context is canceled or
-    the client is closed. See\ncontext.WithTimeout and context.WithCancel.\n</p>\n<h3
-    id=\"hdr-Creating_a_Client\">Creating a Client</h3>\n<p>\nTo start working with
-    this package, create a client:\n</p>\n<pre>ctx := context.Background()\nclient,
-    err := storage.NewClient(ctx)\nif err != nil {\n    // TODO: Handle error.\n}\n</pre>\n<p>\nThe
-    client will use your default application credentials. Clients should be\nreused
-    instead of created as needed. The methods of Client are safe for\nconcurrent use
-    by multiple goroutines.\n</p>\n<p>\nIf you only wish to access public data, you
-    can create\nan unauthenticated client with\n</p>\n<pre>client, err := storage.NewClient(ctx,
-    option.WithoutAuthentication())\n</pre>\n<h3 id=\"hdr-Buckets\">Buckets</h3>\n<p>\nA
-    Google Cloud Storage bucket is a collection of objects. To work with a\nbucket,
-    make a bucket handle:\n</p>\n<pre>bkt := client.Bucket(bucketName)\n</pre>\n<p>\nA
-    handle is a reference to a bucket. You can have a handle even if the\nbucket doesn&#39;t
-    exist yet. To create a bucket in Google Cloud Storage,\ncall Create on the handle:\n</p>\n<pre>if
-    err := bkt.Create(ctx, projectID, nil); err != nil {\n    // TODO: Handle error.\n}\n</pre>\n<p>\nNote
-    that although buckets are associated with projects, bucket names are\nglobal across
-    all projects.\n</p>\n<p>\nEach bucket has associated metadata, represented in
-    this package by\nBucketAttrs. The third argument to BucketHandle.Create allows
-    you to set\nthe initial BucketAttrs of a bucket. To retrieve a bucket&#39;s attributes,
-    use\nAttrs:\n</p>\n<pre>attrs, err := bkt.Attrs(ctx)\nif err != nil {\n    //
-    TODO: Handle error.\n}\nfmt.Printf(&#34;bucket %s, created at %s, is located in
-    %s with storage class %s\\n&#34;,\n    attrs.Name, attrs.Created, attrs.Location,
-    attrs.StorageClass)\n</pre>\n<h3 id=\"hdr-Objects\">Objects</h3>\n<p>\nAn object
-    holds arbitrary data as a sequence of bytes, like a file. You\nrefer to objects
-    using a handle, just as with buckets, but unlike buckets\nyou don&#39;t explicitly
-    create an object. Instead, the first time you write\nto an object it will be created.
-    You can use the standard Go io.Reader\nand io.Writer interfaces to read and write
-    object data:\n</p>\n<pre>obj := bkt.Object(&#34;data&#34;)\n// Write something
-    to obj.\n// w implements io.Writer.\nw := obj.NewWriter(ctx)\n// Write some text
-    to obj. This will either create the object or overwrite whatever is there already.\nif
-    _, err := fmt.Fprintf(w, &#34;This object contains text.\\n&#34;); err != nil
-    {\n    // TODO: Handle error.\n}\n// Close, just like writing a file.\nif err
-    := w.Close(); err != nil {\n    // TODO: Handle error.\n}\n\n// Read it back.\nr,
-    err := obj.NewReader(ctx)\nif err != nil {\n    // TODO: Handle error.\n}\ndefer
-    r.Close()\nif _, err := io.Copy(os.Stdout, r); err != nil {\n    // TODO: Handle
-    error.\n}\n// Prints &#34;This object contains text.&#34;\n</pre>\n<p>\nObjects
-    also have attributes, which you can fetch with Attrs:\n</p>\n<pre>objAttrs, err
-    := obj.Attrs(ctx)\nif err != nil {\n    // TODO: Handle error.\n}\nfmt.Printf(&#34;object
-    %s has size %d and can be read using %s\\n&#34;,\n    objAttrs.Name, objAttrs.Size,
-    objAttrs.MediaLink)\n</pre>\n<h3 id=\"hdr-Listing_objects\">Listing objects</h3>\n<p>\nListing
-    objects in a bucket is done with the Bucket.Objects method:\n</p>\n<pre>query
-    := &amp;storage.Query{Prefix: &#34;&#34;}\n\nvar names []string\nit := bkt.Objects(ctx,
-    query)\nfor {\n    attrs, err := it.Next()\n    if err == iterator.Done {\n        break\n
-    \   }\n    if err != nil {\n        log.Fatal(err)\n    }\n    names = append(names,
-    attrs.Name)\n}\n</pre>\n<p>\nIf only a subset of object attributes is needed when
-    listing, specifying this\nsubset using Query.SetAttrSelection may speed up the
-    listing process:\n</p>\n<pre>query := &amp;storage.Query{Prefix: &#34;&#34;}\nquery.SetAttrSelection([]string{&#34;Name&#34;})\n\n//
-    ... as before\n</pre>\n<h3 id=\"hdr-ACLs\">ACLs</h3>\n<p>\nBoth objects and buckets
-    have ACLs (Access Control Lists). An ACL is a list of\nACLRules, each of which
-    specifies the role of a user, group or project. ACLs\nare suitable for fine-grained
-    control, but you may prefer using IAM to control\naccess at the project level
-    (see\n<a href=\"https://cloud.google.com/storage/docs/access-control/iam\">https://cloud.google.com/storage/docs/access-control/iam</a>).\n</p>\n<p>\nTo
-    list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:\n</p>\n<pre>acls,
-    err := obj.ACL().List(ctx)\nif err != nil {\n    // TODO: Handle error.\n}\nfor
-    _, rule := range acls {\n    fmt.Printf(&#34;%s has role %s\\n&#34;, rule.Entity,
-    rule.Role)\n}\n</pre>\n<p>\nYou can also set and delete ACLs.\n</p>\n<h3 id=\"hdr-Conditions\">Conditions</h3>\n<p>\nEvery
-    object has a generation and a metageneration. The generation changes\nwhenever
-    the content changes, and the metageneration changes whenever the\nmetadata changes.
-    Conditions let you check these values before an operation;\nthe operation only
-    executes if the conditions match. You can use conditions to\nprevent race conditions
-    in read-modify-write operations.\n</p>\n<p>\nFor example, say you&#39;ve read
-    an object&#39;s metadata into objAttrs. Now\nyou want to write to that object,
-    but only if its contents haven&#39;t changed\nsince you read it. Here is how to
-    express that:\n</p>\n<pre>w = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)\n//
-    Proceed with writing as above.\n</pre>\n<h3 id=\"hdr-Signed_URLs\">Signed URLs</h3>\n<p>\nYou
-    can obtain a URL that lets anyone read or write an object for a limited time.\nYou
-    don&#39;t need to create a client to do this. See the documentation of\nSignedURL
-    for details.\n</p>\n<pre>url, err := storage.SignedURL(bucketName, &#34;shared-object&#34;,
-    opts)\nif err != nil {\n    // TODO: Handle error.\n}\nfmt.Println(url)\n</pre>\n<h3
-    id=\"hdr-Post_Policy_V4_Signed_Request\">Post Policy V4 Signed Request</h3>\n<p>\nA
-    type of signed request that allows uploads through HTML forms directly to Cloud
-    Storage with\ntemporary permission. Conditions can be applied to restrict how
-    the HTML form is used and exercised\nby a user.\n</p>\n<p>\nFor more information,
-    please see <a href=\"https://cloud.google.com/storage/docs/xml-api/post-object\">https://cloud.google.com/storage/docs/xml-api/post-object</a>
-    as well\nas the documentation of GenerateSignedPostPolicyV4.\n</p>\n<pre>pv4,
-    err := storage.GenerateSignedPostPolicyV4(bucketName, objectName, opts)\nif err
-    != nil {\n    // TODO: Handle error.\n}\nfmt.Printf(&#34;URL: %s\\nFields; %v\\n&#34;,
-    pv4.URL, pv4.Fields)\n</pre>\n<h3 id=\"hdr-Errors\">Errors</h3>\n<p>\nErrors returned
-    by this client are often of the type [`googleapi.Error`](<a href=\"https://godoc.org/google.golang.org/api/googleapi#Error\">https://godoc.org/google.golang.org/api/googleapi#Error</a>).\nThese
-    errors can be introspected for more information by type asserting to the richer
-    `googleapi.Error` type. For example:\n</p>\n<pre>if e, ok := err.(*googleapi.Error);
-    ok {\n\t  if e.Code == 409 { ... }\n}\n</pre>\n"
+  summary: "<p>Package storage provides an easy way to work with Google Cloud Storage.\nGoogle
+    Cloud Storage stores data in named objects, which are grouped into buckets.</p>\n<p>More
+    information about Google Cloud Storage is available at\nhttps://cloud.google.com/storage/docs.</p>\n<p>See
+    https://godoc.org/cloud.google.com/go for authentication, timeouts,\nconnection
+    pooling and similar aspects of this package.</p>\n<p>All of the methods of this
+    package use exponential backoff to retry calls that fail\nwith certain errors,
+    as described in\nhttps://cloud.google.com/storage/docs/exponential-backoff. Retrying
+    continues\nindefinitely unless the controlling context is canceled or the client
+    is closed. See\ncontext.WithTimeout and context.WithCancel.</p>\n<h3>Creating
+    a Client</h3>\n<p>To start working with this package, create a client:</p>\n<pre><code>ctx
+    := context.Background()\nclient, err := storage.NewClient(ctx)\nif err != nil
+    {\n    // TODO: Handle error.\n}\n</code></pre>\n<p>The client will use your default
+    application credentials. Clients should be\nreused instead of created as needed.
+    The methods of Client are safe for\nconcurrent use by multiple goroutines.</p>\n<p>If
+    you only wish to access public data, you can create\nan unauthenticated client
+    with</p>\n<pre><code>client, err := storage.NewClient(ctx, option.WithoutAuthentication())\n</code></pre>\n<h3>Buckets</h3>\n<p>A
+    Google Cloud Storage bucket is a collection of objects. To work with a\nbucket,
+    make a bucket handle:</p>\n<pre><code>bkt := client.Bucket(bucketName)\n</code></pre>\n<p>A
+    handle is a reference to a bucket. You can have a handle even if the\nbucket doesn't
+    exist yet. To create a bucket in Google Cloud Storage,\ncall Create on the handle:</p>\n<pre><code>if
+    err := bkt.Create(ctx, projectID, nil); err != nil {\n    // TODO: Handle error.\n}\n</code></pre>\n<p>Note
+    that although buckets are associated with projects, bucket names are\nglobal across
+    all projects.</p>\n<p>Each bucket has associated metadata, represented in this
+    package by\nBucketAttrs. The third argument to BucketHandle.Create allows you
+    to set\nthe initial BucketAttrs of a bucket. To retrieve a bucket's attributes,
+    use\nAttrs:</p>\n<pre><code>attrs, err := bkt.Attrs(ctx)\nif err != nil {\n    //
+    TODO: Handle error.\n}\nfmt.Printf(&quot;bucket %s, created at %s, is located
+    in %s with storage class %s\\n&quot;,\n    attrs.Name, attrs.Created, attrs.Location,
+    attrs.StorageClass)\n</code></pre>\n<h3>Objects</h3>\n<p>An object holds arbitrary
+    data as a sequence of bytes, like a file. You\nrefer to objects using a handle,
+    just as with buckets, but unlike buckets\nyou don't explicitly create an object.
+    Instead, the first time you write\nto an object it will be created. You can use
+    the standard Go io.Reader\nand io.Writer interfaces to read and write object data:</p>\n<pre><code>obj
+    := bkt.Object(&quot;data&quot;)\n// Write something to obj.\n// w implements io.Writer.\nw
+    := obj.NewWriter(ctx)\n// Write some text to obj. This will either create the
+    object or overwrite whatever is there already.\nif _, err := fmt.Fprintf(w, &quot;This
+    object contains text.\\n&quot;); err != nil {\n    // TODO: Handle error.\n}\n//
+    Close, just like writing a file.\nif err := w.Close(); err != nil {\n    // TODO:
+    Handle error.\n}\n\n// Read it back.\nr, err := obj.NewReader(ctx)\nif err !=
+    nil {\n    // TODO: Handle error.\n}\ndefer r.Close()\nif _, err := io.Copy(os.Stdout,
+    r); err != nil {\n    // TODO: Handle error.\n}\n// Prints &quot;This object contains
+    text.&quot;\n</code></pre>\n<p>Objects also have attributes, which you can fetch
+    with Attrs:</p>\n<pre><code>objAttrs, err := obj.Attrs(ctx)\nif err != nil {\n
+    \   // TODO: Handle error.\n}\nfmt.Printf(&quot;object %s has size %d and can
+    be read using %s\\n&quot;,\n    objAttrs.Name, objAttrs.Size, objAttrs.MediaLink)\n</code></pre>\n<h3>Listing
+    objects</h3>\n<p>Listing objects in a bucket is done with the Bucket.Objects method:</p>\n<pre><code>query
+    := &amp;storage.Query{Prefix: &quot;&quot;}\n\nvar names []string\nit := bkt.Objects(ctx,
+    query)\nfor {\n    attrs, err := it.Next()\n    if err == iterator.Done {\n        break\n
+    \   }\n    if err != nil {\n        log.Fatal(err)\n    }\n    names = append(names,
+    attrs.Name)\n}\n</code></pre>\n<p>If only a subset of object attributes is needed
+    when listing, specifying this\nsubset using Query.SetAttrSelection may speed up
+    the listing process:</p>\n<pre><code>query := &amp;storage.Query{Prefix: &quot;&quot;}\nquery.SetAttrSelection([]string{&quot;Name&quot;})\n\n//
+    ... as before\n</code></pre>\n<h3>ACLs</h3>\n<p>Both objects and buckets have
+    ACLs (Access Control Lists). An ACL is a list of\nACLRules, each of which specifies
+    the role of a user, group or project. ACLs\nare suitable for fine-grained control,
+    but you may prefer using IAM to control\naccess at the project level (see\nhttps://cloud.google.com/storage/docs/access-control/iam).</p>\n<p>To
+    list the ACLs of a bucket or object, obtain an ACLHandle and call its List method:</p>\n<pre><code>acls,
+    err := obj.ACL().List(ctx)\nif err != nil {\n    // TODO: Handle error.\n}\nfor
+    _, rule := range acls {\n    fmt.Printf(&quot;%s has role %s\\n&quot;, rule.Entity,
+    rule.Role)\n}\n</code></pre>\n<p>You can also set and delete ACLs.</p>\n<h3>Conditions</h3>\n<p>Every
+    object has a generation and a metageneration. The generation changes\nwhenever
+    the content changes, and the metageneration changes whenever the\nmetadata changes.
+    Conditions let you check these values before an operation;\nthe operation only
+    executes if the conditions match. You can use conditions to\nprevent race conditions
+    in read-modify-write operations.</p>\n<p>For example, say you've read an object's
+    metadata into objAttrs. Now\nyou want to write to that object, but only if its
+    contents haven't changed\nsince you read it. Here is how to express that:</p>\n<pre><code>w
+    = obj.If(storage.Conditions{GenerationMatch: objAttrs.Generation}).NewWriter(ctx)\n//
+    Proceed with writing as above.\n</code></pre>\n<h3>Signed URLs</h3>\n<p>You can
+    obtain a URL that lets anyone read or write an object for a limited time.\nYou
+    don't need to create a client to do this. See the documentation of\nSignedURL
+    for details.</p>\n<pre><code>url, err := storage.SignedURL(bucketName, &quot;shared-object&quot;,
+    opts)\nif err != nil {\n    // TODO: Handle error.\n}\nfmt.Println(url)\n</code></pre>\n<h3>Post
+    Policy V4 Signed Request</h3>\n<p>A type of signed request that allows uploads
+    through HTML forms directly to Cloud Storage with\ntemporary permission. Conditions
+    can be applied to restrict how the HTML form is used and exercised\nby a user.</p>\n<p>For
+    more information, please see https://cloud.google.com/storage/docs/xml-api/post-object
+    as well\nas the documentation of GenerateSignedPostPolicyV4.</p>\n<pre><code>pv4,
+    err := storage.GenerateSignedPostPolicyV4(bucketName, objectName, opts)\nif err
+    != nil {\n    // TODO: Handle error.\n}\nfmt.Printf(&quot;URL: %s\\nFields; %v\\n&quot;,
+    pv4.URL, pv4.Fields)\n</code></pre>\n<h3>Errors</h3>\n<p>Errors returned by this
+    client are often of the type <a href=\"https://godoc.org/google.golang.org/api/googleapi#Error\"><code>googleapi.Error</code></a>.\nThese
+    errors can be introspected for more information by type asserting to the richer
+    <code>googleapi.Error</code> type. For example:</p>\n<pre><code>if e, ok := err.(*googleapi.Error);
+    ok {\n\t  if e.Code == 409 { ... }\n}\n</code></pre>
\n" type: package langs: - go diff --git a/internal/kokoro/vet.sh b/internal/kokoro/vet.sh index 47a71530a37..1bb90bbf723 100755 --- a/internal/kokoro/vet.sh +++ b/internal/kokoro/vet.sh @@ -78,7 +78,8 @@ golint ./... 2>&1 | ( grep -v "a blank import should be only in a main or test package" | grep -v "method ExecuteSql should be ExecuteSQL" | grep -vE "spanner/spansql/(sql|types).go:.*should have comment" | - grep -vE "\.pb\.go:" + grep -vE "\.pb\.go:" | + grep -v "third_party/go/doc" ) | tee /dev/stderr | (! read) @@ -100,7 +101,8 @@ staticcheck -go 1.11 ./... 2>&1 | ( grep -v bigtable/reader.go | grep -v internal/btree/btree.go | grep -v container/apiv1/mock_test.go | - grep -v third_party/pkgsite/synopsis.go + grep -v third_party/pkgsite/synopsis.go | + grep -v third_party/go/doc ) | tee /dev/stderr | (! read) diff --git a/third_party/go/doc/Makefile b/third_party/go/doc/Makefile new file mode 100644 index 00000000000..ca4948f91c2 --- /dev/null +++ b/third_party/go/doc/Makefile @@ -0,0 +1,7 @@ +# Copyright 2009 The Go Authors. All rights reserved. +# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +# Script to test heading detection heuristic +headscan: headscan.go + go build headscan.go diff --git a/third_party/go/doc/comment.go b/third_party/go/doc/comment.go new file mode 100644 index 00000000000..9b3732901cf --- /dev/null +++ b/third_party/go/doc/comment.go @@ -0,0 +1,571 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.16 + +// Godoc comment extraction and comment -> HTML formatting. + +package doc + +import ( + "bytes" + "io" + "regexp" + "strings" + "text/template" // for HTMLEscape + "unicode" + "unicode/utf8" +) + +const ( + ldquo = "“" + rdquo = "”" + ulquo = "“" + urquo = "”" +) + +var ( + htmlQuoteReplacer = strings.NewReplacer(ulquo, ldquo, urquo, rdquo) + unicodeQuoteReplacer = strings.NewReplacer("``", ulquo, "''", urquo) +) + +// Escape comment text for HTML. If nice is set, +// also turn `` into “ and '' into ”. +func commentEscape(w io.Writer, text string, nice, md bool) { + if nice { + // In the first pass, we convert `` and '' into their unicode equivalents. + // This prevents them from being escaped in HTMLEscape. + text = convertQuotes(text) + var buf bytes.Buffer + if md { + buf.WriteString(text) + } else { + template.HTMLEscape(&buf, []byte(text)) + } + // Now we convert the unicode quotes to their HTML escaped entities to maintain old behavior. + // We need to use a temp buffer to read the string back and do the conversion, + // otherwise HTMLEscape will escape & to & + htmlQuoteReplacer.WriteString(w, buf.String()) + return + } + if md { + w.Write([]byte(text)) + } else { + template.HTMLEscape(w, []byte(text)) + } +} + +func convertQuotes(text string) string { + return unicodeQuoteReplacer.Replace(text) +} + +const ( + // Regexp for Go identifiers + identRx = `[\pL_][\pL_0-9]*` + + // Regexp for URLs + // Match parens, and check later for balance - see #5043, #22285 + // Match .,:;?! within path, but not at end - see #18139, #16565 + // This excludes some rare yet valid urls ending in common punctuation + // in order to allow sentences ending in URLs. + + // protocol (required) e.g. http + protoPart = `(https?|ftp|file|gopher|mailto|nntp)` + // host (required) e.g. www.example.com or [::1]:8080 + hostPart = `([a-zA-Z0-9_@\-.\[\]:]+)` + // path+query+fragment (optional) e.g. 
/path/index.html?q=foo#bar
+	pathPart = `([.,:;?!]*[a-zA-Z0-9$'()*+&#=@~_/\-\[\]%])*`
+
+	urlRx = protoPart + `://` + hostPart + pathPart
+)
+
+var matchRx = regexp.MustCompile(`(` + urlRx + `)|(` + identRx + `)`)
+
+var (
+	html_a      = []byte(`<a href="`)
+	html_aq     = []byte(`">`)
+	html_enda   = []byte("</a>")
+	html_i      = []byte("<i>")
+	html_endi   = []byte("</i>")
+	html_p      = []byte("<p>\n")
+	html_endp   = []byte("</p>\n")
+	html_pre    = []byte("<pre>")
+	html_endpre = []byte("</pre>\n")
+	html_h      = []byte(`<h3 id="`)
+	html_hq     = []byte(`">`)
+	html_endh   = []byte("</h3>
\n") +) + +var ( + md_i = []byte("_") + md_endi = []byte("_") + md_p = []byte("\n") + md_endp = []byte("\n") + md_pre = []byte("```\n") + md_endpre = []byte("```\n") + md_h = []byte(`### `) + md_endh = []byte("\n") +) + +// Emphasize and escape a line of text for HTML. URLs are converted into links; +// if the URL also appears in the words map, the link is taken from the map (if +// the corresponding map value is the empty string, the URL is not converted +// into a link). Go identifiers that appear in the words map are italicized; if +// the corresponding map value is not the empty string, it is considered a URL +// and the word is converted into a link. If nice is set, the remaining text's +// appearance is improved where it makes sense (e.g., `` is turned into “ +// and '' into ”). +func emphasize(w io.Writer, line string, words map[string]string, nice, md bool) { + for { + m := matchRx.FindStringSubmatchIndex(line) + if m == nil { + break + } + // m >= 6 (two parenthesized sub-regexps in matchRx, 1st one is urlRx) + + // write text before match + commentEscape(w, line[0:m[0]], nice, md) + + // adjust match for URLs + match := line[m[0]:m[1]] + if strings.Contains(match, "://") { + m0, m1 := m[0], m[1] + for _, s := range []string{"()", "{}", "[]"} { + open, close := s[:1], s[1:] // E.g., "(" and ")" + // require opening parentheses before closing parentheses (#22285) + if i := strings.Index(match, close); i >= 0 && i < strings.Index(match, open) { + m1 = m0 + i + match = line[m0:m1] + } + // require balanced pairs of parentheses (#5043) + for i := 0; strings.Count(match, open) != strings.Count(match, close) && i < 10; i++ { + m1 = strings.LastIndexAny(line[:m1], s) + match = line[m0:m1] + } + } + if m1 != m[1] { + // redo matching with shortened line for correct indices + m = matchRx.FindStringSubmatchIndex(line[:m[0]+len(match)]) + } + } + + // analyze match + url := "" + italics := false + if words != nil { + url, italics = words[match] + } + if m[2] >= 0 { + // match against first parenthesized sub-regexp; must be match against urlRx + if !italics { + // no alternative URL in words list, use match instead + url = match + } + italics = false // don't italicize URLs + } + + // write match + if len(url) > 0 { + // Don't linkify md content. MD processor will handle. 
+ if !md { + w.Write(html_a) + template.HTMLEscape(w, []byte(url)) + w.Write(html_aq) + } + } + if italics { + if md { + w.Write(md_i) + } else { + w.Write(html_i) + } + } + commentEscape(w, match, nice, md) + if italics { + if md { + w.Write(md_endi) + } else { + w.Write(html_endi) + } + } + if len(url) > 0 && !md { + w.Write(html_enda) + } + + // advance + line = line[m[1]:] + } + commentEscape(w, line, nice, md) +} + +func indentLen(s string) int { + i := 0 + for i < len(s) && (s[i] == ' ' || s[i] == '\t') { + i++ + } + return i +} + +func isBlank(s string) bool { + return len(s) == 0 || (len(s) == 1 && s[0] == '\n') +} + +func commonPrefix(a, b string) string { + i := 0 + for i < len(a) && i < len(b) && a[i] == b[i] { + i++ + } + return a[0:i] +} + +func unindent(block []string) { + if len(block) == 0 { + return + } + + // compute maximum common white prefix + prefix := block[0][0:indentLen(block[0])] + for _, line := range block { + if !isBlank(line) { + prefix = commonPrefix(prefix, line[0:indentLen(line)]) + } + } + n := len(prefix) + + // remove + for i, line := range block { + if !isBlank(line) { + block[i] = line[n:] + } + } +} + +// heading returns the trimmed line if it passes as a section heading; +// otherwise it returns the empty string. +func heading(line string) string { + line = strings.TrimSpace(line) + if len(line) == 0 { + return "" + } + + // a heading must start with an uppercase letter + r, _ := utf8.DecodeRuneInString(line) + if !unicode.IsLetter(r) || !unicode.IsUpper(r) { + return "" + } + + // it must end in a letter or digit: + r, _ = utf8.DecodeLastRuneInString(line) + if !unicode.IsLetter(r) && !unicode.IsDigit(r) { + return "" + } + + // exclude lines with illegal characters. we allow "()," + if strings.ContainsAny(line, ";:!?+*/=[]{}_^°&§~%#@<\">\\") { + return "" + } + + // allow "'" for possessive "'s" only + for b := line; ; { + i := strings.IndexRune(b, '\'') + if i < 0 { + break + } + if i+1 >= len(b) || b[i+1] != 's' || (i+2 < len(b) && b[i+2] != ' ') { + return "" // not followed by "s " + } + b = b[i+2:] + } + + // allow "." when followed by non-space + for b := line; ; { + i := strings.IndexRune(b, '.') + if i < 0 { + break + } + if i+1 >= len(b) || b[i+1] == ' ' { + return "" // not followed by non-space + } + b = b[i+1:] + } + + return line +} + +type op int + +const ( + opPara op = iota + opHead + opPre +) + +type block struct { + op op + lines []string +} + +var nonAlphaNumRx = regexp.MustCompile(`[^a-zA-Z0-9]`) + +func anchorID(line string) string { + // Add a "hdr-" prefix to avoid conflicting with IDs used for package symbols. + return "hdr-" + nonAlphaNumRx.ReplaceAllString(line, "_") +} + +// ToHTML converts comment text to formatted HTML. +// The comment was prepared by DocReader, +// so it is known not to have leading, trailing blank lines +// nor to have trailing spaces at the end of lines. +// The comment markers have already been removed. +// +// Each span of unindented non-blank lines is converted into +// a single paragraph. There is one exception to the rule: a span that +// consists of a single line, is followed by another paragraph span, +// begins with a capital letter, and contains no punctuation +// other than parentheses and commas is formatted as a heading. +// +// A span of indented lines is converted into a
<pre> block,
+// with the common indent prefix removed.
+//
+// URLs in the comment text are converted into links; if the URL also appears
+// in the words map, the link is taken from the map (if the corresponding map
+// value is the empty string, the URL is not converted into a link).
+//
+// A pair of (consecutive) backticks (`) is converted to a unicode left quote (“), and a pair of (consecutive)
+// single quotes (') is converted to a unicode right quote (”).
+//
+// Go identifiers that appear in the words map are italicized; if the corresponding
+// map value is not the empty string, it is considered a URL and the word is converted
+// into a link.
+func ToHTML(w io.Writer, text string, words map[string]string) {
+	for _, b := range blocks(text) {
+		switch b.op {
+		case opPara:
+			w.Write(html_p)
+			for _, line := range b.lines {
+				emphasize(w, line, words, true, false)
+			}
+			w.Write(html_endp)
+		case opHead:
+			w.Write(html_h)
+			id := ""
+			for _, line := range b.lines {
+				if id == "" {
+					id = anchorID(line)
+					w.Write([]byte(id))
+					w.Write(html_hq)
+				}
+				commentEscape(w, line, true, false)
+			}
+			if id == "" {
+				w.Write(html_hq)
+			}
+			w.Write(html_endh)
+		case opPre:
+			w.Write(html_pre)
+			for _, line := range b.lines {
+				emphasize(w, line, nil, false, false)
+			}
+			w.Write(html_endpre)
+		}
+	}
+}
+
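An illustrative call (not part of this patch; output shown approximately):

	var buf bytes.Buffer
	text := "Package p does things.\n\nDeprecated functions\n\nUse the new API:\n\n\tclient.Do(ctx)\n"
	ToHTML(&buf, text, nil)
	// buf.String() is approximately:
	//   <p>
	//   Package p does things.
	//   </p>
	//   <h3 id="hdr-Deprecated_functions">Deprecated functions</h3>
	//   <p>
	//   Use the new API:
	//   </p>
	//   <pre>client.Do(ctx)
	//   </pre>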
+// ToMarkdown is like ToHTML, but Markdown. See http://golang.org/issues/34875.
+func ToMarkdown(w io.Writer, text string, words map[string]string) {
+	for _, b := range blocks(text) {
+		switch b.op {
+		case opPara:
+			w.Write(md_p)
+			for _, line := range b.lines {
+				emphasize(w, line, words, true, true)
+			}
+			w.Write(md_endp)
+		case opHead:
+			w.Write(md_h)
+			for _, line := range b.lines {
+				commentEscape(w, line, true, true)
+			}
+			w.Write(md_endh)
+		case opPre:
+			w.Write(md_pre)
+			for _, line := range b.lines {
+				emphasize(w, line, nil, false, true)
+			}
+			w.Write(md_endpre)
+		}
+	}
+}
+
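An illustrative sketch of the whole pipeline this patch wires up (it mirrors the new toHTML in internal/godocfx/parse.go; renderDocComment is a hypothetical helper, not part of the patch):

	package main

	import (
		"bytes"
		"fmt"
		"log"

		"cloud.google.com/go/third_party/go/doc"
		"github.com/yuin/goldmark"
		"github.com/yuin/goldmark/renderer/html"
	)

	// renderDocComment converts a godoc comment to HTML by way of Markdown.
	func renderDocComment(comment string) (string, error) {
		// First, convert the comment to Markdown: headings become "### ",
		// indented spans become fenced code blocks.
		md := &bytes.Buffer{}
		doc.ToMarkdown(md, comment, nil)

		// Then render the Markdown; WithUnsafe keeps any raw HTML intact.
		gm := goldmark.New(goldmark.WithRendererOptions(html.WithUnsafe()))
		out := &bytes.Buffer{}
		if err := gm.Convert(md.Bytes(), out); err != nil {
			return "", err
		}
		return out.String(), nil
	}

	func main() {
		s, err := renderDocComment("Call Do:\n\n\tclient.Do(ctx)\n")
		if err != nil {
			log.Fatal(err)
		}
		fmt.Print(s) // roughly: <p>Call Do:</p> ... <pre><code>client.Do(ctx) ...
	}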
+func blocks(text string) []block {
+	var (
+		out  []block
+		para []string
+
+		lastWasBlank   = false
+		lastWasHeading = false
+	)
+
+	close := func() {
+		if para != nil {
+			out = append(out, block{opPara, para})
+			para = nil
+		}
+	}
+
+	lines := strings.SplitAfter(text, "\n")
+	unindent(lines)
+	for i := 0; i < len(lines); {
+		line := lines[i]
+		if isBlank(line) {
+			// close paragraph
+			close()
+			i++
+			lastWasBlank = true
+			continue
+		}
+		if indentLen(line) > 0 {
+			// close paragraph
+			close()
+
+			// count indented or blank lines
+			j := i + 1
+			for j < len(lines) && (isBlank(lines[j]) || indentLen(lines[j]) > 0) {
+				j++
+			}
+			// but not trailing blank lines
+			for j > i && isBlank(lines[j-1]) {
+				j--
+			}
+			pre := lines[i:j]
+			i = j
+
+			unindent(pre)
+
+			// put those lines in a pre block
+			out = append(out, block{opPre, pre})
+			lastWasHeading = false
+			continue
+		}
+
+		if lastWasBlank && !lastWasHeading && i+2 < len(lines) &&
+			isBlank(lines[i+1]) && !isBlank(lines[i+2]) && indentLen(lines[i+2]) == 0 {
+			// current line is non-blank, surrounded by blank lines
+			// and the next non-blank line is not indented: this
+			// might be a heading.
+			if head := heading(line); head != "" {
+				close()
+				out = append(out, block{opHead, []string{head}})
+				i += 2
+				lastWasHeading = true
+				continue
+			}
+		}
+
+		// open paragraph
+		lastWasBlank = false
+		lastWasHeading = false
+		para = append(para, lines[i])
+		i++
+	}
+	close()
+
+	return out
+}
+
+// ToText prepares comment text for presentation in textual output.
+// It wraps paragraphs of text to width or fewer Unicode code points
+// and then prefixes each line with the indent. In preformatted sections
+// (such as program text), it prefixes each non-blank line with preIndent.
+//
+// A pair of (consecutive) backticks (`) is converted to a unicode left quote (“), and a pair of (consecutive)
+// single quotes (') is converted to a unicode right quote (”).
+func ToText(w io.Writer, text string, indent, preIndent string, width int) {
+	l := lineWrapper{
+		out:    w,
+		width:  width,
+		indent: indent,
+	}
+	for _, b := range blocks(text) {
+		switch b.op {
+		case opPara:
+			// l.write will add leading newline if required
+			for _, line := range b.lines {
+				line = convertQuotes(line)
+				l.write(line)
+			}
+			l.flush()
+		case opHead:
+			w.Write(nl)
+			for _, line := range b.lines {
+				line = convertQuotes(line)
+				l.write(line + "\n")
+			}
+			l.flush()
+		case opPre:
+			w.Write(nl)
+			for _, line := range b.lines {
+				if isBlank(line) {
+					w.Write([]byte("\n"))
+				} else {
+					w.Write([]byte(preIndent))
+					w.Write([]byte(line))
+				}
+			}
+		}
+	}
+}
+
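A small usage sketch, matching the TestToText cases later in this patch (not part of the patch itself):

	var buf bytes.Buffer
	ToText(&buf, "Para one.\n\n\tx := 1\n", ".   ", "$\t", 40)
	// buf.String() is:
	//   .   Para one.
	//
	//   $	x := 1
	// Wrapped paragraph lines get the indent prefix; preformatted lines
	// keep their shape behind the preIndent prefix.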
+type lineWrapper struct {
+	out       io.Writer
+	printed   bool
+	width     int
+	indent    string
+	n         int
+	pendSpace int
+}
+
+var nl = []byte("\n")
+var space = []byte(" ")
+var prefix = []byte("// ")
+
+func (l *lineWrapper) write(text string) {
+	if l.n == 0 && l.printed {
+		l.out.Write(nl) // blank line before new paragraph
+	}
+	l.printed = true
+
+	needsPrefix := false
+	isComment := strings.HasPrefix(text, "//")
+	for _, f := range strings.Fields(text) {
+		w := utf8.RuneCountInString(f)
+		// wrap if line is too long
+		if l.n > 0 && l.n+l.pendSpace+w > l.width {
+			l.out.Write(nl)
+			l.n = 0
+			l.pendSpace = 0
+			needsPrefix = isComment && !strings.HasPrefix(f, "//")
+		}
+		if l.n == 0 {
+			l.out.Write([]byte(l.indent))
+		}
+		if needsPrefix {
+			l.out.Write(prefix)
+			needsPrefix = false
+		}
+		l.out.Write(space[:l.pendSpace])
+		l.out.Write([]byte(f))
+		l.n += l.pendSpace + w
+		l.pendSpace = 1
+	}
+}
+
+func (l *lineWrapper) flush() {
+	if l.n == 0 {
+		return
+	}
+	l.out.Write(nl)
+	l.pendSpace = 0
+	l.n = 0
+}
diff --git a/third_party/go/doc/comment_test.go b/third_party/go/doc/comment_test.go
new file mode 100644
index 00000000000..62aa2bcd413
--- /dev/null
+++ b/third_party/go/doc/comment_test.go
@@ -0,0 +1,249 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.16
+
+package doc
+
+import (
+	"bytes"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+var headingTests = []struct {
+	line string
+	ok   bool
+}{
+	{"Section", true},
+	{"A typical usage", true},
+	{"ΔΛΞ is Greek", true},
+	{"Foo 42", true},
+	{"", false},
+	{"section", false},
+	{"A typical usage:", false},
+	{"This code:", false},
+	{"δ is Greek", false},
+	{"Foo §", false},
+	{"Fermat's Last Sentence", true},
+	{"Fermat's", true},
+	{"'sX", false},
+	{"Ted 'Too' Bar", false},
+	{"Use n+m", false},
+	{"Scanning:", false},
+	{"N:M", false},
+}
+
+func TestIsHeading(t *testing.T) {
+	for _, tt := range headingTests {
+		if h := heading(tt.line); (len(h) > 0) != tt.ok {
+			t.Errorf("isHeading(%q) = %v, want %v", tt.line, h, tt.ok)
+		}
+	}
+}
+
+var blocksTests = []struct {
+	in   string
+	out  []block
+	text string
+}{
+	{
+		in: `Para 1.
+Para 1 line 2.
+
+Para 2.
+
+Section
+
+Para 3.
+
+	pre
+	pre1
+
+Para 4.
+
+	pre
+	pre1
+
+	pre2
+
+Para 5.
+
+
+	pre
+
+
+	pre1
+	pre2
+
+Para 6.
+	pre
+	pre2
+`,
+		out: []block{
+			{opPara, []string{"Para 1.\n", "Para 1 line 2.\n"}},
+			{opPara, []string{"Para 2.\n"}},
+			{opHead, []string{"Section"}},
+			{opPara, []string{"Para 3.\n"}},
+			{opPre, []string{"pre\n", "pre1\n"}},
+			{opPara, []string{"Para 4.\n"}},
+			{opPre, []string{"pre\n", "pre1\n", "\n", "pre2\n"}},
+			{opPara, []string{"Para 5.\n"}},
+			{opPre, []string{"pre\n", "\n", "\n", "pre1\n", "pre2\n"}},
+			{opPara, []string{"Para 6.\n"}},
+			{opPre, []string{"pre\n", "pre2\n"}},
+		},
+		text: `.   Para 1. Para 1 line 2.
+
+.   Para 2.
+
+
+.   Section
+
+.   Para 3.
+
+$	pre
+$	pre1
+
+.   Para 4.
+
+$	pre
+$	pre1
+
+$	pre2
+
+.   Para 5.
+
+$	pre
+
+
+$	pre1
+$	pre2
+
+.   Para 6.
+
+$	pre
+$	pre2
+`,
+	},
+	{
+		in: "Para.\n\tshould not be ``escaped''",
+		out: []block{
+			{opPara, []string{"Para.\n"}},
+			{opPre, []string{"should not be ``escaped''"}},
+		},
+		text: ".   Para.\n\n$	should not be ``escaped''",
+	},
+	{
+		in: "// A very long line of 46 char for line wrapping.",
+		out: []block{
+			{opPara, []string{"// A very long line of 46 char for line wrapping."}},
+		},
+		text: `.   // A very long line of 46 char for line
+.   // wrapping.
+`,
+	},
+	{
+		in: `/* A very long line of 46 char for line wrapping.
+A very long line of 46 char for line wrapping. */`,
+		out: []block{
+			{opPara, []string{"/* A very long line of 46 char for line wrapping.\n", "A very long line of 46 char for line wrapping. */"}},
+		},
+		text: `.   /* A very long line of 46 char for line
+.   wrapping. A very long line of 46 char
+.   for line wrapping. */
+`,
+	},
+	{
+		in: `A line of 36 char for line wrapping.
+//Another line starting with //`,
+		out: []block{
+			{opPara, []string{"A line of 36 char for line wrapping.\n",
+				"//Another line starting with //"}},
+		},
+		text: `.   A line of 36 char for line wrapping.
+.   //Another line starting with //
+`,
+	},
+}
+
+func TestBlocks(t *testing.T) {
+	for i, tt := range blocksTests {
+		b := blocks(tt.in)
+		if !reflect.DeepEqual(b, tt.out) {
+			t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, b, tt.out)
+		}
+	}
+}
+
+func TestToText(t *testing.T) {
+	var buf bytes.Buffer
+	for i, tt := range blocksTests {
+		ToText(&buf, tt.in, ".   ", "$\t", 40)
+		if have := buf.String(); have != tt.text {
+			t.Errorf("#%d: mismatch\nhave: %s\nwant: %s\nhave vs want:\n%q\n%q", i, have, tt.text, have, tt.text)
+		}
+		buf.Reset()
+	}
+}
+
+var emphasizeTests = []struct {
+	in, out string
+}{
+	{"", ""},
+	{"http://[::1]:8080/foo.txt", `<a href="http://[::1]:8080/foo.txt">http://[::1]:8080/foo.txt</a>`},
+	{"before (https://www.google.com) after", `before (<a href="https://www.google.com">https://www.google.com</a>) after`},
+	{"before https://www.google.com:30/x/y/z:b::c. After", `before <a href="https://www.google.com:30/x/y/z:b::c">https://www.google.com:30/x/y/z:b::c</a>. After`},
+	{"http://www.google.com/path/:;!-/?query=%34b#093124", `<a href="http://www.google.com/path/:;!-/?query=%34b#093124">http://www.google.com/path/:;!-/?query=%34b#093124</a>`},
+	{"http://www.google.com/path/:;!-/?query=%34bar#093124", `<a href="http://www.google.com/path/:;!-/?query=%34bar#093124">http://www.google.com/path/:;!-/?query=%34bar#093124</a>`},
+	{"http://www.google.com/index.html! After", `<a href="http://www.google.com/index.html">http://www.google.com/index.html</a>! After`},
+	{"http://www.google.com/", `<a href="http://www.google.com/">http://www.google.com/</a>`},
+	{"https://www.google.com/", `<a href="https://www.google.com/">https://www.google.com/</a>`},
+	{"http://www.google.com/path.", `<a href="http://www.google.com/path">http://www.google.com/path</a>.`},
+	{"http://en.wikipedia.org/wiki/Camellia_(cipher)", `<a href="http://en.wikipedia.org/wiki/Camellia_(cipher)">http://en.wikipedia.org/wiki/Camellia_(cipher)</a>`},
+	{"(http://www.google.com/)", `(<a href="http://www.google.com/">http://www.google.com/</a>)`},
+	{"http://gmail.com)", `<a href="http://gmail.com">http://gmail.com</a>)`},
+	{"((http://gmail.com))", `((<a href="http://gmail.com">http://gmail.com</a>))`},
+	{"http://gmail.com ((http://gmail.com)) ()", `<a href="http://gmail.com">http://gmail.com</a> ((<a href="http://gmail.com">http://gmail.com</a>)) ()`},
+	{"Foo bar http://example.com/ quux!", `Foo bar <a href="http://example.com/">http://example.com/</a> quux!`},
+	{"Hello http://example.com/%2f/ /world.", `Hello <a href="http://example.com/%2f/">http://example.com/%2f/</a> /world.`},
+	{"Lorem http: ipsum //host/path", "Lorem http: ipsum //host/path"},
+	{"javascript://is/not/linked", "javascript://is/not/linked"},
+	{"http://foo", `<a href="http://foo">http://foo</a>`},
+	{"art by [[https://www.example.com/person/][Person Name]]", `art by [[<a href="https://www.example.com/person/">https://www.example.com/person/</a>][Person Name]]`},
+	{"please visit (http://golang.org/)", `please visit (<a href="http://golang.org/">http://golang.org/</a>)`},
+	{"please visit http://golang.org/hello())", `please visit <a href="http://golang.org/hello()">http://golang.org/hello()</a>)`},
+	{"http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD", `<a href="http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD">http://git.qemu.org/?p=qemu.git;a=blob;f=qapi-schema.json;hb=HEAD</a>`},
+	{"https://foo.bar/bal/x(])", `<a href="https://foo.bar/bal/x">https://foo.bar/bal/x</a>(])`}, // inner ] causes (]) to be cut off from URL
+	{"foo [ http://bar(])", `foo [ <a href="http://bar">http://bar</a>(])`}, // outer [ causes ]) to be cut off from URL
+}
+
+func TestEmphasize(t *testing.T) {
+	for i, tt := range emphasizeTests {
+		var buf bytes.Buffer
+		emphasize(&buf, tt.in, nil, true, false)
+		out := buf.String()
+		if out != tt.out {
+			t.Errorf("#%d: mismatch\nhave: %v\nwant: %v", i, out, tt.out)
+		}
+	}
+}
+
+func TestCommentEscape(t *testing.T) {
+	commentTests := []struct {
+		in, out string
+	}{
+		{"typically invoked as ``go tool asm'',", "typically invoked as " + ldquo + "go tool asm" + rdquo + ","},
+		{"For more detail, run ``go help test'' and ``go help testflag''", "For more detail, run " + ldquo + "go help test" + rdquo + " and " + ldquo + "go help testflag" + rdquo},
+	}
+	for i, tt := range commentTests {
+		var buf strings.Builder
+		commentEscape(&buf, tt.in, true, false)
+		out := buf.String()
+		if out != tt.out {
+			t.Errorf("#%d: mismatch\nhave: %q\nwant: %q", i, out, tt.out)
+		}
+	}
+}
diff --git a/third_party/go/doc/doc.go b/third_party/go/doc/doc.go
new file mode 100644
index 00000000000..7a43adfeae3
--- /dev/null
+++ b/third_party/go/doc/doc.go
@@ -0,0 +1,224 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.16
+
+// Package doc extracts source code documentation from a Go AST.
+package doc
+
+import (
+	"fmt"
+	"go/ast"
+	"go/token"
+	"strings"
+)
+
+// Package is the documentation for an entire package.
+type Package struct {
+	Doc        string
+	Name       string
+	ImportPath string
+	Imports    []string
+	Filenames  []string
+	Notes      map[string][]*Note
+
+	// Deprecated: For backward compatibility Bugs is still populated,
+	// but all new code should use Notes instead.
+	Bugs []string
+
+	// declarations
+	Consts []*Value
+	Types  []*Type
+	Vars   []*Value
+	Funcs  []*Func
+
+	// Examples is a sorted list of examples associated with
+	// the package. Examples are extracted from _test.go files
+	// provided to NewFromFiles.
+	Examples []*Example
+}
+
+// Value is the documentation for a (possibly grouped) var or const declaration.
+type Value struct {
+	Doc   string
+	Names []string // var or const names in declaration order
+	Decl  *ast.GenDecl
+
+	order int
+}
+
+// Type is the documentation for a type declaration.
+type Type struct {
+	Doc  string
+	Name string
+	Decl *ast.GenDecl
+
+	// associated declarations
+	Consts  []*Value // sorted list of constants of (mostly) this type
+	Vars    []*Value // sorted list of variables of (mostly) this type
+	Funcs   []*Func  // sorted list of functions returning this type
+	Methods []*Func  // sorted list of methods (including embedded ones) of this type
+
+	// Examples is a sorted list of examples associated with
+	// this type. Examples are extracted from _test.go files
+	// provided to NewFromFiles.
+	Examples []*Example
+}
+
+// Func is the documentation for a func declaration.
+type Func struct {
+	Doc  string
+	Name string
+	Decl *ast.FuncDecl
+
+	// methods
+	// (for functions, these fields have the respective zero value)
+	Recv  string // actual   receiver "T" or "*T"
+	Orig  string // original receiver "T" or "*T"
+	Level int    // embedding level; 0 means not embedded
+
+	// Examples is a sorted list of examples associated with this
+	// function or method. Examples are extracted from _test.go files
+	// provided to NewFromFiles.
+	Examples []*Example
+}
+
+// A Note represents a marked comment starting with "MARKER(uid): note body".
+// Any note with a marker of 2 or more upper case [A-Z] letters and a uid of
+// at least one character is recognized. The ":" following the uid is optional.
+// Notes are collected in the Package.Notes map indexed by the notes marker.
+type Note struct {
+	Pos, End token.Pos // position range of the comment containing the marker
+	UID      string    // uid found with the marker
+	Body     string    // note body text
+}
+
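For instance (an illustrative comment, not from this patch):

	// BUG(rsc): This package does not yet handle CRLF line endings.

would be collected as a Note with UID "rsc" under Package.Notes["BUG"].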
+// Mode values control the operation of New and NewFromFiles.
+type Mode int
+
+const (
+	// AllDecls says to extract documentation for all package-level
+	// declarations, not just exported ones.
+	AllDecls Mode = 1 << iota
+
+	// AllMethods says to show all embedded methods, not just the ones of
+	// invisible (unexported) anonymous fields.
+	AllMethods
+
+	// PreserveAST says to leave the AST unmodified. Originally, pieces of
+	// the AST such as function bodies were nil-ed out to save memory in
+	// godoc, but not all programs want that behavior.
+	PreserveAST
+)
+
+// New computes the package documentation for the given package AST.
+// New takes ownership of the AST pkg and may edit or overwrite it.
+// To have the Examples fields populated, use NewFromFiles and include
+// the package's _test.go files.
+//
+func New(pkg *ast.Package, importPath string, mode Mode) *Package {
+	var r reader
+	r.readPackage(pkg, mode)
+	r.computeMethodSets()
+	r.cleanupTypes()
+	return &Package{
+		Doc:        r.doc,
+		Name:       pkg.Name,
+		ImportPath: importPath,
+		Imports:    sortedKeys(r.imports),
+		Filenames:  r.filenames,
+		Notes:      r.notes,
+		Bugs:       noteBodies(r.notes["BUG"]),
+		Consts:     sortedValues(r.values, token.CONST),
+		Types:      sortedTypes(r.types, mode&AllMethods != 0),
+		Vars:       sortedValues(r.values, token.VAR),
+		Funcs:      sortedFuncs(r.funcs, true),
+	}
+}
+
+// NewFromFiles computes documentation for a package.
+//
+// The package is specified by a list of *ast.Files and corresponding
+// file set, which must not be nil.
+// NewFromFiles uses all provided files when computing documentation,
+// so it is the caller's responsibility to provide only the files that
+// match the desired build context. "go/build".Context.MatchFile can
+// be used for determining whether a file matches a build context with
+// the desired GOOS and GOARCH values, and other build constraints.
+// The import path of the package is specified by importPath.
+//
+// Examples found in _test.go files are associated with the corresponding
+// type, function, method, or the package, based on their name.
+// If the example has a suffix in its name, it is set in the
+// Example.Suffix field. Examples with malformed names are skipped.
+//
+// Optionally, a single extra argument of type Mode can be provided to
+// control low-level aspects of the documentation extraction behavior.
+//
+// NewFromFiles takes ownership of the AST files and may edit them,
+// unless the PreserveAST Mode bit is on.
+//
+func NewFromFiles(fset *token.FileSet, files []*ast.File, importPath string, opts ...interface{}) (*Package, error) {
+	// Check for invalid API usage.
+	if fset == nil {
+		panic(fmt.Errorf("doc.NewFromFiles: no token.FileSet provided (fset == nil)"))
+	}
+	var mode Mode
+	switch len(opts) { // There can only be 0 or 1 options, so a simple switch works for now.
+	case 0:
+		// Nothing to do.
+	case 1:
+		m, ok := opts[0].(Mode)
+		if !ok {
+			panic(fmt.Errorf("doc.NewFromFiles: option argument type must be doc.Mode"))
+		}
+		mode = m
+	default:
+		panic(fmt.Errorf("doc.NewFromFiles: there must not be more than 1 option argument"))
+	}
+
+	// Collect .go and _test.go files.
+	var (
+		goFiles     = make(map[string]*ast.File)
+		testGoFiles []*ast.File
+	)
+	for i := range files {
+		f := fset.File(files[i].Pos())
+		if f == nil {
+			return nil, fmt.Errorf("file files[%d] is not found in the provided file set", i)
+		}
+		switch name := f.Name(); {
+		case strings.HasSuffix(name, ".go") && !strings.HasSuffix(name, "_test.go"):
+			goFiles[name] = files[i]
+		case strings.HasSuffix(name, "_test.go"):
+			testGoFiles = append(testGoFiles, files[i])
+		default:
+			return nil, fmt.Errorf("file files[%d] filename %q does not have a .go extension", i, name)
+		}
+	}
+
+	// TODO(dmitshur,gri): A relatively high level call to ast.NewPackage with a simpleImporter
+	// ast.Importer implementation is made below. It might be possible to short-circuit and simplify.
+
+	// Compute package documentation.
+	pkg, _ := ast.NewPackage(fset, goFiles, simpleImporter, nil) // Ignore errors that can happen due to unresolved identifiers.
+	p := New(pkg, importPath, mode)
+	classifyExamples(p, Examples(testGoFiles...))
+	return p, nil
+}
+
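A minimal usage sketch (not part of the patch; src and the import path are placeholders, error handling abbreviated):

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		log.Fatal(err)
	}
	p, err := doc.NewFromFiles(fset, []*ast.File{f}, "example.com/p")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.Name, p.Doc) // package name and package doc comment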
+// simpleImporter returns a (dummy) package object named by the last path
+// component of the provided package path (as is the convention for packages).
+// This is sufficient to resolve package identifiers without doing an actual
+// import. It never returns an error.
+func simpleImporter(imports map[string]*ast.Object, path string) (*ast.Object, error) {
+	pkg := imports[path]
+	if pkg == nil {
+		// note that strings.LastIndex returns -1 if there is no "/"
+		pkg = ast.NewObj(ast.Pkg, path[strings.LastIndex(path, "/")+1:])
+		pkg.Data = ast.NewScope(nil) // required by ast.NewPackage for dot-import
+		imports[path] = pkg
+	}
+	return pkg, nil
+}
diff --git a/third_party/go/doc/example.go b/third_party/go/doc/example.go
new file mode 100644
index 00000000000..fdfa79a9d74
--- /dev/null
+++ b/third_party/go/doc/example.go
@@ -0,0 +1,549 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.16
+
+// Extract example functions from file ASTs.
+
+package doc
+
+import (
+	"go/ast"
+	"go/token"
+	"path"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+// An Example represents an example function found in a test source file.
+type Example struct {
+	Name        string // name of the item being exemplified (including optional suffix)
+	Suffix      string // example suffix, without leading '_' (only populated by NewFromFiles)
+	Doc         string // example function doc string
+	Code        ast.Node
+	Play        *ast.File // a whole program version of the example
+	Comments    []*ast.CommentGroup
+	Output      string // expected output
+	Unordered   bool
+	EmptyOutput bool // expect empty output
+	Order       int  // original source code order
+}
+
+// Examples returns the examples found in testFiles, sorted by Name field.
+// The Order fields record the order in which the examples were encountered.
+// The Suffix field is not populated when Examples is called directly, it is
+// only populated by NewFromFiles for examples it finds in _test.go files.
+//
+// Playable Examples must be in a package whose name ends in "_test".
+// An Example is "playable" (the Play field is non-nil) in either of these
+// circumstances:
+//   - The example function is self-contained: the function references only
+//     identifiers from other packages (or predeclared identifiers, such as
+//     "int") and the test file does not include a dot import.
+//   - The entire test file is the example: the file contains exactly one
+//     example function, zero test or benchmark functions, and at least one
+//     top-level function, type, variable, or constant declaration other
+//     than the example function.
+func Examples(testFiles ...*ast.File) []*Example {
+	var list []*Example
+	for _, file := range testFiles {
+		hasTests := false // file contains tests or benchmarks
+		numDecl := 0      // number of non-import declarations in the file
+		var flist []*Example
+		for _, decl := range file.Decls {
+			if g, ok := decl.(*ast.GenDecl); ok && g.Tok != token.IMPORT {
+				numDecl++
+				continue
+			}
+			f, ok := decl.(*ast.FuncDecl)
+			if !ok || f.Recv != nil {
+				continue
+			}
+			numDecl++
+			name := f.Name.Name
+			if isTest(name, "Test") || isTest(name, "Benchmark") {
+				hasTests = true
+				continue
+			}
+			if !isTest(name, "Example") {
+				continue
+			}
+			if params := f.Type.Params; len(params.List) != 0 {
+				continue // function has params; not a valid example
+			}
+			if f.Body == nil { // ast.File.Body nil dereference (see issue 28044)
+				continue
+			}
+			var doc string
+			if f.Doc != nil {
+				doc = f.Doc.Text()
+			}
+			output, unordered, hasOutput := exampleOutput(f.Body, file.Comments)
+			flist = append(flist, &Example{
+				Name:        name[len("Example"):],
+				Doc:         doc,
+				Code:        f.Body,
+				Play:        playExample(file, f),
+				Comments:    file.Comments,
+				Output:      output,
+				Unordered:   unordered,
+				EmptyOutput: output == "" && hasOutput,
+				Order:       len(flist),
+			})
+		}
+		if !hasTests && numDecl > 1 && len(flist) == 1 {
+			// If this file only has one example function, some
+			// other top-level declarations, and no tests or
+			// benchmarks, use the whole file as the example.
+			flist[0].Code = file
+			flist[0].Play = playExampleFile(file)
+		}
+		list = append(list, flist...)
+	}
+	// sort by name
+	sort.Slice(list, func(i, j int) bool {
+		return list[i].Name < list[j].Name
+	})
+	return list
+}
+
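For reference, an illustrative _test.go function of this shape (not from the patch):

	func ExampleHello() {
		fmt.Println("hello")
		// Output: hello
	}

would be returned with Name "Hello" and Output "hello\n", and, in a package whose name ends in "_test", with a non-nil Play file when the function is self-contained.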
+var outputPrefix = regexp.MustCompile(`(?i)^[[:space:]]*(unordered )?output:`)
+
+// Extracts the expected output and whether there was a valid output comment
+func exampleOutput(b *ast.BlockStmt, comments []*ast.CommentGroup) (output string, unordered, ok bool) {
+	if _, last := lastComment(b, comments); last != nil {
+		// test that it begins with the correct prefix
+		text := last.Text()
+		if loc := outputPrefix.FindStringSubmatchIndex(text); loc != nil {
+			if loc[2] != -1 {
+				unordered = true
+			}
+			text = text[loc[1]:]
+			// Strip zero or more spaces followed by \n or a single space.
+			text = strings.TrimLeft(text, " ")
+			if len(text) > 0 && text[0] == '\n' {
+				text = text[1:]
+			}
+			return text, unordered, true
+		}
+	}
+	return "", false, false // no suitable comment found
+}
+
+// isTest tells whether name looks like a test, example, or benchmark.
+// It is a Test (say) if there is a character after Test that is not a
+// lower-case letter. (We don't want Testiness.)
+func isTest(name, prefix string) bool {
+	if !strings.HasPrefix(name, prefix) {
+		return false
+	}
+	if len(name) == len(prefix) { // "Test" is ok
+		return true
+	}
+	rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
+	return !unicode.IsLower(rune)
+}
+
+// playExample synthesizes a new *ast.File based on the provided
+// file with the provided function body as the body of main.
+func playExample(file *ast.File, f *ast.FuncDecl) *ast.File {
+	body := f.Body
+
+	if !strings.HasSuffix(file.Name.Name, "_test") {
+		// We don't support examples that are part of the
+		// greater package (yet).
+		return nil
+	}
+
+	// Collect top-level declarations in the file.
+	topDecls := make(map[*ast.Object]ast.Decl)
+	typMethods := make(map[string][]ast.Decl)
+
+	for _, decl := range file.Decls {
+		switch d := decl.(type) {
+		case *ast.FuncDecl:
+			if d.Recv == nil {
+				topDecls[d.Name.Obj] = d
+			} else {
+				if len(d.Recv.List) == 1 {
+					t := d.Recv.List[0].Type
+					tname, _ := baseTypeName(t)
+					typMethods[tname] = append(typMethods[tname], d)
+				}
+			}
+		case *ast.GenDecl:
+			for _, spec := range d.Specs {
+				switch s := spec.(type) {
+				case *ast.TypeSpec:
+					topDecls[s.Name.Obj] = d
+				case *ast.ValueSpec:
+					for _, name := range s.Names {
+						topDecls[name.Obj] = d
+					}
+				}
+			}
+		}
+	}
+
+	// Find unresolved identifiers and uses of top-level declarations.
+	unresolved := make(map[string]bool)
+	var depDecls []ast.Decl
+	hasDepDecls := make(map[ast.Decl]bool)
+
+	var inspectFunc func(ast.Node) bool
+	inspectFunc = func(n ast.Node) bool {
+		switch e := n.(type) {
+		case *ast.Ident:
+			if e.Obj == nil && e.Name != "_" {
+				unresolved[e.Name] = true
+			} else if d := topDecls[e.Obj]; d != nil {
+				if !hasDepDecls[d] {
+					hasDepDecls[d] = true
+					depDecls = append(depDecls, d)
+				}
+			}
+			return true
+		case *ast.SelectorExpr:
+			// For selector expressions, only inspect the left hand side.
+			// (For an expression like fmt.Println, only add "fmt" to the
+			// set of unresolved names, not "Println".)
+			ast.Inspect(e.X, inspectFunc)
+			return false
+		case *ast.KeyValueExpr:
+			// For key value expressions, only inspect the value
+			// as the key should be resolved by the type of the
+			// composite literal.
+			ast.Inspect(e.Value, inspectFunc)
+			return false
+		}
+		return true
+	}
+	ast.Inspect(body, inspectFunc)
+	for i := 0; i < len(depDecls); i++ {
+		switch d := depDecls[i].(type) {
+		case *ast.FuncDecl:
+			// Inspect types of parameters and results. See #28492.
+			if d.Type.Params != nil {
+				for _, p := range d.Type.Params.List {
+					ast.Inspect(p.Type, inspectFunc)
+				}
+			}
+			if d.Type.Results != nil {
+				for _, r := range d.Type.Results.List {
+					ast.Inspect(r.Type, inspectFunc)
+				}
+			}
+
+			ast.Inspect(d.Body, inspectFunc)
+		case *ast.GenDecl:
+			for _, spec := range d.Specs {
+				switch s := spec.(type) {
+				case *ast.TypeSpec:
+					ast.Inspect(s.Type, inspectFunc)
+
+					depDecls = append(depDecls, typMethods[s.Name.Name]...)
+				case *ast.ValueSpec:
+					if s.Type != nil {
+						ast.Inspect(s.Type, inspectFunc)
+					}
+					for _, val := range s.Values {
+						ast.Inspect(val, inspectFunc)
+					}
+				}
+			}
+		}
+	}
+
+	// Remove predeclared identifiers from unresolved list.
+	for n := range unresolved {
+		if predeclaredTypes[n] || predeclaredConstants[n] || predeclaredFuncs[n] {
+			delete(unresolved, n)
+		}
+	}
+
+	// Use unresolved identifiers to determine the imports used by this
+	// example. The heuristic assumes package names match base import
+	// paths for imports w/o renames (should be good enough most of the time).
+	namedImports := make(map[string]string) // [name]path
+	var blankImports []ast.Spec             // _ imports
+	for _, s := range file.Imports {
+		p, err := strconv.Unquote(s.Path.Value)
+		if err != nil {
+			continue
+		}
+		if p == "syscall/js" {
+			// We don't support examples that import syscall/js,
+			// because the package syscall/js is not available in the playground.
+			return nil
+		}
+		n := path.Base(p)
+		if s.Name != nil {
+			n = s.Name.Name
+			switch n {
+			case "_":
+				blankImports = append(blankImports, s)
+				continue
+			case ".":
+				// We can't resolve dot imports (yet).
+				return nil
+			}
+		}
+		if unresolved[n] {
+			namedImports[n] = p
+			delete(unresolved, n)
+		}
+	}
+
+	// If there are other unresolved identifiers, give up because this
+	// synthesized file is not going to build.
+	if len(unresolved) > 0 {
+		return nil
+	}
+
+	// Include documentation belonging to blank imports.
+	var comments []*ast.CommentGroup
+	for _, s := range blankImports {
+		if c := s.(*ast.ImportSpec).Doc; c != nil {
+			comments = append(comments, c)
+		}
+	}
+
+	// Include comments that are inside the function body.
+	for _, c := range file.Comments {
+		if body.Pos() <= c.Pos() && c.End() <= body.End() {
+			comments = append(comments, c)
+		}
+	}
+
+	// Strip the "Output:" or "Unordered output:" comment and adjust body
+	// end position.
+	body, comments = stripOutputComment(body, comments)
+
+	// Include documentation belonging to dependent declarations.
+	for _, d := range depDecls {
+		switch d := d.(type) {
+		case *ast.GenDecl:
+			if d.Doc != nil {
+				comments = append(comments, d.Doc)
+			}
+		case *ast.FuncDecl:
+			if d.Doc != nil {
+				comments = append(comments, d.Doc)
+			}
+		}
+	}
+
+	// Synthesize import declaration.
+	importDecl := &ast.GenDecl{
+		Tok:    token.IMPORT,
+		Lparen: 1, // Need non-zero Lparen and Rparen so that printer
+		Rparen: 1, // treats this as a factored import.
+	}
+	for n, p := range namedImports {
+		s := &ast.ImportSpec{Path: &ast.BasicLit{Value: strconv.Quote(p)}}
+		if path.Base(p) != n {
+			s.Name = ast.NewIdent(n)
+		}
+		importDecl.Specs = append(importDecl.Specs, s)
+	}
+	importDecl.Specs = append(importDecl.Specs, blankImports...)
+
+	// Synthesize main function.
+	funcDecl := &ast.FuncDecl{
+		Name: ast.NewIdent("main"),
+		Type: f.Type,
+		Body: body,
+	}
+
+	decls := make([]ast.Decl, 0, 2+len(depDecls))
+	decls = append(decls, importDecl)
+	decls = append(decls, depDecls...)
+	decls = append(decls, funcDecl)
+
+	sort.Slice(decls, func(i, j int) bool {
+		return decls[i].Pos() < decls[j].Pos()
+	})
+
+	sort.Slice(comments, func(i, j int) bool {
+		return comments[i].Pos() < comments[j].Pos()
+	})
+
+	// Synthesize file.
+	return &ast.File{
+		Name:     ast.NewIdent("main"),
+		Decls:    decls,
+		Comments: comments,
+	}
+}
+
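
End to end, this synthesis is what powers playground links: doc.Examples (same API as the standard go/doc) parses a _test.go file and, when playExample succeeds, fills Example.Play with a complete package main. A hedged usage sketch; the source string is invented for illustration:

```go
package main

import (
	"fmt"
	"go/doc"
	"go/format"
	"go/parser"
	"go/token"
	"os"
)

const src = `package foo_test

import "fmt"

func ExampleHello() {
	fmt.Println("Hello, world!")
	// Output: Hello, world!
}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "foo_test.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	for _, ex := range doc.Examples(f) {
		fmt.Printf("name=%q output=%q\n", ex.Name, ex.Output)
		if ex.Play != nil {
			// Print the synthesized, playable package main.
			if err := format.Node(os.Stdout, fset, ex.Play); err != nil {
				panic(err)
			}
		}
	}
}
```
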
+// playExampleFile takes a whole file example and synthesizes a new *ast.File
+// such that the example is function main in package main.
+func playExampleFile(file *ast.File) *ast.File {
+	// Strip copyright comment if present.
+	comments := file.Comments
+	if len(comments) > 0 && strings.HasPrefix(comments[0].Text(), "Copyright") {
+		comments = comments[1:]
+	}
+
+	// Copy declaration slice, rewriting the ExampleX function to main.
+	var decls []ast.Decl
+	for _, d := range file.Decls {
+		if f, ok := d.(*ast.FuncDecl); ok && isTest(f.Name.Name, "Example") {
+			// Copy the FuncDecl, as it may be used elsewhere.
+			newF := *f
+			newF.Name = ast.NewIdent("main")
+			newF.Body, comments = stripOutputComment(f.Body, comments)
+			d = &newF
+		}
+		decls = append(decls, d)
+	}
+
+	// Copy the File, as it may be used elsewhere.
+	f := *file
+	f.Name = ast.NewIdent("main")
+	f.Decls = decls
+	f.Comments = comments
+	return &f
+}
+
+// stripOutputComment finds and removes the "Output:" or "Unordered output:"
+// comment from body and comments, and adjusts the body block's end position.
+func stripOutputComment(body *ast.BlockStmt, comments []*ast.CommentGroup) (*ast.BlockStmt, []*ast.CommentGroup) {
+	// Do nothing if there is no "Output:" or "Unordered output:" comment.
+	i, last := lastComment(body, comments)
+	if last == nil || !outputPrefix.MatchString(last.Text()) {
+		return body, comments
+	}
+
+	// Copy body and comments, as the originals may be used elsewhere.
+	newBody := &ast.BlockStmt{
+		Lbrace: body.Lbrace,
+		List:   body.List,
+		Rbrace: last.Pos(),
+	}
+	newComments := make([]*ast.CommentGroup, len(comments)-1)
+	copy(newComments, comments[:i])
+	copy(newComments[i:], comments[i+1:])
+	return newBody, newComments
+}
+
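
Note the copy-on-write handling: rather than splicing the slice in place, stripOutputComment builds a fresh comment slice with entry i dropped, so callers still holding the original slice are unaffected. The same idiom in isolation (a generic sketch, not part of the package):

```go
package main

import "fmt"

// withoutIndex returns a copy of s with the element at i removed,
// leaving s itself untouched.
func withoutIndex(s []string, i int) []string {
	out := make([]string, len(s)-1)
	copy(out, s[:i])
	copy(out[i:], s[i+1:])
	return out
}

func main() {
	s := []string{"a", "b", "c"}
	fmt.Println(withoutIndex(s, 1)) // [a c]
	fmt.Println(s)                  // [a b c]: original preserved
}
```
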
+// lastComment returns the last comment inside the provided block.
+func lastComment(b *ast.BlockStmt, c []*ast.CommentGroup) (i int, last *ast.CommentGroup) {
+	if b == nil {
+		return
+	}
+	pos, end := b.Pos(), b.End()
+	for j, cg := range c {
+		if cg.Pos() < pos {
+			continue
+		}
+		if cg.End() > end {
+			break
+		}
+		i, last = j, cg
+	}
+	return
+}
+
+// classifyExamples classifies examples and assigns them to the Examples field
+// of the relevant Func, Type, or Package that the example is associated with.
+//
+// The classification process is ambiguous in some cases:
+//
+// 	- ExampleFoo_Bar matches a type named Foo_Bar
+// 	  or a method named Foo.Bar.
+// 	- ExampleFoo_bar matches a type named Foo_bar
+// 	  or Foo (with a "bar" suffix).
+//
+// Examples with malformed names are not associated with anything.
+//
+func classifyExamples(p *Package, examples []*Example) {
+	if len(examples) == 0 {
+		return
+	}
+
+	// Mapping of names for funcs, types, and methods to the example listing.
+	ids := make(map[string]*[]*Example)
+	ids[""] = &p.Examples // package-level examples have an empty name
+	for _, f := range p.Funcs {
+		if !token.IsExported(f.Name) {
+			continue
+		}
+		ids[f.Name] = &f.Examples
+	}
+	for _, t := range p.Types {
+		if !token.IsExported(t.Name) {
+			continue
+		}
+		ids[t.Name] = &t.Examples
+		for _, f := range t.Funcs {
+			if !token.IsExported(f.Name) {
+				continue
+			}
+			ids[f.Name] = &f.Examples
+		}
+		for _, m := range t.Methods {
+			if !token.IsExported(m.Name) {
+				continue
+			}
+			ids[strings.TrimPrefix(m.Recv, "*")+"_"+m.Name] = &m.Examples
+		}
+	}
+
+	// Group each example with the associated func, type, or method.
+	for _, ex := range examples {
+		// Consider all possible split points for the suffix
+		// by starting at the end of string (no suffix case),
+		// then trying all positions that contain a '_' character.
+		//
+		// An association is made on the first successful match.
+		// Examples with malformed names that match nothing are skipped.
+		for i := len(ex.Name); i >= 0; i = strings.LastIndexByte(ex.Name[:i], '_') {
+			prefix, suffix, ok := splitExampleName(ex.Name, i)
+			if !ok {
+				continue
+			}
+			exs, ok := ids[prefix]
+			if !ok {
+				continue
+			}
+			ex.Suffix = suffix
+			*exs = append(*exs, ex)
+			break
+		}
+	}
+
+	// Sort the list of examples according to the user-specified suffix name.
+	for _, exs := range ids {
+		sort.Slice((*exs), func(i, j int) bool {
+			return (*exs)[i].Suffix < (*exs)[j].Suffix
+		})
+	}
+}
+
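
To make the split scan concrete, here is the candidate sequence the loop above tries for one hypothetical example name; the first prefix that names a known func, type, or method wins:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	name := "Type1_Func1_suffix"
	// Mirror the loop in classifyExamples: the whole name first,
	// then every '_' position from right to left.
	for i := len(name); i >= 0; i = strings.LastIndexByte(name[:i], '_') {
		prefix := name[:i]
		suffix := ""
		if i < len(name) {
			suffix = name[i+1:]
		}
		fmt.Printf("try prefix=%q suffix=%q\n", prefix, suffix)
	}
	// Prints:
	// try prefix="Type1_Func1_suffix" suffix=""
	// try prefix="Type1_Func1" suffix="suffix"
	// try prefix="Type1" suffix="Func1_suffix"
}
```
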
+// splitExampleName attempts to split example name s at index i,
+// and reports if that produces a valid split. The suffix may be
+// absent. Otherwise, it must start with a lower-case letter and
+// be preceded by '_'.
+//
+// One of i == len(s) or s[i] == '_' must be true.
+func splitExampleName(s string, i int) (prefix, suffix string, ok bool) {
+	if i == len(s) {
+		return s, "", true
+	}
+	if i == len(s)-1 {
+		return "", "", false
+	}
+	prefix, suffix = s[:i], s[i+1:]
+	return prefix, suffix, isExampleSuffix(suffix)
+}
+
+func isExampleSuffix(s string) bool {
+	r, size := utf8.DecodeRuneInString(s)
+	return size > 0 && unicode.IsLower(r)
+}
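
A suffix is therefore valid only when its first rune is a lower-case letter, which is why names like Example_BadSuffix and Example_世界 in the tests below are rejected. A quick sketch mirroring the helper:

```go
package main

import (
	"fmt"
	"unicode"
	"unicode/utf8"
)

// isExampleSuffix mirrors the helper above: a valid suffix is
// non-empty and starts with a lower-case letter.
func isExampleSuffix(s string) bool {
	r, size := utf8.DecodeRuneInString(s)
	return size > 0 && unicode.IsLower(r)
}

func main() {
	fmt.Println(isExampleSuffix("suffix")) // true
	fmt.Println(isExampleSuffix("Bad"))    // false: upper-case first rune
	fmt.Println(isExampleSuffix("世界"))   // false: no lower-case property
	fmt.Println(isExampleSuffix(""))       // false: empty
}
```
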
diff --git a/third_party/go/doc/example_test.go b/third_party/go/doc/example_test.go
new file mode 100644
index 00000000000..208ab9a4904
--- /dev/null
+++ b/third_party/go/doc/example_test.go
@@ -0,0 +1,720 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.16
+
+package doc_test
+
+import (
+	"bytes"
+	"fmt"
+	"go/ast"
+	"go/doc"
+	"go/format"
+	"go/parser"
+	"go/token"
+	"reflect"
+	"strings"
+	"testing"
+)
+
+const exampleTestFile = `
+package foo_test
+
+import (
+	"flag"
+	"fmt"
+	"log"
+	"sort"
+	"os/exec"
+)
+
+func ExampleHello() {
+	fmt.Println("Hello, world!")
+	// Output: Hello, world!
+}
+
+func ExampleImport() {
+	out, err := exec.Command("date").Output()
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("The date is %s\n", out)
+}
+
+func ExampleKeyValue() {
+	v := struct {
+		a string
+		b int
+	}{
+		a: "A",
+		b: 1,
+	}
+	fmt.Print(v)
+	// Output: a: "A", b: 1
+}
+
+func ExampleKeyValueImport() {
+	f := flag.Flag{
+		Name: "play",
+	}
+	fmt.Print(f)
+	// Output: Name: "play"
+}
+
+var keyValueTopDecl = struct {
+	a string
+	b int
+}{
+	a: "B",
+	b: 2,
+}
+
+func ExampleKeyValueTopDecl() {
+	fmt.Print(keyValueTopDecl)
+	// Output: a: "B", b: 2
+}
+
+// Person represents a person by name and age.
+type Person struct {
+    Name string
+    Age  int
+}
+
+// String returns a string representation of the Person.
+func (p Person) String() string {
+    return fmt.Sprintf("%s: %d", p.Name, p.Age)
+}
+
+// ByAge implements sort.Interface for []Person based on
+// the Age field.
+type ByAge []Person
+
+// Len returns the number of elements in ByAge.
+func (a (ByAge)) Len() int { return len(a) }
+
+// Swap swaps the elements in ByAge.
+func (a ByAge) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a ByAge) Less(i, j int) bool { return a[i].Age < a[j].Age }
+
+// people is the slice of Person
+var people = []Person{
+	{"Bob", 31},
+	{"John", 42},
+	{"Michael", 17},
+	{"Jenny", 26},
+}
+
+func ExampleSort() {
+    fmt.Println(people)
+    sort.Sort(ByAge(people))
+    fmt.Println(people)
+    // Output:
+    // [Bob: 31 John: 42 Michael: 17 Jenny: 26]
+    // [Michael: 17 Jenny: 26 Bob: 31 John: 42]
+}
+`
+
+var exampleTestCases = []struct {
+	Name, Play, Output string
+}{
+	{
+		Name:   "Hello",
+		Play:   exampleHelloPlay,
+		Output: "Hello, world!\n",
+	},
+	{
+		Name: "Import",
+		Play: exampleImportPlay,
+	},
+	{
+		Name:   "KeyValue",
+		Play:   exampleKeyValuePlay,
+		Output: "a: \"A\", b: 1\n",
+	},
+	{
+		Name:   "KeyValueImport",
+		Play:   exampleKeyValueImportPlay,
+		Output: "Name: \"play\"\n",
+	},
+	{
+		Name:   "KeyValueTopDecl",
+		Play:   exampleKeyValueTopDeclPlay,
+		Output: "a: \"B\", b: 2\n",
+	},
+	{
+		Name:   "Sort",
+		Play:   exampleSortPlay,
+		Output: "[Bob: 31 John: 42 Michael: 17 Jenny: 26]\n[Michael: 17 Jenny: 26 Bob: 31 John: 42]\n",
+	},
+}
+
+const exampleHelloPlay = `package main
+
+import (
+	"fmt"
+)
+
+func main() {
+	fmt.Println("Hello, world!")
+}
+`
+const exampleImportPlay = `package main
+
+import (
+	"fmt"
+	"log"
+	"os/exec"
+)
+
+func main() {
+	out, err := exec.Command("date").Output()
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Printf("The date is %s\n", out)
+}
+`
+
+const exampleKeyValuePlay = `package main
+
+import (
+	"fmt"
+)
+
+func main() {
+	v := struct {
+		a string
+		b int
+	}{
+		a: "A",
+		b: 1,
+	}
+	fmt.Print(v)
+}
+`
+
+const exampleKeyValueImportPlay = `package main
+
+import (
+	"flag"
+	"fmt"
+)
+
+func main() {
+	f := flag.Flag{
+		Name: "play",
+	}
+	fmt.Print(f)
+}
+`
+
+const exampleKeyValueTopDeclPlay = `package main
+
+import (
+	"fmt"
+)
+
+var keyValueTopDecl = struct {
+	a string
+	b int
+}{
+	a: "B",
+	b: 2,
+}
+
+func main() {
+	fmt.Print(keyValueTopDecl)
+}
+`
+
+const exampleSortPlay = `package main
+
+import (
+	"fmt"
+	"sort"
+)
+
+// Person represents a person by name and age.
+type Person struct {
+	Name string
+	Age  int
+}
+
+// String returns a string representation of the Person.
+func (p Person) String() string {
+	return fmt.Sprintf("%s: %d", p.Name, p.Age)
+}
+
+// ByAge implements sort.Interface for []Person based on
+// the Age field.
+type ByAge []Person
+
+// Len returns the number of elements in ByAge.
+func (a ByAge) Len() int { return len(a) }
+
+// Swap swaps the elements in ByAge.
+func (a ByAge) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a ByAge) Less(i, j int) bool { return a[i].Age < a[j].Age }
+
+// people is the slice of Person
+var people = []Person{
+	{"Bob", 31},
+	{"John", 42},
+	{"Michael", 17},
+	{"Jenny", 26},
+}
+
+func main() {
+	fmt.Println(people)
+	sort.Sort(ByAge(people))
+	fmt.Println(people)
+}
+`
+
+func TestExamples(t *testing.T) {
+	fset := token.NewFileSet()
+	file, err := parser.ParseFile(fset, "test.go", strings.NewReader(exampleTestFile), parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+	for i, e := range doc.Examples(file) {
+		c := exampleTestCases[i]
+		if e.Name != c.Name {
+			t.Errorf("got Name == %q, want %q", e.Name, c.Name)
+		}
+		if w := c.Play; w != "" {
+			g := formatFile(t, fset, e.Play)
+			if g != w {
+				t.Errorf("%s: got Play == %q, want %q", c.Name, g, w)
+			}
+		}
+		if g, w := e.Output, c.Output; g != w {
+			t.Errorf("%s: got Output == %q, want %q", c.Name, g, w)
+		}
+	}
+}
+
+const exampleWholeFile = `package foo_test
+
+type X int
+
+func (X) Foo() {
+}
+
+func (X) TestBlah() {
+}
+
+func (X) BenchmarkFoo() {
+}
+
+func Example() {
+	fmt.Println("Hello, world!")
+	// Output: Hello, world!
+}
+`
+
+const exampleWholeFileOutput = `package main
+
+type X int
+
+func (X) Foo() {
+}
+
+func (X) TestBlah() {
+}
+
+func (X) BenchmarkFoo() {
+}
+
+func main() {
+	fmt.Println("Hello, world!")
+}
+`
+
+const exampleWholeFileFunction = `package foo_test
+
+func Foo(x int) {
+}
+
+func Example() {
+	fmt.Println("Hello, world!")
+	// Output: Hello, world!
+}
+`
+
+const exampleWholeFileFunctionOutput = `package main
+
+func Foo(x int) {
+}
+
+func main() {
+	fmt.Println("Hello, world!")
+}
+`
+
+var exampleWholeFileTestCases = []struct {
+	Title, Source, Play, Output string
+}{
+	{
+		"Methods",
+		exampleWholeFile,
+		exampleWholeFileOutput,
+		"Hello, world!\n",
+	},
+	{
+		"Function",
+		exampleWholeFileFunction,
+		exampleWholeFileFunctionOutput,
+		"Hello, world!\n",
+	},
+}
+
+func TestExamplesWholeFile(t *testing.T) {
+	for _, c := range exampleWholeFileTestCases {
+		fset := token.NewFileSet()
+		file, err := parser.ParseFile(fset, "test.go", strings.NewReader(c.Source), parser.ParseComments)
+		if err != nil {
+			t.Fatal(err)
+		}
+		es := doc.Examples(file)
+		if len(es) != 1 {
+			t.Fatalf("%s: wrong number of examples; got %d want 1", c.Title, len(es))
+		}
+		e := es[0]
+		if e.Name != "" {
+			t.Errorf("%s: got Name == %q, want %q", c.Title, e.Name, "")
+		}
+		if g, w := formatFile(t, fset, e.Play), c.Play; g != w {
+			t.Errorf("%s: got Play == %q, want %q", c.Title, g, w)
+		}
+		if g, w := e.Output, c.Output; g != w {
+			t.Errorf("%s: got Output == %q, want %q", c.Title, g, w)
+		}
+	}
+}
+
+const exampleInspectSignature = `package foo_test
+
+import (
+	"bytes"
+	"io"
+)
+
+func getReader() io.Reader { return nil }
+
+func do(b bytes.Reader) {}
+
+func Example() {
+	getReader()
+	do()
+	// Output:
+}
+
+func ExampleIgnored() {
+}
+`
+
+const exampleInspectSignatureOutput = `package main
+
+import (
+	"bytes"
+	"io"
+)
+
+func getReader() io.Reader { return nil }
+
+func do(b bytes.Reader) {}
+
+func main() {
+	getReader()
+	do()
+}
+`
+
+func TestExampleInspectSignature(t *testing.T) {
+	// Verify that "bytes" and "io" are imported. See issue #28492.
+	fset := token.NewFileSet()
+	file, err := parser.ParseFile(fset, "test.go", strings.NewReader(exampleInspectSignature), parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+	es := doc.Examples(file)
+	if len(es) != 2 {
+		t.Fatalf("wrong number of examples; got %d want 2", len(es))
+	}
+	// We are interested in the first example only.
+	e := es[0]
+	if e.Name != "" {
+		t.Errorf("got Name == %q, want %q", e.Name, "")
+	}
+	if g, w := formatFile(t, fset, e.Play), exampleInspectSignatureOutput; g != w {
+		t.Errorf("got Play == %q, want %q", g, w)
+	}
+	if g, w := e.Output, ""; g != w {
+		t.Errorf("got Output == %q, want %q", g, w)
+	}
+}
+
+const exampleEmpty = `
+package p
+func Example() {}
+func Example_a()
+`
+
+const exampleEmptyOutput = `package main
+
+func main() {}
+func main()
+`
+
+func TestExampleEmpty(t *testing.T) {
+	fset := token.NewFileSet()
+	file, err := parser.ParseFile(fset, "test.go", strings.NewReader(exampleEmpty), parser.ParseComments)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	es := doc.Examples(file)
+	if len(es) != 1 {
+		t.Fatalf("wrong number of examples; got %d want 1", len(es))
+	}
+	e := es[0]
+	if e.Name != "" {
+		t.Errorf("got Name == %q, want %q", e.Name, "")
+	}
+	if g, w := formatFile(t, fset, e.Play), exampleEmptyOutput; g != w {
+		t.Errorf("got Play == %q, want %q", g, w)
+	}
+	if g, w := e.Output, ""; g != w {
+		t.Errorf("got Output == %q, want %q", g, w)
+	}
+}
+
+func formatFile(t *testing.T, fset *token.FileSet, n *ast.File) string {
+	if n == nil {
+		return ""
+	}
+	var buf bytes.Buffer
+	if err := format.Node(&buf, fset, n); err != nil {
+		t.Fatal(err)
+	}
+	return buf.String()
+}
+
+// This example illustrates how to use NewFromFiles
+// to compute package documentation with examples.
+func ExampleNewFromFiles() {
+	// src and test are two source files that make up
+	// a package whose documentation will be computed.
+	const src = `
+// This is the package comment.
+package p
+
+import "fmt"
+
+// This comment is associated with the Greet function.
+func Greet(who string) {
+	fmt.Printf("Hello, %s!\n", who)
+}
+`
+	const test = `
+package p_test
+
+// This comment is associated with the ExampleGreet_world example.
+func ExampleGreet_world() {
+	Greet("world")
+}
+`
+
+	// Create the AST by parsing src and test.
+	fset := token.NewFileSet()
+	files := []*ast.File{
+		mustParse(fset, "src.go", src),
+		mustParse(fset, "src_test.go", test),
+	}
+
+	// Compute package documentation with examples.
+	p, err := doc.NewFromFiles(fset, files, "example.com/p")
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("package %s - %s", p.Name, p.Doc)
+	fmt.Printf("func %s - %s", p.Funcs[0].Name, p.Funcs[0].Doc)
+	fmt.Printf(" ⤷ example with suffix %q - %s", p.Funcs[0].Examples[0].Suffix, p.Funcs[0].Examples[0].Doc)
+
+	// Output:
+	// package p - This is the package comment.
+	// func Greet - This comment is associated with the Greet function.
+	//  ⤷ example with suffix "world" - This comment is associated with the ExampleGreet_world example.
+}
+
+func TestClassifyExamples(t *testing.T) {
+	const src = `
+package p
+
+const Const1 = 0
+var   Var1   = 0
+
+type (
+	Type1     int
+	Type1_Foo int
+	Type1_foo int
+	type2     int
+
+	Embed struct { Type1 }
+	Uembed struct { type2 }
+)
+
+func Func1()     {}
+func Func1_Foo() {}
+func Func1_foo() {}
+func func2()     {}
+
+func (Type1) Func1() {}
+func (Type1) Func1_Foo() {}
+func (Type1) Func1_foo() {}
+func (Type1) func2() {}
+
+func (type2) Func1() {}
+
+type (
+	Conflict          int
+	Conflict_Conflict int
+	Conflict_conflict int
+)
+
+func (Conflict) Conflict() {}
+`
+	const test = `
+package p_test
+
+func ExampleConst1() {} // invalid - no support for consts and vars
+func ExampleVar1()   {} // invalid - no support for consts and vars
+
+func Example()               {}
+func Example_()              {} // invalid - suffix must start with a lower-case letter
+func Example_suffix()        {}
+func Example_suffix_xX_X_x() {}
+func Example_世界()           {} // invalid - suffix must start with a lower-case letter
+func Example_123()           {} // invalid - suffix must start with a lower-case letter
+func Example_BadSuffix()     {} // invalid - suffix must start with a lower-case letter
+
+func ExampleType1()               {}
+func ExampleType1_()              {} // invalid - suffix must start with a lower-case letter
+func ExampleType1_suffix()        {}
+func ExampleType1_BadSuffix()     {} // invalid - suffix must start with a lower-case letter
+func ExampleType1_Foo()           {}
+func ExampleType1_Foo_suffix()    {}
+func ExampleType1_Foo_BadSuffix() {} // invalid - suffix must start with a lower-case letter
+func ExampleType1_foo()           {}
+func ExampleType1_foo_suffix()    {}
+func ExampleType1_foo_Suffix()    {} // matches Type1, instead of Type1_foo
+func Exampletype2()               {} // invalid - cannot match unexported
+
+func ExampleFunc1()               {}
+func ExampleFunc1_()              {} // invalid - suffix must start with a lower-case letter
+func ExampleFunc1_suffix()        {}
+func ExampleFunc1_BadSuffix()     {} // invalid - suffix must start with a lower-case letter
+func ExampleFunc1_Foo()           {}
+func ExampleFunc1_Foo_suffix()    {}
+func ExampleFunc1_Foo_BadSuffix() {} // invalid - suffix must start with a lower-case letter
+func ExampleFunc1_foo()           {}
+func ExampleFunc1_foo_suffix()    {}
+func ExampleFunc1_foo_Suffix()    {} // matches Func1, instead of Func1_foo
+func Examplefunc1()               {} // invalid - cannot match unexported
+
+func ExampleType1_Func1()               {}
+func ExampleType1_Func1_()              {} // invalid - suffix must start with a lower-case letter
+func ExampleType1_Func1_suffix()        {}
+func ExampleType1_Func1_BadSuffix()     {} // invalid - suffix must start with a lower-case letter
+func ExampleType1_Func1_Foo()           {}
+func ExampleType1_Func1_Foo_suffix()    {}
+func ExampleType1_Func1_Foo_BadSuffix() {} // invalid - suffix must start with a lower-case letter
+func ExampleType1_Func1_foo()           {}
+func ExampleType1_Func1_foo_suffix()    {}
+func ExampleType1_Func1_foo_Suffix()    {} // matches Type1.Func1, instead of Type1.Func1_foo
+func ExampleType1_func2()               {} // matches Type1, instead of Type1.func2
+
+func ExampleEmbed_Func1()         {} // invalid - no support for forwarded methods from embedding exported type
+func ExampleUembed_Func1()        {} // methods from embedding unexported types are OK
+func ExampleUembed_Func1_suffix() {}
+
+func ExampleConflict_Conflict()        {} // ambiguous with either Conflict or Conflict_Conflict type
+func ExampleConflict_conflict()        {} // ambiguous with either Conflict or Conflict_conflict type
+func ExampleConflict_Conflict_suffix() {} // ambiguous with either Conflict or Conflict_Conflict type
+func ExampleConflict_conflict_suffix() {} // ambiguous with either Conflict or Conflict_conflict type
+`
+
+	// Parse literal source code as a *doc.Package.
+	fset := token.NewFileSet()
+	files := []*ast.File{
+		mustParse(fset, "src.go", src),
+		mustParse(fset, "src_test.go", test),
+	}
+	p, err := doc.NewFromFiles(fset, files, "example.com/p")
+	if err != nil {
+		t.Fatalf("doc.NewFromFiles: %v", err)
+	}
+
+	// Collect the association of examples to top-level identifiers.
+	got := map[string][]string{}
+	got[""] = exampleNames(p.Examples)
+	for _, f := range p.Funcs {
+		got[f.Name] = exampleNames(f.Examples)
+	}
+	for _, t := range p.Types {
+		got[t.Name] = exampleNames(t.Examples)
+		for _, f := range t.Funcs {
+			got[f.Name] = exampleNames(f.Examples)
+		}
+		for _, m := range t.Methods {
+			got[t.Name+"."+m.Name] = exampleNames(m.Examples)
+		}
+	}
+
+	want := map[string][]string{
+		"": {"", "suffix", "suffix_xX_X_x"}, // Package-level examples.
+
+		"Type1":     {"", "foo_Suffix", "func2", "suffix"},
+		"Type1_Foo": {"", "suffix"},
+		"Type1_foo": {"", "suffix"},
+
+		"Func1":     {"", "foo_Suffix", "suffix"},
+		"Func1_Foo": {"", "suffix"},
+		"Func1_foo": {"", "suffix"},
+
+		"Type1.Func1":     {"", "foo_Suffix", "suffix"},
+		"Type1.Func1_Foo": {"", "suffix"},
+		"Type1.Func1_foo": {"", "suffix"},
+
+		"Uembed.Func1": {"", "suffix"},
+
+		// These are implementation dependent due to the ambiguous parsing.
+		"Conflict_Conflict": {"", "suffix"},
+		"Conflict_conflict": {"", "suffix"},
+	}
+
+	for id := range got {
+		if !reflect.DeepEqual(got[id], want[id]) {
+			t.Errorf("classification mismatch for %q:\ngot  %q\nwant %q", id, got[id], want[id])
+		}
+	}
+}
+
+func exampleNames(exs []*doc.Example) (out []string) {
+	for _, ex := range exs {
+		out = append(out, ex.Suffix)
+	}
+	return out
+}
+
+func mustParse(fset *token.FileSet, filename, src string) *ast.File {
+	f, err := parser.ParseFile(fset, filename, src, parser.ParseComments)
+	if err != nil {
+		panic(err)
+	}
+	return f
+}
diff --git a/third_party/go/doc/exports.go b/third_party/go/doc/exports.go
new file mode 100644
index 00000000000..2a73668895d
--- /dev/null
+++ b/third_party/go/doc/exports.go
@@ -0,0 +1,311 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.16
+
+// This file implements export filtering of an AST.
+
+package doc
+
+import (
+	"go/ast"
+	"go/token"
+)
+
+// filterIdentList removes unexported names from list in place
+// and returns the resulting list.
+//
+func filterIdentList(list []*ast.Ident) []*ast.Ident {
+	j := 0
+	for _, x := range list {
+		if token.IsExported(x.Name) {
+			list[j] = x
+			j++
+		}
+	}
+	return list[0:j]
+}
+
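
filterIdentList is the classic in-place filter: keep a write index j, copy survivors forward, and reslice, so the backing array is reused and order is preserved. filterExprList, filterSpecList, and fileExports below follow the same pattern. A generic sketch:

```go
package main

import (
	"fmt"
	"go/token"
)

// filterExported keeps only exported names, reusing the backing array
// exactly as filterIdentList does.
func filterExported(names []string) []string {
	j := 0
	for _, n := range names {
		if token.IsExported(n) {
			names[j] = n
			j++
		}
	}
	return names[:j]
}

func main() {
	fmt.Println(filterExported([]string{"Foo", "bar", "Baz", "_"}))
	// Prints: [Foo Baz]
}
```
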
+var underscore = ast.NewIdent("_")
+
+func filterCompositeLit(lit *ast.CompositeLit, filter Filter, export bool) {
+	n := len(lit.Elts)
+	lit.Elts = filterExprList(lit.Elts, filter, export)
+	if len(lit.Elts) < n {
+		lit.Incomplete = true
+	}
+}
+
+func filterExprList(list []ast.Expr, filter Filter, export bool) []ast.Expr {
+	j := 0
+	for _, exp := range list {
+		switch x := exp.(type) {
+		case *ast.CompositeLit:
+			filterCompositeLit(x, filter, export)
+		case *ast.KeyValueExpr:
+			if x, ok := x.Key.(*ast.Ident); ok && !filter(x.Name) {
+				continue
+			}
+			if x, ok := x.Value.(*ast.CompositeLit); ok {
+				filterCompositeLit(x, filter, export)
+			}
+		}
+		list[j] = exp
+		j++
+	}
+	return list[0:j]
+}
+
+// updateIdentList replaces all unexported identifiers with underscore
+// and reports whether at least one exported name exists.
+func updateIdentList(list []*ast.Ident) (hasExported bool) {
+	for i, x := range list {
+		if token.IsExported(x.Name) {
+			hasExported = true
+		} else {
+			list[i] = underscore
+		}
+	}
+	return hasExported
+}
+
+// hasExportedName reports whether list contains any exported names.
+//
+func hasExportedName(list []*ast.Ident) bool {
+	for _, x := range list {
+		if x.IsExported() {
+			return true
+		}
+	}
+	return false
+}
+
+// removeErrorField removes anonymous fields named "error" from an interface.
+// This is called when "error" has been determined to be a local name,
+// not the predeclared type.
+//
+func removeErrorField(ityp *ast.InterfaceType) {
+	list := ityp.Methods.List // we know that ityp.Methods != nil
+	j := 0
+	for _, field := range list {
+		keepField := true
+		if n := len(field.Names); n == 0 {
+			// anonymous field
+			if fname, _ := baseTypeName(field.Type); fname == "error" {
+				keepField = false
+			}
+		}
+		if keepField {
+			list[j] = field
+			j++
+		}
+	}
+	if j < len(list) {
+		ityp.Incomplete = true
+	}
+	ityp.Methods.List = list[0:j]
+}
+
+// filterFieldList removes unexported fields (field names) from the field list
+// in place and reports whether fields were removed. Anonymous fields are
+// recorded with the parent type. filterType is called with the types of
+// all remaining fields.
+//
+func (r *reader) filterFieldList(parent *namedType, fields *ast.FieldList, ityp *ast.InterfaceType) (removedFields bool) {
+	if fields == nil {
+		return
+	}
+	list := fields.List
+	j := 0
+	for _, field := range list {
+		keepField := false
+		if n := len(field.Names); n == 0 {
+			// anonymous field
+			fname := r.recordAnonymousField(parent, field.Type)
+			if token.IsExported(fname) {
+				keepField = true
+			} else if ityp != nil && fname == "error" {
+				// possibly the predeclared error interface; keep
+				// it for now but remember this interface so that
+				// it can be fixed if error is also defined locally
+				keepField = true
+				r.remember(ityp)
+			}
+		} else {
+			field.Names = filterIdentList(field.Names)
+			if len(field.Names) < n {
+				removedFields = true
+			}
+			if len(field.Names) > 0 {
+				keepField = true
+			}
+		}
+		if keepField {
+			r.filterType(nil, field.Type)
+			list[j] = field
+			j++
+		}
+	}
+	if j < len(list) {
+		removedFields = true
+	}
+	fields.List = list[0:j]
+	return
+}
+
+// filterParamList applies filterType to each parameter type in fields.
+//
+func (r *reader) filterParamList(fields *ast.FieldList) {
+	if fields != nil {
+		for _, f := range fields.List {
+			r.filterType(nil, f.Type)
+		}
+	}
+}
+
+// filterType strips any unexported struct fields or method types from typ
+// in place. If fields (or methods) have been removed, the corresponding
+// struct or interface type has the Incomplete field set to true.
+//
+func (r *reader) filterType(parent *namedType, typ ast.Expr) {
+	switch t := typ.(type) {
+	case *ast.Ident:
+		// nothing to do
+	case *ast.ParenExpr:
+		r.filterType(nil, t.X)
+	case *ast.ArrayType:
+		r.filterType(nil, t.Elt)
+	case *ast.StructType:
+		if r.filterFieldList(parent, t.Fields, nil) {
+			t.Incomplete = true
+		}
+	case *ast.FuncType:
+		r.filterParamList(t.Params)
+		r.filterParamList(t.Results)
+	case *ast.InterfaceType:
+		if r.filterFieldList(parent, t.Methods, t) {
+			t.Incomplete = true
+		}
+	case *ast.MapType:
+		r.filterType(nil, t.Key)
+		r.filterType(nil, t.Value)
+	case *ast.ChanType:
+		r.filterType(nil, t.Value)
+	}
+}
+
+func (r *reader) filterSpec(spec ast.Spec) bool {
+	switch s := spec.(type) {
+	case *ast.ImportSpec:
+		// always keep imports so we can collect them
+		return true
+	case *ast.ValueSpec:
+		s.Values = filterExprList(s.Values, token.IsExported, true)
+		if len(s.Values) > 0 || s.Type == nil && len(s.Values) == 0 {
+			// If there are values declared on RHS, just replace the unexported
+			// identifiers on the LHS with underscore, so that it matches
+			// the sequence of expression on the RHS.
+			//
+			// Similarly, if there are no type and values, then this expression
+			// must be following an iota expression, where order matters.
+			if updateIdentList(s.Names) {
+				r.filterType(nil, s.Type)
+				return true
+			}
+		} else {
+			s.Names = filterIdentList(s.Names)
+			if len(s.Names) > 0 {
+				r.filterType(nil, s.Type)
+				return true
+			}
+		}
+	case *ast.TypeSpec:
+		if name := s.Name.Name; token.IsExported(name) {
+			r.filterType(r.lookupType(s.Name.Name), s.Type)
+			return true
+		} else if name == "error" {
+			// special case: remember that error is declared locally
+			r.errorDecl = true
+		}
+	}
+	return false
+}
+
+// copyConstType returns a copy of typ with position pos.
+// typ must be a valid constant type.
+// In practice, only (possibly qualified) identifiers are possible.
+//
+func copyConstType(typ ast.Expr, pos token.Pos) ast.Expr {
+	switch typ := typ.(type) {
+	case *ast.Ident:
+		return &ast.Ident{Name: typ.Name, NamePos: pos}
+	case *ast.SelectorExpr:
+		if id, ok := typ.X.(*ast.Ident); ok {
+			// presumably a qualified identifier
+			return &ast.SelectorExpr{
+				Sel: ast.NewIdent(typ.Sel.Name),
+				X:   &ast.Ident{Name: id.Name, NamePos: pos},
+			}
+		}
+	}
+	return nil // shouldn't happen, but be conservative and don't panic
+}
+
+func (r *reader) filterSpecList(list []ast.Spec, tok token.Token) []ast.Spec {
+	if tok == token.CONST {
+		// Propagate any type information that would get lost otherwise
+		// when unexported constants are filtered.
+		var prevType ast.Expr
+		for _, spec := range list {
+			spec := spec.(*ast.ValueSpec)
+			if spec.Type == nil && len(spec.Values) == 0 && prevType != nil {
+				// provide current spec with an explicit type
+				spec.Type = copyConstType(prevType, spec.Pos())
+			}
+			if hasExportedName(spec.Names) {
+				// exported names are preserved so there's no need to propagate the type
+				prevType = nil
+			} else {
+				prevType = spec.Type
+			}
+		}
+	}
+
+	j := 0
+	for _, s := range list {
+		if r.filterSpec(s) {
+			list[j] = s
+			j++
+		}
+	}
+	return list[0:j]
+}
+
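
The const handling above matters for groups where an unexported spec carries the type for later iota entries: once that spec is filtered away, copyConstType re-attaches an explicit type to the first surviving spec. A hedged sketch against the standard go/doc, whose behavior is assumed to match this vendored copy; the Size type and constants are invented:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/doc"
	"go/format"
	"go/parser"
	"go/token"
	"os"
)

const src = `package p

type Size int

const (
	small Size = iota // unexported; filtered out by default
	Medium
	Large
)
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	pkg := &ast.Package{Name: "p", Files: map[string]*ast.File{"p.go": f}}
	d := doc.New(pkg, "example.com/p", 0)

	// The const group is associated with Size; after filtering, the first
	// surviving spec carries an explicit Size type so Medium and Large stay typed.
	for _, t := range d.Types {
		for _, c := range t.Consts {
			format.Node(os.Stdout, fset, c.Decl)
			fmt.Println()
		}
	}
}
```
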
+func (r *reader) filterDecl(decl ast.Decl) bool {
+	switch d := decl.(type) {
+	case *ast.GenDecl:
+		d.Specs = r.filterSpecList(d.Specs, d.Tok)
+		return len(d.Specs) > 0
+	case *ast.FuncDecl:
+		// ok to filter these methods early because any
+		// conflicting method will be filtered here, too -
+		// thus, removing these methods early will not lead
+		// to the false removal of possible conflicts
+		return token.IsExported(d.Name.Name)
+	}
+	return false
+}
+
+// fileExports removes unexported declarations from src in place.
+//
+func (r *reader) fileExports(src *ast.File) {
+	j := 0
+	for _, d := range src.Decls {
+		if r.filterDecl(d) {
+			src.Decls[j] = d
+			j++
+		}
+	}
+	src.Decls = src.Decls[0:j]
+}
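
The effect of fileExports is observable through the public API: with the default mode, doc.New prunes unexported declarations, while doc.AllDecls keeps them. A hedged sketch using the standard go/doc; note the re-parse per mode, since doc.New filters the AST destructively:

```go
package main

import (
	"fmt"
	"go/ast"
	"go/doc"
	"go/parser"
	"go/token"
)

const src = `package p

func Exported() {}
func unexported() {}
`

func main() {
	for _, mode := range []doc.Mode{0, doc.AllDecls} {
		// Re-parse for each mode: doc.New rewrites the AST in place.
		fset := token.NewFileSet()
		f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
		if err != nil {
			panic(err)
		}
		pkg := &ast.Package{Name: "p", Files: map[string]*ast.File{"p.go": f}}
		d := doc.New(pkg, "example.com/p", mode)
		var names []string
		for _, fn := range d.Funcs {
			names = append(names, fn.Name)
		}
		fmt.Printf("mode=%d funcs=%v\n", mode, names)
	}
}
```
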
diff --git a/third_party/go/doc/filter.go b/third_party/go/doc/filter.go
new file mode 100644
index 00000000000..eaa874d9b01
--- /dev/null
+++ b/third_party/go/doc/filter.go
@@ -0,0 +1,107 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.16
+
+package doc
+
+import "go/ast"
+
+type Filter func(string) bool
+
+func matchFields(fields *ast.FieldList, f Filter) bool {
+	if fields != nil {
+		for _, field := range fields.List {
+			for _, name := range field.Names {
+				if f(name.Name) {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+func matchDecl(d *ast.GenDecl, f Filter) bool {
+	for _, d := range d.Specs {
+		switch v := d.(type) {
+		case *ast.ValueSpec:
+			for _, name := range v.Names {
+				if f(name.Name) {
+					return true
+				}
+			}
+		case *ast.TypeSpec:
+			if f(v.Name.Name) {
+				return true
+			}
+			switch t := v.Type.(type) {
+			case *ast.StructType:
+				if matchFields(t.Fields, f) {
+					return true
+				}
+			case *ast.InterfaceType:
+				if matchFields(t.Methods, f) {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+func filterValues(a []*Value, f Filter) []*Value {
+	w := 0
+	for _, vd := range a {
+		if matchDecl(vd.Decl, f) {
+			a[w] = vd
+			w++
+		}
+	}
+	return a[0:w]
+}
+
+func filterFuncs(a []*Func, f Filter) []*Func {
+	w := 0
+	for _, fd := range a {
+		if f(fd.Name) {
+			a[w] = fd
+			w++
+		}
+	}
+	return a[0:w]
+}
+
+func filterTypes(a []*Type, f Filter) []*Type {
+	w := 0
+	for _, td := range a {
+		n := 0 // number of matches
+		if matchDecl(td.Decl, f) {
+			n = 1
+		} else {
+			// type name doesn't match, but we may have matching consts, vars, factories or methods
+			td.Consts = filterValues(td.Consts, f)
+			td.Vars = filterValues(td.Vars, f)
+			td.Funcs = filterFuncs(td.Funcs, f)
+			td.Methods = filterFuncs(td.Methods, f)
+			n += len(td.Consts) + len(td.Vars) + len(td.Funcs) + len(td.Methods)
+		}
+		if n > 0 {
+			a[w] = td
+			w++
+		}
+	}
+	return a[0:w]
+}
+
+// Filter eliminates documentation for names that don't pass through the filter f.
+// TODO(gri): Recognize "Type.Method" as a name.
+//
+func (p *Package) Filter(f Filter) {
+	p.Consts = filterValues(p.Consts, f)
+	p.Vars = filterValues(p.Vars, f)
+	p.Types = filterTypes(p.Types, f)
+	p.Funcs = filterFuncs(p.Funcs, f)
+	p.Doc = "" // don't show top-level package doc
+}
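
Filter is the engine behind go doc-style name filtering: anything whose name fails the predicate is dropped from the computed documentation. A hedged usage sketch with the standard go/doc, keeping only identifiers that contain "Hello" (the sample source is invented):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/doc"
	"go/parser"
	"go/token"
	"strings"
)

const src = `package p

func SayHello() {}
func SayGoodbye() {}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	pkg := &ast.Package{Name: "p", Files: map[string]*ast.File{"p.go": f}}
	d := doc.New(pkg, "example.com/p", 0)

	// Keep only documentation for names mentioning "Hello".
	d.Filter(func(name string) bool { return strings.Contains(name, "Hello") })

	for _, fn := range d.Funcs {
		fmt.Println(fn.Name) // SayHello
	}
}
```
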
diff --git a/third_party/go/doc/headscan.go b/third_party/go/doc/headscan.go
new file mode 100644
index 00000000000..a38bbfb45d8
--- /dev/null
+++ b/third_party/go/doc/headscan.go
@@ -0,0 +1,115 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+/*
+	The headscan command extracts comment headings from package files;
+	it is used to detect false positives which may require an adjustment
+	to the comment formatting heuristics in comment.go.
+
+	Usage: headscan [-root root_directory]
+
+	By default, the $GOROOT/src directory is scanned.
+*/
+package main
+
+import (
+	"bytes"
+	"flag"
+	"fmt"
+	"go/doc"
+	"go/parser"
+	"go/token"
+	"io/fs"
+	"os"
+	"path/filepath"
+	"regexp"
+	"runtime"
+	"strings"
+)
+
+var (
+	root    = flag.String("root", filepath.Join(runtime.GOROOT(), "src"), "root of filesystem tree to scan")
+	verbose = flag.Bool("v", false, "verbose mode")
+)
+
+// ToHTML in comment.go assigns a (possibly blank) ID to each heading
+var html_h = regexp.MustCompile(`

`) + +const html_endh = "

\n" + +func isGoFile(fi fs.FileInfo) bool { + return strings.HasSuffix(fi.Name(), ".go") && + !strings.HasSuffix(fi.Name(), "_test.go") +} + +func appendHeadings(list []string, comment string) []string { + var buf bytes.Buffer + doc.ToHTML(&buf, comment, nil) + for s := buf.String(); ; { + loc := html_h.FindStringIndex(s) + if len(loc) == 0 { + break + } + i := loc[1] + j := strings.Index(s, html_endh) + if j < 0 { + list = append(list, s[i:]) // incorrect HTML + break + } + list = append(list, s[i:j]) + s = s[j+len(html_endh):] + } + return list +} + +func main() { + flag.Parse() + fset := token.NewFileSet() + nheadings := 0 + err := filepath.WalkDir(*root, func(path string, info fs.DirEntry, err error) error { + if !info.IsDir() { + return nil + } + pkgs, err := parser.ParseDir(fset, path, isGoFile, parser.ParseComments) + if err != nil { + if *verbose { + fmt.Fprintln(os.Stderr, err) + } + return nil + } + for _, pkg := range pkgs { + d := doc.New(pkg, path, doc.Mode(0)) + list := appendHeadings(nil, d.Doc) + for _, d := range d.Consts { + list = appendHeadings(list, d.Doc) + } + for _, d := range d.Types { + list = appendHeadings(list, d.Doc) + } + for _, d := range d.Vars { + list = appendHeadings(list, d.Doc) + } + for _, d := range d.Funcs { + list = appendHeadings(list, d.Doc) + } + if len(list) > 0 { + // directories may contain multiple packages; + // print path and package name + fmt.Printf("%s (package %s)\n", path, pkg.Name) + for _, h := range list { + fmt.Printf("\t%s\n", h) + } + nheadings += len(list) + } + } + return nil + }) + if err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + fmt.Println(nheadings, "headings found") +} diff --git a/third_party/go/doc/reader.go b/third_party/go/doc/reader.go new file mode 100644 index 00000000000..705b3b93f1e --- /dev/null +++ b/third_party/go/doc/reader.go @@ -0,0 +1,919 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.16 + +package doc + +import ( + "go/ast" + "go/token" + "regexp" + "sort" + "strconv" +) + +// ---------------------------------------------------------------------------- +// function/method sets +// +// Internally, we treat functions like methods and collect them in method sets. + +// A methodSet describes a set of methods. Entries where Decl == nil are conflict +// entries (more than one method with the same name at the same embedding level). +// +type methodSet map[string]*Func + +// recvString returns a string representation of recv of the +// form "T", "*T", or "BADRECV" (if not a proper receiver type). +// +func recvString(recv ast.Expr) string { + switch t := recv.(type) { + case *ast.Ident: + return t.Name + case *ast.StarExpr: + return "*" + recvString(t.X) + } + return "BADRECV" +} + +// set creates the corresponding Func for f and adds it to mset. +// If there are multiple f's with the same name, set keeps the first +// one with documentation; conflicts are ignored. The boolean +// specifies whether to leave the AST untouched. +// +func (mset methodSet) set(f *ast.FuncDecl, preserveAST bool) { + name := f.Name.Name + if g := mset[name]; g != nil && g.Doc != "" { + // A function with the same name has already been registered; + // since it has documentation, assume f is simply another + // implementation and ignore it. This does not happen if the + // caller is using go/build.ScanDir to determine the list of + // files implementing a package. 
+ return + } + // function doesn't exist or has no documentation; use f + recv := "" + if f.Recv != nil { + var typ ast.Expr + // be careful in case of incorrect ASTs + if list := f.Recv.List; len(list) == 1 { + typ = list[0].Type + } + recv = recvString(typ) + } + mset[name] = &Func{ + Doc: f.Doc.Text(), + Name: name, + Decl: f, + Recv: recv, + Orig: recv, + } + if !preserveAST { + f.Doc = nil // doc consumed - remove from AST + } +} + +// add adds method m to the method set; m is ignored if the method set +// already contains a method with the same name at the same or a higher +// level than m. +// +func (mset methodSet) add(m *Func) { + old := mset[m.Name] + if old == nil || m.Level < old.Level { + mset[m.Name] = m + return + } + if m.Level == old.Level { + // conflict - mark it using a method with nil Decl + mset[m.Name] = &Func{ + Name: m.Name, + Level: m.Level, + } + } +} + +// ---------------------------------------------------------------------------- +// Named types + +// baseTypeName returns the name of the base type of x (or "") +// and whether the type is imported or not. +// +func baseTypeName(x ast.Expr) (name string, imported bool) { + switch t := x.(type) { + case *ast.Ident: + return t.Name, false + case *ast.SelectorExpr: + if _, ok := t.X.(*ast.Ident); ok { + // only possible for qualified type names; + // assume type is imported + return t.Sel.Name, true + } + case *ast.ParenExpr: + return baseTypeName(t.X) + case *ast.StarExpr: + return baseTypeName(t.X) + } + return +} + +// An embeddedSet describes a set of embedded types. +type embeddedSet map[*namedType]bool + +// A namedType represents a named unqualified (package local, or possibly +// predeclared) type. The namedType for a type name is always found via +// reader.lookupType. +// +type namedType struct { + doc string // doc comment for type + name string // type name + decl *ast.GenDecl // nil if declaration hasn't been seen yet + + isEmbedded bool // true if this type is embedded + isStruct bool // true if this type is a struct + embedded embeddedSet // true if the embedded type is a pointer + + // associated declarations + values []*Value // consts and vars + funcs methodSet + methods methodSet +} + +// ---------------------------------------------------------------------------- +// AST reader + +// reader accumulates documentation for a single package. +// It modifies the AST: Comments (declaration documentation) +// that have been collected by the reader are set to nil +// in the respective AST nodes so that they are not printed +// twice (once when printing the documentation and once when +// printing the corresponding AST node). +// +type reader struct { + mode Mode + + // package properties + doc string // package documentation, if any + filenames []string + notes map[string][]*Note + + // declarations + imports map[string]int + hasDotImp bool // if set, package contains a dot import + values []*Value // consts and vars + order int // sort order of const and var declarations (when we can't use a name) + types map[string]*namedType + funcs methodSet + + // support for package-local error type declarations + errorDecl bool // if set, type "error" was declared locally + fixlist []*ast.InterfaceType // list of interfaces containing anonymous field "error" +} + +func (r *reader) isVisible(name string) bool { + return r.mode&AllDecls != 0 || token.IsExported(name) +} + +// lookupType returns the base type with the given name. 
+// If the base type has not been encountered yet, a new +// type with the given name but no associated declaration +// is added to the type map. +// +func (r *reader) lookupType(name string) *namedType { + if name == "" || name == "_" { + return nil // no type docs for anonymous types + } + if typ, found := r.types[name]; found { + return typ + } + // type not found - add one without declaration + typ := &namedType{ + name: name, + embedded: make(embeddedSet), + funcs: make(methodSet), + methods: make(methodSet), + } + r.types[name] = typ + return typ +} + +// recordAnonymousField registers fieldType as the type of an +// anonymous field in the parent type. If the field is imported +// (qualified name) or the parent is nil, the field is ignored. +// The function returns the field name. +// +func (r *reader) recordAnonymousField(parent *namedType, fieldType ast.Expr) (fname string) { + fname, imp := baseTypeName(fieldType) + if parent == nil || imp { + return + } + if ftype := r.lookupType(fname); ftype != nil { + ftype.isEmbedded = true + _, ptr := fieldType.(*ast.StarExpr) + parent.embedded[ftype] = ptr + } + return +} + +func (r *reader) readDoc(comment *ast.CommentGroup) { + // By convention there should be only one package comment + // but collect all of them if there are more than one. + text := comment.Text() + if r.doc == "" { + r.doc = text + return + } + r.doc += "\n" + text +} + +func (r *reader) remember(typ *ast.InterfaceType) { + r.fixlist = append(r.fixlist, typ) +} + +func specNames(specs []ast.Spec) []string { + names := make([]string, 0, len(specs)) // reasonable estimate + for _, s := range specs { + // s guaranteed to be an *ast.ValueSpec by readValue + for _, ident := range s.(*ast.ValueSpec).Names { + names = append(names, ident.Name) + } + } + return names +} + +// readValue processes a const or var declaration. +// +func (r *reader) readValue(decl *ast.GenDecl) { + // determine if decl should be associated with a type + // Heuristic: For each typed entry, determine the type name, if any. + // If there is exactly one type name that is sufficiently + // frequent, associate the decl with the respective type. 
+ domName := "" + domFreq := 0 + prev := "" + n := 0 + for _, spec := range decl.Specs { + s, ok := spec.(*ast.ValueSpec) + if !ok { + continue // should not happen, but be conservative + } + name := "" + switch { + case s.Type != nil: + // a type is present; determine its name + if n, imp := baseTypeName(s.Type); !imp { + name = n + } + case decl.Tok == token.CONST && len(s.Values) == 0: + // no type or value is present but we have a constant declaration; + // use the previous type name (possibly the empty string) + name = prev + } + if name != "" { + // entry has a named type + if domName != "" && domName != name { + // more than one type name - do not associate + // with any type + domName = "" + break + } + domName = name + domFreq++ + } + prev = name + n++ + } + + // nothing to do w/o a legal declaration + if n == 0 { + return + } + + // determine values list with which to associate the Value for this decl + values := &r.values + const threshold = 0.75 + if domName != "" && r.isVisible(domName) && domFreq >= int(float64(len(decl.Specs))*threshold) { + // typed entries are sufficiently frequent + if typ := r.lookupType(domName); typ != nil { + values = &typ.values // associate with that type + } + } + + *values = append(*values, &Value{ + Doc: decl.Doc.Text(), + Names: specNames(decl.Specs), + Decl: decl, + order: r.order, + }) + if r.mode&PreserveAST == 0 { + decl.Doc = nil // doc consumed - remove from AST + } + // Note: It's important that the order used here is global because the cleanupTypes + // methods may move values associated with types back into the global list. If the + // order is list-specific, sorting is not deterministic because the same order value + // may appear multiple times (was bug, found when fixing #16153). + r.order++ +} + +// fields returns a struct's fields or an interface's methods. +// +func fields(typ ast.Expr) (list []*ast.Field, isStruct bool) { + var fields *ast.FieldList + switch t := typ.(type) { + case *ast.StructType: + fields = t.Fields + isStruct = true + case *ast.InterfaceType: + fields = t.Methods + } + if fields != nil { + list = fields.List + } + return +} + +// readType processes a type declaration. +// +func (r *reader) readType(decl *ast.GenDecl, spec *ast.TypeSpec) { + typ := r.lookupType(spec.Name.Name) + if typ == nil { + return // no name or blank name - ignore the type + } + + // A type should be added at most once, so typ.decl + // should be nil - if it is not, simply overwrite it. + typ.decl = decl + + // compute documentation + doc := spec.Doc + if doc == nil { + // no doc associated with the spec, use the declaration doc, if any + doc = decl.Doc + } + if r.mode&PreserveAST == 0 { + spec.Doc = nil // doc consumed - remove from AST + decl.Doc = nil // doc consumed - remove from AST + } + typ.doc = doc.Text() + + // record anonymous fields (they may contribute methods) + // (some fields may have been recorded already when filtering + // exports, but that's ok) + var list []*ast.Field + list, typ.isStruct = fields(spec.Type) + for _, field := range list { + if len(field.Names) == 0 { + r.recordAnonymousField(typ, field.Type) + } + } +} + +// isPredeclared reports whether n denotes a predeclared type. +// +func (r *reader) isPredeclared(n string) bool { + return predeclaredTypes[n] && r.types[n] == nil +} + +// readFunc processes a func or method declaration. +// +func (r *reader) readFunc(fun *ast.FuncDecl) { + // strip function body if requested. 
+ if r.mode&PreserveAST == 0 { + fun.Body = nil + } + + // associate methods with the receiver type, if any + if fun.Recv != nil { + // method + if len(fun.Recv.List) == 0 { + // should not happen (incorrect AST); (See issue 17788) + // don't show this method + return + } + recvTypeName, imp := baseTypeName(fun.Recv.List[0].Type) + if imp { + // should not happen (incorrect AST); + // don't show this method + return + } + if typ := r.lookupType(recvTypeName); typ != nil { + typ.methods.set(fun, r.mode&PreserveAST != 0) + } + // otherwise ignore the method + // TODO(gri): There may be exported methods of non-exported types + // that can be called because of exported values (consts, vars, or + // function results) of that type. Could determine if that is the + // case and then show those methods in an appropriate section. + return + } + + // Associate factory functions with the first visible result type, as long as + // others are predeclared types. + if fun.Type.Results.NumFields() >= 1 { + var typ *namedType // type to associate the function with + numResultTypes := 0 + for _, res := range fun.Type.Results.List { + factoryType := res.Type + if t, ok := factoryType.(*ast.ArrayType); ok { + // We consider functions that return slices or arrays of type + // T (or pointers to T) as factory functions of T. + factoryType = t.Elt + } + if n, imp := baseTypeName(factoryType); !imp && r.isVisible(n) && !r.isPredeclared(n) { + if t := r.lookupType(n); t != nil { + typ = t + numResultTypes++ + if numResultTypes > 1 { + break + } + } + } + } + // If there is exactly one result type, + // associate the function with that type. + if numResultTypes == 1 { + typ.funcs.set(fun, r.mode&PreserveAST != 0) + return + } + } + + // just an ordinary function + r.funcs.set(fun, r.mode&PreserveAST != 0) +} + +var ( + noteMarker = `([A-Z][A-Z]+)\(([^)]+)\):?` // MARKER(uid), MARKER at least 2 chars, uid at least 1 char + noteMarkerRx = regexp.MustCompile(`^[ \t]*` + noteMarker) // MARKER(uid) at text start + noteCommentRx = regexp.MustCompile(`^/[/*][ \t]*` + noteMarker) // MARKER(uid) at comment start +) + +// readNote collects a single note from a sequence of comments. +// +func (r *reader) readNote(list []*ast.Comment) { + text := (&ast.CommentGroup{List: list}).Text() + if m := noteMarkerRx.FindStringSubmatchIndex(text); m != nil { + // The note body starts after the marker. + // We remove any formatting so that we don't + // get spurious line breaks/indentation when + // showing the TODO body. + body := clean(text[m[1]:], keepNL) + if body != "" { + marker := text[m[2]:m[3]] + r.notes[marker] = append(r.notes[marker], &Note{ + Pos: list[0].Pos(), + End: list[len(list)-1].End(), + UID: text[m[4]:m[5]], + Body: body, + }) + } + } +} + +// readNotes extracts notes from comments. +// A note must start at the beginning of a comment with "MARKER(uid):" +// and is followed by the note body (e.g., "// BUG(gri): fix this"). +// The note ends at the end of the comment group or at the start of +// another note in the same comment group, whichever comes first. +// +func (r *reader) readNotes(comments []*ast.CommentGroup) { + for _, group := range comments { + i := -1 // comment index of most recent note start, valid if >= 0 + list := group.List + for j, c := range list { + if noteCommentRx.MatchString(c.Text) { + if i >= 0 { + r.readNote(list[i:j]) + } + i = j + } + } + if i >= 0 { + r.readNote(list[i:]) + } + } +} + +// readFile adds the AST for a source file to the reader. 
+// +func (r *reader) readFile(src *ast.File) { + // add package documentation + if src.Doc != nil { + r.readDoc(src.Doc) + if r.mode&PreserveAST == 0 { + src.Doc = nil // doc consumed - remove from AST + } + } + + // add all declarations but for functions which are processed in a separate pass + for _, decl := range src.Decls { + switch d := decl.(type) { + case *ast.GenDecl: + switch d.Tok { + case token.IMPORT: + // imports are handled individually + for _, spec := range d.Specs { + if s, ok := spec.(*ast.ImportSpec); ok { + if import_, err := strconv.Unquote(s.Path.Value); err == nil { + r.imports[import_] = 1 + if s.Name != nil && s.Name.Name == "." { + r.hasDotImp = true + } + } + } + } + case token.CONST, token.VAR: + // constants and variables are always handled as a group + r.readValue(d) + case token.TYPE: + // types are handled individually + if len(d.Specs) == 1 && !d.Lparen.IsValid() { + // common case: single declaration w/o parentheses + // (if a single declaration is parenthesized, + // create a new fake declaration below, so that + // go/doc type declarations always appear w/o + // parentheses) + if s, ok := d.Specs[0].(*ast.TypeSpec); ok { + r.readType(d, s) + } + break + } + for _, spec := range d.Specs { + if s, ok := spec.(*ast.TypeSpec); ok { + // use an individual (possibly fake) declaration + // for each type; this also ensures that each type + // gets to (re-)use the declaration documentation + // if there's none associated with the spec itself + fake := &ast.GenDecl{ + Doc: d.Doc, + // don't use the existing TokPos because it + // will lead to the wrong selection range for + // the fake declaration if there are more + // than one type in the group (this affects + // src/cmd/godoc/godoc.go's posLink_urlFunc) + TokPos: s.Pos(), + Tok: token.TYPE, + Specs: []ast.Spec{s}, + } + r.readType(fake, s) + } + } + } + } + } + + // collect MARKER(...): annotations + r.readNotes(src.Comments) + if r.mode&PreserveAST == 0 { + src.Comments = nil // consumed unassociated comments - remove from AST + } +} + +func (r *reader) readPackage(pkg *ast.Package, mode Mode) { + // initialize reader + r.filenames = make([]string, len(pkg.Files)) + r.imports = make(map[string]int) + r.mode = mode + r.types = make(map[string]*namedType) + r.funcs = make(methodSet) + r.notes = make(map[string][]*Note) + + // sort package files before reading them so that the + // result does not depend on map iteration order + i := 0 + for filename := range pkg.Files { + r.filenames[i] = filename + i++ + } + sort.Strings(r.filenames) + + // process files in sorted order + for _, filename := range r.filenames { + f := pkg.Files[filename] + if mode&AllDecls == 0 { + r.fileExports(f) + } + r.readFile(f) + } + + // process functions now that we have better type information + for _, f := range pkg.Files { + for _, decl := range f.Decls { + if d, ok := decl.(*ast.FuncDecl); ok { + r.readFunc(d) + } + } + } +} + +// ---------------------------------------------------------------------------- +// Types + +func customizeRecv(f *Func, recvTypeName string, embeddedIsPtr bool, level int) *Func { + if f == nil || f.Decl == nil || f.Decl.Recv == nil || len(f.Decl.Recv.List) != 1 { + return f // shouldn't happen, but be safe + } + + // copy existing receiver field and set new type + newField := *f.Decl.Recv.List[0] + origPos := newField.Type.Pos() + _, origRecvIsPtr := newField.Type.(*ast.StarExpr) + newIdent := &ast.Ident{NamePos: origPos, Name: recvTypeName} + var typ ast.Expr = newIdent + if !embeddedIsPtr && 
origRecvIsPtr { + newIdent.NamePos++ // '*' is one character + typ = &ast.StarExpr{Star: origPos, X: newIdent} + } + newField.Type = typ + + // copy existing receiver field list and set new receiver field + newFieldList := *f.Decl.Recv + newFieldList.List = []*ast.Field{&newField} + + // copy existing function declaration and set new receiver field list + newFuncDecl := *f.Decl + newFuncDecl.Recv = &newFieldList + + // copy existing function documentation and set new declaration + newF := *f + newF.Decl = &newFuncDecl + newF.Recv = recvString(typ) + // the Orig field never changes + newF.Level = level + + return &newF +} + +// collectEmbeddedMethods collects the embedded methods of typ in mset. +// +func (r *reader) collectEmbeddedMethods(mset methodSet, typ *namedType, recvTypeName string, embeddedIsPtr bool, level int, visited embeddedSet) { + visited[typ] = true + for embedded, isPtr := range typ.embedded { + // Once an embedded type is embedded as a pointer type + // all embedded types in those types are treated like + // pointer types for the purpose of the receiver type + // computation; i.e., embeddedIsPtr is sticky for this + // embedding hierarchy. + thisEmbeddedIsPtr := embeddedIsPtr || isPtr + for _, m := range embedded.methods { + // only top-level methods are embedded + if m.Level == 0 { + mset.add(customizeRecv(m, recvTypeName, thisEmbeddedIsPtr, level)) + } + } + if !visited[embedded] { + r.collectEmbeddedMethods(mset, embedded, recvTypeName, thisEmbeddedIsPtr, level+1, visited) + } + } + delete(visited, typ) +} + +// computeMethodSets determines the actual method sets for each type encountered. +// +func (r *reader) computeMethodSets() { + for _, t := range r.types { + // collect embedded methods for t + if t.isStruct { + // struct + r.collectEmbeddedMethods(t.methods, t, t.name, false, 1, make(embeddedSet)) + } else { + // interface + // TODO(gri) fix this + } + } + + // if error was declared locally, don't treat it as exported field anymore + if r.errorDecl { + for _, ityp := range r.fixlist { + removeErrorField(ityp) + } + } +} + +// cleanupTypes removes the association of functions and methods with +// types that have no declaration. Instead, these functions and methods +// are shown at the package level. It also removes types with missing +// declarations or which are not visible. +// +func (r *reader) cleanupTypes() { + for _, t := range r.types { + visible := r.isVisible(t.name) + predeclared := predeclaredTypes[t.name] + + if t.decl == nil && (predeclared || visible && (t.isEmbedded || r.hasDotImp)) { + // t.name is a predeclared type (and was not redeclared in this package), + // or it was embedded somewhere but its declaration is missing (because + // the AST is incomplete), or we have a dot-import (and all bets are off): + // move any associated values, funcs, and methods back to the top-level so + // that they are not lost. + // 1) move values + r.values = append(r.values, t.values...) 
+ // 2) move factory functions + for name, f := range t.funcs { + // in a correct AST, package-level function names + // are all different - no need to check for conflicts + r.funcs[name] = f + } + // 3) move methods + if !predeclared { + for name, m := range t.methods { + // don't overwrite functions with the same name - drop them + if _, found := r.funcs[name]; !found { + r.funcs[name] = m + } + } + } + } + // remove types w/o declaration or which are not visible + if t.decl == nil || !visible { + delete(r.types, t.name) + } + } +} + +// ---------------------------------------------------------------------------- +// Sorting + +type data struct { + n int + swap func(i, j int) + less func(i, j int) bool +} + +func (d *data) Len() int { return d.n } +func (d *data) Swap(i, j int) { d.swap(i, j) } +func (d *data) Less(i, j int) bool { return d.less(i, j) } + +// sortBy is a helper function for sorting +func sortBy(less func(i, j int) bool, swap func(i, j int), n int) { + sort.Sort(&data{n, swap, less}) +} + +func sortedKeys(m map[string]int) []string { + list := make([]string, len(m)) + i := 0 + for key := range m { + list[i] = key + i++ + } + sort.Strings(list) + return list +} + +// sortingName returns the name to use when sorting d into place. +// +func sortingName(d *ast.GenDecl) string { + if len(d.Specs) == 1 { + if s, ok := d.Specs[0].(*ast.ValueSpec); ok { + return s.Names[0].Name + } + } + return "" +} + +func sortedValues(m []*Value, tok token.Token) []*Value { + list := make([]*Value, len(m)) // big enough in any case + i := 0 + for _, val := range m { + if val.Decl.Tok == tok { + list[i] = val + i++ + } + } + list = list[0:i] + + sortBy( + func(i, j int) bool { + if ni, nj := sortingName(list[i].Decl), sortingName(list[j].Decl); ni != nj { + return ni < nj + } + return list[i].order < list[j].order + }, + func(i, j int) { list[i], list[j] = list[j], list[i] }, + len(list), + ) + + return list +} + +func sortedTypes(m map[string]*namedType, allMethods bool) []*Type { + list := make([]*Type, len(m)) + i := 0 + for _, t := range m { + list[i] = &Type{ + Doc: t.doc, + Name: t.name, + Decl: t.decl, + Consts: sortedValues(t.values, token.CONST), + Vars: sortedValues(t.values, token.VAR), + Funcs: sortedFuncs(t.funcs, true), + Methods: sortedFuncs(t.methods, allMethods), + } + i++ + } + + sortBy( + func(i, j int) bool { return list[i].Name < list[j].Name }, + func(i, j int) { list[i], list[j] = list[j], list[i] }, + len(list), + ) + + return list +} + +func removeStar(s string) string { + if len(s) > 0 && s[0] == '*' { + return s[1:] + } + return s +} + +func sortedFuncs(m methodSet, allMethods bool) []*Func { + list := make([]*Func, len(m)) + i := 0 + for _, m := range m { + // determine which methods to include + switch { + case m.Decl == nil: + // exclude conflict entry + case allMethods, m.Level == 0, !token.IsExported(removeStar(m.Orig)): + // forced inclusion, method not embedded, or method + // embedded but original receiver type not exported + list[i] = m + i++ + } + } + list = list[0:i] + sortBy( + func(i, j int) bool { return list[i].Name < list[j].Name }, + func(i, j int) { list[i], list[j] = list[j], list[i] }, + len(list), + ) + return list +} + +// noteBodies returns a list of note body strings given a list of notes. +// This is only used to populate the deprecated Package.Bugs field. 
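The `data`/`sortBy` pair above is a compact way to drive `sort.Sort` from two closures instead of declaring a fresh `sort.Interface` implementation at every call site. A stand-alone sketch of the same pattern (the slice contents and ordering are invented):

    // Stand-alone illustration of the data/sortBy closure pattern above.
    package main

    import (
        "fmt"
        "sort"
    )

    type data struct {
        n    int
        swap func(i, j int)
        less func(i, j int) bool
    }

    func (d *data) Len() int           { return d.n }
    func (d *data) Swap(i, j int)      { d.swap(i, j) }
    func (d *data) Less(i, j int) bool { return d.less(i, j) }

    // sortBy sorts n elements using the given less and swap closures.
    func sortBy(less func(i, j int) bool, swap func(i, j int), n int) {
        sort.Sort(&data{n, swap, less})
    }

    func main() {
        names := []string{"sortedTypes", "IsPredeclared", "clean"}
        // Order by name length without defining a dedicated slice type.
        sortBy(
            func(i, j int) bool { return len(names[i]) < len(names[j]) },
            func(i, j int) { names[i], names[j] = names[j], names[i] },
            len(names),
        )
        fmt.Println(names) // [clean sortedTypes IsPredeclared]
    }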
+//
+func noteBodies(notes []*Note) []string {
+ var list []string
+ for _, n := range notes {
+ list = append(list, n.Body)
+ }
+ return list
+}
+
+// ----------------------------------------------------------------------------
+// Predeclared identifiers
+
+// IsPredeclared reports whether s is a predeclared identifier.
+func IsPredeclared(s string) bool {
+ return predeclaredTypes[s] || predeclaredFuncs[s] || predeclaredConstants[s]
+}
+
+var predeclaredTypes = map[string]bool{
+ "bool": true,
+ "byte": true,
+ "complex64": true,
+ "complex128": true,
+ "error": true,
+ "float32": true,
+ "float64": true,
+ "int": true,
+ "int8": true,
+ "int16": true,
+ "int32": true,
+ "int64": true,
+ "rune": true,
+ "string": true,
+ "uint": true,
+ "uint8": true,
+ "uint16": true,
+ "uint32": true,
+ "uint64": true,
+ "uintptr": true,
+}
+
+var predeclaredFuncs = map[string]bool{
+ "append": true,
+ "cap": true,
+ "close": true,
+ "complex": true,
+ "copy": true,
+ "delete": true,
+ "imag": true,
+ "len": true,
+ "make": true,
+ "new": true,
+ "panic": true,
+ "print": true,
+ "println": true,
+ "real": true,
+ "recover": true,
+}
+
+var predeclaredConstants = map[string]bool{
+ "false": true,
+ "iota": true,
+ "nil": true,
+ "true": true,
+}
diff --git a/third_party/go/doc/synopsis.go b/third_party/go/doc/synopsis.go
new file mode 100644
index 00000000000..a1ba156b1ae
--- /dev/null
+++ b/third_party/go/doc/synopsis.go
@@ -0,0 +1,85 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.16
+
+package doc
+
+import (
+ "strings"
+ "unicode"
+)
+
+// firstSentenceLen returns the length of the first sentence in s.
+// The sentence ends after the first period followed by space and
+// not preceded by exactly one uppercase letter.
+//
+func firstSentenceLen(s string) int {
+ var ppp, pp, p rune
+ for i, q := range s {
+ if q == '\n' || q == '\r' || q == '\t' {
+ q = ' '
+ }
+ if q == ' ' && p == '.' && (!unicode.IsUpper(pp) || unicode.IsUpper(ppp)) {
+ return i
+ }
+ if p == '。' || p == '．' {
+ return i
+ }
+ ppp, pp, p = pp, p, q
+ }
+ return len(s)
+}
+
+const (
+ keepNL = 1 << iota
+)
+
+// clean replaces each sequence of space, \n, \r, or \t characters
+// with a single space and removes any trailing and leading spaces.
+// If the keepNL flag is set, newline characters are passed through
+// instead of being changed to spaces.
+func clean(s string, flags int) string {
+ var b []byte
+ p := byte(' ')
+ for i := 0; i < len(s); i++ {
+ q := s[i]
+ if (flags&keepNL) == 0 && q == '\n' || q == '\r' || q == '\t' {
+ q = ' '
+ }
+ if q != ' ' || p != ' ' {
+ b = append(b, q)
+ p = q
+ }
+ }
+ // remove trailing blank, if any
+ if n := len(b); n > 0 && p == ' ' {
+ b = b[0 : n-1]
+ }
+ return string(b)
+}
+
+// Synopsis returns a cleaned version of the first sentence in s.
+// That sentence ends after the first period followed by space and
+// not preceded by exactly one uppercase letter. The result string
+// has no \n, \r, or \t characters and uses only single spaces between
+// words. If s starts with any of the IllegalPrefixes, the result
+// is the empty string.
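The sentence heuristic documented above is easiest to see on concrete inputs. A short sketch against the vendored package (the inputs are invented; the import path is the one this patch vendors):

    // Quick sketch of the Synopsis rules on two invented inputs.
    package main

    import (
        "fmt"

        "cloud.google.com/go/third_party/go/doc"
    )

    func main() {
        // A period after a lone capital (an initial) does not end the sentence.
        fmt.Println(doc.Synopsis("Package poems by T. S. Eliot. To rhyme..."))
        // -> Package poems by T. S. Eliot.

        // An IllegalPrefixes match empties the result entirely.
        fmt.Println(doc.Synopsis("Copyright 2021 Google LLC. Package foo does bar."))
        // -> "" (first sentence starts with "copyright")
    }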
+// +func Synopsis(s string) string { + s = clean(s[0:firstSentenceLen(s)], 0) + for _, prefix := range IllegalPrefixes { + if strings.HasPrefix(strings.ToLower(s), prefix) { + return "" + } + } + s = convertQuotes(s) + return s +} + +var IllegalPrefixes = []string{ + "copyright", + "all rights", + "author", +} diff --git a/third_party/go/doc/synopsis_test.go b/third_party/go/doc/synopsis_test.go new file mode 100644 index 00000000000..162b9a53c7a --- /dev/null +++ b/third_party/go/doc/synopsis_test.go @@ -0,0 +1,54 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.16 + +package doc + +import "testing" + +var tests = []struct { + txt string + fsl int + syn string +}{ + {"", 0, ""}, + {"foo", 3, "foo"}, + {"foo.", 4, "foo."}, + {"foo.bar", 7, "foo.bar"}, + {" foo. ", 6, "foo."}, + {" foo\t bar.\n", 12, "foo bar."}, + {" foo\t bar.\n", 12, "foo bar."}, + {"a b\n\nc\r\rd\t\t", 12, "a b c d"}, + {"a b\n\nc\r\rd\t\t . BLA", 15, "a b c d ."}, + {"Package poems by T.S.Eliot. To rhyme...", 27, "Package poems by T.S.Eliot."}, + {"Package poems by T. S. Eliot. To rhyme...", 29, "Package poems by T. S. Eliot."}, + {"foo implements the foo ABI. The foo ABI is...", 27, "foo implements the foo ABI."}, + {"Package\nfoo. ..", 12, "Package foo."}, + {"P . Q.", 3, "P ."}, + {"P. Q. ", 8, "P. Q."}, + {"Package Καλημέρα κόσμε.", 36, "Package Καλημέρα κόσμε."}, + {"Package こんにちは 世界\n", 31, "Package こんにちは 世界"}, + {"Package こんにちは。世界", 26, "Package こんにちは。"}, + {"Package 안녕.世界", 17, "Package 안녕."}, + {"Package foo does bar.", 21, "Package foo does bar."}, + {"Copyright 2012 Google, Inc. Package foo does bar.", 27, ""}, + {"All Rights reserved. Package foo does bar.", 20, ""}, + {"All rights reserved. Package foo does bar.", 20, ""}, + {"Authors: foo@bar.com. Package foo does bar.", 21, ""}, + {"typically invoked as ``go tool asm'',", 37, "typically invoked as " + ulquo + "go tool asm" + urquo + ","}, +} + +func TestSynopsis(t *testing.T) { + for _, e := range tests { + fsl := firstSentenceLen(e.txt) + if fsl != e.fsl { + t.Errorf("got fsl = %d; want %d for %q\n", fsl, e.fsl, e.txt) + } + syn := Synopsis(e.txt) + if syn != e.syn { + t.Errorf("got syn = %q; want %q for %q\n", syn, e.syn, e.txt) + } + } +} diff --git a/third_party/pkgsite/print_type.go b/third_party/pkgsite/print_type.go index 25c5e0bac09..8232fd6f657 100644 --- a/third_party/pkgsite/print_type.go +++ b/third_party/pkgsite/print_type.go @@ -17,12 +17,13 @@ import ( "bytes" "fmt" "go/ast" - "go/doc" "go/printer" "go/scanner" "go/token" "strconv" "strings" + + "cloud.google.com/go/third_party/go/doc" ) // PrintType returns a string representation of the decl.
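One behavior synopsis_test.go above leaves unpinned is `clean`'s `keepNL` flag, which `readNote` relies on to preserve line breaks in multi-line note bodies. A hypothetical in-package test sketch, not part of this patch:

    // +build go1.16

    package doc

    import "testing"

    // TestCleanKeepNL is a hypothetical sketch (not in this patch): it pins
    // down clean's keepNL flag, which readNote uses to keep line breaks in
    // note bodies while still collapsing other whitespace.
    func TestCleanKeepNL(t *testing.T) {
        in := "fix\tthis\nand  that"
        // Without keepNL, every run of space, \n, \r, \t collapses to one space.
        if got, want := clean(in, 0), "fix this and that"; got != want {
            t.Errorf("clean(%q, 0) = %q; want %q", in, got, want)
        }
        // With keepNL, newlines pass through; tabs still become spaces.
        if got, want := clean(in, keepNL), "fix this\nand that"; got != want {
            t.Errorf("clean(%q, keepNL) = %q; want %q", in, got, want)
        }
    }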