From 68bd1dd89d366743d209a907b6b92eddbab087c2 Mon Sep 17 00:00:00 2001 From: Giteabot Date: Fri, 12 Apr 2024 23:09:16 +0800 Subject: [PATCH 1/8] Fix rename branch 500 when the target branch is deleted but exist in database (#30430) (#30437) Backport #30430 by @lunny Fix #30428 --------- Co-authored-by: Lunny Xiao --- models/git/branch.go | 31 ++++++-- routers/web/repo/setting/protected_branch.go | 8 +- tests/integration/rename_branch_test.go | 80 ++++++++++++++++++-- 3 files changed, 107 insertions(+), 12 deletions(-) diff --git a/models/git/branch.go b/models/git/branch.go index db02fc9b28..bf26ec67ac 100644 --- a/models/git/branch.go +++ b/models/git/branch.go @@ -292,6 +292,7 @@ func RenameBranch(ctx context.Context, repo *repo_model.Repository, from, to str sess := db.GetEngine(ctx) + // check whether from branch exist var branch Branch exist, err := db.GetEngine(ctx).Where("repo_id=? AND name=?", repo.ID, from).Get(&branch) if err != nil { @@ -303,6 +304,24 @@ func RenameBranch(ctx context.Context, repo *repo_model.Repository, from, to str } } + // check whether to branch exist or is_deleted + var dstBranch Branch + exist, err = db.GetEngine(ctx).Where("repo_id=? AND name=?", repo.ID, to).Get(&dstBranch) + if err != nil { + return err + } + if exist { + if !dstBranch.IsDeleted { + return ErrBranchAlreadyExists{ + BranchName: to, + } + } + + if _, err := db.GetEngine(ctx).ID(dstBranch.ID).NoAutoCondition().Delete(&dstBranch); err != nil { + return err + } + } + // 1. update branch in database if n, err := sess.Where("repo_id=? AND name=?", repo.ID, from).Update(&Branch{ Name: to, @@ -357,12 +376,7 @@ func RenameBranch(ctx context.Context, repo *repo_model.Repository, from, to str return err } - // 5. do git action - if err = gitAction(ctx, isDefault); err != nil { - return err - } - - // 6. insert renamed branch record + // 5. insert renamed branch record renamedBranch := &RenamedBranch{ RepoID: repo.ID, From: from, @@ -373,6 +387,11 @@ func RenameBranch(ctx context.Context, repo *repo_model.Repository, from, to str return err } + // 6. 
do git action + if err = gitAction(ctx, isDefault); err != nil { + return err + } + return committer.Commit() } diff --git a/routers/web/repo/setting/protected_branch.go b/routers/web/repo/setting/protected_branch.go index e0f2294a14..af3e03f36c 100644 --- a/routers/web/repo/setting/protected_branch.go +++ b/routers/web/repo/setting/protected_branch.go @@ -312,7 +312,13 @@ func RenameBranchPost(ctx *context.Context) { msg, err := repository.RenameBranch(ctx, ctx.Repo.Repository, ctx.Doer, ctx.Repo.GitRepo, form.From, form.To) if err != nil { - ctx.ServerError("RenameBranch", err) + switch { + case git_model.IsErrBranchAlreadyExists(err): + ctx.Flash.Error(ctx.Tr("repo.branch.branch_already_exists", form.To)) + ctx.Redirect(fmt.Sprintf("%s/branches", ctx.Repo.RepoLink)) + default: + ctx.ServerError("RenameBranch", err) + } return } diff --git a/tests/integration/rename_branch_test.go b/tests/integration/rename_branch_test.go index 703fc243a4..bc79b701cd 100644 --- a/tests/integration/rename_branch_test.go +++ b/tests/integration/rename_branch_test.go @@ -5,17 +5,23 @@ package integration import ( "net/http" + "net/url" "testing" git_model "code.gitea.io/gitea/models/git" repo_model "code.gitea.io/gitea/models/repo" "code.gitea.io/gitea/models/unittest" + gitea_context "code.gitea.io/gitea/modules/context" "code.gitea.io/gitea/tests" "github.com/stretchr/testify/assert" ) func TestRenameBranch(t *testing.T) { + onGiteaRun(t, testRenameBranch) +} + +func testRenameBranch(t *testing.T, u *url.URL) { defer tests.PrepareTestEnv(t)() unittest.AssertExistsAndLoadBean(t, &git_model.Branch{RepoID: 1, Name: "master"}) @@ -26,20 +32,19 @@ func TestRenameBranch(t *testing.T) { resp := session.MakeRequest(t, req, http.StatusOK) htmlDoc := NewHTMLParser(t, resp.Body) - postData := map[string]string{ + req = NewRequestWithValues(t, "POST", "/user2/repo1/settings/rename_branch", map[string]string{ "_csrf": htmlDoc.GetCSRF(), "from": "master", "to": "main", - } - req = NewRequestWithValues(t, "POST", "/user2/repo1/settings/rename_branch", postData) + }) session.MakeRequest(t, req, http.StatusSeeOther) // check new branch link - req = NewRequestWithValues(t, "GET", "/user2/repo1/src/branch/main/README.md", postData) + req = NewRequestWithValues(t, "GET", "/user2/repo1/src/branch/main/README.md", nil) session.MakeRequest(t, req, http.StatusOK) // check old branch link - req = NewRequestWithValues(t, "GET", "/user2/repo1/src/branch/master/README.md", postData) + req = NewRequestWithValues(t, "GET", "/user2/repo1/src/branch/master/README.md", nil) resp = session.MakeRequest(t, req, http.StatusSeeOther) location := resp.Header().Get("Location") assert.Equal(t, "/user2/repo1/src/branch/main/README.md", location) @@ -47,4 +52,69 @@ func TestRenameBranch(t *testing.T) { // check db repo1 := unittest.AssertExistsAndLoadBean(t, &repo_model.Repository{ID: 1}) assert.Equal(t, "main", repo1.DefaultBranch) + + // create branch1 + csrf := GetCSRF(t, session, "/user2/repo1/src/branch/main") + + req = NewRequestWithValues(t, "POST", "/user2/repo1/branches/_new/branch/main", map[string]string{ + "_csrf": csrf, + "new_branch_name": "branch1", + }) + session.MakeRequest(t, req, http.StatusSeeOther) + + branch1 := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{RepoID: repo1.ID, Name: "branch1"}) + assert.Equal(t, "branch1", branch1.Name) + + // create branch2 + req = NewRequestWithValues(t, "POST", "/user2/repo1/branches/_new/branch/main", map[string]string{ + "_csrf": csrf, + "new_branch_name": "branch2", + }) + 
session.MakeRequest(t, req, http.StatusSeeOther) + + branch2 := unittest.AssertExistsAndLoadBean(t, &git_model.Branch{RepoID: repo1.ID, Name: "branch2"}) + assert.Equal(t, "branch2", branch2.Name) + + // rename branch2 to branch1 + req = NewRequestWithValues(t, "POST", "/user2/repo1/settings/rename_branch", map[string]string{ + "_csrf": htmlDoc.GetCSRF(), + "from": "branch2", + "to": "branch1", + }) + session.MakeRequest(t, req, http.StatusSeeOther) + flashCookie := session.GetCookie(gitea_context.CookieNameFlash) + assert.NotNil(t, flashCookie) + assert.Contains(t, flashCookie.Value, "error") + + branch2 = unittest.AssertExistsAndLoadBean(t, &git_model.Branch{RepoID: repo1.ID, Name: "branch2"}) + assert.Equal(t, "branch2", branch2.Name) + branch1 = unittest.AssertExistsAndLoadBean(t, &git_model.Branch{RepoID: repo1.ID, Name: "branch1"}) + assert.Equal(t, "branch1", branch1.Name) + + // delete branch1 + req = NewRequestWithValues(t, "POST", "/user2/repo1/branches/delete", map[string]string{ + "_csrf": htmlDoc.GetCSRF(), + "name": "branch1", + }) + session.MakeRequest(t, req, http.StatusOK) + branch2 = unittest.AssertExistsAndLoadBean(t, &git_model.Branch{RepoID: repo1.ID, Name: "branch2"}) + assert.Equal(t, "branch2", branch2.Name) + branch1 = unittest.AssertExistsAndLoadBean(t, &git_model.Branch{RepoID: repo1.ID, Name: "branch1"}) + assert.True(t, branch1.IsDeleted) // virtual deletion + + // rename branch2 to branch1 again + req = NewRequestWithValues(t, "POST", "/user2/repo1/settings/rename_branch", map[string]string{ + "_csrf": htmlDoc.GetCSRF(), + "from": "branch2", + "to": "branch1", + }) + session.MakeRequest(t, req, http.StatusSeeOther) + + flashCookie = session.GetCookie(gitea_context.CookieNameFlash) + assert.NotNil(t, flashCookie) + assert.Contains(t, flashCookie.Value, "success") + + unittest.AssertNotExistsBean(t, &git_model.Branch{RepoID: repo1.ID, Name: "branch2"}) + branch1 = unittest.AssertExistsAndLoadBean(t, &git_model.Branch{RepoID: repo1.ID, Name: "branch1"}) + assert.Equal(t, "branch1", branch1.Name) } From fc4e08f804704613d3a99347ef25813b9d38a422 Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Sat, 13 Apr 2024 21:33:50 +0800 Subject: [PATCH 2/8] Upgrade go-sqlite to v1.14.22 (#30462) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 8bd91706cc..475c081176 100644 --- a/go.mod +++ b/go.mod @@ -73,7 +73,7 @@ require ( github.com/lib/pq v1.10.9 github.com/markbates/goth v1.78.0 github.com/mattn/go-isatty v0.0.20 - github.com/mattn/go-sqlite3 v1.14.17 + github.com/mattn/go-sqlite3 v1.14.22 github.com/meilisearch/meilisearch-go v0.25.1 github.com/mholt/archiver/v3 v3.5.1 github.com/microcosm-cc/bluemonday v1.0.26 diff --git a/go.sum b/go.sum index 75187e15ae..4b13e15819 100644 --- a/go.sum +++ b/go.sum @@ -716,8 +716,8 @@ github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= -github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= +github.com/mattn/go-sqlite3 v1.14.22/go.mod 
h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/meilisearch/meilisearch-go v0.25.1 h1:D5wY22sn5kkpRH3uYMGlwltdUEq5regIFmO7awHz3Vo= From 09df5c9c7d35a211cc7224425e5b95b9ef9125e9 Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Sun, 14 Apr 2024 01:44:57 +0800 Subject: [PATCH 3/8] Use db.ListOptions directly instead of Paginator interface to make iteasier to use and fix performance of /pulls and /issues (#29990) (#30447) backport #29990 This PR uses `db.ListOptions` instead of `Paginor` to make the code simpler. And it also fixed the performance problem when viewing /pulls or /issues. Before the counting in fact will also do the search. Co-authored-by: Jason Song Co-authored-by: silverwind --- models/issues/issue_search.go | 22 +++++-------------- models/issues/issue_stats.go | 6 ++++- modules/indexer/internal/paginator.go | 21 ++++++------------ modules/indexer/issues/db/db.go | 11 ++++++++++ modules/indexer/issues/indexer.go | 2 +- modules/indexer/issues/internal/model.go | 2 +- .../indexer/issues/internal/tests/tests.go | 7 ++++++ .../indexer/issues/meilisearch/meilisearch.go | 12 ++++++++++ 8 files changed, 49 insertions(+), 34 deletions(-) diff --git a/models/issues/issue_search.go b/models/issues/issue_search.go index 5c05ead687..d87f2258fd 100644 --- a/models/issues/issue_search.go +++ b/models/issues/issue_search.go @@ -21,7 +21,7 @@ import ( // IssuesOptions represents options of an issue. type IssuesOptions struct { //nolint - db.Paginator + Paginator *db.ListOptions RepoIDs []int64 // overwrites RepoCond if the length is not 0 RepoCond builder.Cond AssigneeID int64 @@ -103,23 +103,11 @@ func applyLimit(sess *xorm.Session, opts *IssuesOptions) *xorm.Session { return sess } - // Warning: Do not use GetSkipTake() for *db.ListOptions - // Its implementation could reset the page size with setting.API.MaxResponseItems - if listOptions, ok := opts.Paginator.(*db.ListOptions); ok { - if listOptions.Page >= 0 && listOptions.PageSize > 0 { - var start int - if listOptions.Page == 0 { - start = 0 - } else { - start = (listOptions.Page - 1) * listOptions.PageSize - } - sess.Limit(listOptions.PageSize, start) - } - return sess + start := 0 + if opts.Paginator.Page > 1 { + start = (opts.Paginator.Page - 1) * opts.Paginator.PageSize } - - start, limit := opts.Paginator.GetSkipTake() - sess.Limit(limit, start) + sess.Limit(opts.Paginator.PageSize, start) return sess } diff --git a/models/issues/issue_stats.go b/models/issues/issue_stats.go index 99ca19f804..d65b0da098 100644 --- a/models/issues/issue_stats.go +++ b/models/issues/issue_stats.go @@ -69,13 +69,17 @@ func CountIssuesByRepo(ctx context.Context, opts *IssuesOptions) (map[int64]int6 } // CountIssues number return of issues by given conditions. -func CountIssues(ctx context.Context, opts *IssuesOptions) (int64, error) { +func CountIssues(ctx context.Context, opts *IssuesOptions, otherConds ...builder.Cond) (int64, error) { sess := db.GetEngine(ctx). Select("COUNT(issue.id) AS count"). Table("issue"). 
Join("INNER", "repository", "`issue`.repo_id = `repository`.id") applyConditions(sess, opts) + for _, cond := range otherConds { + sess.And(cond) + } + return sess.Count() } diff --git a/modules/indexer/internal/paginator.go b/modules/indexer/internal/paginator.go index de0a33c06f..ee204bf047 100644 --- a/modules/indexer/internal/paginator.go +++ b/modules/indexer/internal/paginator.go @@ -10,7 +10,7 @@ import ( ) // ParsePaginator parses a db.Paginator into a skip and limit -func ParsePaginator(paginator db.Paginator, max ...int) (int, int) { +func ParsePaginator(paginator *db.ListOptions, max ...int) (int, int) { // Use a very large number to indicate no limit unlimited := math.MaxInt32 if len(max) > 0 { @@ -19,22 +19,15 @@ func ParsePaginator(paginator db.Paginator, max ...int) (int, int) { } if paginator == nil || paginator.IsListAll() { + // It shouldn't happen. In actual usage scenarios, there should not be requests to search all. + // But if it does happen, respect it and return "unlimited". + // And it's also useful for testing. return 0, unlimited } - // Warning: Do not use GetSkipTake() for *db.ListOptions - // Its implementation could reset the page size with setting.API.MaxResponseItems - if listOptions, ok := paginator.(*db.ListOptions); ok { - if listOptions.Page >= 0 && listOptions.PageSize > 0 { - var start int - if listOptions.Page == 0 { - start = 0 - } else { - start = (listOptions.Page - 1) * listOptions.PageSize - } - return start, listOptions.PageSize - } - return 0, unlimited + if paginator.PageSize == 0 { + // Do not return any results when searching, it's used to get the total count only. + return 0, 0 } return paginator.GetSkipTake() diff --git a/modules/indexer/issues/db/db.go b/modules/indexer/issues/db/db.go index 1016523b72..05ec548435 100644 --- a/modules/indexer/issues/db/db.go +++ b/modules/indexer/issues/db/db.go @@ -78,6 +78,17 @@ func (i *Indexer) Search(ctx context.Context, options *internal.SearchOptions) ( return nil, err } + // If pagesize == 0, return total count only. It's a special case for search count. + if options.Paginator != nil && options.Paginator.PageSize == 0 { + total, err := issue_model.CountIssues(ctx, opt, cond) + if err != nil { + return nil, err + } + return &internal.SearchResult{ + Total: total, + }, nil + } + ids, total, err := issue_model.IssueIDs(ctx, opt, cond) if err != nil { return nil, err diff --git a/modules/indexer/issues/indexer.go b/modules/indexer/issues/indexer.go index ef06d8862a..0bb4710afc 100644 --- a/modules/indexer/issues/indexer.go +++ b/modules/indexer/issues/indexer.go @@ -309,7 +309,7 @@ func SearchIssues(ctx context.Context, opts *SearchOptions) ([]int64, int64, err // CountIssues counts issues by options. It is a shortcut of SearchIssues(ctx, opts) but only returns the total count. 
func CountIssues(ctx context.Context, opts *SearchOptions) (int64, error) { - opts = opts.Copy(func(options *SearchOptions) { opts.Paginator = &db_model.ListOptions{PageSize: 0} }) + opts = opts.Copy(func(options *SearchOptions) { options.Paginator = &db_model.ListOptions{PageSize: 0} }) _, total, err := SearchIssues(ctx, opts) return total, err diff --git a/modules/indexer/issues/internal/model.go b/modules/indexer/issues/internal/model.go index 031745dd2f..205ffdaaea 100644 --- a/modules/indexer/issues/internal/model.go +++ b/modules/indexer/issues/internal/model.go @@ -104,7 +104,7 @@ type SearchOptions struct { UpdatedAfterUnix *int64 UpdatedBeforeUnix *int64 - db.Paginator + Paginator *db.ListOptions SortBy SortBy // sort by field } diff --git a/modules/indexer/issues/internal/tests/tests.go b/modules/indexer/issues/internal/tests/tests.go index 06fddeb65b..335701e9b1 100644 --- a/modules/indexer/issues/internal/tests/tests.go +++ b/modules/indexer/issues/internal/tests/tests.go @@ -77,6 +77,13 @@ func TestIndexer(t *testing.T, indexer internal.Indexer) { assert.Equal(t, c.ExpectedIDs, ids) assert.Equal(t, c.ExpectedTotal, result.Total) } + + // test counting + c.SearchOptions.Paginator = &db.ListOptions{PageSize: 0} + countResult, err := indexer.Search(context.Background(), c.SearchOptions) + require.NoError(t, err) + assert.Empty(t, countResult.Hits) + assert.Equal(t, result.Total, countResult.Total) }) } } diff --git a/modules/indexer/issues/meilisearch/meilisearch.go b/modules/indexer/issues/meilisearch/meilisearch.go index 060b5c5279..7f9d254e6a 100644 --- a/modules/indexer/issues/meilisearch/meilisearch.go +++ b/modules/indexer/issues/meilisearch/meilisearch.go @@ -211,6 +211,14 @@ func (b *Indexer) Search(ctx context.Context, options *internal.SearchOptions) ( skip, limit := indexer_internal.ParsePaginator(options.Paginator, maxTotalHits) + counting := limit == 0 + if counting { + // If set limit to 0, it will be 20 by default, and -1 is not allowed. + // See https://www.meilisearch.com/docs/reference/api/search#limit + // So set limit to 1 to make the cost as low as possible, then clear the result before returning. + limit = 1 + } + // to make it non fuzzy ("typo tolerance" in meilisearch terms), we have to quote the keyword(s) // https://www.meilisearch.com/docs/reference/api/search#phrase-search keyword := doubleQuoteKeyword(options.Keyword) @@ -226,6 +234,10 @@ func (b *Indexer) Search(ctx context.Context, options *internal.SearchOptions) ( return nil, err } + if counting { + searchRes.Hits = nil + } + hits := make([]internal.Match, 0, len(searchRes.Hits)) for _, hit := range searchRes.Hits { hits = append(hits, internal.Match{ From 222d16e6ea245fe28ada9ad9ad60eab7e886902e Mon Sep 17 00:00:00 2001 From: Giteabot Date: Sun, 14 Apr 2024 19:45:51 +0800 Subject: [PATCH 4/8] fix: Fix to delete cookie when AppSubURL is non-empty (#30375) (#30468) Backport #30375 by @jtran Cookies may exist on "/subpath" and "/subpath/" for some legacy reasons (eg: changed CookiePath behavior in code). The legacy cookie should be removed correctly. 
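As a rough illustration of the shadowing problem and of the cleanup this patch performs (standard library only, simplified from the DeleteLegacySiteCookie helper added below; the cookie name and "/subpath" path are only examples):

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
        "strings"
    )

    // expireLegacyCookie asks the browser to drop a cookie that was written at
    // the legacy path with a trailing slash ("/subpath/"), so it can no longer
    // shadow the cookie written at the canonical path ("/subpath").
    func expireLegacyCookie(w http.ResponseWriter, name, canonicalPath string) {
        if canonicalPath == "" || strings.HasSuffix(canonicalPath, "/") {
            return // no more-specific legacy path can exist, nothing to clean up
        }
        http.SetCookie(w, &http.Cookie{
            Name:   name,
            Value:  "",
            MaxAge: -1,                  // expire immediately
            Path:   canonicalPath + "/", // the legacy, more specific path
        })
    }

    func main() {
        rec := httptest.NewRecorder()
        expireLegacyCookie(rec, "i_like_gitea", "/subpath")
        fmt.Println(rec.Header().Get("Set-Cookie")) // e.g. i_like_gitea=; Path=/subpath/; Max-Age=0
    }

Because the browser prefers the most specific path match, writing an expired cookie at the trailing-slash path is what stops it from overriding the cookie set by the middleware.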
Co-authored-by: Jonathan Tran Co-authored-by: wxiaoguang Co-authored-by: Kyle D --- modules/session/store.go | 7 ++++++ modules/web/middleware/cookie.go | 32 +++++++++++++++++++++++----- services/auth/source/oauth2/store.go | 3 ++- 3 files changed, 36 insertions(+), 6 deletions(-) diff --git a/modules/session/store.go b/modules/session/store.go index 4fa4d2848f..2f7ab7760b 100644 --- a/modules/session/store.go +++ b/modules/session/store.go @@ -6,6 +6,9 @@ package session import ( "net/http" + "code.gitea.io/gitea/modules/setting" + "code.gitea.io/gitea/modules/web/middleware" + "gitea.com/go-chi/session" ) @@ -18,6 +21,10 @@ type Store interface { // RegenerateSession regenerates the underlying session and returns the new store func RegenerateSession(resp http.ResponseWriter, req *http.Request) (Store, error) { + // Ensure that a cookie with a trailing slash does not take precedence over + // the cookie written by the middleware. + middleware.DeleteLegacySiteCookie(resp, setting.SessionConfig.CookieName) + s, err := session.RegenerateSession(resp, req) return s, err } diff --git a/modules/web/middleware/cookie.go b/modules/web/middleware/cookie.go index 621640895b..0bed726793 100644 --- a/modules/web/middleware/cookie.go +++ b/modules/web/middleware/cookie.go @@ -45,10 +45,32 @@ func SetSiteCookie(resp http.ResponseWriter, name, value string, maxAge int) { SameSite: setting.SessionConfig.SameSite, } resp.Header().Add("Set-Cookie", cookie.String()) - if maxAge < 0 { - // There was a bug in "setting.SessionConfig.CookiePath" code, the old default value of it was empty "". - // So we have to delete the cookie on path="" again, because some old code leaves cookies on path="". - cookie.Path = strings.TrimSuffix(setting.SessionConfig.CookiePath, "/") - resp.Header().Add("Set-Cookie", cookie.String()) + // Previous versions would use a cookie path with a trailing /. + // These are more specific than cookies without a trailing /, so + // we need to delete these if they exist. + DeleteLegacySiteCookie(resp, name) +} + +// DeleteLegacySiteCookie deletes the cookie with the given name at the cookie +// path with a trailing /, which would unintentionally override the cookie. +func DeleteLegacySiteCookie(resp http.ResponseWriter, name string) { + if setting.SessionConfig.CookiePath == "" || strings.HasSuffix(setting.SessionConfig.CookiePath, "/") { + // If the cookie path ends with /, no legacy cookies will take + // precedence, so do nothing. The exception is that cookies with no + // path could override other cookies, but it's complicated and we don't + // currently handle that. 
+ return } + + cookie := &http.Cookie{ + Name: name, + Value: "", + MaxAge: -1, + Path: setting.SessionConfig.CookiePath + "/", + Domain: setting.SessionConfig.Domain, + Secure: setting.SessionConfig.Secure, + HttpOnly: true, + SameSite: setting.SessionConfig.SameSite, + } + resp.Header().Add("Set-Cookie", cookie.String()) } diff --git a/services/auth/source/oauth2/store.go b/services/auth/source/oauth2/store.go index 394bf99463..90fa965602 100644 --- a/services/auth/source/oauth2/store.go +++ b/services/auth/source/oauth2/store.go @@ -9,6 +9,7 @@ import ( "net/http" "code.gitea.io/gitea/modules/log" + session_module "code.gitea.io/gitea/modules/session" chiSession "gitea.com/go-chi/session" "github.com/gorilla/sessions" @@ -65,7 +66,7 @@ func (st *SessionsStore) Save(r *http.Request, w http.ResponseWriter, session *s chiStore := chiSession.GetSession(r) if session.IsNew { - _, _ = chiSession.RegenerateSession(w, r) + _, _ = session_module.RegenerateSession(w, r) session.IsNew = false } From 928c0d4f460ef2a53766354f2ffc97a7845ac78c Mon Sep 17 00:00:00 2001 From: yp05327 <576951401@qq.com> Date: Sun, 14 Apr 2024 21:18:06 +0900 Subject: [PATCH 5/8] Fix mirror error when mirror repo is empty (#30432) (#30467) Backport #30432 Fix https://github.com/go-gitea/gitea/issues/30424 ps: convert `gitrepo.OpenRepository` to `git.OpenRepository` remove `ctx` from `checkAndUpdateEmptyRepository` Co-authored-by: Giteabot --- services/mirror/mirror_pull.go | 40 +++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 17 deletions(-) diff --git a/services/mirror/mirror_pull.go b/services/mirror/mirror_pull.go index 38b6986c4a..62d2bafaf5 100644 --- a/services/mirror/mirror_pull.go +++ b/services/mirror/mirror_pull.go @@ -449,19 +449,17 @@ func SyncPullMirror(ctx context.Context, repoID int64) bool { return false } - var gitRepo *git.Repository - if len(results) == 0 { - log.Trace("SyncMirrors [repo: %-v]: no branches updated", m.Repo) - } else { - log.Trace("SyncMirrors [repo: %-v]: %d branches updated", m.Repo, len(results)) - gitRepo, err = git.OpenRepository(ctx, m.Repo.RepoPath()) - if err != nil { - log.Error("SyncMirrors [repo: %-v]: unable to OpenRepository: %v", m.Repo, err) - return false - } - defer gitRepo.Close() + gitRepo, err := git.OpenRepository(ctx, m.Repo.RepoPath()) + if err != nil { + log.Error("SyncMirrors [repo: %-v]: unable to OpenRepository: %v", m.Repo, err) + return false + } + defer gitRepo.Close() + log.Trace("SyncMirrors [repo: %-v]: %d branches updated", m.Repo, len(results)) + if len(results) > 0 { if ok := checkAndUpdateEmptyRepository(m, gitRepo, results); !ok { + log.Error("SyncMirrors [repo: %-v]: checkAndUpdateEmptyRepository: %v", m.Repo, err) return false } } @@ -533,16 +531,24 @@ func SyncPullMirror(ctx context.Context, repoID int64) bool { } log.Trace("SyncMirrors [repo: %-v]: done notifying updated branches/tags - now updating last commit time", m.Repo) - // Get latest commit date and update to current repository updated time - commitDate, err := git.GetLatestCommitTime(ctx, m.Repo.RepoPath()) + isEmpty, err := gitRepo.IsEmpty() if err != nil { - log.Error("SyncMirrors [repo: %-v]: unable to GetLatestCommitDate: %v", m.Repo, err) + log.Error("SyncMirrors [repo: %-v]: unable to check empty git repo: %v", m.Repo, err) return false } + if !isEmpty { + // Get latest commit date and update to current repository updated time + commitDate, err := git.GetLatestCommitTime(ctx, m.Repo.RepoPath()) + if err != nil { + log.Error("SyncMirrors [repo: %-v]: unable to 
GetLatestCommitDate: %v", m.Repo, err) + return false + } + + if err = repo_model.UpdateRepositoryUpdatedTime(ctx, m.RepoID, commitDate); err != nil { + log.Error("SyncMirrors [repo: %-v]: unable to update repository 'updated_unix': %v", m.Repo, err) + return false + } - if err = repo_model.UpdateRepositoryUpdatedTime(ctx, m.RepoID, commitDate); err != nil { - log.Error("SyncMirrors [repo: %-v]: unable to update repository 'updated_unix': %v", m.Repo, err) - return false } log.Trace("SyncMirrors [repo: %-v]: Successfully updated", m.Repo) From b6379d2f167551560c870d2d705269c9ba6fc3bc Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Sun, 14 Apr 2024 20:42:50 +0800 Subject: [PATCH 6/8] Change the default maxPerPage for gitbucket (#30392) (#30471) Backport #30392 This patch improves the migration from gitbucket to gitea. The gitbucket uses it's own internal perPage value (= 25) for paging and ignore per_page arguments in the requested URL. This cause gitea to migrate only 25 issues and 25 PRs from gitbucket repository. This may not happens on old gitbucket. But recent gitbucket 4.40 or 4.38.4 has this problem. This patch change to use this internally hardcoded perPage of gitbucket as gitea's maxPerPage numer when migrating from gitbucket. There are several perPage values in gitbucket like 25 for Isseus/PRs and 10 for Releases. Some of those API doesn't support paging yet. It sounds difficult to implement, but using the minimum number among them worked out very well. So, I use 10 in this patch. Brief descriptions of problems and this patch are also available in https://github.com/go-gitea/gitea/issues/30316. In addition, I'm not sure what kind of test cases are possible to write here. It's a test for migration, so it requires testing gitbucket server and gitea server, I guess. Please let me know if it is possible to write such test cases here. Thanks! Co-authored-by: Kazushi (Jam) Marukawa --- services/migrations/gitbucket.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/services/migrations/gitbucket.go b/services/migrations/gitbucket.go index 5f11555839..4fe9e30a39 100644 --- a/services/migrations/gitbucket.go +++ b/services/migrations/gitbucket.go @@ -72,6 +72,11 @@ func (g *GitBucketDownloader) LogString() string { // NewGitBucketDownloader creates a GitBucket downloader func NewGitBucketDownloader(ctx context.Context, baseURL, userName, password, token, repoOwner, repoName string) *GitBucketDownloader { githubDownloader := NewGithubDownloaderV3(ctx, baseURL, userName, password, token, repoOwner, repoName) + // Gitbucket 4.40 uses different internal hard-coded perPage values. + // Issues, PRs, and other major parts use 25. Release page uses 10. + // Some API doesn't support paging yet. Sounds difficult, but using + // minimum number among them worked out very well. + githubDownloader.maxPerPage = 10 githubDownloader.SkipReactions = true githubDownloader.SkipReviews = true return &GitBucketDownloader{ From 430fe6c0c14e6b6761f589cbef54a79b43a1445a Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Mon, 15 Apr 2024 11:29:42 +0800 Subject: [PATCH 7/8] Avoid losing token when updating mirror settings (#30429) (#30466) Fix #30416. Backport #30429 Before (it shows as "Unset" while there's a token): image After: image The username shows as "oauth2" because of https://github.com/go-gitea/gitea/blob/f9fdac9809335729b2ac3227b2a5f71a62fc64ad/services/migrations/dump.go#L99 I have checked that all usage of `MirrorRemoteAddress` has been updated. 
image However, it needs to be checked again when backporting. Co-authored-by: Jason Song --- modules/templates/util_misc.go | 38 +++++++++++++++------------- services/mirror/mirror_pull.go | 12 +++++++-- templates/repo/settings/options.tmpl | 2 +- 3 files changed, 32 insertions(+), 20 deletions(-) diff --git a/modules/templates/util_misc.go b/modules/templates/util_misc.go index 6c1b4ab240..774385483b 100644 --- a/modules/templates/util_misc.go +++ b/modules/templates/util_misc.go @@ -142,35 +142,39 @@ type remoteAddress struct { Password string } -func mirrorRemoteAddress(ctx context.Context, m *repo_model.Repository, remoteName string, ignoreOriginalURL bool) remoteAddress { - a := remoteAddress{} - - remoteURL := m.OriginalURL - if ignoreOriginalURL || remoteURL == "" { - var err error - remoteURL, err = git.GetRemoteAddress(ctx, m.RepoPath(), remoteName) - if err != nil { - log.Error("GetRemoteURL %v", err) - return a - } +func mirrorRemoteAddress(ctx context.Context, m *repo_model.Repository, remoteName string) remoteAddress { + ret := remoteAddress{} + remoteURL, err := git.GetRemoteAddress(ctx, m.RepoPath(), remoteName) + if err != nil { + log.Error("GetRemoteURL %v", err) + return ret } u, err := giturl.Parse(remoteURL) if err != nil { log.Error("giturl.Parse %v", err) - return a + return ret } if u.Scheme != "ssh" && u.Scheme != "file" { if u.User != nil { - a.Username = u.User.Username() - a.Password, _ = u.User.Password() + ret.Username = u.User.Username() + ret.Password, _ = u.User.Password() } - u.User = nil } - a.Address = u.String() - return a + // The URL stored in the git repo could contain authentication, + // erase it, or it will be shown in the UI. + u.User = nil + ret.Address = u.String() + // Why not use m.OriginalURL to set ret.Address? + // It should be OK to use it, since m.OriginalURL should be the same as the authentication-erased URL from the Git repository. + // However, the old code has already stored authentication in m.OriginalURL when updating mirror settings. + // That means we need to use "giturl.Parse" for m.OriginalURL again to ensure authentication is erased. + // Instead of doing this, why not directly use the authentication-erased URL from the Git repository? + // It should be the same as long as there are no bugs. 
+ + return ret } func FilenameIsImage(filename string) bool { diff --git a/services/mirror/mirror_pull.go b/services/mirror/mirror_pull.go index 62d2bafaf5..71cb35c4fc 100644 --- a/services/mirror/mirror_pull.go +++ b/services/mirror/mirror_pull.go @@ -14,6 +14,7 @@ import ( system_model "code.gitea.io/gitea/models/system" "code.gitea.io/gitea/modules/cache" "code.gitea.io/gitea/modules/git" + giturl "code.gitea.io/gitea/modules/git/url" "code.gitea.io/gitea/modules/lfs" "code.gitea.io/gitea/modules/log" "code.gitea.io/gitea/modules/process" @@ -30,10 +31,15 @@ const gitShortEmptySha = "0000000" // UpdateAddress writes new address to Git repository and database func UpdateAddress(ctx context.Context, m *repo_model.Mirror, addr string) error { + u, err := giturl.Parse(addr) + if err != nil { + return fmt.Errorf("invalid addr: %v", err) + } + remoteName := m.GetRemoteName() repoPath := m.GetRepository(ctx).RepoPath() // Remove old remote - _, _, err := git.NewCommand(ctx, "remote", "rm").AddDynamicArguments(remoteName).RunStdString(&git.RunOpts{Dir: repoPath}) + _, _, err = git.NewCommand(ctx, "remote", "rm").AddDynamicArguments(remoteName).RunStdString(&git.RunOpts{Dir: repoPath}) if err != nil && !strings.HasPrefix(err.Error(), "exit status 128 - fatal: No such remote ") { return err } @@ -70,7 +76,9 @@ func UpdateAddress(ctx context.Context, m *repo_model.Mirror, addr string) error } } - m.Repo.OriginalURL = addr + // erase authentication before storing in database + u.User = nil + m.Repo.OriginalURL = u.String() return repo_model.UpdateRepositoryCols(ctx, m.Repo, "original_url") } diff --git a/templates/repo/settings/options.tmpl b/templates/repo/settings/options.tmpl index 6e7bd4ce4d..6761c0bd34 100644 --- a/templates/repo/settings/options.tmpl +++ b/templates/repo/settings/options.tmpl @@ -151,7 +151,7 @@ - {{$address := MirrorRemoteAddress $.Context .Repository .PullMirror.GetRemoteName false}} + {{$address := MirrorRemoteAddress $.Context .Repository .PullMirror.GetRemoteName}}
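Note on the patch above: the core idea is to drop the userinfo component of the remote URL before it is stored in OriginalURL or rendered in the settings form. A simplified sketch with the standard net/url package (the patch itself uses Gitea's modules/git/url and also keeps the username/password separately so the form can still show them; the host and token below are invented):

    package main

    import (
        "fmt"
        "net/url"
    )

    // splitRemote separates the credentials from a remote address so the
    // address can be stored or displayed without them.
    func splitRemote(remote string) (string, string, string, error) {
        u, err := url.Parse(remote)
        if err != nil {
            return "", "", "", err
        }
        var username, password string
        if u.User != nil {
            username = u.User.Username()
            password, _ = u.User.Password()
        }
        u.User = nil // never keep "user:token@" in the stored or rendered URL
        return u.String(), username, password, nil
    }

    func main() {
        addr, user, _, _ := splitRemote("https://oauth2:secret-token@example.com/owner/repo.git")
        fmt.Println(addr) // https://example.com/owner/repo.git
        fmt.Println(user) // oauth2
    }

Round-tripping through a URL parser, rather than doing string surgery, guarantees the stored address never carries credentials regardless of how they were encoded.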
From a0ca311165890561f9f8bc64b9564cd5da3f3843 Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Mon, 15 Apr 2024 15:43:20 +0800 Subject: [PATCH 8/8] Fix commit status cache which missed target_url (#30426) (#30445) Fix #30421 Backport #30426 Co-authored-by: Jason Song --- .../repository/commitstatus/commitstatus.go | 50 ++++++++++++++----- 1 file changed, 38 insertions(+), 12 deletions(-) diff --git a/services/repository/commitstatus/commitstatus.go b/services/repository/commitstatus/commitstatus.go index fb4ad1f1d8..65d2e3d468 100644 --- a/services/repository/commitstatus/commitstatus.go +++ b/services/repository/commitstatus/commitstatus.go @@ -14,6 +14,7 @@ import ( user_model "code.gitea.io/gitea/models/user" "code.gitea.io/gitea/modules/cache" "code.gitea.io/gitea/modules/git" + "code.gitea.io/gitea/modules/json" "code.gitea.io/gitea/modules/log" api "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/services/automerge" @@ -24,15 +25,41 @@ func getCacheKey(repoID int64, brancheName string) string { return fmt.Sprintf("commit_status:%x", hashBytes) } -func updateCommitStatusCache(ctx context.Context, repoID int64, branchName string, status api.CommitStatusState) error { +type commitStatusCacheValue struct { + State string `json:"state"` + TargetURL string `json:"target_url"` +} + +func getCommitStatusCache(repoID int64, branchName string) *commitStatusCacheValue { c := cache.GetCache() - if c == nil { + statusStr, ok := c.Get(getCacheKey(repoID, branchName)).(string) + if ok && statusStr != "" { + var cv commitStatusCacheValue + err := json.Unmarshal([]byte(statusStr), &cv) + if err == nil && cv.State != "" { + return &cv + } + if err != nil { + log.Warn("getCommitStatusCache: json.Unmarshal failed: %v", err) + } + } + return nil +} + +func updateCommitStatusCache(repoID int64, branchName string, state api.CommitStatusState, targetURL string) error { + c := cache.GetCache() + bs, err := json.Marshal(commitStatusCacheValue{ + State: state.String(), + TargetURL: targetURL, + }) + if err != nil { + log.Warn("updateCommitStatusCache: json.Marshal failed: %v", err) return nil } - return c.Put(getCacheKey(repoID, branchName), string(status), 3*24*60) + return c.Put(getCacheKey(repoID, branchName), string(bs), 3*24*60) } -func deleteCommitStatusCache(ctx context.Context, repoID int64, branchName string) error { +func deleteCommitStatusCache(repoID int64, branchName string) error { c := cache.GetCache() if c == nil { return nil @@ -76,7 +103,7 @@ func CreateCommitStatus(ctx context.Context, repo *repo_model.Repository, creato } if commit.ID.String() == defaultBranchCommit.ID.String() { // since one commit status updated, the combined commit status should be invalid - if err := deleteCommitStatusCache(ctx, repo.ID, repo.DefaultBranch); err != nil { + if err := deleteCommitStatusCache(repo.ID, repo.DefaultBranch); err != nil { log.Error("deleteCommitStatusCache[%d:%s] failed: %v", repo.ID, repo.DefaultBranch, err) } } @@ -93,12 +120,11 @@ func CreateCommitStatus(ctx context.Context, repo *repo_model.Repository, creato // FindReposLastestCommitStatuses loading repository default branch latest combinded commit status with cache func FindReposLastestCommitStatuses(ctx context.Context, repos []*repo_model.Repository) ([]*git_model.CommitStatus, error) { results := make([]*git_model.CommitStatus, len(repos)) - c := cache.GetCache() - if c != nil { - for i, repo := range repos { - status, ok := c.Get(getCacheKey(repo.ID, repo.DefaultBranch)).(string) - if ok && status != "" { - results[i] = 
&git_model.CommitStatus{State: api.CommitStatusState(status)} + for i, repo := range repos { + if cv := getCommitStatusCache(repo.ID, repo.DefaultBranch); cv != nil { + results[i] = &git_model.CommitStatus{ + State: api.CommitStatusState(cv.State), + TargetURL: cv.TargetURL, } } } @@ -127,7 +153,7 @@ func FindReposLastestCommitStatuses(ctx context.Context, repos []*repo_model.Rep if results[i] == nil { results[i] = git_model.CalcCommitStatus(repoToItsLatestCommitStatuses[repo.ID]) if results[i].State != "" { - if err := updateCommitStatusCache(ctx, repo.ID, repo.DefaultBranch, results[i].State); err != nil { + if err := updateCommitStatusCache(repo.ID, repo.DefaultBranch, results[i].State, results[i].TargetURL); err != nil { log.Error("updateCommitStatusCache[%d:%s] failed: %v", repo.ID, repo.DefaultBranch, err) } }
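To make the shape of the new cache entry concrete, here is a standalone round-trip of the value that patch 8 stores (a sketch only: the real code goes through Gitea's modules/json and cache wrappers, and the target URL below is invented):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Mirrors the commitStatusCacheValue introduced above: the cache now keeps
    // the target URL alongside the state instead of a bare state string.
    type commitStatusCacheValue struct {
        State     string `json:"state"`
        TargetURL string `json:"target_url"`
    }

    func main() {
        // What gets written to the cache when a combined status is computed.
        raw, _ := json.Marshal(commitStatusCacheValue{
            State:     "success",
            TargetURL: "https://ci.example.com/builds/1",
        })
        fmt.Println(string(raw)) // {"state":"success","target_url":"https://ci.example.com/builds/1"}

        // What a cache hit yields back when listing repositories.
        var cv commitStatusCacheValue
        if err := json.Unmarshal(raw, &cv); err == nil && cv.State != "" {
            fmt.Println(cv.State, cv.TargetURL)
        }
    }

Storing a small JSON document instead of the raw state string is what lets a cache hit render the status link without falling back to the database.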