// parseLinkHeaderNext extracts the URL tagged with the "next" relation from
// an RFC 8288 (formerly RFC 5988) Link header. It returns "" when no next
// page is advertised.
//
// Each comma-separated segment has the shape `<url>; param=value; ...`.
// RFC 8288 §3.3 allows the rel parameter in both quoted (rel="next") and
// unquoted token (rel=next) form, and relation types compare
// case-insensitively — so the params are parsed rather than substring-matched.
func parseLinkHeaderNext(link string) string {
	for _, segment := range strings.Split(link, ",") {
		segment = strings.TrimSpace(segment)

		// Scan the ;-separated parameters for rel=next / rel="next".
		isNext := false
		for _, param := range strings.Split(segment, ";") {
			key, value, found := strings.Cut(strings.TrimSpace(param), "=")
			if !found {
				continue
			}
			value = strings.Trim(strings.TrimSpace(value), `"`)
			if strings.EqualFold(strings.TrimSpace(key), "rel") && strings.EqualFold(value, "next") {
				isNext = true
				break
			}
		}
		if !isNext {
			continue
		}

		// The target URL is the <...>-delimited part of the segment.
		start := strings.Index(segment, "<")
		end := strings.Index(segment, ">")
		if start >= 0 && end > start {
			return segment[start+1 : end]
		}
	}
	return ""
}

// concatPaginatedJSON parses each body as a JSON array and merges them into
// a single array, preserving page order.
//
// It errors if any body isn't an array (e.g. an object response means the
// endpoint isn't paginated and --paginate doesn't apply). Elements are kept
// as json.RawMessage so page contents are re-emitted byte-for-byte without
// a decode/re-encode round trip.
func concatPaginatedJSON(bodies [][]byte) ([]byte, error) {
	// Start non-nil so zero pages still marshal as "[]", not "null".
	merged := make([]json.RawMessage, 0)
	for i, b := range bodies {
		var page []json.RawMessage
		if err := json.Unmarshal(b, &page); err != nil {
			return nil, fmt.Errorf("--paginate requires JSON array responses; page %d wasn't an array: %w", i+1, err)
		}
		merged = append(merged, page...)
	}
	return json.Marshal(merged)
}
+ doOnce := func(r *http.Request) (body []byte, header http.Header, status int, proto string, statusText string, retErr error) { + ios.StartSpinner("Requesting...") + resp, err := api.SharedHTTPClient.Do(r) + ios.StopSpinner() + if err != nil { + return nil, nil, 0, "", "", fmt.Errorf("failed to perform request: %w", err) + } + defer func() { _ = resp.Body.Close() }() + + body, err = io.ReadAll(io.LimitReader(resp.Body, maxAPIResponseBytes+1)) + if err != nil { + return nil, nil, 0, "", "", fmt.Errorf("failed to read response body: %w", err) + } + if int64(len(body)) > maxAPIResponseBytes { + return nil, nil, 0, "", "", fmt.Errorf("response body exceeded %d bytes (use a different tool for bulk transfers)", maxAPIResponseBytes) + } + return body, resp.Header, resp.StatusCode, resp.Proto, resp.Status, nil + } + + respBody, respHeader, statusCode, proto, status, err := doOnce(req) + if err != nil { + return err } - defer func() { _ = resp.Body.Close() }() - // Print response headers if requested if include { - fmt.Fprintf(ios.Out, "%s %s\n", resp.Proto, resp.Status) - for key, values := range resp.Header { + fmt.Fprintf(ios.Out, "%s %s\n", proto, status) + for key, values := range respHeader { for _, v := range values { fmt.Fprintf(ios.Out, "%s: %s\n", key, v) } @@ -220,32 +274,60 @@ func runAPI(cmd *cobra.Command, args []string) error { fmt.Fprintln(ios.Out) } - // Read response body with a hard ceiling so a runaway upstream can't OOM - // the CLI. Read maxAPIResponseBytes+1 to detect overflow. 
- respBody, err := io.ReadAll(io.LimitReader(resp.Body, maxAPIResponseBytes+1)) - if err != nil { - return fmt.Errorf("failed to read response body: %w", err) - } - if int64(len(respBody)) > maxAPIResponseBytes { - return fmt.Errorf("response body exceeded %d bytes (use a different tool for bulk transfers)", maxAPIResponseBytes) - } - - // Handle non-2xx status codes - if resp.StatusCode < 200 || resp.StatusCode >= 300 { + if statusCode < 200 || statusCode >= 300 { if !silent { fmt.Fprint(ios.ErrOut, string(respBody)) if len(respBody) > 0 && respBody[len(respBody)-1] != '\n' { fmt.Fprintln(ios.ErrOut) } } - return fmt.Errorf("API request failed with status %d", resp.StatusCode) + return fmt.Errorf("API request failed with status %d", statusCode) + } + + // Follow `Link: rel="next"` headers when --paginate is set, accumulating + // each page's body. After the loop, concatPaginatedJSON merges them into + // a single JSON array. Endpoint must be paginatable (returns an array). + if paginate { + bodies := [][]byte{respBody} + nextURL := parseLinkHeaderNext(respHeader.Get("Link")) + for nextURL != "" { + nextReq, err := http.NewRequest(http.MethodGet, nextURL, nil) + if err != nil { + return fmt.Errorf("failed to build paginated request: %w", err) + } + if host.Token != "" { + nextReq.Header.Set("Authorization", "token "+host.Token) + } + nextReq.Header.Set("Accept", "application/json") + for _, h := range headers { + key, value, found := strings.Cut(h, ":") + if !found { + continue + } + nextReq.Header.Set(strings.TrimSpace(key), strings.TrimSpace(value)) + } + pageBody, pageHeader, pageStatus, _, _, err := doOnce(nextReq) + if err != nil { + return err + } + if pageStatus < 200 || pageStatus >= 300 { + return fmt.Errorf("paginated request to %s failed with status %d", nextURL, pageStatus) + } + bodies = append(bodies, pageBody) + nextURL = parseLinkHeaderNext(pageHeader.Get("Link")) + } + merged, err := concatPaginatedJSON(bodies) + if err != nil { + return err + } + 
respBody = merged } if silent || len(respBody) == 0 { return nil } - contentType := resp.Header.Get("Content-Type") + contentType := respHeader.Get("Content-Type") isJSON := strings.Contains(contentType, "json") || json.Valid(respBody) // If the user asked for JSON projection or jq filtering, route through diff --git a/cmd/issue.go b/cmd/issue.go index f5e2517..25f54f3 100644 --- a/cmd/issue.go +++ b/cmd/issue.go @@ -221,13 +221,24 @@ func runIssueList(cmd *cobra.Command, args []string) error { } ios.StartSpinner("Fetching issues...") - issues, _, err := client.ListRepoIssues(owner, name, gitea.ListIssueOption{ - State: stateType, - Labels: labels, - KeyWord: search, - CreatedBy: author, - AssignedBy: assignee, - ListOptions: gitea.ListOptions{PageSize: limit}, + // ListRepoIssues returns both issues AND PRs (we filter PRs out below). + // Pull more than `limit` so post-filter we still have `limit` real issues + // — overshoot 2x as a heuristic. paginateGitea(0, ...) would be safer + // but spends extra round-trips; keep it bounded. 
// paginateGitea walks pages of a Gitea/Forgejo SDK list method until a short
// or empty page signals the last page, or until limit items are collected.
// limit == 0 means unlimited.
//
// Forgejo/Gitea caps PageSize at 50, so a naive `PageSize: limit` for
// limit > 50 silently truncated results across most `fj * list` commands.
// This helper centralizes the loop so every list command paginates
// consistently.
//
// fetch is called with (page, pageSize) and returns the items for that page;
// the 1-based page matches the gitea SDK convention. pageSize is held
// constant across calls on purpose: server-side pagination addresses items
// by page*pageSize offsets, so shrinking the final request would shift which
// items it returns.
//
// On error, paginateGitea returns (nil, err) — never partial results.
func paginateGitea[T any](limit int, fetch func(page, pageSize int) ([]T, error)) ([]T, error) {
	const maxPageSize = 50 // server-enforced ceiling
	pageSize := maxPageSize
	if limit > 0 && limit < pageSize {
		pageSize = limit
	}

	var all []T
	for page := 1; ; page++ {
		if limit > 0 && len(all) >= limit {
			break
		}
		batch, err := fetch(page, pageSize)
		if err != nil {
			// Convention: the slice result is meaningless on error, so do
			// not hand back a partial accumulation.
			return nil, err
		}
		if len(batch) == 0 {
			break
		}
		all = append(all, batch...)
		// A short page (fewer items than requested) is the conventional
		// "you've reached the end" signal — saves one extra round-trip.
		if len(batch) < pageSize {
			break
		}
	}

	// The last full-size page may overshoot the limit; trim to exactly it.
	if limit > 0 && len(all) > limit {
		all = all[:limit]
	}
	return all, nil
}
- if len(batch) < 50 { - break - } - page++ - } - prs = filterPRs(prs, author, assignee, labels, search, draft, head, base) - if len(prs) > limit { - prs = prs[:limit] } } else { - prs, _, err = client.ListRepoPullRequests(owner, name, gitea.ListPullRequestsOptions{ - State: stateType, - ListOptions: gitea.ListOptions{PageSize: limit}, - }) - if err != nil { - ios.StopSpinner() - return fmt.Errorf("failed to list pull requests: %w", err) - } + prs, err = paginateGitea(limit, fetchPage) } ios.StopSpinner() + if err != nil { + return fmt.Errorf("failed to list pull requests: %w", err) + } if wantJSON(cmd) { return outputJSON(cmd, prs) diff --git a/cmd/repo.go b/cmd/repo.go index b9bd7fc..0514c4d 100644 --- a/cmd/repo.go +++ b/cmd/repo.go @@ -216,17 +216,18 @@ func runRepoList(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to get user info: %w", err) } - repos, _, err := client.ListUserRepos(user.UserName, gitea.ListReposOptions{}) + limit, _ := cmd.Flags().GetInt("limit") + repos, err := paginateGitea(limit, func(page, pageSize int) ([]*gitea.Repository, error) { + batch, _, err := client.ListUserRepos(user.UserName, gitea.ListReposOptions{ + ListOptions: gitea.ListOptions{Page: page, PageSize: pageSize}, + }) + return batch, err + }) ios.StopSpinner() if err != nil { return fmt.Errorf("failed to list repositories: %w", err) } - limit, _ := cmd.Flags().GetInt("limit") - if limit > 0 && len(repos) > limit { - repos = repos[:limit] - } - if wantJSON(cmd) { return outputJSON(cmd, repos) }