chore: fix typos in code comments (#6556)

pull/6562/head
Atin 2021-05-19 10:42:09 +05:30 committed by GitHub
parent 509a392272
commit d6987ee05b
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
25 changed files with 52 additions and 52 deletions

@@ -17,7 +17,7 @@ main() {
fi
if [ ! -d "${BACKUP_PATH}" ]; then
echo "Error: BACKUP_PATH does't exist or is not a directory" 1>&2
echo "Error: BACKUP_PATH doesn't exist or is not a directory" 1>&2
exit 1
fi

@@ -230,7 +230,7 @@ func runServ(c *cli.Context) error {
// Check if the key can access to the repository in case of it is a deploy key (a deploy keys != user key).
// A deploy key doesn't represent a signed in user, so in a site with Auth.RequireSignInView enabled,
// we should give read access only in repositories where this deploy key is in use. In other cases,
// a server or system using an active deploy key can get read access to all repositories on a Gogs instace.
// a server or system using an active deploy key can get read access to all repositories on a Gogs instance.
if key.IsDeployKey() && conf.Auth.RequireSigninView {
checkDeployKey(key, repo)
}
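The rule described in the comment above is simple to state: when sign-in is required to view content, a deploy key should only be able to read the one repository it is registered for. A rough, self-contained sketch of that check (type and field names are made up for illustration; this is not the actual Gogs `checkDeployKey`):

```go
package main

import "fmt"

// Minimal stand-ins for the sketch; the real Gogs models are richer.
type DeployKey struct {
	ID     int64
	RepoID int64 // repository the deploy key is registered for
}

type Repository struct{ ID int64 }

// deployKeyCanRead reports whether a deploy key may read the given
// repository when sign-in is required to view content: only the
// repository the key is registered for is allowed.
func deployKeyCanRead(key *DeployKey, repo *Repository) bool {
	return key.RepoID == repo.ID
}

func main() {
	key := &DeployKey{ID: 1, RepoID: 42}
	fmt.Println(deployKeyCanRead(key, &Repository{ID: 42})) // true
	fmt.Println(deployKeyCanRead(key, &Repository{ID: 7}))  // false
}
```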

@@ -53,7 +53,7 @@ var (
workDirOnce sync.Once
)
// WorkDir returns the absolute path of work directory. It reads the value of envrionment
// WorkDir returns the absolute path of work directory. It reads the value of environment
// variable GOGS_WORK_DIR. When not set, it uses the directory where the application's
// binary is located.
func WorkDir() string {
@@ -75,8 +75,8 @@ var (
)
// CustomDir returns the absolute path of the custom directory that contains local overrides.
// It reads the value of envrionment variable GOGS_CUSTOM. When not set, it uses the work
// directory returned by WorkDir fucntion.
// It reads the value of environment variable GOGS_CUSTOM. When not set, it uses the work
// directory returned by WorkDir function.
func CustomDir() string {
customDirOnce.Do(func() {
customDir = os.Getenv("GOGS_CUSTOM")
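Both comments above describe the same lookup pattern: read an environment variable once and fall back to a computed default. A minimal sketch of that pattern, assuming the documented behaviour (fall back to the binary's directory for the work directory and to a path under it for the custom directory); it is not the exact Gogs implementation:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sync"
)

var (
	workDir     string
	workDirOnce sync.Once
)

// workDirectory uses GOGS_WORK_DIR when set, otherwise the directory
// where the running binary is located, and caches the result.
func workDirectory() string {
	workDirOnce.Do(func() {
		workDir = os.Getenv("GOGS_WORK_DIR")
		if workDir == "" {
			if exe, err := os.Executable(); err == nil {
				workDir = filepath.Dir(exe)
			}
		}
	})
	return workDir
}

// customDirectory uses GOGS_CUSTOM when set, otherwise a directory
// derived from the work directory (assumed to be <work dir>/custom here).
func customDirectory() string {
	if dir := os.Getenv("GOGS_CUSTOM"); dir != "" {
		return dir
	}
	return filepath.Join(workDirectory(), "custom")
}

func main() {
	fmt.Println(workDirectory(), customDirectory())
}
```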

@@ -32,7 +32,7 @@ var Log *logConf
// argument "hookMode" is true, it only initializes the root path for log files.
// NOTE: Because we always create a console logger as the primary logger at init time,
// we need to remove it in case the user doesn't configure to use it after the logging
// service is initalized.
// service is initialized.
func initLogConf(cfg *ini.File, hookMode bool) (_ *logConf, hasConsole bool, _ error) {
rootPath := cfg.Section("log").Key("ROOT_PATH").MustString(filepath.Join(WorkDir(), "log"))
if hookMode {

@@ -401,7 +401,7 @@ func RepoRef() macaron.Handler {
c.Data["IsViewTag"] = c.Repo.IsViewTag
c.Data["IsViewCommit"] = c.Repo.IsViewCommit
// People who have push access or have fored repository can propose a new pull request.
// People who have push access or have forked repository can propose a new pull request.
if c.Repo.IsWriter() || (c.IsLogged && c.User.HasForkedRepo(c.Repo.Repository.ID)) {
// Pull request is allowed if this is a fork repository
// and base repository accepts pull requests.

@@ -257,7 +257,7 @@ func DeleteLabel(repoID, labelID int64) error {
// |___/____ >____ >____/ \___ >_______ (____ /___ /\___ >____/
// \/ \/ \/ \/ \/ \/ \/
// IssueLabel represetns an issue-lable relation.
// IssueLabel represents an issue-lable relation.
type IssueLabel struct {
ID int64
IssueID int64 `xorm:"UNIQUE(s)"`

@@ -66,7 +66,7 @@ func (m *Mirror) AfterSet(colName string, _ xorm.Cell) {
}
}
// ScheduleNextSync calculates and sets next sync time based on repostiroy mirror setting.
// ScheduleNextSync calculates and sets next sync time based on repository mirror setting.
func (m *Mirror) ScheduleNextSync() {
m.NextSync = time.Now().Add(time.Duration(m.Interval) * time.Hour)
}

@@ -58,7 +58,7 @@ func (org *User) GetTeams() error {
return org.getTeams(x)
}
// TeamsHaveAccessToRepo returns all teamsthat have given access level to the repository.
// TeamsHaveAccessToRepo returns all teams that have given access level to the repository.
func (org *User) TeamsHaveAccessToRepo(repoID int64, mode AccessMode) ([]*Team, error) {
return GetTeamsHaveAccessToRepo(org.ID, repoID, mode)
}
@@ -470,7 +470,7 @@ func (org *User) getUserTeams(e Engine, userID int64, cols ...string) ([]*Team,
Cols(cols...).Find(&teams)
}
// GetUserTeamIDs returns of all team IDs of the organization that user is memeber of.
// GetUserTeamIDs returns of all team IDs of the organization that user is member of.
func (org *User) GetUserTeamIDs(userID int64) ([]int64, error) {
teams, err := org.getUserTeams(x, userID, "team.id")
if err != nil {

@@ -362,7 +362,7 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle
// NOTE: It is possible that head branch is not fully sync with base branch
// for merge commits, so we need to get latest head commit and append merge
// commit manully to avoid strange diff commits produced.
// commit manually to avoid strange diff commits produced.
mergeCommit, err := baseGitRepo.BranchCommit(pr.BaseBranch)
if err != nil {
log.Error("Failed to get base branch %q commit: %v", pr.BaseBranch, err)
@@ -395,7 +395,7 @@ func (pr *PullRequest) Merge(doer *User, baseGitRepo *git.Repository, mergeStyle
return nil
}
// testPatch checks if patch can be merged to base repository without conflit.
// testPatch checks if patch can be merged to base repository without conflict.
// FIXME: make a mechanism to clean up stable local copies.
func (pr *PullRequest) testPatch() (err error) {
if pr.BaseRepo == nil {
@@ -410,9 +410,9 @@ func (pr *PullRequest) testPatch() (err error) {
return fmt.Errorf("BaseRepo.PatchPath: %v", err)
}
// Fast fail if patch does not exist, this assumes data is cruppted.
// Fast fail if patch does not exist, this assumes data is corrupted.
if !osutil.IsFile(patchPath) {
log.Trace("PullRequest[%d].testPatch: ignored cruppted data", pr.ID)
log.Trace("PullRequest[%d].testPatch: ignored corrupted data", pr.ID)
return nil
}
@@ -436,7 +436,7 @@ func (pr *PullRequest) testPatch() (err error) {
fmt.Sprintf("testPatch (git apply --check): %d", pr.BaseRepo.ID),
"git", args...)
if err != nil {
log.Trace("PullRequest[%d].testPatch (apply): has conflit\n%s", pr.ID, stderr)
log.Trace("PullRequest[%d].testPatch (apply): has conflict\n%s", pr.ID, stderr)
pr.Status = PULL_REQUEST_STATUS_CONFLICT
return nil
}
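As the surrounding hunks show, the conflict test comes down to running `git apply --check` against the saved patch and treating a non-zero exit as a conflict. A standalone sketch of that idea (the Gogs function additionally manages local copies and updates the pull request status):

```go
package main

import (
	"fmt"
	"os/exec"
)

// patchApplies reports whether the patch at patchPath would apply cleanly
// in the working tree at repoDir, without actually modifying anything.
func patchApplies(repoDir, patchPath string) (bool, error) {
	cmd := exec.Command("git", "apply", "--check", patchPath)
	cmd.Dir = repoDir
	out, err := cmd.CombinedOutput()
	if err != nil {
		if _, ok := err.(*exec.ExitError); ok {
			// Non-zero exit: the patch does not apply, i.e. a conflict.
			fmt.Printf("conflict:\n%s", out)
			return false, nil
		}
		return false, err // git itself could not be started
	}
	return true, nil
}

func main() {
	// Paths are illustrative only.
	ok, err := patchApplies("/path/to/repo", "/path/to/change.patch")
	fmt.Println(ok, err)
}
```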
@@ -515,7 +515,7 @@ func NewPullRequest(repo *Repository, pull *Issue, labelIDs []int64, uuids []str
return nil
}
// GetUnmergedPullRequest returnss a pull request that is open and has not been merged
// GetUnmergedPullRequest returns a pull request that is open and has not been merged
// by given head/base and repo/branch.
func GetUnmergedPullRequest(headRepoID, baseRepoID int64, headBranch, baseBranch string) (*PullRequest, error) {
pr := new(PullRequest)
@@ -536,7 +536,7 @@ func GetUnmergedPullRequest(headRepoID, baseRepoID int64, headBranch, baseBranch
return pr, nil
}
// GetUnmergedPullRequestsByHeadInfo returnss all pull requests that are open and has not been merged
// GetUnmergedPullRequestsByHeadInfo returns all pull requests that are open and has not been merged
// by given head information (repo and branch).
func GetUnmergedPullRequestsByHeadInfo(repoID int64, branch string) ([]*PullRequest, error) {
prs := make([]*PullRequest, 0, 2)
@@ -545,7 +545,7 @@ func GetUnmergedPullRequestsByHeadInfo(repoID int64, branch string) ([]*PullRequ
Join("INNER", "issue", "issue.id = pull_request.issue_id").Find(&prs)
}
// GetUnmergedPullRequestsByBaseInfo returnss all pull requests that are open and has not been merged
// GetUnmergedPullRequestsByBaseInfo returns all pull requests that are open and has not been merged
// by given base information (repo and branch).
func GetUnmergedPullRequestsByBaseInfo(repoID int64, branch string) ([]*PullRequest, error) {
prs := make([]*PullRequest, 0, 2)
@@ -622,7 +622,7 @@ func (pr *PullRequest) UpdateCols(cols ...string) error {
// UpdatePatch generates and saves a new patch.
func (pr *PullRequest) UpdatePatch() (err error) {
if pr.HeadRepo == nil {
log.Trace("PullRequest[%d].UpdatePatch: ignored cruppted data", pr.ID)
log.Trace("PullRequest[%d].UpdatePatch: ignored corrupted data", pr.ID)
return nil
}
@@ -829,7 +829,7 @@ func ChangeUsernameInPullRequests(oldUserName, newUserName string) error {
return err
}
// checkAndUpdateStatus checks if pull request is possible to levaing checking status,
// checkAndUpdateStatus checks if pull request is possible to leaving checking status,
// and set to be either conflict or mergeable.
func (pr *PullRequest) checkAndUpdateStatus() {
// Status is not changed to conflict means mergeable.
@@ -837,7 +837,7 @@ func (pr *PullRequest) checkAndUpdateStatus() {
pr.Status = PULL_REQUEST_STATUS_MERGEABLE
}
// Make sure there is no waiting test to process before levaing the checking status.
// Make sure there is no waiting test to process before leaving the checking status.
if !PullRequestQueue.Exist(pr.ID) {
if err := pr.UpdateCols("status"); err != nil {
log.Error("Update[%d]: %v", pr.ID, err)

@@ -352,7 +352,7 @@ func DeleteReleaseOfRepoByID(repoID, id int64) error {
return fmt.Errorf("GetReleaseByID: %v", err)
}
// Mark sure the delete operation againsts same repository.
// Mark sure the delete operation against same repository.
if repoID != rel.RepoID {
return nil
}

@@ -278,7 +278,7 @@ func (repo *Repository) CanGuestViewIssues() bool {
}
// MustOwner always returns a valid *User object to avoid conceptually impossible error handling.
// It creates a fake object that contains error deftail when error occurs.
// It creates a fake object that contains error details when error occurs.
func (repo *Repository) MustOwner() *User {
return repo.mustOwner(x)
}
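The pattern the comment describes, never returning nil and instead handing back a placeholder whose fields carry the error details, can be sketched roughly as follows (the types and the lookup are invented for the example):

```go
package main

import (
	"errors"
	"fmt"
)

type User struct {
	ID   int64
	Name string
}

// getOwner stands in for the real database lookup.
func getOwner(repoID int64) (*User, error) {
	return nil, errors.New("owner not found")
}

// mustOwner never returns nil: on failure it returns a fake user whose
// name records the error, so callers and templates always have a value.
func mustOwner(repoID int64) *User {
	owner, err := getOwner(repoID)
	if err != nil {
		return &User{ID: -1, Name: "error: " + err.Error()}
	}
	return owner
}

func main() {
	fmt.Println(mustOwner(1).Name) // "error: owner not found"
}
```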
@@ -951,7 +951,7 @@ func getRepoInitFile(tp, name string) ([]byte, error) {
}
func prepareRepoCommit(repo *Repository, tmpDir, repoPath string, opts CreateRepoOptions) error {
// Clone to temprory path and do the init commit.
// Clone to temporary path and do the init commit.
_, stderr, err := process.Exec(
fmt.Sprintf("initRepository(git clone): %s", repoPath), "git", "clone", repoPath, tmpDir)
if err != nil {
@@ -1233,7 +1233,7 @@ func RepositoriesWithUsers(page, pageSize int) (_ []*Repository, err error) {
return repos, nil
}
// FilterRepositoryWithIssues selects repositories that are using interal issue tracker
// FilterRepositoryWithIssues selects repositories that are using internal issue tracker
// and has disabled external tracker from given set.
// It returns nil if result set is empty.
func FilterRepositoryWithIssues(repoIDs []int64) ([]int64, error) {
@@ -1468,7 +1468,7 @@ func updateRepository(e Engine, repo *Repository, visibilityChanged bool) (err e
return fmt.Errorf("getOwner: %v", err)
}
if repo.Owner.IsOrganization() {
// Organization repository need to recalculate access table when visivility is changed
// Organization repository need to recalculate access table when visibility is changed
if err = repo.recalculateTeamAccesses(e, 0); err != nil {
return fmt.Errorf("recalculateTeamAccesses: %v", err)
}

@@ -111,7 +111,7 @@ type ProtectBranch struct {
WhitelistTeamIDs string `xorm:"TEXT"`
}
// GetProtectBranchOfRepoByName returns *ProtectBranch by branch name in given repostiory.
// GetProtectBranchOfRepoByName returns *ProtectBranch by branch name in given repository.
func GetProtectBranchOfRepoByName(repoID int64, name string) (*ProtectBranch, error) {
protectBranch := &ProtectBranch{
RepoID: repoID,
@@ -271,7 +271,7 @@ func UpdateOrgProtectBranch(repo *Repository, protectBranch *ProtectBranch, whit
return sess.Commit()
}
// GetProtectBranchesByRepoID returns a list of *ProtectBranch in given repostiory.
// GetProtectBranchesByRepoID returns a list of *ProtectBranch in given repository.
func GetProtectBranchesByRepoID(repoID int64) ([]*ProtectBranch, error) {
protectBranches := make([]*ProtectBranch, 0, 2)
return protectBranches, x.Where("repo_id = ? and protected = ?", repoID, true).Asc("name").Find(&protectBranches)

@@ -517,7 +517,7 @@ func DeletePublicKey(doer *User, id int64) (err error) {
// RewriteAuthorizedKeys removes any authorized key and rewrite all keys from database again.
// Note: x.Iterate does not get latest data after insert/delete, so we have to call this function
// outsite any session scope independently.
// outside any session scope independently.
func RewriteAuthorizedKeys() error {
sshOpLocker.Lock()
defer sshOpLocker.Unlock()

@@ -283,7 +283,7 @@ func (u *User) AvatarLink() string {
return link
}
// User.GetFollwoers returns range of user's followers.
// User.GetFollowers returns range of user's followers.
func (u *User) GetFollowers(page int) ([]*User, error) {
users := make([]*User, 0, ItemsPerPage)
sess := x.Limit(ItemsPerPage, (page-1)*ItemsPerPage).Where("follow.follow_id=?", u.ID)
@@ -468,7 +468,7 @@ func (u *User) ShortName(length int) string {
return tool.EllipsisString(u.Name, length)
}
// IsMailable checks if a user is elegible
// IsMailable checks if a user is eligible
// to receive emails.
func (u *User) IsMailable() bool {
return u.IsActive
@@ -1002,7 +1002,7 @@ type UserCommit struct {
*git.Commit
}
// ValidateCommitWithEmail chceck if author's e-mail of commit is corresponsind to a user.
// ValidateCommitWithEmail checks if author's e-mail of commit is corresponding to a user.
func ValidateCommitWithEmail(c *git.Commit) *User {
u, err := GetUserByEmail(c.Author.Email)
if err != nil {

@@ -44,7 +44,7 @@ func GetEmailAddresses(uid int64) ([]*EmailAddress, error) {
}
}
// We alway want the primary email address displayed, even if it's not in
// We always want the primary email address displayed, even if it's not in
// the emailaddress table (yet).
if !isPrimaryFound {
emails = append(emails, &EmailAddress{

@@ -601,7 +601,7 @@ func prepareHookTasks(e Engine, repo *Repository, event HookEventType, p api.Pay
}
}
// Use separate objects so modifcations won't be made on payload on non-Gogs type hooks.
// Use separate objects so modifications won't be made on payload on non-Gogs type hooks.
switch w.HookTaskType {
case SLACK:
payloader, err = GetSlackPayload(p, event, w.Meta)

@@ -219,7 +219,7 @@ var mailQueue chan *Message
// NewContext initializes settings for mailer.
func NewContext() {
// Need to check if mailQueue is nil because in during reinstall (user had installed
// before but swithed install lock off), this function will be called again
// before but switched install lock off), this function will be called again
// while mail queue is already processing tasks, and produces a race condition.
if !conf.Email.Enabled || mailQueue != nil {
return
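In other words, the initialization must be idempotent: if the queue already exists (or mail is disabled), the call returns instead of re-creating a worker that is already running. A toy sketch of that guard with a hypothetical channel-based queue, not the Gogs mailer itself:

```go
package main

import (
	"fmt"
	"time"
)

type Message struct{ To string }

var mailQueue chan *Message

// initMailer creates the queue and starts the worker only once; a second
// call (for example during a reinstall) becomes a no-op instead of racing
// with the worker that is already draining the queue.
func initMailer(enabled bool) {
	if !enabled || mailQueue != nil {
		return
	}
	mailQueue = make(chan *Message, 100)
	go func() {
		for msg := range mailQueue {
			fmt.Println("sending mail to", msg.To)
		}
	}()
}

func main() {
	initMailer(true)
	initMailer(true) // second call returns immediately
	mailQueue <- &Message{To: "user@example.com"}
	time.Sleep(50 * time.Millisecond) // let the toy worker run before exit
}
```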

@@ -431,7 +431,7 @@ func (r *Request) ToXml(v interface{}) error {
return xml.Unmarshal(data, v)
}
// Response executes request client gets response mannually.
// Response executes request client gets response manually.
func (r *Request) Response() (*http.Response, error) {
return r.getResponse()
}

@@ -26,7 +26,7 @@ func GetUserByParamsName(c *context.APIContext, name string) *db.User {
return user
}
// GetUserByParams returns user whose name is presented in URL paramenter.
// GetUserByParams returns user whose name is presented in URL parameter.
func GetUserByParams(c *context.APIContext) *db.User {
return GetUserByParamsName(c, ":username")
}

@@ -63,7 +63,7 @@ func Releases(c *context.Context) {
return
}
// Temproray cache commits count of used branches to speed up.
// Temporary cache commits count of used branches to speed up.
countCache := make(map[string]int64)
results := make([]*db.Release, len(tagsPage.Tags))
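The cache set up here is just a map from branch name to commit count, so each branch used by a tag is counted once per request. A small sketch of that memoisation, with a hypothetical `commitCount` standing in for the expensive Git call:

```go
package main

import "fmt"

// commitCount stands in for the expensive Git lookup.
func commitCount(branch string) int64 {
	fmt.Println("counting commits on", branch)
	return 123
}

// cachedCommitCount consults the cache first and only falls back to the
// expensive lookup on a miss, mirroring the countCache usage above.
func cachedCommitCount(cache map[string]int64, branch string) int64 {
	if n, ok := cache[branch]; ok {
		return n
	}
	n := commitCount(branch)
	cache[branch] = n
	return n
}

func main() {
	cache := make(map[string]int64)
	cachedCommitCount(cache, "main") // performs the count
	cachedCommitCount(cache, "main") // served from the cache
}
```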

@@ -241,7 +241,7 @@ func SettingsPost(c *context.Context, f form.RepoSetting) {
}
return
}
log.Trace("Repository transfered: %s/%s -> %s", c.Repo.Owner.Name, repo.Name, newOwner)
log.Trace("Repository transferred: %s/%s -> %s", c.Repo.Owner.Name, repo.Name, newOwner)
c.Flash.Success(c.Tr("repo.settings.transfer_succeed"))
c.Redirect(conf.Server.Subpath + "/" + newOwner + "/" + repo.Name)

@@ -16,7 +16,7 @@
"use strict";
CodeMirror.defineSimpleMode("factor", {
// The start state contains the rules that are intially used
// The start state contains the rules that are initially used
start: [
// comments
{regex: /#?!.*/, token: "comment"},

@@ -21,7 +21,7 @@ CodeMirror.defineMode("gas", function(_config, parserConfig) {
// The symbol used to start a line comment changes based on the target
// architecture.
// If no architecture is pased in "parserConfig" then only multiline
// If no architecture is passed in "parserConfig" then only multiline
// comments will have syntax support.
var lineCommentStartSymbol = "";

@@ -57,12 +57,12 @@ CodeMirror.defineMode("yaml", function() {
return 'meta';
}
/* list seperator */
/* list separator */
if (state.inlineList > 0 && !esc && ch == ',') {
stream.next();
return 'meta';
}
/* pairs seperator */
/* pairs separator */
if (state.inlinePairs > 0 && !esc && ch == ',') {
state.keyCol = 0;
state.pair = false;

@@ -14737,13 +14737,13 @@ var JpxImage = (function JpxImageClosure() {
var subband = resolution.subbands[j];
var gainLog2 = SubbandsGainLog2[subband.type];
// calulate quantization coefficient (Section E.1.1.1)
// calculate quantization coefficient (Section E.1.1.1)
var delta = (reversible ? 1 :
Math.pow(2, precision + gainLog2 - epsilon) * (1 + mu / 2048));
var mb = (guardBits + epsilon - 1);
// In the first resolution level, copyCoefficients will fill the
// whole array with coefficients. In the succeding passes,
// whole array with coefficients. In the succeeding passes,
// copyCoefficients will consecutively fill in the values that belong
// to the interleaved positions of the HL, LH, and HH coefficients.
// The LL coefficients will then be interleaved in Transform.iterate().
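For reference, the step size computed in this hunk follows the JPEG 2000 quantization formula from the section the comment cites (E.1.1.1); writing the code's `precision + gainLog2` as the dynamic range $R_b$, it is approximately:

```latex
\Delta_b = 2^{\,R_b - \varepsilon_b}\left(1 + \frac{\mu_b}{2^{11}}\right),
\qquad M_b = G + \varepsilon_b - 1
```

which matches the code: `2048` is $2^{11}$, $\Delta_b$ is fixed to 1 in the reversible case, and `mb` is the number of magnitude bits derived from the guard bits $G$ and the exponent $\varepsilon_b$.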
@@ -15629,7 +15629,7 @@ exports.JpxImage = JpxImage;
var Uint32ArrayView = sharedUtil.Uint32ArrayView;
var MurmurHash3_64 = (function MurmurHash3_64Closure (seed) {
// Workaround for missing math precison in JS.
// Workaround for missing math precision in JS.
var MASK_HIGH = 0xffff0000;
var MASK_LOW = 0xffff;
@@ -25395,7 +25395,7 @@ var PDF_GLYPH_SPACE_UNITS = 1000;
// in tracemonkey and various other pdfs with type1 fonts.
var HINTING_ENABLED = false;
// Accented charactars are not displayed properly on windows, using this flag
// Accented characters are not displayed properly on windows, using this flag
// to control analysis of seac charstrings.
var SEAC_ANALYSIS_ENABLED = false;
@@ -29254,7 +29254,7 @@ var Font = (function FontClosure() {
}
} else if (op === 0x2B && !tooComplexToFollowFunctions) { // CALL
if (!inFDEF && !inELSE) {
// collecting inforamtion about which functions are used
// collecting information about which functions are used
funcId = stack[stack.length - 1];
ttContext.functionsUsed[funcId] = true;
if (funcId in ttContext.functionsStackDeltas) {
@@ -29279,7 +29279,7 @@ var Font = (function FontClosure() {
tooComplexToFollowFunctions = true;
}
inFDEF = true;
// collecting inforamtion about which functions are defined
// collecting information about which functions are defined
lastDeff = i;
funcId = stack.pop();
ttContext.functionsDefined[funcId] = {data: data, i: i};
@@ -31170,7 +31170,7 @@ Type1Font.prototype = {
cff.names = [name];
var topDict = new CFFTopDict();
// CFF strings IDs 0...390 are predefined names, so refering
// CFF strings IDs 0...390 are predefined names, so referring
// to entries in our own String INDEX starts at SID 391.
topDict.setByName('version', 391);
topDict.setByName('Notice', 392);
@@ -35744,7 +35744,7 @@ var PDFImage = (function PDFImageClosure() {
height);
}
} else if (isArray(mask)) {
// Color key mask: if any of the compontents are outside the range
// Color key mask: if any of the components are outside the range
// then they should be painted.
alphaBuf = new Uint8Array(width * height);
var numComps = this.numComps;
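The rule in this last comment translates directly into code: a pixel is painted (alpha 255) as soon as any of its components lies outside the corresponding [min, max] pair of the color key mask. A small illustrative sketch in Go, with made-up sample data rather than the pdf.js structures:

```go
package main

import "fmt"

// colorKeyAlpha builds an alpha buffer for width*height pixels with
// numComps components each. mask holds numComps [min, max] pairs; a pixel
// is painted (alpha 255) if any of its components lies outside its range.
func colorKeyAlpha(pixels []int, width, height, numComps int, mask []int) []uint8 {
	alpha := make([]uint8, width*height)
	for i := 0; i < width*height; i++ {
		for c := 0; c < numComps; c++ {
			v := pixels[i*numComps+c]
			if v < mask[2*c] || v > mask[2*c+1] {
				alpha[i] = 255 // outside the key range: keep the pixel visible
				break
			}
		}
	}
	return alpha
}

func main() {
	// One row of two single-component pixels; the mask range [10, 20]
	// marks the background color that should become transparent.
	pixels := []int{15, 200}
	fmt.Println(colorKeyAlpha(pixels, 2, 1, 1, []int{10, 20})) // [0 255]
}
```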