feat: add foreign keys to stopwatch & tracked_time tables (#9373)
Adds four foreign keys:

- stopwatch
  - issue_id -> issue
  - user_id -> user
- tracked_time
  - issue_id -> issue
  - user_id -> user

The majority of the work in this PR is updating testing and support infrastructure to support foreign keys:

- `models/db/foreign_keys.go` adds new capabilities to sort registered tables into the right insertion order to avoid violating foreign keys
- `RecreateTables`, used by migration testing and the `doctor recreate-table` CLI, has been updated to support tables with foreign keys; new restrictions require that FK-related tables be rebuilt at the same time
- test fixture data is inserted in foreign-key order, and deleted in the reverse

An upgrade to xorm v1.3.9-forgejo.2 is incorporated in this PR, as two unexpected behaviors in the foreign key schema management were discovered during development of the updated `RecreateTables` routine.

Work in this PR is laid out to be easier to review commit-by-commit.

## Checklist

The [contributor guide](https://forgejo.org/docs/next/contributor/) contains information that will be helpful to first-time contributors. There are also a few [conditions for merging Pull Requests in Forgejo repositories](https://codeberg.org/forgejo/governance/src/branch/main/PullRequestsAgreement.md). You are also welcome to join the [Forgejo development chatroom](https://matrix.to/#/#forgejo-development:matrix.org).

### Tests

- I added test coverage for Go changes...
  - [x] in their respective `*_test.go` for unit tests.
  - [x] in the `tests/integration` directory if it involves interactions with a live Forgejo server.
- I added test coverage for JavaScript changes...
  - [ ] in `web_src/js/*.test.js` if it can be unit tested.
  - [ ] in `tests/e2e/*.test.e2e.js` if it requires interactions with a live Forgejo server (see also the [developer guide for JavaScript testing](https://codeberg.org/forgejo/forgejo/src/branch/forgejo/tests/e2e/README.md#end-to-end-tests)).

### Documentation

- [ ] I created a pull request [to the documentation](https://codeberg.org/forgejo/docs) to explain to Forgejo users how to use this change.
- [x] I did not document these changes and I do not expect someone else to do it.

### Release notes

- [ ] I do not want this change to show in the release notes.
- [ ] I want the title to show in the release notes with a link to this pull request.
- [x] I want the content of the `release-notes/<pull request number>.md` to be used for the release notes instead of the title.

Reviewed-on: https://codeberg.org/forgejo/forgejo/pulls/9373
Reviewed-by: Earl Warren <earl-warren@noreply.codeberg.org>
Reviewed-by: Gusted <gusted@noreply.codeberg.org>
Co-authored-by: Mathieu Fenniak <mathieu@fenniak.net>
Co-committed-by: Mathieu Fenniak <mathieu@fenniak.net>
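For orientation, here is a minimal, hypothetical sketch (not part of this commit) of the two building blocks the PR combines: the xorm `REFERENCES(table, column)` tag that declares a foreign key on a model field, and `db.TableNameInsertionOrderSortFunc`, which `slices.SortFunc` can use to put table names into a safe insertion order (reverse it for a safe deletion order). The `example` package name and the `orderForInsert` helper are illustrative only.

```go
package example

import (
	"slices"

	"forgejo.org/models/db"
)

// Stopwatch mirrors the shape of the updated model: the REFERENCES(table, column)
// portion of the xorm tag declares a foreign key alongside the existing index.
type Stopwatch struct {
	ID      int64 `xorm:"pk autoincr"`
	IssueID int64 `xorm:"INDEX REFERENCES(issue, id)"`
	UserID  int64 `xorm:"INDEX REFERENCES(user, id)"`
}

// orderForInsert sorts table names so that referenced tables ("issue", "user")
// come before the tables that hold the foreign keys ("stopwatch", "tracked_time").
func orderForInsert(tableNames []string) []string {
	ordered := slices.Clone(tableNames)
	slices.SortFunc(ordered, db.TableNameInsertionOrderSortFunc)
	return ordered
}
```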
parent ee4f9b02f9
commit 187ad99f3c
26 changed files with 572 additions and 112 deletions
go.mod (2 changed lines)

@@ -260,4 +260,4 @@ replace github.com/gliderlabs/ssh => code.forgejo.org/forgejo/ssh v0.0.0-2024121
replace git.sr.ht/~mariusor/go-xsd-duration => code.forgejo.org/forgejo/go-xsd-duration v0.0.0-20220703122237-02e73435a078
replace xorm.io/xorm v1.3.9 => code.forgejo.org/xorm/xorm v1.3.9-forgejo.1
replace xorm.io/xorm v1.3.9 => code.forgejo.org/xorm/xorm v1.3.9-forgejo.2
go.sum (4 changed lines)

@@ -40,8 +40,8 @@ code.forgejo.org/go-chi/captcha v1.0.2 h1:vyHDPXkpjDv8bLO9NqtWzZayzstD/WpJ5xwEkA
code.forgejo.org/go-chi/captcha v1.0.2/go.mod h1:lxiPLcJ76UCZHoH31/Wbum4GUi2NgjfFZLrJkKv1lLE=
code.forgejo.org/go-chi/session v1.0.2 h1:pG+AXre9L9VXJmTaADXkmeEPuRalhmBXyv6tG2Rvjcc=
code.forgejo.org/go-chi/session v1.0.2/go.mod h1:HnEGyBny7WPzCiVLP2vzL5ssma+3gCSl/vLpuVNYrqc=
code.forgejo.org/xorm/xorm v1.3.9-forgejo.1 h1:jU5CICzbkpqol6qTa4yctLuS0BI3D2mVCx7q6ogK/gE=
code.forgejo.org/xorm/xorm v1.3.9-forgejo.1/go.mod h1:0q+miQUSpTlsgMw2blcO9a87GB5WyatiX3GuwBca31w=
code.forgejo.org/xorm/xorm v1.3.9-forgejo.2 h1:yiayiXucyEwFo1cRHXAt+JRCZtIGExWhEk0V+AD0h38=
code.forgejo.org/xorm/xorm v1.3.9-forgejo.2/go.mod h1:0q+miQUSpTlsgMw2blcO9a87GB5WyatiX3GuwBca31w=
code.gitea.io/actions-proto-go v0.4.0 h1:OsPBPhodXuQnsspG1sQ4eRE1PeoZyofd7+i73zCwnsU=
code.gitea.io/actions-proto-go v0.4.0/go.mod h1:mn7Wkqz6JbnTOHQpot3yDeHx+O5C9EGhMEE+htvHBas=
code.gitea.io/sdk/gitea v0.21.0 h1:69n6oz6kEVHRo1+APQQyizkhrZrLsTLXey9142pfkD4=
models/db/foreign_keys.go (new file, 131 lines)

@@ -0,0 +1,131 @@
// Copyright 2025 The Forgejo Authors. All rights reserved.
// SPDX-License-Identifier: GPL-3.0-or-later

package db

import (
    "cmp"
    "fmt"
    "slices"
    "sync"

    "xorm.io/xorm/schemas"
)

var (
    cachedForeignKeyOrderedTables = sync.OnceValues(foreignKeyOrderedTables)
    cachedTableNameLookupOrder    = sync.OnceValues(tableNameLookupOrder)
)

// Create a list of database tables in their "foreign key order". This order specifies the safe insertion order for
// records into tables, where earlier tables in the list are referenced by foreign keys that exist in tables later in
// the list. This order can be used in reverse as a safe deletion order as well.
//
// An ordered list of tables is incompatible with tables that have self-referencing foreign keys and circular referenced
// foreign keys; however neither of those cases are in-use in Forgejo.
func calculateTableForeignKeyOrder(tables []*schemas.Table) ([]*schemas.Table, error) {
    remainingTables := slices.Clone(tables)

    // Create a lookup for each table that has a foreign key, and a slice of the tables that it references it.
    referencingTables := make(map[string][]string)
    for _, table := range remainingTables {
        tableName := table.Name
        for _, fk := range table.ForeignKeys {
            referencingTables[tableName] = append(referencingTables[tableName], fk.TargetTableName)
        }
    }

    orderedTables := make([]*schemas.Table, 0, len(remainingTables))

    for len(remainingTables) > 0 {
        nextGroup := make([]*schemas.Table, 0, len(remainingTables))

        for _, targetTable := range remainingTables {
            // Skip if this targetTable has foreign keys and the target table hasn't been created.
            slice, ok := referencingTables[targetTable.Name]
            if ok && len(slice) > 1 { // This table is still referencing an uncreated table
                continue
            }
            // This table's references are satisfied or it had none
            nextGroup = append(nextGroup, targetTable)
        }

        if len(nextGroup) == 0 {
            return nil, fmt.Errorf("calculateTableForeignKeyOrder: unable to figure out next table from remainingTables = %#v", remainingTables)
        }

        orderedTables = append(orderedTables, nextGroup...)

        // Cleanup between loops: remove each table in nextGroup from remainingTables, and remove their table names from
        // referencingTables as well.
        for _, doneTable := range nextGroup {
            remainingTables = slices.DeleteFunc(remainingTables, func(remainingTable *schemas.Table) bool {
                return remainingTable.Name == doneTable.Name
            })
            for referencingTable, referencedTables := range referencingTables {
                referencingTables[referencingTable] = slices.DeleteFunc(referencedTables, func(tableName string) bool {
                    return tableName == doneTable.Name
                })
            }
        }
    }

    return orderedTables, nil
}

// Create a list of registered database tables in their "foreign key order", per calculateTableForeignKeyOrder.
func foreignKeyOrderedTables() ([]*schemas.Table, error) {
    schemaTables := make([]*schemas.Table, 0, len(tables))
    for _, tbl := range tables {
        table, err := TableInfo(tbl)
        if err != nil {
            return nil, fmt.Errorf("foreignKeyOrderedTables: failure to fetch schema table for bean %#v: %w", tbl, err)
        }
        schemaTables = append(schemaTables, table)
    }

    orderedTables, err := calculateTableForeignKeyOrder(schemaTables)
    if err != nil {
        return nil, err
    }

    return orderedTables, nil
}

// Create a map from each registered database table's name to its order in "foreign key order", per
// calculateTableForeignKeyOrder.
func tableNameLookupOrder() (map[string]int, error) {
    tables, err := cachedForeignKeyOrderedTables()
    if err != nil {
        return nil, err
    }

    lookupMap := make(map[string]int, len(tables))
    for i, table := range tables {
        lookupMap[table.Name] = i
    }

    return lookupMap, nil
}

// When used as a comparator function in `slices.SortFunc`, can sort a slice into the safe insertion order for records
// in tables, where earlier tables in the list are referenced by foreign keys that exist in tables later in the list.
func TableNameInsertionOrderSortFunc(table1, table2 string) int {
    lookupMap, err := cachedTableNameLookupOrder()
    if err != nil {
        panic(fmt.Sprintf("cachedTableNameLookupOrder failed: %#v", err))
    }

    // Since this is typically used by `slices.SortFunc` it can't return an error. If a table is referenced that isn't
    // a registered model then it will be sorted at the beginning -- this case is used in models/migrations/test.
    val1, ok := lookupMap[table1]
    if !ok {
        val1 = -1
    }
    val2, ok := lookupMap[table2]
    if !ok {
        val2 = -1
    }

    return cmp.Compare(val1, val2)
}
@@ -24,7 +24,6 @@
-
  id: 4
  user_id: -1
  issue_id: 4
  time: 1
  created_unix: 946684803
@@ -119,6 +119,8 @@ var migrations = []*Migration{
NewMigration("Migrate `data` column of `secret` table to store keying material", MigrateActionSecretsToKeying),
// v39 -> v40
NewMigration("Add index for release sha1", AddIndexForReleaseSha1),
// v40 -> v41
NewMigration("Add foreign keys to stopwatch & tracked_time", AddForeignKeysStopwatchTrackedTime),
}

// GetCurrentDBVersion returns the current Forgejo database version.
models/forgejo_migrations/v41.go (new file, 70 lines)

@@ -0,0 +1,70 @@
// Copyright 2025 The Forgejo Authors. All rights reserved.
// SPDX-License-Identifier: GPL-3.0-or-later

package forgejo_migrations

import (
    "errors"
    "fmt"

    "forgejo.org/modules/log"

    "xorm.io/builder"
    "xorm.io/xorm"
)

func syncDoctorForeignKey(x *xorm.Engine, beans []any) error {
    for _, bean := range beans {
        // Sync() drops indexes by default, which will cause unnecessary rebuilding of indexes when syncDoctorForeignKey
        // is used with partial bean definitions; so we disable that option
        _, err := x.SyncWithOptions(xorm.SyncOptions{IgnoreDropIndices: true}, bean)
        if err != nil {
            if errors.Is(err, xorm.ErrForeignKeyViolation) {
                tableName := x.TableName(bean)
                log.Error(
                    "Foreign key creation on table %s failed. Run `forgejo doctor check --all` to identify the orphaned records preventing this foreign key from being created. Error was: %v",
                    tableName, err)
                return err
            }
            return err
        }
    }
    return nil
}

func AddForeignKeysStopwatchTrackedTime(x *xorm.Engine) error {
    type Stopwatch struct {
        IssueID int64 `xorm:"INDEX REFERENCES(issue, id)"`
        UserID  int64 `xorm:"INDEX REFERENCES(user, id)"`
    }
    type TrackedTime struct {
        ID      int64 `xorm:"pk autoincr"`
        IssueID int64 `xorm:"INDEX REFERENCES(issue, id)"`
        UserID  int64 `xorm:"INDEX REFERENCES(user, id)"`
    }

    // TrackedTime.UserID used to be an intentionally dangling reference if a user was deleted, in order to maintain the
    // time that was tracked against an issue. With the addition of a foreign key, we set UserID to NULL where the user
    // doesn't exist instead of leaving it pointing to an invalid record:
    var trackedTime []TrackedTime
    err := x.Table("tracked_time").
        Join("LEFT", "`user`", "`tracked_time`.user_id = `user`.id").
        Where(builder.IsNull{"`user`.id"}).
        Find(&trackedTime)
    if err != nil {
        return err
    }
    for _, tt := range trackedTime {
        affected, err := x.Table(&TrackedTime{}).Where("id = ?", tt.ID).Update(map[string]any{"user_id": nil})
        if err != nil {
            return err
        } else if affected != 1 {
            return fmt.Errorf("expected to update 1 tracked_time record with ID %d, but actually affected %d records", tt.ID, affected)
        }
    }

    return syncDoctorForeignKey(x, []any{
        new(Stopwatch),
        new(TrackedTime),
    })
}
@@ -3,9 +3,3 @@
  user_id: 1
  issue_id: 2
  created_unix: 1500988004

-
  id: 4
  user_id: 3
  issue_id: 0
  created_unix: 1500988003
@@ -32,8 +32,8 @@ func (err ErrIssueStopwatchNotExist) Unwrap() error {
// Stopwatch represents a stopwatch for time tracking.
type Stopwatch struct {
ID int64 `xorm:"pk autoincr"`
IssueID int64 `xorm:"INDEX"`
UserID int64 `xorm:"INDEX"`
IssueID int64 `xorm:"INDEX REFERENCES(issue, id)"`
UserID int64 `xorm:"INDEX REFERENCES(user, id)"`
CreatedUnix timeutil.TimeStamp `xorm:"created"`
}
@@ -63,7 +63,7 @@ func getStopwatch(ctx context.Context, userID, issueID int64) (sw *Stopwatch, ex
// GetUIDsAndNotificationCounts between the two provided times
func GetUIDsAndStopwatch(ctx context.Context) (map[int64][]*Stopwatch, error) {
sws := []*Stopwatch{}
if err := db.GetEngine(ctx).Where("issue_id != 0").Find(&sws); err != nil {
if err := db.GetEngine(ctx).Find(&sws); err != nil {
return nil, err
}
res := map[int64][]*Stopwatch{}
@@ -22,9 +22,9 @@ import (
// TrackedTime represents a time that was spent for a specific issue.
type TrackedTime struct {
ID int64 `xorm:"pk autoincr"`
IssueID int64 `xorm:"INDEX"`
IssueID int64 `xorm:"INDEX REFERENCES(issue, id)"`
Issue *Issue `xorm:"-"`
UserID int64 `xorm:"INDEX"`
UserID int64 `xorm:"INDEX REFERENCES(user, id)"`
User *user_model.User `xorm:"-"`
Created time.Time `xorm:"-"`
CreatedUnix int64 `xorm:"created"`
@@ -8,8 +8,10 @@ import (
"fmt"
"reflect"
"regexp"
"slices"
"strings"

"forgejo.org/models/db"
"forgejo.org/modules/log"
"forgejo.org/modules/setting"
@@ -17,7 +19,12 @@ import (
"xorm.io/xorm/schemas"
)

// RecreateTables will recreate the tables for the provided beans using the newly provided bean definition and move all data to that new table
// RecreateTables returns a function that will recreate the tables for the provided beans using the newly provided bean
// definition, move all data to the new tables, and then replace the original tables with a drop and rename.
//
// If any 'base' table is requested to be rebuilt where one-or-more 'satellite' tables exists that references it through
// a foreign key, you must rebuild the satellite tables as well or you will receive an error 'incomplete table set'.
//
// WARNING: YOU MUST PROVIDE THE FULL BEAN DEFINITION
func RecreateTables(beans ...any) func(*xorm.Engine) error {
return func(x *xorm.Engine) error {
@@ -27,34 +34,179 @@ func RecreateTables(beans ...any) func(*xorm.Engine) error {
return err
}
sess = sess.StoreEngine("InnoDB")

tableNames := make(map[any]string, len(beans))
tempTableNames := make(map[any]string, len(beans))
tempTableNamesByOriginalName := make(map[string]string, len(beans))
for _, bean := range beans {
log.Info("Recreating Table: %s for Bean: %s", x.TableName(bean), reflect.Indirect(reflect.ValueOf(bean)).Type().Name())
if err := RecreateTable(sess, bean); err != nil {
tableName := sess.Engine().TableName(bean)
tableNames[bean] = tableName
tempTableName := fmt.Sprintf("tmp_recreate__%s", tableName)
tempTableNames[bean] = tempTableName
tempTableNamesByOriginalName[tableName] = tempTableName
}

// Create a set of temp tables.
for _, bean := range beans {
log.Info("Creating temp table: %s for Bean: %s", tempTableNames[bean], reflect.Indirect(reflect.ValueOf(bean)).Type().Name())
if err := createTempTable(sess, bean, tempTableNames[bean]); err != nil {
return err
}
}

// Our new temp tables tables will have foreign keys that point to the original tables we are recreating.
// Before we put data into these tables, we need to drop those foreign keys and add new foreign keys that point
// to the temp tables.
tableSchemas := make(map[any]*schemas.Table, len(beans))
for _, bean := range beans {
tableSchema, err := sess.Engine().TableInfo(bean)
if err != nil {
log.Error("Unable to get table info. Error: %v", err)
return err
}
tableSchemas[bean] = tableSchema
modifications := make([]schemas.TableModification, 0, len(tableSchema.ForeignKeys)*2)
for _, fk := range tableSchema.ForeignKeys {
targetTempTableName, ok := tempTableNamesByOriginalName[fk.TargetTableName]
if !ok {
return fmt.Errorf("incomplete table set: Found a foreign key reference to table %s, but it is not included in RecreateTables", fk.TargetTableName)
}
fkName := fk.Name
if setting.Database.Type.IsMySQL() {
// See MySQL explanation in createTempTable.
fkName = "_" + fkName
}
modifications = append(modifications, schemas.DropForeignKey{ForeignKey: schemas.ForeignKey{
Name: fkName,
SourceFieldName: fk.SourceFieldName,
TargetTableName: fk.TargetTableName,
TargetFieldName: fk.TargetFieldName,
}})
modifications = append(modifications, schemas.AddForeignKey{ForeignKey: schemas.ForeignKey{
Name: fkName,
SourceFieldName: fk.SourceFieldName,
TargetTableName: targetTempTableName, // FK changed to new temp table
TargetFieldName: fk.TargetFieldName,
}})
}

if len(modifications) != 0 {
log.Info("Modifying temp table %s foreign keys to point to other temp tables", tempTableNames[bean])
if err := sess.Table(tempTableNames[bean]).AlterTable(bean, modifications...); err != nil {
return fmt.Errorf("alter table failed: while rewriting foreign keys to temp tables, error occurred: %w", err)
}
}
}

// Insert into the set of temp tables in the right order, starting with base tables, working outwards to
// satellite tables.
orderedBeans := slices.Clone(beans)
slices.SortFunc(orderedBeans, func(b1, b2 any) int {
return db.TableNameInsertionOrderSortFunc(tableNames[b1], tableNames[b2])
})
for _, bean := range orderedBeans {
log.Info("Copying table %s to temp table %s", tableNames[bean], tempTableNames[bean])
if err := copyData(sess, bean, tableNames[bean], tempTableNames[bean]); err != nil {
// copyData does its own logging
return err
}
}

// Drop all the old tables in the right order, starting with satellite tables working inwards to base tables,
// and rename all the temp tables to the final tables. The database will automatically update the foreign key
// references during the rename from temp to final tables.
for i := len(orderedBeans) - 1; i >= 0; i-- {
bean := orderedBeans[i]
log.Info("Dropping existing table %s, and renaming temp table %s in its place", tableNames[bean], tempTableNames[bean])
if err := renameTable(sess, bean, tableNames[bean], tempTableNames[bean], tableSchemas[bean]); err != nil {
// renameTable does its own logging
return err
}
}

return sess.Commit()
}
}

// RecreateTable will recreate the table using the newly provided bean definition and move all data to that new table
// LegacyRecreateTable will recreate the table using the newly provided bean definition and move all data to that new
// table.
//
// WARNING: YOU MUST PROVIDE THE FULL BEAN DEFINITION
//
// WARNING: YOU MUST COMMIT THE SESSION AT THE END
func RecreateTable(sess *xorm.Session, bean any) error {
// TODO: This will not work if there are foreign keys

//
// Deprecated: LegacyRecreateTable exists for historical migrations and should not be used in current code -- tt does
// not support foreign key management. Use RecreateTables instead which provides foreign key support.
func LegacyRecreateTable(sess *xorm.Session, bean any) error {
tableName := sess.Engine().TableName(bean)
tempTableName := fmt.Sprintf("tmp_recreate__%s", tableName)

tableSchema, err := sess.Engine().TableInfo(bean)
if err != nil {
log.Error("Unable to get table info. Error: %v", err)
return err
}

// We need to move the old table away and create a new one with the correct columns
// We will need to do this in stages to prevent data loss
//
// First create the temporary table
if err := sess.Table(tempTableName).CreateTable(bean); err != nil {
log.Error("Unable to create table %s. Error: %v", tempTableName, err)
if err := createTempTable(sess, bean, tempTableName); err != nil {
// createTempTable does its own logging
return err
}

if err := copyData(sess, bean, tableName, tempTableName); err != nil {
// copyData does its own logging
return err
}

if err := renameTable(sess, bean, tableName, tempTableName, tableSchema); err != nil {
// renameTable does its own logging
return err
}

return nil
}

func createTempTable(sess *xorm.Session, bean any, tempTableName string) error {
if setting.Database.Type.IsMySQL() {
// Can't have identical foreign key names in MySQL, and Table(tempTableName) only affects the table name and not
// the schema definition generated from the bean, so, we do a little adjusting by appending a `_` at the
// beginning of each foreign key name on the temp table. We'll remove this by renaming the constraint after we
// drop the original table, in renameTable.
originalTableSchema, err := sess.Engine().TableInfo(bean)
if err != nil {
log.Error("Unable to get table info. Error: %v", err)
return err
}

// `TableInfo()` will return a `*schema.Table` that is stored in a shared cache. We don't want to mutate that
// object as it will stick around and affect other things. Make a mostly-shallow clone, with a new slice for
// what we're changing.
tableSchema := *originalTableSchema
tableSchema.ForeignKeys = slices.Clone(originalTableSchema.ForeignKeys)
for i := range tableSchema.ForeignKeys {
tableSchema.ForeignKeys[i].Name = "_" + tableSchema.ForeignKeys[i].Name
}

sql, _, err := sess.Engine().Dialect().CreateTableSQL(&tableSchema, tempTableName)
if err != nil {
log.Error("Unable to generate CREATE TABLE query. Error: %v", err)
return err
}
_, err = sess.Exec(sql)
if err != nil {
log.Error("Unable to create table %s. Error: %v", tempTableName, err)
return err
}
} else {
if err := sess.Table(tempTableName).CreateTable(bean); err != nil {
log.Error("Unable to create table %s. Error: %v", tempTableName, err)
return err
}
}

if err := sess.Table(tempTableName).CreateUniques(bean); err != nil {
log.Error("Unable to create uniques for table %s. Error: %v", tempTableName, err)
return err
@@ -65,11 +217,14 @@ func RecreateTable(sess *xorm.Session, bean any) error {
return err
}

return nil
}

func copyData(sess *xorm.Session, bean any, tableName, tempTableName string) error {
// Work out the column names from the bean - these are the columns to select from the old table and install into the new table
table, err := sess.Engine().TableInfo(bean)
if err != nil {
log.Error("Unable to get table info. Error: %v", err)
return err
}
newTableColumns := table.Columns()
@@ -128,9 +283,12 @@ func RecreateTable(sess *xorm.Session, bean any) error {
return err
}

return nil
}

func renameTable(sess *xorm.Session, bean any, tableName, tempTableName string, tableSchema *schemas.Table) error {
switch {
case setting.Database.Type.IsSQLite3():
// SQLite will drop all the constraints on the old table
if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil {
log.Error("Unable to drop old table %s. Error: %v", tableName, err)
return err
@@ -157,32 +315,44 @@ func RecreateTable(sess *xorm.Session, bean any) error {
}

case setting.Database.Type.IsMySQL():
// MySQL will drop all the constraints on the old table
if _, err := sess.Exec(fmt.Sprintf("DROP TABLE `%s`", tableName)); err != nil {
log.Error("Unable to drop old table %s. Error: %v", tableName, err)
return err
}

if err := sess.Table(tempTableName).DropIndexes(bean); err != nil {
log.Error("Unable to drop indexes on temporary table %s. Error: %v", tempTableName, err)
return err
}

// SQLite and MySQL will move all the constraints from the temporary table to the new table
// MySQL will move all the constraints that reference this table from the temporary table to the new table
if _, err := sess.Exec(fmt.Sprintf("ALTER TABLE `%s` RENAME TO `%s`", tempTableName, tableName)); err != nil {
log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err)
return err
}

if err := sess.Table(tableName).CreateIndexes(bean); err != nil {
log.Error("Unable to recreate indexes on table %s. Error: %v", tableName, err)
return err
// In `RecreateTables` the foreign keys were renamed with a '_' prefix to avoid conflicting on the original
// table's constraint names. Now that table has been dropped, so we can rename them back to leave the table in
// the right state. Unfortunately this will cause a recheck of the constraint's validity against the target
// table which will be slow for large tables, but it's unavoidable without the ability to rename constraints
// in-place. Awkwardly these FKs are still a reference to the tmp_recreate target table since we drop in reverse
// FK order -- the ALTER TABLE ... RENAME .. on those tmp tables will correct the FKs later.
modifications := make([]schemas.TableModification, 0, len(tableSchema.ForeignKeys)*2)
for _, fk := range tableSchema.ForeignKeys {
modifications = append(modifications, schemas.DropForeignKey{ForeignKey: schemas.ForeignKey{
Name: "_" + fk.Name,
SourceFieldName: fk.SourceFieldName,
TargetTableName: fmt.Sprintf("tmp_recreate__%s", fk.TargetTableName),
TargetFieldName: fk.TargetFieldName,
}})
modifications = append(modifications, schemas.AddForeignKey{ForeignKey: schemas.ForeignKey{
Name: fk.Name,
SourceFieldName: fk.SourceFieldName,
TargetTableName: fmt.Sprintf("tmp_recreate__%s", fk.TargetTableName),
TargetFieldName: fk.TargetFieldName,
}})
}
if len(modifications) != 0 {
if err := sess.Table(tableName).AlterTable(bean, modifications...); err != nil {
return fmt.Errorf("alter table failed: while rewriting foreign keys to original names, error occurred: %w", err)
}
}

if err := sess.Table(tableName).CreateUniques(bean); err != nil {
log.Error("Unable to recreate uniques on table %s. Error: %v", tableName, err)
return err
}
case setting.Database.Type.IsPostgreSQL():
var originalSequences []string
type sequenceData struct {
@@ -193,7 +363,7 @@ func RecreateTable(sess *xorm.Session, bean any) error {

schema := sess.Engine().Dialect().URI().Schema
sess.Engine().SetSchema("")
if err := sess.Table("information_schema.sequences").Cols("sequence_name").Where("sequence_name LIKE ? || '_%' AND sequence_catalog = ?", tableName, setting.Database.Name).Find(&originalSequences); err != nil {
if err := sess.Table("information_schema.sequences").Cols("sequence_name").Where("sequence_name LIKE ? || '_id_seq' AND sequence_catalog = ?", tableName, setting.Database.Name).Find(&originalSequences); err != nil {
log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err)
return err
}
@@ -238,7 +408,7 @@ func RecreateTable(sess *xorm.Session, bean any) error {

var sequences []string
sess.Engine().SetSchema("")
if err := sess.Table("information_schema.sequences").Cols("sequence_name").Where("sequence_name LIKE 'tmp_recreate__' || ? || '_%' AND sequence_catalog = ?", tableName, setting.Database.Name).Find(&sequences); err != nil {
if err := sess.Table("information_schema.sequences").Cols("sequence_name").Where("sequence_name LIKE 'tmp_recreate__' || ? || '_id_seq' AND sequence_catalog = ?", tableName, setting.Database.Name).Find(&sequences); err != nil {
log.Error("Unable to rename %s to %s. Error: %v", tempTableName, tableName, err)
return err
}
@@ -275,6 +445,7 @@ func RecreateTable(sess *xorm.Session, bean any) error {
default:
log.Fatal("Unrecognized DB")
}

return nil
}
@@ -95,8 +95,8 @@ func PrepareTestEnv(t *testing.T, skip int, syncModels ...any) (*xorm.Engine, fu
t.Logf("initializing fixtures from: %s", fixturesDir)
if err := unittest.InitFixtures(
unittest.FixturesOptions{
Dir: fixturesDir,
SkipCleanRegistedModels: true,
Dir: fixturesDir,
OnlyAffectModels: syncModels,
}, x); err != nil {
t.Errorf("error whilst initializing fixtures from %s: %v", fixturesDir, err)
return x, deferFn
@@ -32,8 +32,8 @@ func AddPrimaryKeyToRepoTopic(x *xorm.Engine) error {
return err
}

base.RecreateTable(sess, &Topic{})
base.RecreateTable(sess, &RepoTopic{})
base.LegacyRecreateTable(sess, &Topic{}) //nolint:staticcheck
base.LegacyRecreateTable(sess, &RepoTopic{}) //nolint:staticcheck

return sess.Commit()
}
@@ -30,7 +30,7 @@ func UpdateReactionConstraint(x *xorm.Engine) error {
return err
}

if err := base.RecreateTable(sess, &Reaction{}); err != nil {
if err := base.LegacyRecreateTable(sess, &Reaction{}); err != nil { //nolint:staticcheck
return err
}
@@ -27,7 +27,7 @@ func ConvertTopicNameFrom25To50(x *xorm.Engine) error {
if err := sess.Begin(); err != nil {
return err
}
if err := base.RecreateTable(sess, new(Topic)); err != nil {
if err := base.LegacyRecreateTable(sess, new(Topic)); err != nil { //nolint:staticcheck
return err
}
@@ -10,8 +10,10 @@ import (
"fmt"
"os"
"path/filepath"
"slices"
"strings"

"forgejo.org/models/db"
"forgejo.org/modules/container"

"go.yaml.in/yaml/v3"
@@ -41,7 +43,7 @@ func newFixtureLoader(db *sql.DB, dialect string, fixturePaths []string, allTabl
fixtureFiles: []*fixtureFile{},
}

tablesWithoutFixture := allTableNames
tablesWithoutFixture := allTableNames.Clone()

// Load fixtures
for _, fixturePath := range fixturePaths {
@@ -63,8 +65,10 @@ func newFixtureLoader(db *sql.DB, dialect string, fixturePaths []string, allTabl
if err != nil {
return nil, err
}
l.fixtureFiles = append(l.fixtureFiles, fixtureFile)
tablesWithoutFixture.Remove(fixtureFile.name)
if allTableNames.Contains(fixtureFile.name) {
l.fixtureFiles = append(l.fixtureFiles, fixtureFile)
tablesWithoutFixture.Remove(fixtureFile.name)
}
}
}
} else {
@@ -72,7 +76,10 @@ func newFixtureLoader(db *sql.DB, dialect string, fixturePaths []string, allTabl
if err != nil {
return nil, err
}
l.fixtureFiles = append(l.fixtureFiles, fixtureFile)
if allTableNames.Contains(fixtureFile.name) {
l.fixtureFiles = append(l.fixtureFiles, fixtureFile)
tablesWithoutFixture.Remove(fixtureFile.name)
}
}
}
@@ -84,6 +91,8 @@ func newFixtureLoader(db *sql.DB, dialect string, fixturePaths []string, allTabl
})
}

l.orderFixtures()

return l, nil
}
@@ -179,6 +188,14 @@ func (l *loader) buildFixtureFile(fixturePath string) (*fixtureFile, error) {
return fixture, nil
}

// Reorganize `fixtureFiles` based upon the order that they'll need to be inserted into the database to satisfy foreign
// key constraints.
func (l *loader) orderFixtures() {
slices.SortFunc(l.fixtureFiles, func(v1, v2 *fixtureFile) int {
return db.TableNameInsertionOrderSortFunc(v1.name, v2.name)
})
}

func (l *loader) Load() error {
// Start transaction.
tx, err := l.db.Begin()
@@ -192,14 +209,18 @@ func (l *loader) Load() error {

// Clean the table and re-insert the fixtures.
tableDeleted := make(container.Set[string])
for _, fixture := range l.fixtureFiles {

// Issue deletes first, in reverse of insertion order, to maintain foreign key constraints.
for i := len(l.fixtureFiles) - 1; i >= 0; i-- {
fixture := l.fixtureFiles[i]
if !tableDeleted.Contains(fixture.name) {
if _, err := tx.Exec(fmt.Sprintf("DELETE FROM %s", l.quoteKeyword(fixture.name))); err != nil {
return fmt.Errorf("cannot delete table %s: %w", fixture.name, err)
}
tableDeleted.Add(fixture.name)
}

}
for _, fixture := range l.fixtureFiles {
for _, insertSQL := range fixture.insertSQLs {
if _, err := tx.Exec(insertSQL.statement, insertSQL.values...); err != nil {
return fmt.Errorf("cannot insert %q with values %q: %w", insertSQL.statement, insertSQL.values, err)
@@ -80,8 +80,13 @@ func InitFixtures(opts FixturesOptions, engine ...*xorm.Engine) (err error) {
}

var allTables container.Set[string]
if !opts.SkipCleanRegistedModels {
if opts.OnlyAffectModels == nil {
allTables = allTableNames().Clone()
} else {
allTables = make(container.Set[string])
for _, bean := range opts.OnlyAffectModels {
allTables.Add(e.TableName(bean))
}
}

fixturesLoader, err = newFixtureLoader(e.DB().DB, dialect, fixturePaths, allTables)
|
@ -217,10 +217,9 @@ type FixturesOptions struct {
|
|||
Files []string
|
||||
Dirs []string
|
||||
Base string
|
||||
// By default all registered models are cleaned, even if they do not have
|
||||
// fixture. Enabling this will skip that and only models with fixtures are
|
||||
// considered.
|
||||
SkipCleanRegistedModels bool
|
||||
// By default all registered models are cleaned, even if they do not have fixture. When OnlyAffectModels is not-nil,
|
||||
// cleaning registered models will be skipped and only these models with fixtures are considered.
|
||||
OnlyAffectModels []any
|
||||
}
|
||||
|
||||
// CreateTestEngine creates a memory database and loads the fixture data from fixturesDir
|
||||
|
|
release-notes/9373.md (new file, 1 line)

@@ -0,0 +1 @@
Foreign keys have been added to the Forgejo database schema, which may identify data inconsistencies during the v41 database schema upgrade. If migration errors occur, `forgejo doctor check --all` can be used to identify the inconsistent records, which can be manually corrected or deleted. `forgejo doctor check --all --fix` will automatically delete the inconsistent records. Affected tables in this release are: `stopwatch` (references `issue` and `user`), and `tracked_time` (references `issue` and `user`).
@@ -8,6 +8,7 @@ import (
"testing"

"forgejo.org/models/db"
"forgejo.org/models/issues"
"forgejo.org/models/unittest"
user_model "forgejo.org/models/user"
"forgejo.org/modules/setting"
@@ -22,7 +23,7 @@ func TestReverseProxyAuth(t *testing.T) {
defer test.MockVariableValue(&setting.Service.EnableReverseProxyFullName, true)()
require.NoError(t, unittest.PrepareTestDatabase())

require.NoError(t, db.TruncateBeans(db.DefaultContext, &user_model.User{}))
require.NoError(t, db.TruncateBeans(db.DefaultContext, &issues.TrackedTime{}, &issues.Stopwatch{}, &user_model.User{}))
require.EqualValues(t, 0, user_model.CountUsers(db.DefaultContext, nil))

t.Run("First user should be admin", func(t *testing.T) {
@@ -121,6 +121,8 @@ func checkDBConsistency(ctx context.Context, logger log.Logger, autofix bool) er
// find tracked times without existing issues/pulls
genericOrphanCheck("Orphaned TrackedTimes without existing issue",
"tracked_time", "issue", "tracked_time.issue_id=issue.id"),
genericOrphanCheck("Orphaned TrackedTimes without existing user",
"tracked_time", "user", "tracked_time.user_id=`user`.id"),
// find attachments without existing issues or releases
{
Name: "Orphaned Attachments without existing issues or releases",
|
@ -265,6 +265,29 @@ func deleteIssue(ctx context.Context, issue *issues_model.Issue) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// delete all database data still assigned to this issue
|
||||
if err := db.DeleteBeans(ctx,
|
||||
&issues_model.ContentHistory{IssueID: issue.ID},
|
||||
&issues_model.Comment{IssueID: issue.ID},
|
||||
&issues_model.IssueLabel{IssueID: issue.ID},
|
||||
&issues_model.IssueDependency{IssueID: issue.ID},
|
||||
&issues_model.IssueAssignees{IssueID: issue.ID},
|
||||
&issues_model.IssueUser{IssueID: issue.ID},
|
||||
&activities_model.Notification{IssueID: issue.ID},
|
||||
&issues_model.Reaction{IssueID: issue.ID},
|
||||
&issues_model.IssueWatch{IssueID: issue.ID},
|
||||
&issues_model.Stopwatch{IssueID: issue.ID},
|
||||
&issues_model.TrackedTime{IssueID: issue.ID},
|
||||
&project_model.ProjectIssue{IssueID: issue.ID},
|
||||
&repo_model.Attachment{IssueID: issue.ID},
|
||||
&issues_model.PullRequest{IssueID: issue.ID},
|
||||
&issues_model.Comment{RefIssueID: issue.ID},
|
||||
&issues_model.IssueDependency{DependencyID: issue.ID},
|
||||
&issues_model.Comment{DependentIssueID: issue.ID},
|
||||
); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := e.ID(issue.ID).NoAutoCondition().Delete(issue); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -298,29 +321,6 @@ func deleteIssue(ctx context.Context, issue *issues_model.Issue) error {
system_model.RemoveStorageWithNotice(ctx, storage.Attachments, "Delete issue attachment", issue.Attachments[i].RelativePath())
}

// delete all database data still assigned to this issue
if err := db.DeleteBeans(ctx,
&issues_model.ContentHistory{IssueID: issue.ID},
&issues_model.Comment{IssueID: issue.ID},
&issues_model.IssueLabel{IssueID: issue.ID},
&issues_model.IssueDependency{IssueID: issue.ID},
&issues_model.IssueAssignees{IssueID: issue.ID},
&issues_model.IssueUser{IssueID: issue.ID},
&activities_model.Notification{IssueID: issue.ID},
&issues_model.Reaction{IssueID: issue.ID},
&issues_model.IssueWatch{IssueID: issue.ID},
&issues_model.Stopwatch{IssueID: issue.ID},
&issues_model.TrackedTime{IssueID: issue.ID},
&project_model.ProjectIssue{IssueID: issue.ID},
&repo_model.Attachment{IssueID: issue.ID},
&issues_model.PullRequest{IssueID: issue.ID},
&issues_model.Comment{RefIssueID: issue.ID},
&issues_model.IssueDependency{DependencyID: issue.ID},
&issues_model.Comment{DependentIssueID: issue.ID},
); err != nil {
return err
}

return committer.Commit()
}
services/user/TestDeleteUser/stopwatch.yml (new file, 5 lines)

@@ -0,0 +1,5 @@
-
  id: 10
  user_id: 4
  issue_id: 1
  created_unix: 1500988001
services/user/TestDeleteUser/tracked_time.yml (new file, 7 lines)

@@ -0,0 +1,7 @@
-
  id: 10
  user_id: 8
  issue_id: 22
  time: 401
  created_unix: 946684800
  deleted: false
@@ -102,6 +102,12 @@ func deleteUser(ctx context.Context, u *user_model.User, purge bool) (err error)
return fmt.Errorf("deleteBeans: %w", err)
}

// Retain the fact that time was tracked, but set DB's `user_id` to NULL.
_, err = e.Table(&issues_model.TrackedTime{}).Where("user_id = ?", u.ID).Update(map[string]any{"user_id": nil})
if err != nil {
return fmt.Errorf("update tracked_time user_id: %w", err)
}

if err := auth_model.DeleteOAuth2RelictsByUserID(ctx, u.ID); err != nil {
return err
}
@@ -15,6 +15,7 @@ import (
asymkey_model "forgejo.org/models/asymkey"
"forgejo.org/models/auth"
"forgejo.org/models/db"
"forgejo.org/models/issues"
"forgejo.org/models/moderation"
"forgejo.org/models/organization"
repo_model "forgejo.org/models/repo"
@@ -37,38 +38,81 @@ func TestMain(m *testing.M) {
}

func TestDeleteUser(t *testing.T) {
test := func(userID int64) {
require.NoError(t, unittest.PrepareTestDatabase())
user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: userID})
// Note: an earlier revision of TestDeleteUser also tested for deleting user 2, but then failed to remove them from
// all organizations because ErrLastOrgOwner and considered a successful test -- since that didn't actually test
// DeleteUser in any way, that case has been removed from TestDeleteUser.

ownedRepos := make([]*repo_model.Repository, 0, 10)
require.NoError(t, db.GetEngine(db.DefaultContext).Find(&ownedRepos, &repo_model.Repository{OwnerID: userID}))
if len(ownedRepos) > 0 {
err := DeleteUser(db.DefaultContext, user, false)
require.Error(t, err)
assert.True(t, models.IsErrUserOwnRepos(err))
return
}

orgUsers := make([]*organization.OrgUser, 0, 10)
require.NoError(t, db.GetEngine(db.DefaultContext).Find(&orgUsers, &organization.OrgUser{UID: userID}))
for _, orgUser := range orgUsers {
if err := models.RemoveOrgUser(db.DefaultContext, orgUser.OrgID, orgUser.UID); err != nil {
assert.True(t, organization.IsErrLastOrgOwner(err))
return
}
}
require.NoError(t, DeleteUser(db.DefaultContext, user, false))
unittest.AssertNotExistsBean(t, &user_model.User{ID: userID})
unittest.CheckConsistencyFor(t, &user_model.User{}, &repo_model.Repository{})
testCases := []struct {
userID int64
errUserOwnRepos bool
errContains string
}{
{
userID: 3,
errContains: "is an organization not a user",
},
{
userID: 4,
},
{
userID: 8,
},
{
userID: 11,
errUserOwnRepos: true,
},
}
test(2)
test(4)
test(8)
test(11)
for _, tc := range testCases {
t.Run(fmt.Sprintf("delete %d", tc.userID), func(t *testing.T) {
defer unittest.OverrideFixtures("services/user/TestDeleteUser")()
require.NoError(t, unittest.PrepareTestDatabase())
user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: tc.userID})

org := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 3})
require.Error(t, DeleteUser(db.DefaultContext, org, false))
// Remove user from all organizations first
orgUsers := make([]*organization.OrgUser, 0, 10)
require.NoError(t, db.GetEngine(db.DefaultContext).Find(&orgUsers, &organization.OrgUser{UID: tc.userID}))
for _, orgUser := range orgUsers {
err := models.RemoveOrgUser(db.DefaultContext, orgUser.OrgID, orgUser.UID)
require.NoError(t, err)
}

err := DeleteUser(db.DefaultContext, user, false)
if tc.errUserOwnRepos {
require.Error(t, err)
assert.True(t, models.IsErrUserOwnRepos(err), "IsErrUserOwnRepos: %v", err)
} else if tc.errContains != "" {
assert.ErrorContains(t, err, tc.errContains)
} else {
require.NoError(t, err)
unittest.AssertNotExistsBean(t, &user_model.User{ID: tc.userID})
unittest.CheckConsistencyFor(t, &user_model.User{}, &repo_model.Repository{})
}
})
}
}

func TestDeleteUserRetainsTrackedTime(t *testing.T) {
defer unittest.OverrideFixtures("services/user/TestDeleteUser")()
require.NoError(t, unittest.PrepareTestDatabase())
user := unittest.AssertExistsAndLoadBean(t, &user_model.User{ID: 8})

err := DeleteUser(db.DefaultContext, user, false)
require.NoError(t, err)

time := make([]*issues.TrackedTime, 0, 10)
err = db.GetEngine(db.DefaultContext).Find(&time, &issues.TrackedTime{IssueID: 22})
require.NoError(t, err)
require.Len(t, time, 1)
tt := time[0]
assert.EqualValues(t, 0, tt.UserID)
assert.EqualValues(t, 22, tt.IssueID)
assert.EqualValues(t, 401, tt.Time)

// Make sure other tracked time wasn't affected
time = make([]*issues.TrackedTime, 0, 10)
err = db.GetEngine(db.DefaultContext).Find(&time, &issues.TrackedTime{UserID: 2})
require.NoError(t, err)
assert.Len(t, time, 5)
}

func TestPurgeUser(t *testing.T) {
@@ -133,6 +133,8 @@ func Test_Cmd_AdminFirstUser(t *testing.T) {
},
} {
t.Run(testCase.name, func(t *testing.T) {
db.GetEngine(db.DefaultContext).Exec("DELETE FROM `stopwatch`")
db.GetEngine(db.DefaultContext).Exec("DELETE FROM `tracked_time`")
db.GetEngine(db.DefaultContext).Exec("DELETE FROM `user`")
db.GetEngine(db.DefaultContext).Exec("DELETE FROM `email_address`")
assert.Equal(t, int64(0), user_model.CountUsers(db.DefaultContext, nil))