diff --git a/consts/auth.go b/consts/auth.go new file mode 100644 index 0000000..f52b968 --- /dev/null +++ b/consts/auth.go @@ -0,0 +1,19 @@ +package consts + +import "errors" + +const ( + AuthHeader = "Authorization" + AuthPrefix = "Bearer" + + AnonymousUser = "anonymous" + + AuthMiddlewareKey = "auth.user" +) + +var ( + ErrNotValidToken = errors.New("无效的 JWT 令牌。") + ErrJWTFormatError = errors.New("JWT 格式错误。") + ErrNotBearerType = errors.New("不是 Bearer 类型。") + ErrEmptyResponse = errors.New("我们的服务器返回了空请求,可能某些环节出了问题。") +) diff --git a/go.mod b/go.mod index 445b474..183b821 100644 --- a/go.mod +++ b/go.mod @@ -75,6 +75,7 @@ require ( go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect go.uber.org/atomic v1.11.0 // indirect + go.uber.org/dig v1.17.1 go.uber.org/multierr v1.11.0 // indirect golang.org/x/arch v0.8.0 // indirect golang.org/x/crypto v0.24.0 // indirect diff --git a/go.sum b/go.sum index 5ec177b..7d83669 100644 --- a/go.sum +++ b/go.sum @@ -148,6 +148,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= @@ -167,6 +169,8 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= @@ -228,6 +232,8 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= +go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= diff --git a/internal/bootstrap/router.go b/internal/bootstrap/router.go new file mode 100644 index 0000000..7627a37 --- /dev/null +++ 
b/internal/bootstrap/router.go @@ -0,0 +1,14 @@ +package bootstrap + +import ( + "framework_v2/internal/handlers/controllers/user" + "framework_v2/internal/middleware/http" + "framework_v2/internal/providers" + "github.com/gin-gonic/gin" +) + +func InitApiRoutes() { + var router = *providers.MustGet[gin.Engine]() + + router.GET("/", http.MiddlewareJSONResponse, http.ValidateUser, user.CurrentUser) +} diff --git a/internal/cmd/http.go b/internal/cmd/http.go index 2dd540c..f1cdc96 100644 --- a/internal/cmd/http.go +++ b/internal/cmd/http.go @@ -1,14 +1,10 @@ package cmd import ( - "framework_v2/internal/providers/config" - "framework_v2/internal/providers/ent" - "framework_v2/internal/providers/facade" - "framework_v2/internal/providers/gin" - "framework_v2/internal/providers/jobs" + "framework_v2/internal/bootstrap" + "framework_v2/internal/providers" "framework_v2/internal/providers/jwks" - "framework_v2/internal/providers/logger" - "framework_v2/internal/providers/s3" + "github.com/gin-gonic/gin" "github.com/spf13/cobra" ) @@ -16,27 +12,21 @@ var httpCommand = &cobra.Command{ Use: "http", Run: func(cmd *cobra.Command, args []string) { - config.InitConfig() - logger.InitLogger() - // you should uncommit it after run make ent - ent.InitEnt() - //redis.InitRedis() - s3.InitS3Driver() jwks.InitJwksRefresh() - jobs.InitAsynQClient() - gin.InitGin() StartHttp() }, } func StartHttp() { - if config.Config.ListenAddr.HTTP == "" { - config.Config.ListenAddr.HTTP = "0.0.0.0:8080" + if config.ListenAddr.HTTP == "" { + config.ListenAddr.HTTP = "0.0.0.0:8080" } - logger.Logger.Info("Http Server listening at " + config.Config.ListenAddr.HTTP) - err := facade.Router.Run(config.Config.ListenAddr.HTTP) + bootstrap.InitApiRoutes() + router := providers.MustGet[gin.Engine]() + logger.Info("Http Server listening at " + config.ListenAddr.HTTP) + err := router.Run(config.ListenAddr.HTTP) if err != nil { panic("failed to listen: " + err.Error()) } diff --git a/internal/cmd/migrate.go b/internal/cmd/migrate.go index de26558..bd6de2b 100644 --- a/internal/cmd/migrate.go +++ b/internal/cmd/migrate.go @@ -7,11 +7,7 @@ import ( "entgo.io/ent/dialect/sql/schema" "errors" "fmt" - "framework_v2/internal/providers/config" - "framework_v2/internal/providers/ent" - "framework_v2/internal/providers/logger" - - //entmigrate "framework_v2/internal/ent/migrate" + entmigrate "framework_v2/internal/ent/migrate" "framework_v2/internal/migrations" "github.com/golang-migrate/migrate/v4" _ "github.com/golang-migrate/migrate/v4/database/postgres" @@ -29,8 +25,8 @@ var dsnCommand = &cobra.Command{ Short: "生成 DSN", Long: "生成 DSN", Run: func(cmd *cobra.Command, args []string) { - config.InitConfig() - fmt.Print(config.Config.DB.Driver + "://" + config.Config.DB.DSN) + + fmt.Print(config.DB.Driver + "://" + config.DB.DSN) }, } @@ -39,9 +35,6 @@ var migrateCommand = &cobra.Command{ Short: "迁移数据库", Long: "适用于生产环境的数据库迁移", Run: func(cmd *cobra.Command, args []string) { - config.InitConfig() - logger.InitLogger() - ent.InitEnt() RunMigrate() }, } @@ -51,9 +44,6 @@ var createMigrateCommand = &cobra.Command{ Short: "新建迁移", Long: "从 internal/ent 中新建迁移。在这之前,需要运行 go generate ./internal/ent", Run: func(cmd *cobra.Command, args []string) { - config.InitConfig() - logger.InitLogger() - ent.InitEnt() generateMigration() }, } @@ -62,7 +52,7 @@ var createMigrateCommand = &cobra.Command{ func RunMigrate() { source, err := httpfs.New(http.FS(migrations.MigrationFS), ".") - mig, err := migrate.NewWithSourceInstance("httpfs", source, 
config.Config.DB.Driver+"://"+config.Config.DB.DSN) + mig, err := migrate.NewWithSourceInstance("httpfs", source, config.DB.Driver+"://"+config.DB.DSN) if err != nil { panic(err) @@ -120,10 +110,8 @@ func generateMigration() { log.Fatalln("migration name is required. Use: 'go run -mod=mod internal/ent/migrate/main.go '") } - panic("ent not implemented, if implemented, uncommit this line.") - fmt.Println(ctx, opts) // uncommit after implementing ent - //err = entmigrate.NamedDiff(ctx, config.Config.DB.Driver+"://"+config.Config.DB.DSN, os.Args[2], opts...) - //if err != nil { - // log.Fatalf("failed generating migration file: %v", err) - //} + err = entmigrate.NamedDiff(ctx, config.DB.Driver+"://"+config.DB.DSN, os.Args[2], opts...) + if err != nil { + log.Fatalf("failed generating migration file: %v", err) + } } diff --git a/internal/cmd/root.go b/internal/cmd/root.go index 5f2e532..416d5a2 100644 --- a/internal/cmd/root.go +++ b/internal/cmd/root.go @@ -2,11 +2,21 @@ package cmd import ( "fmt" + "framework_v2/internal/providers" "github.com/spf13/cobra" + "go.uber.org/zap" "os" "path/filepath" ) +var logger *zap.Logger +var config *providers.GlobalConfig + +func init() { + config = providers.MustGet[providers.GlobalConfig]() + logger = providers.MustGet[zap.Logger]() +} + var rootCmd = &cobra.Command{ Use: filepath.Base(os.Args[0]), } diff --git a/internal/cmd/rpc.go b/internal/cmd/rpc.go index cc47019..4b8e603 100644 --- a/internal/cmd/rpc.go +++ b/internal/cmd/rpc.go @@ -2,12 +2,7 @@ package cmd import ( grpc2 "framework_v2/internal/middleware/grpc" - "framework_v2/internal/providers/config" - "framework_v2/internal/providers/ent" - "framework_v2/internal/providers/jobs" "framework_v2/internal/providers/jwks" - "framework_v2/internal/providers/logger" - "framework_v2/internal/providers/redis" "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/auth" "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" "github.com/spf13/cobra" @@ -18,30 +13,24 @@ import ( ) var rpcCommand = &cobra.Command{ - Use: "serve", + Use: "rpc", Run: func(cmd *cobra.Command, args []string) { - config.InitConfig() - logger.InitLogger() - ent.InitEnt() - redis.InitRedis() jwks.InitJwksRefresh() - jobs.InitAsynQClient() - StartSpiderService() }, } func StartSpiderService() { - if config.Config.ListenAddr.GRPC == "" { - config.Config.ListenAddr.GRPC = "0.0.0.0:8081" + if config.ListenAddr.GRPC == "" { + config.ListenAddr.GRPC = "0.0.0.0:8081" } - lis, err := net.Listen("tcp", config.Config.ListenAddr.GRPC) + lis, err := net.Listen("tcp", config.ListenAddr.GRPC) if err != nil { panic("failed to listen: " + err.Error()) } - logger.Logger.Info("Server listening at " + config.Config.ListenAddr.GRPC) + logger.Info("Server listening at " + config.ListenAddr.GRPC) var opts = []grpc.ServerOption{ grpc.ChainUnaryInterceptor( diff --git a/internal/cmd/schedule.go b/internal/cmd/schedule.go index 545dbfc..b211596 100644 --- a/internal/cmd/schedule.go +++ b/internal/cmd/schedule.go @@ -2,11 +2,6 @@ package cmd import ( "fmt" - "framework_v2/internal/providers/config" - "framework_v2/internal/providers/ent" - "framework_v2/internal/providers/jobs" - "framework_v2/internal/providers/logger" - "framework_v2/internal/providers/redis" "github.com/spf13/cobra" "time" @@ -16,19 +11,12 @@ var scheduleCommand = &cobra.Command{ Use: "schedule", Run: func(cmd *cobra.Command, args []string) { - config.InitConfig() - logger.InitLogger() - ent.InitEnt() - redis.InitRedis() - jobs.InitAsynQClient() - runSchedule() }, 
} func runSchedule() { - - if config.Config.DebugMode.Enable { + if config.DebugMode.Enable { fmt.Println("调试模式开启,直接触发。") // RUN JOB return diff --git a/internal/cmd/worker.go b/internal/cmd/worker.go index 6e22d3d..2e78aa8 100644 --- a/internal/cmd/worker.go +++ b/internal/cmd/worker.go @@ -1,11 +1,7 @@ package cmd import ( - "framework_v2/internal/providers/config" - "framework_v2/internal/providers/ent" - "framework_v2/internal/providers/jobs" - "framework_v2/internal/providers/logger" - "framework_v2/internal/providers/redis" + "framework_v2/internal/providers" "github.com/hibiken/asynq" "github.com/spf13/cobra" "log" @@ -15,12 +11,6 @@ var workerCommand = &cobra.Command{ Use: "worker", Run: func(cmd *cobra.Command, args []string) { - config.InitConfig() - logger.InitLogger() - ent.InitEnt() - redis.InitRedis() - jobs.InitAsynQServer() - runWorker() }, } @@ -28,10 +18,12 @@ var workerCommand = &cobra.Command{ func runWorker() { mux := asynq.NewServeMux() + var asynqServer = providers.MustGet[asynq.Server]() + //mux.HandleFunc(tasks.DocumentChunkTask, tasks.HandleDocumentChunkTask) //mux.HandleFunc(tasks.DocumentImportTask, tasks.HandleDocumentImportTask) - if err := jobs.AsynQServer.Run(mux); err != nil { + if err := asynqServer.Run(mux); err != nil { log.Fatal(err) } diff --git a/internal/ent/client.go b/internal/ent/client.go new file mode 100644 index 0000000..3cbc1e8 --- /dev/null +++ b/internal/ent/client.go @@ -0,0 +1,341 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + + "framework_v2/internal/ent/migrate" + + "framework_v2/internal/ent/user" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // User is the client for interacting with the User builders. + User *UserClient +} + +// NewClient creates a new client configured with the given options. +func NewClient(opts ...Option) *Client { + client := &Client{config: newConfig(opts...)} + client.init() + return client +} + +func (c *Client) init() { + c.Schema = migrate.NewSchema(c.driver) + c.User = NewUserClient(c.config) +} + +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. 
+func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + +// Open opens a database/sql.DB specified by the driver name and +// the data source name, and returns a new client attached to it. +// Optional parameters can be added for configuring the client. +func Open(driverName, dataSourceName string, options ...Option) (*Client, error) { + switch driverName { + case dialect.MySQL, dialect.Postgres, dialect.SQLite: + drv, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return NewClient(append(options, Driver(drv))...), nil + default: + return nil, fmt.Errorf("unsupported driver: %q", driverName) + } +} + +// ErrTxStarted is returned when trying to start a new transaction from a transactional client. +var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction") + +// Tx returns a new transactional client. The provided context +// is used until the transaction is committed or rolled back. +func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, ErrTxStarted + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = tx + return &Tx{ + ctx: ctx, + config: cfg, + User: NewUserClient(cfg), + }, nil +} + +// BeginTx returns a transactional client with specified options. +func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, errors.New("ent: cannot start a transaction within a transaction") + } + tx, err := c.driver.(interface { + BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error) + }).BeginTx(ctx, opts) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = &txDriver{tx: tx, drv: c.driver} + return &Tx{ + ctx: ctx, + config: cfg, + User: NewUserClient(cfg), + }, nil +} + +// Debug returns a new debug-client. It's used to get verbose logging on specific operations. +// +// client.Debug(). +// User. +// Query(). +// Count(ctx) +func (c *Client) Debug() *Client { + if c.debug { + return c + } + cfg := c.config + cfg.driver = dialect.Debug(c.driver, c.log) + client := &Client{config: cfg} + client.init() + return client +} + +// Close closes the database connection and prevents new queries from starting. +func (c *Client) Close() error { + return c.driver.Close() +} + +// Use adds the mutation hooks to all the entity clients. +// In order to add hooks to a specific client, call: `client.Node.Use(...)`. +func (c *Client) Use(hooks ...Hook) { + c.User.Use(hooks...) +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. +func (c *Client) Intercept(interceptors ...Interceptor) { + c.User.Intercept(interceptors...) +} + +// Mutate implements the ent.Mutator interface. +func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *UserMutation: + return c.User.mutate(ctx, m) + default: + return nil, fmt.Errorf("ent: unknown mutation type %T", m) + } +} + +// UserClient is a client for the User schema. +type UserClient struct { + config +} + +// NewUserClient returns a client for the User from the given config. 
+func NewUserClient(c config) *UserClient { + return &UserClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `user.Hooks(f(g(h())))`. +func (c *UserClient) Use(hooks ...Hook) { + c.hooks.User = append(c.hooks.User, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `user.Intercept(f(g(h())))`. +func (c *UserClient) Intercept(interceptors ...Interceptor) { + c.inters.User = append(c.inters.User, interceptors...) +} + +// Create returns a builder for creating a User entity. +func (c *UserClient) Create() *UserCreate { + mutation := newUserMutation(c.config, OpCreate) + return &UserCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of User entities. +func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk { + return &UserCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UserClient) MapCreateBulk(slice any, setFunc func(*UserCreate, int)) *UserCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserCreateBulk{err: fmt.Errorf("calling to UserClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for User. +func (c *UserClient) Update() *UserUpdate { + mutation := newUserMutation(c.config, OpUpdate) + return &UserUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UserClient) UpdateOne(u *User) *UserUpdateOne { + mutation := newUserMutation(c.config, OpUpdateOne, withUser(u)) + return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *UserClient) UpdateOneID(id int) *UserUpdateOne { + mutation := newUserMutation(c.config, OpUpdateOne, withUserID(id)) + return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for User. +func (c *UserClient) Delete() *UserDelete { + mutation := newUserMutation(c.config, OpDelete) + return &UserDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UserClient) DeleteOne(u *User) *UserDeleteOne { + return c.DeleteOneID(u.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *UserClient) DeleteOneID(id int) *UserDeleteOne { + builder := c.Delete().Where(user.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UserDeleteOne{builder} +} + +// Query returns a query builder for User. +func (c *UserClient) Query() *UserQuery { + return &UserQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUser}, + inters: c.Interceptors(), + } +} + +// Get returns a User entity by its id. +func (c *UserClient) Get(ctx context.Context, id int) (*User, error) { + return c.Query().Where(user.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (c *UserClient) GetX(ctx context.Context, id int) *User { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *UserClient) Hooks() []Hook { + return c.hooks.User +} + +// Interceptors returns the client interceptors. +func (c *UserClient) Interceptors() []Interceptor { + return c.inters.User +} + +func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown User mutation op: %q", m.Op()) + } +} + +// hooks and interceptors per client, for fast access. +type ( + hooks struct { + User []ent.Hook + } + inters struct { + User []ent.Interceptor + } +) diff --git a/internal/ent/ent.go b/internal/ent/ent.go new file mode 100644 index 0000000..bf2cd14 --- /dev/null +++ b/internal/ent/ent.go @@ -0,0 +1,608 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "framework_v2/internal/ent/user" + "reflect" + "sync" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +// ent aliases to avoid import conflicts in user's code. +type ( + Op = ent.Op + Hook = ent.Hook + Value = ent.Value + Query = ent.Query + QueryContext = ent.QueryContext + Querier = ent.Querier + QuerierFunc = ent.QuerierFunc + Interceptor = ent.Interceptor + InterceptFunc = ent.InterceptFunc + Traverser = ent.Traverser + TraverseFunc = ent.TraverseFunc + Policy = ent.Policy + Mutator = ent.Mutator + Mutation = ent.Mutation + MutateFunc = ent.MutateFunc +) + +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. +func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} + +// OrderFunc applies an ordering on the sql selector. +// Deprecated: Use Asc/Desc functions or the package builders instead. +type OrderFunc func(*sql.Selector) + +var ( + initCheck sync.Once + columnCheck sql.ColumnCheck +) + +// columnChecker checks if the column exists in the given table. +func checkColumn(table, column string) error { + initCheck.Do(func() { + columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ + user.Table: user.ValidColumn, + }) + }) + return columnCheck(table, column) +} + +// Asc applies the given fields in ASC order. 
+func Asc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Asc(s.C(f))) + } + } +} + +// Desc applies the given fields in DESC order. +func Desc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Desc(s.C(f))) + } + } +} + +// AggregateFunc applies an aggregation step on the group-by traversal/selector. +type AggregateFunc func(*sql.Selector) string + +// As is a pseudo aggregation function for renaming another other functions with custom names. For example: +// +// GroupBy(field1, field2). +// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")). +// Scan(ctx, &v) +func As(fn AggregateFunc, end string) AggregateFunc { + return func(s *sql.Selector) string { + return sql.As(fn(s), end) + } +} + +// Count applies the "count" aggregation function on each group. +func Count() AggregateFunc { + return func(s *sql.Selector) string { + return sql.Count("*") + } +} + +// Max applies the "max" aggregation function on the given field of each group. +func Max(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Max(s.C(field)) + } +} + +// Mean applies the "mean" aggregation function on the given field of each group. +func Mean(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Avg(s.C(field)) + } +} + +// Min applies the "min" aggregation function on the given field of each group. +func Min(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Min(s.C(field)) + } +} + +// Sum applies the "sum" aggregation function on the given field of each group. +func Sum(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Sum(s.C(field)) + } +} + +// ValidationError returns when validating a field or edge fails. +type ValidationError struct { + Name string // Field or edge name. + err error +} + +// Error implements the error interface. +func (e *ValidationError) Error() string { + return e.err.Error() +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ValidationError) Unwrap() error { + return e.err +} + +// IsValidationError returns a boolean indicating whether the error is a validation error. +func IsValidationError(err error) bool { + if err == nil { + return false + } + var e *ValidationError + return errors.As(err, &e) +} + +// NotFoundError returns when trying to fetch a specific entity and it was not found in the database. +type NotFoundError struct { + label string +} + +// Error implements the error interface. 
+func (e *NotFoundError) Error() string { + return "ent: " + e.label + " not found" +} + +// IsNotFound returns a boolean indicating whether the error is a not found error. +func IsNotFound(err error) bool { + if err == nil { + return false + } + var e *NotFoundError + return errors.As(err, &e) +} + +// MaskNotFound masks not found error. +func MaskNotFound(err error) error { + if IsNotFound(err) { + return nil + } + return err +} + +// NotSingularError returns when trying to fetch a singular entity and more then one was found in the database. +type NotSingularError struct { + label string +} + +// Error implements the error interface. +func (e *NotSingularError) Error() string { + return "ent: " + e.label + " not singular" +} + +// IsNotSingular returns a boolean indicating whether the error is a not singular error. +func IsNotSingular(err error) bool { + if err == nil { + return false + } + var e *NotSingularError + return errors.As(err, &e) +} + +// NotLoadedError returns when trying to get a node that was not loaded by the query. +type NotLoadedError struct { + edge string +} + +// Error implements the error interface. +func (e *NotLoadedError) Error() string { + return "ent: " + e.edge + " edge was not loaded" +} + +// IsNotLoaded returns a boolean indicating whether the error is a not loaded error. +func IsNotLoaded(err error) bool { + if err == nil { + return false + } + var e *NotLoadedError + return errors.As(err, &e) +} + +// ConstraintError returns when trying to create/update one or more entities and +// one or more of their constraints failed. For example, violation of edge or +// field uniqueness. +type ConstraintError struct { + msg string + wrap error +} + +// Error implements the error interface. +func (e ConstraintError) Error() string { + return "ent: constraint failed: " + e.msg +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ConstraintError) Unwrap() error { + return e.wrap +} + +// IsConstraintError returns a boolean indicating whether the error is a constraint failure. +func IsConstraintError(err error) bool { + if err == nil { + return false + } + var e *ConstraintError + return errors.As(err, &e) +} + +// selector embedded by the different Select/GroupBy builders. +type selector struct { + label string + flds *[]string + fns []AggregateFunc + scan func(context.Context, any) error +} + +// ScanX is like Scan, but panics if an error occurs. +func (s *selector) ScanX(ctx context.Context, v any) { + if err := s.scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from a selector. It is only allowed when selecting one field. +func (s *selector) Strings(ctx context.Context) ([]string, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (s *selector) StringsX(ctx context.Context) []string { + v, err := s.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from a selector. It is only allowed when selecting one field. 
+func (s *selector) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = s.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. +func (s *selector) StringX(ctx context.Context) string { + v, err := s.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from a selector. It is only allowed when selecting one field. +func (s *selector) Ints(ctx context.Context) ([]int, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (s *selector) IntsX(ctx context.Context) []int { + v, err := s.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from a selector. It is only allowed when selecting one field. +func (s *selector) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = s.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (s *selector) IntX(ctx context.Context) int { + v, err := s.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. +func (s *selector) Float64s(ctx context.Context) ([]float64, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (s *selector) Float64sX(ctx context.Context) []float64 { + v, err := s.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. +func (s *selector) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = s.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (s *selector) Float64X(ctx context.Context) float64 { + v, err := s.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from a selector. It is only allowed when selecting one field. +func (s *selector) Bools(ctx context.Context) ([]bool, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. 
+func (s *selector) BoolsX(ctx context.Context) []bool { + v, err := s.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from a selector. It is only allowed when selecting one field. +func (s *selector) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = s.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (s *selector) BoolX(ctx context.Context) bool { + v, err := s.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +// withHooks invokes the builder operation with the given hooks, if any. +func withHooks[V Value, M any, PM interface { + *M + Mutation +}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) { + if len(hooks) == 0 { + return exec(ctx) + } + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutationT, ok := any(m).(PM) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + // Set the mutation to the builder. + *mutation = *mutationT + return exec(ctx) + }) + for i := len(hooks) - 1; i >= 0; i-- { + if hooks[i] == nil { + return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = hooks[i](mut) + } + v, err := mut.Mutate(ctx, mutation) + if err != nil { + return value, err + } + nv, ok := v.(V) + if !ok { + return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation) + } + return nv, nil +} + +// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist. +func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context { + if ent.QueryFromContext(ctx) == nil { + qc.Op = op + ctx = ent.NewQueryContext(ctx, qc) + } + return ctx +} + +func querierAll[V Value, Q interface { + sqlAll(context.Context, ...queryHook) (V, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlAll(ctx) + }) +} + +func querierCount[Q interface { + sqlCount(context.Context) (int, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlCount(ctx) + }) +} + +func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) { + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + rv, err := qr.Query(ctx, q) + if err != nil { + return v, err + } + vt, ok := rv.(V) + if !ok { + return v, fmt.Errorf("unexpected type %T returned from %T. 
expected type: %T", vt, q, v) + } + return vt, nil +} + +func scanWithInterceptors[Q1 ent.Query, Q2 interface { + sqlScan(context.Context, Q1, any) error +}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { + rv := reflect.ValueOf(v) + var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q1) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { + return nil, err + } + if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { + return rv.Elem().Interface(), nil + } + return v, nil + }) + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + vv, err := qr.Query(ctx, rootQuery) + if err != nil { + return err + } + switch rv2 := reflect.ValueOf(vv); { + case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: + case rv.Type() == rv2.Type(): + rv.Elem().Set(rv2.Elem()) + case rv.Elem().Type() == rv2.Type(): + rv.Elem().Set(rv2) + } + return nil +} + +// queryHook describes an internal hook for the different sqlAll methods. +type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/internal/ent/enttest/enttest.go b/internal/ent/enttest/enttest.go new file mode 100644 index 0000000..fdb109a --- /dev/null +++ b/internal/ent/enttest/enttest.go @@ -0,0 +1,84 @@ +// Code generated by ent, DO NOT EDIT. + +package enttest + +import ( + "context" + "framework_v2/internal/ent" + // required by schema hooks. + _ "framework_v2/internal/ent/runtime" + + "framework_v2/internal/ent/migrate" + + "entgo.io/ent/dialect/sql/schema" +) + +type ( + // TestingT is the interface that is shared between + // testing.T and testing.B and used by enttest. + TestingT interface { + FailNow() + Error(...any) + } + + // Option configures client creation. + Option func(*options) + + options struct { + opts []ent.Option + migrateOpts []schema.MigrateOption + } +) + +// WithOptions forwards options to client creation. +func WithOptions(opts ...ent.Option) Option { + return func(o *options) { + o.opts = append(o.opts, opts...) + } +} + +// WithMigrateOptions forwards options to auto migration. +func WithMigrateOptions(opts ...schema.MigrateOption) Option { + return func(o *options) { + o.migrateOpts = append(o.migrateOpts, opts...) + } +} + +func newOptions(opts []Option) *options { + o := &options{} + for _, opt := range opts { + opt(o) + } + return o +} + +// Open calls ent.Open and auto-run migration. +func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client { + o := newOptions(opts) + c, err := ent.Open(driverName, dataSourceName, o.opts...) + if err != nil { + t.Error(err) + t.FailNow() + } + migrateSchema(t, c, o) + return c +} + +// NewClient calls ent.NewClient and auto-run migration. +func NewClient(t TestingT, opts ...Option) *ent.Client { + o := newOptions(opts) + c := ent.NewClient(o.opts...) + migrateSchema(t, c, o) + return c +} +func migrateSchema(t TestingT, c *ent.Client, o *options) { + tables, err := schema.CopyTables(migrate.Tables) + if err != nil { + t.Error(err) + t.FailNow() + } + if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil { + t.Error(err) + t.FailNow() + } +} diff --git a/internal/ent/hook/hook.go b/internal/ent/hook/hook.go new file mode 100644 index 0000000..bb82f75 --- /dev/null +++ b/internal/ent/hook/hook.go @@ -0,0 +1,198 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package hook + +import ( + "context" + "fmt" + "framework_v2/internal/ent" +) + +// The UserFunc type is an adapter to allow the use of ordinary +// function as User mutator. +type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UserFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UserMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m) +} + +// Condition is a hook condition function. +type Condition func(context.Context, ent.Mutation) bool + +// And groups conditions with the AND operator. +func And(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if !first(ctx, m) || !second(ctx, m) { + return false + } + for _, cond := range rest { + if !cond(ctx, m) { + return false + } + } + return true + } +} + +// Or groups conditions with the OR operator. +func Or(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if first(ctx, m) || second(ctx, m) { + return true + } + for _, cond := range rest { + if cond(ctx, m) { + return true + } + } + return false + } +} + +// Not negates a given condition. +func Not(cond Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + return !cond(ctx, m) + } +} + +// HasOp is a condition testing mutation operation. +func HasOp(op ent.Op) Condition { + return func(_ context.Context, m ent.Mutation) bool { + return m.Op().Is(op) + } +} + +// HasAddedFields is a condition validating `.AddedField` on fields. +func HasAddedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.AddedField(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.AddedField(field); !exists { + return false + } + } + return true + } +} + +// HasClearedFields is a condition validating `.FieldCleared` on fields. +func HasClearedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if exists := m.FieldCleared(field); !exists { + return false + } + for _, field := range fields { + if exists := m.FieldCleared(field); !exists { + return false + } + } + return true + } +} + +// HasFields is a condition validating `.Field` on fields. +func HasFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.Field(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.Field(field); !exists { + return false + } + } + return true + } +} + +// If executes the given hook under condition. +// +// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...))) +func If(hk ent.Hook, cond Condition) ent.Hook { + return func(next ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if cond(ctx, m) { + return hk(next).Mutate(ctx, m) + } + return next.Mutate(ctx, m) + }) + } +} + +// On executes the given hook only for the given operation. +// +// hook.On(Log, ent.Delete|ent.Create) +func On(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, HasOp(op)) +} + +// Unless skips the given hook only for the given operation. 
+// +// hook.Unless(Log, ent.Update|ent.UpdateOne) +func Unless(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, Not(HasOp(op))) +} + +// FixedError is a hook returning a fixed error. +func FixedError(err error) ent.Hook { + return func(ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) { + return nil, err + }) + } +} + +// Reject returns a hook that rejects all operations that match op. +// +// func (T) Hooks() []ent.Hook { +// return []ent.Hook{ +// Reject(ent.Delete|ent.Update), +// } +// } +func Reject(op ent.Op) ent.Hook { + hk := FixedError(fmt.Errorf("%s operation is not allowed", op)) + return On(hk, op) +} + +// Chain acts as a list of hooks and is effectively immutable. +// Once created, it will always hold the same set of hooks in the same order. +type Chain struct { + hooks []ent.Hook +} + +// NewChain creates a new chain of hooks. +func NewChain(hooks ...ent.Hook) Chain { + return Chain{append([]ent.Hook(nil), hooks...)} +} + +// Hook chains the list of hooks and returns the final hook. +func (c Chain) Hook() ent.Hook { + return func(mutator ent.Mutator) ent.Mutator { + for i := len(c.hooks) - 1; i >= 0; i-- { + mutator = c.hooks[i](mutator) + } + return mutator + } +} + +// Append extends a chain, adding the specified hook +// as the last ones in the mutation flow. +func (c Chain) Append(hooks ...ent.Hook) Chain { + newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks)) + newHooks = append(newHooks, c.hooks...) + newHooks = append(newHooks, hooks...) + return Chain{newHooks} +} + +// Extend extends a chain, adding the specified chain +// as the last ones in the mutation flow. +func (c Chain) Extend(chain Chain) Chain { + return c.Append(chain.hooks...) +} diff --git a/internal/ent/migrate/migrate.go b/internal/ent/migrate/migrate.go new file mode 100644 index 0000000..d8d3bcb --- /dev/null +++ b/internal/ent/migrate/migrate.go @@ -0,0 +1,96 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "context" + "fmt" + "io" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql/schema" +) + +var ( + // WithGlobalUniqueID sets the universal ids options to the migration. + // If this option is enabled, ent migration will allocate a 1<<32 range + // for the ids of each entity (table). + // Note that this option cannot be applied on tables that already exist. + WithGlobalUniqueID = schema.WithGlobalUniqueID + // WithDropColumn sets the drop column option to the migration. + // If this option is enabled, ent migration will drop old columns + // that were used for both fields and edges. This defaults to false. + WithDropColumn = schema.WithDropColumn + // WithDropIndex sets the drop index option to the migration. + // If this option is enabled, ent migration will drop old indexes + // that were defined in the schema. This defaults to false. + // Note that unique constraints are defined using `UNIQUE INDEX`, + // and therefore, it's recommended to enable this option to get more + // flexibility in the schema changes. + WithDropIndex = schema.WithDropIndex + // WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true. + WithForeignKeys = schema.WithForeignKeys +) + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv dialect.Driver +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } + +// Create creates all schema resources. 
+func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { + return Create(ctx, s, Tables, opts...) +} + +// Create creates all table resources using the given schema driver. +func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Create(ctx, tables...) +} + +// Diff compares the state read from a database connection or migration directory with +// the state defined by the Ent schema. Changes will be written to new migration files. +func Diff(ctx context.Context, url string, opts ...schema.MigrateOption) error { + return NamedDiff(ctx, url, "changes", opts...) +} + +// NamedDiff compares the state read from a database connection or migration directory with +// the state defined by the Ent schema. Changes will be written to new named migration files. +func NamedDiff(ctx context.Context, url, name string, opts ...schema.MigrateOption) error { + return schema.Diff(ctx, url, name, Tables, opts...) +} + +// Diff creates a migration file containing the statements to resolve the diff +// between the Ent schema and the connected database. +func (s *Schema) Diff(ctx context.Context, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Diff(ctx, Tables...) +} + +// NamedDiff creates a named migration file containing the statements to resolve the diff +// between the Ent schema and the connected database. +func (s *Schema) NamedDiff(ctx context.Context, name string, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.NamedDiff(ctx, name, Tables...) +} + +// WriteTo writes the schema changes to w instead of running them against the database. +// +// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { +// log.Fatal(err) +// } +func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { + return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...) +} diff --git a/internal/ent/migrate/schema.go b/internal/ent/migrate/schema.go new file mode 100644 index 0000000..ad98b98 --- /dev/null +++ b/internal/ent/migrate/schema.go @@ -0,0 +1,38 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "entgo.io/ent/dialect/sql/schema" + "entgo.io/ent/schema/field" +) + +var ( + // UsersColumns holds the columns for the "users" table. + UsersColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "name", Type: field.TypeString}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + } + // UsersTable holds the schema information for the "users" table. + UsersTable = &schema.Table{ + Name: "users", + Columns: UsersColumns, + PrimaryKey: []*schema.Column{UsersColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "user_name", + Unique: false, + Columns: []*schema.Column{UsersColumns[1]}, + }, + }, + } + // Tables holds all the tables in the schema. 
+ Tables = []*schema.Table{ + UsersTable, + } +) + +func init() { +} diff --git a/internal/ent/mutation.go b/internal/ent/mutation.go new file mode 100644 index 0000000..559a6d8 --- /dev/null +++ b/internal/ent/mutation.go @@ -0,0 +1,462 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "framework_v2/internal/ent/predicate" + "framework_v2/internal/ent/user" + "sync" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +const ( + // Operation types. + OpCreate = ent.OpCreate + OpDelete = ent.OpDelete + OpDeleteOne = ent.OpDeleteOne + OpUpdate = ent.OpUpdate + OpUpdateOne = ent.OpUpdateOne + + // Node types. + TypeUser = "User" +) + +// UserMutation represents an operation that mutates the User nodes in the graph. +type UserMutation struct { + config + op Op + typ string + id *int + name *string + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*User, error) + predicates []predicate.User +} + +var _ ent.Mutation = (*UserMutation)(nil) + +// userOption allows management of the mutation configuration using functional options. +type userOption func(*UserMutation) + +// newUserMutation creates new mutation for the User entity. +func newUserMutation(c config, op Op, opts ...userOption) *UserMutation { + m := &UserMutation{ + config: c, + op: op, + typ: TypeUser, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withUserID sets the ID field of the mutation. +func withUserID(id int) userOption { + return func(m *UserMutation) { + var ( + err error + once sync.Once + value *User + ) + m.oldValue = func(ctx context.Context) (*User, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().User.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withUser sets the old User of the mutation. +func withUser(node *User) userOption { + return func(m *UserMutation) { + m.oldValue = func(context.Context) (*User, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m UserMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m UserMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *UserMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. 
+func (m *UserMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().User.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetName sets the "name" field. +func (m *UserMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *UserMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *UserMutation) ResetName() { + m.name = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *UserMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *UserMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *UserMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *UserMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *UserMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *UserMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// Where appends a list predicates to the UserMutation builder. +func (m *UserMutation) Where(ps ...predicate.User) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the UserMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *UserMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.User, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *UserMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *UserMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (User). +func (m *UserMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *UserMutation) Fields() []string { + fields := make([]string, 0, 3) + if m.name != nil { + fields = append(fields, user.FieldName) + } + if m.created_at != nil { + fields = append(fields, user.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, user.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *UserMutation) Field(name string) (ent.Value, bool) { + switch name { + case user.FieldName: + return m.Name() + case user.FieldCreatedAt: + return m.CreatedAt() + case user.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *UserMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case user.FieldName: + return m.OldName(ctx) + case user.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case user.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown User field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *UserMutation) SetField(name string, value ent.Value) error { + switch name { + case user.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case user.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case user.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown User field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UserMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UserMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown User numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *UserMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UserMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *UserMutation) ClearField(name string) error { + return fmt.Errorf("unknown User nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *UserMutation) ResetField(name string) error { + switch name { + case user.FieldName: + m.ResetName() + return nil + case user.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case user.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown User field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *UserMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *UserMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *UserMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UserMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
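The reflective accessors above (Fields, Field, OldField, SetField) are what runtime hooks consume when they inspect or adjust a pending write. A minimal sketch of a client-level hook, not part of this patch; it assumes the generated package is imported as ent, plus context and errors, and a *ent.Client named client:

	client.Use(func(next ent.Mutator) ent.Mutator {
		return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
			// Only act on User mutations; other node types pass straight through.
			if um, ok := m.(*ent.UserMutation); ok {
				if name, exists := um.Name(); exists && name == "" {
					return nil, errors.New("user name must not be empty")
				}
			}
			return next.Mutate(ctx, m)
		})
	})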
+func (m *UserMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UserMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UserMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown User unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *UserMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown User edge %s", name) +} diff --git a/internal/ent/predicate/predicate.go b/internal/ent/predicate/predicate.go new file mode 100644 index 0000000..af21dfe --- /dev/null +++ b/internal/ent/predicate/predicate.go @@ -0,0 +1,10 @@ +// Code generated by ent, DO NOT EDIT. + +package predicate + +import ( + "entgo.io/ent/dialect/sql" +) + +// User is the predicate function for user builders. +type User func(*sql.Selector) diff --git a/internal/ent/runtime.go b/internal/ent/runtime.go new file mode 100644 index 0000000..cffb9cc --- /dev/null +++ b/internal/ent/runtime.go @@ -0,0 +1,25 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "framework_v2/internal/ent/schema" + "framework_v2/internal/ent/user" + "time" +) + +// The init function reads all schema descriptors with runtime code +// (default values, validators, hooks and policies) and stitches it +// to their package variables. +func init() { + userFields := schema.User{}.Fields() + _ = userFields + // userDescCreatedAt is the schema descriptor for created_at field. + userDescCreatedAt := userFields[1].Descriptor() + // user.DefaultCreatedAt holds the default value on creation for the created_at field. + user.DefaultCreatedAt = userDescCreatedAt.Default.(func() time.Time) + // userDescUpdatedAt is the schema descriptor for updated_at field. + userDescUpdatedAt := userFields[2].Descriptor() + // user.DefaultUpdatedAt holds the default value on creation for the updated_at field. + user.DefaultUpdatedAt = userDescUpdatedAt.Default.(func() time.Time) +} diff --git a/internal/ent/runtime/runtime.go b/internal/ent/runtime/runtime.go new file mode 100644 index 0000000..07dc233 --- /dev/null +++ b/internal/ent/runtime/runtime.go @@ -0,0 +1,10 @@ +// Code generated by ent, DO NOT EDIT. + +package runtime + +// The schema-stitching logic is generated in framework_v2/internal/ent/runtime.go + +const ( + Version = "v0.13.1" // Version of ent codegen. + Sum = "h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=" // Sum of ent codegen. +) diff --git a/internal/ent/schema/document.go b/internal/ent/schema/document.go deleted file mode 100644 index 11865b2..0000000 --- a/internal/ent/schema/document.go +++ /dev/null @@ -1,40 +0,0 @@ -package schema - -import ( - "entgo.io/ent" - "entgo.io/ent/schema/field" - "entgo.io/ent/schema/index" - "time" -) - -// Document holds the schema definition for the Document entity. -type Document struct { - ent.Schema -} - -// Fields of the Document. 
-func (Document) Fields() []ent.Field { - return []ent.Field{ - field.String("title"), - field.String("description"), - field.String("user_id"), - field.Uint("library_id"), - field.Bool("synced"), - field.JSON("document_block_ids", []int32{}), - field.Time("created_at").Default(time.Now), - field.Time("updated_at").Default(time.Now), - } -} - -// Edges of the Document. -func (Document) Edges() []ent.Edge { - return []ent.Edge{ - //edge.To("library", Library.Type), - } -} - -func (Document) Indexes() []ent.Index { - return []ent.Index{ - index.Fields("user_id", "library_id", "synced"), - } -} diff --git a/internal/ent/schema/user.go b/internal/ent/schema/user.go new file mode 100644 index 0000000..4e3916e --- /dev/null +++ b/internal/ent/schema/user.go @@ -0,0 +1,35 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" + "time" +) + +// User holds the schema definition for the User entity. +type User struct { + ent.Schema +} + +// Fields of the User. +func (User) Fields() []ent.Field { + return []ent.Field{ + field.String("name"), + field.Time("created_at").Default(time.Now), + field.Time("updated_at").Default(time.Now), + } +} + +// Edges of the User. +func (User) Edges() []ent.Edge { + return []ent.Edge{ + //edge.To("library", Library.Type), + } +} + +func (User) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("name"), + } +} diff --git a/internal/ent/tx.go b/internal/ent/tx.go new file mode 100644 index 0000000..8bbe7fa --- /dev/null +++ b/internal/ent/tx.go @@ -0,0 +1,210 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "sync" + + "entgo.io/ent/dialect" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // User is the client for interacting with the User builders. + User *UserClient + + // lazily loaded. + client *Client + clientOnce sync.Once + // ctx lives for the life of the transaction. It is + // the same context used by the underlying connection. + ctx context.Context +} + +type ( + // Committer is the interface that wraps the Commit method. + Committer interface { + Commit(context.Context, *Tx) error + } + + // The CommitFunc type is an adapter to allow the use of ordinary + // function as a Committer. If f is a function with the appropriate + // signature, CommitFunc(f) is a Committer that calls f. + CommitFunc func(context.Context, *Tx) error + + // CommitHook defines the "commit middleware". A function that gets a Committer + // and returns a Committer. For example: + // + // hook := func(next ent.Committer) ent.Committer { + // return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Commit(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + CommitHook func(Committer) Committer +) + +// Commit calls f(ctx, m). +func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Commit commits the transaction. +func (tx *Tx) Commit() error { + txDriver := tx.config.driver.(*txDriver) + var fn Committer = CommitFunc(func(context.Context, *Tx) error { + return txDriver.tx.Commit() + }) + txDriver.mu.Lock() + hooks := append([]CommitHook(nil), txDriver.onCommit...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Commit(tx.ctx, tx) +} + +// OnCommit adds a hook to call on commit. 
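The generated Tx wraps a single dialect transaction and exposes the same per-entity clients as *ent.Client, so every builder reached through it runs inside that transaction. A usage sketch, not part of the patch; client (*ent.Client) and ctx (context.Context) are assumed to be in scope:

	tx, err := client.Tx(ctx)
	if err != nil {
		return err
	}
	if _, err := tx.User.Create().SetName("alice").Save(ctx); err != nil {
		// Roll back on any failure; the original error is the one worth returning.
		_ = tx.Rollback()
		return err
	}
	return tx.Commit()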
+func (tx *Tx) OnCommit(f CommitHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onCommit = append(txDriver.onCommit, f) + txDriver.mu.Unlock() +} + +type ( + // Rollbacker is the interface that wraps the Rollback method. + Rollbacker interface { + Rollback(context.Context, *Tx) error + } + + // The RollbackFunc type is an adapter to allow the use of ordinary + // function as a Rollbacker. If f is a function with the appropriate + // signature, RollbackFunc(f) is a Rollbacker that calls f. + RollbackFunc func(context.Context, *Tx) error + + // RollbackHook defines the "rollback middleware". A function that gets a Rollbacker + // and returns a Rollbacker. For example: + // + // hook := func(next ent.Rollbacker) ent.Rollbacker { + // return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Rollback(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + RollbackHook func(Rollbacker) Rollbacker +) + +// Rollback calls f(ctx, m). +func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Rollback rollbacks the transaction. +func (tx *Tx) Rollback() error { + txDriver := tx.config.driver.(*txDriver) + var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { + return txDriver.tx.Rollback() + }) + txDriver.mu.Lock() + hooks := append([]RollbackHook(nil), txDriver.onRollback...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Rollback(tx.ctx, tx) +} + +// OnRollback adds a hook to call on rollback. +func (tx *Tx) OnRollback(f RollbackHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onRollback = append(txDriver.onRollback, f) + txDriver.mu.Unlock() +} + +// Client returns a Client that binds to current transaction. +func (tx *Tx) Client() *Client { + tx.clientOnce.Do(func() { + tx.client = &Client{config: tx.config} + tx.client.init() + }) + return tx.client +} + +func (tx *Tx) init() { + tx.User = NewUserClient(tx.config) +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: User.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that txDriver is not goroutine safe. +type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // tx is the underlying transaction. + tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook +} + +// newTx creates a new transactional driver. +func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. 
+func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error { + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. +func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error { + return tx.tx.Query(ctx, query, args, v) +} + +var _ dialect.Driver = (*txDriver)(nil) diff --git a/internal/ent/user.go b/internal/ent/user.go new file mode 100644 index 0000000..0f5c8e3 --- /dev/null +++ b/internal/ent/user.go @@ -0,0 +1,128 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "framework_v2/internal/ent/user" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" +) + +// User is the model entity for the User schema. +type User struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*User) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case user.FieldID: + values[i] = new(sql.NullInt64) + case user.FieldName: + values[i] = new(sql.NullString) + case user.FieldCreatedAt, user.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the User fields. 
+func (u *User) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case user.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + u.ID = int(value.Int64) + case user.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + u.Name = value.String + } + case user.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + u.CreatedAt = value.Time + } + case user.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + u.UpdatedAt = value.Time + } + default: + u.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the User. +// This includes values selected through modifiers, order, etc. +func (u *User) Value(name string) (ent.Value, error) { + return u.selectValues.Get(name) +} + +// Update returns a builder for updating this User. +// Note that you need to call User.Unwrap() before calling this method if this User +// was returned from a transaction, and the transaction was committed or rolled back. +func (u *User) Update() *UserUpdateOne { + return NewUserClient(u.config).UpdateOne(u) +} + +// Unwrap unwraps the User entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (u *User) Unwrap() *User { + _tx, ok := u.config.driver.(*txDriver) + if !ok { + panic("ent: User is not a transactional entity") + } + u.config.driver = _tx.drv + return u +} + +// String implements the fmt.Stringer. +func (u *User) String() string { + var builder strings.Builder + builder.WriteString("User(") + builder.WriteString(fmt.Sprintf("id=%v, ", u.ID)) + builder.WriteString("name=") + builder.WriteString(u.Name) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(u.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(u.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// Users is a parsable slice of User. +type Users []*User diff --git a/internal/ent/user/user.go b/internal/ent/user/user.go new file mode 100644 index 0000000..4fb450f --- /dev/null +++ b/internal/ent/user/user.go @@ -0,0 +1,72 @@ +// Code generated by ent, DO NOT EDIT. + +package user + +import ( + "time" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the user type in the database. + Label = "user" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // Table holds the table name of the user in the database. 
+ Table = "users" +) + +// Columns holds all SQL columns for user fields. +var Columns = []string{ + FieldID, + FieldName, + FieldCreatedAt, + FieldUpdatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time +) + +// OrderOption defines the ordering options for the User queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} diff --git a/internal/ent/user/where.go b/internal/ent/user/where.go new file mode 100644 index 0000000..89a6f4e --- /dev/null +++ b/internal/ent/user/where.go @@ -0,0 +1,230 @@ +// Code generated by ent, DO NOT EDIT. + +package user + +import ( + "framework_v2/internal/ent/predicate" + "time" + + "entgo.io/ent/dialect/sql" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.User { + return predicate.User(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.User { + return predicate.User(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.User { + return predicate.User(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.User { + return predicate.User(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.User { + return predicate.User(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.User { + return predicate.User(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.User { + return predicate.User(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.User { + return predicate.User(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.User { + return predicate.User(sql.FieldLTE(FieldID, id)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldName, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. 
It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldName, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. 
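These predicate helpers compose directly into the query, update and delete builders. A sketch of combining them, assuming client and ctx as above plus imports of the generated user package and entgo.io/ent/dialect/sql:

	since := time.Now().AddDate(0, -1, 0)
	recent, err := client.User.Query().
		Where(
			user.NameContainsFold("ali"),
			user.CreatedAtGT(since),
		).
		Order(user.ByCreatedAt(sql.OrderDesc())).
		All(ctx)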
+func CreatedAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.User) predicate.User { + return predicate.User(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.User) predicate.User { + return predicate.User(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.User) predicate.User { + return predicate.User(sql.NotPredicates(p)) +} diff --git a/internal/ent/user_create.go b/internal/ent/user_create.go new file mode 100644 index 0000000..b4e9bba --- /dev/null +++ b/internal/ent/user_create.go @@ -0,0 +1,240 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "framework_v2/internal/ent/user" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// UserCreate is the builder for creating a User entity. +type UserCreate struct { + config + mutation *UserMutation + hooks []Hook +} + +// SetName sets the "name" field. +func (uc *UserCreate) SetName(s string) *UserCreate { + uc.mutation.SetName(s) + return uc +} + +// SetCreatedAt sets the "created_at" field. +func (uc *UserCreate) SetCreatedAt(t time.Time) *UserCreate { + uc.mutation.SetCreatedAt(t) + return uc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (uc *UserCreate) SetNillableCreatedAt(t *time.Time) *UserCreate { + if t != nil { + uc.SetCreatedAt(*t) + } + return uc +} + +// SetUpdatedAt sets the "updated_at" field. +func (uc *UserCreate) SetUpdatedAt(t time.Time) *UserCreate { + uc.mutation.SetUpdatedAt(t) + return uc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. 
+func (uc *UserCreate) SetNillableUpdatedAt(t *time.Time) *UserCreate { + if t != nil { + uc.SetUpdatedAt(*t) + } + return uc +} + +// Mutation returns the UserMutation object of the builder. +func (uc *UserCreate) Mutation() *UserMutation { + return uc.mutation +} + +// Save creates the User in the database. +func (uc *UserCreate) Save(ctx context.Context) (*User, error) { + uc.defaults() + return withHooks(ctx, uc.sqlSave, uc.mutation, uc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (uc *UserCreate) SaveX(ctx context.Context) *User { + v, err := uc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (uc *UserCreate) Exec(ctx context.Context) error { + _, err := uc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (uc *UserCreate) ExecX(ctx context.Context) { + if err := uc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (uc *UserCreate) defaults() { + if _, ok := uc.mutation.CreatedAt(); !ok { + v := user.DefaultCreatedAt() + uc.mutation.SetCreatedAt(v) + } + if _, ok := uc.mutation.UpdatedAt(); !ok { + v := user.DefaultUpdatedAt() + uc.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (uc *UserCreate) check() error { + if _, ok := uc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "User.name"`)} + } + if _, ok := uc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "User.created_at"`)} + } + if _, ok := uc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "User.updated_at"`)} + } + return nil +} + +func (uc *UserCreate) sqlSave(ctx context.Context) (*User, error) { + if err := uc.check(); err != nil { + return nil, err + } + _node, _spec := uc.createSpec() + if err := sqlgraph.CreateNode(ctx, uc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + uc.mutation.id = &_node.ID + uc.mutation.done = true + return _node, nil +} + +func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { + var ( + _node = &User{config: uc.config} + _spec = sqlgraph.NewCreateSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt)) + ) + if value, ok := uc.mutation.Name(); ok { + _spec.SetField(user.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := uc.mutation.CreatedAt(); ok { + _spec.SetField(user.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := uc.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + return _node, _spec +} + +// UserCreateBulk is the builder for creating many User entities in bulk. +type UserCreateBulk struct { + config + err error + builders []*UserCreate +} + +// Save creates the User entities in the database. 
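UserCreateBulk is obtained from the generated UserClient via CreateBulk. A sketch, again assuming client and ctx are in scope; unset created_at/updated_at fall back to the schema defaults (time.Now):

	names := []string{"alice", "bob"}
	builders := make([]*ent.UserCreate, len(names))
	for i, n := range names {
		builders[i] = client.User.Create().SetName(n)
	}
	users, err := client.User.CreateBulk(builders...).Save(ctx)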
+func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { + if ucb.err != nil { + return nil, ucb.err + } + specs := make([]*sqlgraph.CreateSpec, len(ucb.builders)) + nodes := make([]*User, len(ucb.builders)) + mutators := make([]Mutator, len(ucb.builders)) + for i := range ucb.builders { + func(i int, root context.Context) { + builder := ucb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*UserMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, ucb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, ucb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, ucb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (ucb *UserCreateBulk) SaveX(ctx context.Context) []*User { + v, err := ucb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ucb *UserCreateBulk) Exec(ctx context.Context) error { + _, err := ucb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ucb *UserCreateBulk) ExecX(ctx context.Context) { + if err := ucb.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/internal/ent/user_delete.go b/internal/ent/user_delete.go new file mode 100644 index 0000000..d7062fa --- /dev/null +++ b/internal/ent/user_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "framework_v2/internal/ent/predicate" + "framework_v2/internal/ent/user" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// UserDelete is the builder for deleting a User entity. +type UserDelete struct { + config + hooks []Hook + mutation *UserMutation +} + +// Where appends a list predicates to the UserDelete builder. +func (ud *UserDelete) Where(ps ...predicate.User) *UserDelete { + ud.mutation.Where(ps...) + return ud +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (ud *UserDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, ud.sqlExec, ud.mutation, ud.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
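A sketch of the delete builder in use, with client and ctx assumed as before; Exec reports how many rows were removed:

	n, err := client.User.Delete().
		Where(user.NameEQ("anonymous")).
		Exec(ctx)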
+func (ud *UserDelete) ExecX(ctx context.Context) int { + n, err := ud.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ud *UserDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt)) + if ps := ud.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ud.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ud.mutation.done = true + return affected, err +} + +// UserDeleteOne is the builder for deleting a single User entity. +type UserDeleteOne struct { + ud *UserDelete +} + +// Where appends a list predicates to the UserDelete builder. +func (udo *UserDeleteOne) Where(ps ...predicate.User) *UserDeleteOne { + udo.ud.mutation.Where(ps...) + return udo +} + +// Exec executes the deletion query. +func (udo *UserDeleteOne) Exec(ctx context.Context) error { + n, err := udo.ud.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{user.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (udo *UserDeleteOne) ExecX(ctx context.Context) { + if err := udo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/internal/ent/user_query.go b/internal/ent/user_query.go new file mode 100644 index 0000000..64d1f8e --- /dev/null +++ b/internal/ent/user_query.go @@ -0,0 +1,526 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "framework_v2/internal/ent/predicate" + "framework_v2/internal/ent/user" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// UserQuery is the builder for querying User entities. +type UserQuery struct { + config + ctx *QueryContext + order []user.OrderOption + inters []Interceptor + predicates []predicate.User + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the UserQuery builder. +func (uq *UserQuery) Where(ps ...predicate.User) *UserQuery { + uq.predicates = append(uq.predicates, ps...) + return uq +} + +// Limit the number of records to be returned by this query. +func (uq *UserQuery) Limit(limit int) *UserQuery { + uq.ctx.Limit = &limit + return uq +} + +// Offset to start from. +func (uq *UserQuery) Offset(offset int) *UserQuery { + uq.ctx.Offset = &offset + return uq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (uq *UserQuery) Unique(unique bool) *UserQuery { + uq.ctx.Unique = &unique + return uq +} + +// Order specifies how the records should be ordered. +func (uq *UserQuery) Order(o ...user.OrderOption) *UserQuery { + uq.order = append(uq.order, o...) + return uq +} + +// First returns the first User entity from the query. +// Returns a *NotFoundError when no User was found. +func (uq *UserQuery) First(ctx context.Context) (*User, error) { + nodes, err := uq.Limit(1).All(setContextOp(ctx, uq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{user.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (uq *UserQuery) FirstX(ctx context.Context) *User { + node, err := uq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first User ID from the query. +// Returns a *NotFoundError when no User ID was found. +func (uq *UserQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = uq.Limit(1).IDs(setContextOp(ctx, uq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{user.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (uq *UserQuery) FirstIDX(ctx context.Context) int { + id, err := uq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single User entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one User entity is found. +// Returns a *NotFoundError when no User entities are found. +func (uq *UserQuery) Only(ctx context.Context) (*User, error) { + nodes, err := uq.Limit(2).All(setContextOp(ctx, uq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{user.Label} + default: + return nil, &NotSingularError{user.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (uq *UserQuery) OnlyX(ctx context.Context) *User { + node, err := uq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only User ID in the query. +// Returns a *NotSingularError when more than one User ID is found. +// Returns a *NotFoundError when no entities are found. +func (uq *UserQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = uq.Limit(2).IDs(setContextOp(ctx, uq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{user.Label} + default: + err = &NotSingularError{user.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (uq *UserQuery) OnlyIDX(ctx context.Context) int { + id, err := uq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Users. +func (uq *UserQuery) All(ctx context.Context) ([]*User, error) { + ctx = setContextOp(ctx, uq.ctx, "All") + if err := uq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*User, *UserQuery]() + return withInterceptors[[]*User](ctx, uq, qr, uq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (uq *UserQuery) AllX(ctx context.Context) []*User { + nodes, err := uq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of User IDs. +func (uq *UserQuery) IDs(ctx context.Context) (ids []int, err error) { + if uq.ctx.Unique == nil && uq.path != nil { + uq.Unique(true) + } + ctx = setContextOp(ctx, uq.ctx, "IDs") + if err = uq.Select(user.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (uq *UserQuery) IDsX(ctx context.Context) []int { + ids, err := uq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
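Count and Exist are the cheap forms of the query builder; a sketch under the same assumptions:

	total, err := client.User.Query().
		Where(user.NameHasPrefix("a")).
		Count(ctx)

	known, err := client.User.Query().
		Where(user.Name("alice")).
		Exist(ctx)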
+func (uq *UserQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, uq.ctx, "Count") + if err := uq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, uq, querierCount[*UserQuery](), uq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (uq *UserQuery) CountX(ctx context.Context) int { + count, err := uq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (uq *UserQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, uq.ctx, "Exist") + switch _, err := uq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (uq *UserQuery) ExistX(ctx context.Context) bool { + exist, err := uq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UserQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (uq *UserQuery) Clone() *UserQuery { + if uq == nil { + return nil + } + return &UserQuery{ + config: uq.config, + ctx: uq.ctx.Clone(), + order: append([]user.OrderOption{}, uq.order...), + inters: append([]Interceptor{}, uq.inters...), + predicates: append([]predicate.User{}, uq.predicates...), + // clone intermediate query. + sql: uq.sql.Clone(), + path: uq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.User.Query(). +// GroupBy(user.FieldName). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy { + uq.ctx.Fields = append([]string{field}, fields...) + grbuild := &UserGroupBy{build: uq} + grbuild.flds = &uq.ctx.Fields + grbuild.label = user.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Name string `json:"name,omitempty"` +// } +// +// client.User.Query(). +// Select(user.FieldName). +// Scan(ctx, &v) +func (uq *UserQuery) Select(fields ...string) *UserSelect { + uq.ctx.Fields = append(uq.ctx.Fields, fields...) + sbuild := &UserSelect{UserQuery: uq} + sbuild.label = user.Label + sbuild.flds, sbuild.scan = &uq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserSelect configured with the given aggregations. +func (uq *UserQuery) Aggregate(fns ...AggregateFunc) *UserSelect { + return uq.Select().Aggregate(fns...) 
+} + +func (uq *UserQuery) prepareQuery(ctx context.Context) error { + for _, inter := range uq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, uq); err != nil { + return err + } + } + } + for _, f := range uq.ctx.Fields { + if !user.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if uq.path != nil { + prev, err := uq.path(ctx) + if err != nil { + return err + } + uq.sql = prev + } + return nil +} + +func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, error) { + var ( + nodes = []*User{} + _spec = uq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*User).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &User{config: uq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, uq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (uq *UserQuery) sqlCount(ctx context.Context) (int, error) { + _spec := uq.querySpec() + _spec.Node.Columns = uq.ctx.Fields + if len(uq.ctx.Fields) > 0 { + _spec.Unique = uq.ctx.Unique != nil && *uq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, uq.driver, _spec) +} + +func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt)) + _spec.From = uq.sql + if unique := uq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if uq.path != nil { + _spec.Unique = true + } + if fields := uq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) + for i := range fields { + if fields[i] != user.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := uq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := uq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := uq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := uq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(uq.driver.Dialect()) + t1 := builder.Table(user.Table) + columns := uq.ctx.Fields + if len(columns) == 0 { + columns = user.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if uq.sql != nil { + selector = uq.sql + selector.Select(selector.Columns(columns...)...) + } + if uq.ctx.Unique != nil && *uq.ctx.Unique { + selector.Distinct() + } + for _, p := range uq.predicates { + p(selector) + } + for _, p := range uq.order { + p(selector) + } + if offset := uq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := uq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// UserGroupBy is the group-by builder for User entities. 
+type UserGroupBy struct { + selector + build *UserQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (ugb *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy { + ugb.fns = append(ugb.fns, fns...) + return ugb +} + +// Scan applies the selector query and scans the result into the given value. +func (ugb *UserGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ugb.build.ctx, "GroupBy") + if err := ugb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserQuery, *UserGroupBy](ctx, ugb.build, ugb, ugb.build.inters, v) +} + +func (ugb *UserGroupBy) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(ugb.fns)) + for _, fn := range ugb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*ugb.flds)+len(ugb.fns)) + for _, f := range *ugb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*ugb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ugb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserSelect is the builder for selecting fields of User entities. +type UserSelect struct { + *UserQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (us *UserSelect) Aggregate(fns ...AggregateFunc) *UserSelect { + us.fns = append(us.fns, fns...) + return us +} + +// Scan applies the selector query and scans the result into the given value. +func (us *UserSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, us.ctx, "Select") + if err := us.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserQuery, *UserSelect](ctx, us.UserQuery, us, us.inters, v) +} + +func (us *UserSelect) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(us.fns)) + for _, fn := range us.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*us.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := us.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/internal/ent/user_update.go b/internal/ent/user_update.go new file mode 100644 index 0000000..44f730f --- /dev/null +++ b/internal/ent/user_update.go @@ -0,0 +1,278 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "framework_v2/internal/ent/predicate" + "framework_v2/internal/ent/user" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" +) + +// UserUpdate is the builder for updating User entities. +type UserUpdate struct { + config + hooks []Hook + mutation *UserMutation +} + +// Where appends a list predicates to the UserUpdate builder. +func (uu *UserUpdate) Where(ps ...predicate.User) *UserUpdate { + uu.mutation.Where(ps...) 
+ return uu +} + +// SetName sets the "name" field. +func (uu *UserUpdate) SetName(s string) *UserUpdate { + uu.mutation.SetName(s) + return uu +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (uu *UserUpdate) SetNillableName(s *string) *UserUpdate { + if s != nil { + uu.SetName(*s) + } + return uu +} + +// SetCreatedAt sets the "created_at" field. +func (uu *UserUpdate) SetCreatedAt(t time.Time) *UserUpdate { + uu.mutation.SetCreatedAt(t) + return uu +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (uu *UserUpdate) SetNillableCreatedAt(t *time.Time) *UserUpdate { + if t != nil { + uu.SetCreatedAt(*t) + } + return uu +} + +// SetUpdatedAt sets the "updated_at" field. +func (uu *UserUpdate) SetUpdatedAt(t time.Time) *UserUpdate { + uu.mutation.SetUpdatedAt(t) + return uu +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (uu *UserUpdate) SetNillableUpdatedAt(t *time.Time) *UserUpdate { + if t != nil { + uu.SetUpdatedAt(*t) + } + return uu +} + +// Mutation returns the UserMutation object of the builder. +func (uu *UserUpdate) Mutation() *UserMutation { + return uu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (uu *UserUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, uu.sqlSave, uu.mutation, uu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (uu *UserUpdate) SaveX(ctx context.Context) int { + affected, err := uu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (uu *UserUpdate) Exec(ctx context.Context) error { + _, err := uu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (uu *UserUpdate) ExecX(ctx context.Context) { + if err := uu.Exec(ctx); err != nil { + panic(err) + } +} + +func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt)) + if ps := uu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := uu.mutation.Name(); ok { + _spec.SetField(user.FieldName, field.TypeString, value) + } + if value, ok := uu.mutation.CreatedAt(); ok { + _spec.SetField(user.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := uu.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + } + if n, err = sqlgraph.UpdateNodes(ctx, uu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{user.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + uu.mutation.done = true + return n, nil +} + +// UserUpdateOne is the builder for updating a single User entity. +type UserUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserMutation +} + +// SetName sets the "name" field. +func (uuo *UserUpdateOne) SetName(s string) *UserUpdateOne { + uuo.mutation.SetName(s) + return uuo +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableName(s *string) *UserUpdateOne { + if s != nil { + uuo.SetName(*s) + } + return uuo +} + +// SetCreatedAt sets the "created_at" field. 
+func (uuo *UserUpdateOne) SetCreatedAt(t time.Time) *UserUpdateOne { + uuo.mutation.SetCreatedAt(t) + return uuo +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableCreatedAt(t *time.Time) *UserUpdateOne { + if t != nil { + uuo.SetCreatedAt(*t) + } + return uuo +} + +// SetUpdatedAt sets the "updated_at" field. +func (uuo *UserUpdateOne) SetUpdatedAt(t time.Time) *UserUpdateOne { + uuo.mutation.SetUpdatedAt(t) + return uuo +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableUpdatedAt(t *time.Time) *UserUpdateOne { + if t != nil { + uuo.SetUpdatedAt(*t) + } + return uuo +} + +// Mutation returns the UserMutation object of the builder. +func (uuo *UserUpdateOne) Mutation() *UserMutation { + return uuo.mutation +} + +// Where appends a list predicates to the UserUpdate builder. +func (uuo *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne { + uuo.mutation.Where(ps...) + return uuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (uuo *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne { + uuo.fields = append([]string{field}, fields...) + return uuo +} + +// Save executes the query and returns the updated User entity. +func (uuo *UserUpdateOne) Save(ctx context.Context) (*User, error) { + return withHooks(ctx, uuo.sqlSave, uuo.mutation, uuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (uuo *UserUpdateOne) SaveX(ctx context.Context) *User { + node, err := uuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (uuo *UserUpdateOne) Exec(ctx context.Context) error { + _, err := uuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (uuo *UserUpdateOne) ExecX(ctx context.Context) { + if err := uuo.Exec(ctx); err != nil { + panic(err) + } +} + +func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeInt)) + id, ok := uuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "User.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := uuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) + for _, f := range fields { + if !user.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != user.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := uuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := uuo.mutation.Name(); ok { + _spec.SetField(user.FieldName, field.TypeString, value) + } + if value, ok := uuo.mutation.CreatedAt(); ok { + _spec.SetField(user.FieldCreatedAt, field.TypeTime, value) + } + if value, ok := uuo.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + } + _node = &User{config: uuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, uuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{user.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + uuo.mutation.done = true + return _node, nil +} diff --git a/internal/handlers/controllers/user/main.go b/internal/handlers/controllers/user/main.go index ab30b71..9e8b162 100644 --- a/internal/handlers/controllers/user/main.go +++ b/internal/handlers/controllers/user/main.go @@ -1,22 +1,15 @@ package user import ( - "fmt" - "framework_v2/internal/providers/auth" + "framework_v2/internal/logic" "github.com/gin-gonic/gin" ) -//func CurrentUser(req *consts.Request) { -// req.Http.JSON(http.StatusOK, gin.H{ -// "req": req.User.Sub, -// }) -// -//} +var AuthLogic = logic.NewAuthLogic() -func CurrentUser(c *gin.Context, user *auth.User) { - fmt.Println("CurrentUser", user) +func CurrentUser(c *gin.Context) { c.JSON(200, gin.H{ "IP": c.ClientIP(), - "User": user.Token.Sub, + "User": AuthLogic.GinUser(c).Valid, }) } diff --git a/internal/logic/auth.go b/internal/logic/auth.go new file mode 100644 index 0000000..890acfa --- /dev/null +++ b/internal/logic/auth.go @@ -0,0 +1,79 @@ +package logic + +import ( + "errors" + "framework_v2/consts" + "framework_v2/internal/providers" + "framework_v2/internal/providers/jwks" + "framework_v2/types" + "github.com/gin-gonic/gin" + "github.com/mitchellh/mapstructure" + "go.uber.org/zap" + "strings" +) + +type AuthLogic struct { +} + +const AnonymousUser = "anonymous" + +var ( + ErrNotValidToken = errors.New("无效的 JWT 令牌。") + ErrJWTFormatError = errors.New("JWT 格式错误。") + ErrNotBearerType = errors.New("不是 Bearer 类型。") + ErrEmptyResponse = errors.New("我们的服务器返回了空请求,可能某些环节出了问题。") + config = *providers.MustGet[providers.GlobalConfig]() + logger = *providers.MustGet[zap.Logger]() +) + +func NewAuthLogic() *AuthLogic { + return &AuthLogic{} +} + +func (a *AuthLogic) GinMiddlewareAuth(c *gin.Context) (*types.User, error) { + var sub = 
AnonymousUser + var jwtIdToken = &types.User{} + + if config.DebugMode.Enable { + jwtIdToken.Token.Sub = sub + jwtIdToken.Valid = true + return jwtIdToken, nil + } else { + authorization := c.Request.Header.Get(consts.AuthHeader) + + if authorization == "" { + return nil, ErrJWTFormatError + } + + authSplit := strings.Split(authorization, " ") + if len(authSplit) != 2 { + return nil, ErrJWTFormatError + } + + if authSplit[0] != consts.AuthPrefix { + return nil, ErrNotBearerType + } + + token, err := jwks.ParseJWT(authSplit[1]) + if err != nil { + return nil, ErrJWTFormatError + } + sub, err = token.Claims.GetSubject() + if err != nil { + return nil, ErrNotValidToken + } + + err = mapstructure.Decode(token.Claims, &jwtIdToken.Token) + if err != nil { + logger.Error("Failed to map token claims to JwtIDToken struct.\nError: " + err.Error()) + return nil, nil + } + } + + return jwtIdToken, nil +} + +func (a *AuthLogic) GinUser(c *gin.Context) *types.User { + user, _ := c.Get(consts.AuthMiddlewareKey) + return user.(*types.User) +} diff --git a/internal/middleware/grpc/auth.go b/internal/middleware/grpc/auth.go index e056f83..a04d83c 100644 --- a/internal/middleware/grpc/auth.go +++ b/internal/middleware/grpc/auth.go @@ -2,10 +2,8 @@ package grpc import ( "context" - auth2 "framework_v2/internal/providers/auth" - "framework_v2/internal/providers/config" "framework_v2/internal/providers/jwks" - "framework_v2/internal/providers/logger" + "framework_v2/types" "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/auth" "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" "github.com/mitchellh/mapstructure" @@ -20,9 +18,9 @@ func JwtAuth(ctx context.Context) (context.Context, error) { } sub := "anonymous" - var jwtIdToken *auth2.User + var jwtIdToken *types.User - if config.Config.DebugMode.Enable { + if config.DebugMode.Enable { jwtIdToken.Token.Sub = sub } else { token, err := jwks.ParseJWT(tokenString) @@ -36,7 +34,7 @@ func JwtAuth(ctx context.Context) (context.Context, error) { err = mapstructure.Decode(token.Claims, &jwtIdToken) if err != nil { - logger.Logger.Error("Failed to map token claims to JwtIDToken struct.\nError: " + err.Error()) + logger.Error("Failed to map token claims to JwtIDToken struct.\nError: " + err.Error()) return nil, err } } diff --git a/internal/middleware/grpc/init.go b/internal/middleware/grpc/init.go new file mode 100644 index 0000000..5500544 --- /dev/null +++ b/internal/middleware/grpc/init.go @@ -0,0 +1,53 @@ +package grpc + +import ( + "framework_v2/internal/providers" + "go.uber.org/zap" +) + +var logger *zap.Logger +var config *providers.GlobalConfig + +func init() { + config = providers.MustGet[providers.GlobalConfig]() + logger = providers.MustGet[zap.Logger]() +} + +//else { +// // get authorization header +// authorization := c.Request.Header.Get("Authorization") +// +// if authorization == "" { +// helper.ResponseError(c, http.StatusUnauthorized, ErrJWTFormatError) +// return nil +// } +// +// authSplit := strings.Split(authorization, " ") +// if len(authSplit) != 2 { +// helper.ResponseError(c, http.StatusUnauthorized, ErrJWTFormatError) +// return nil +// } +// +// if authSplit[0] != "Bearer" { +// helper.ResponseError(c, http.StatusUnauthorized, ErrNotBearerType) +// return nil +// } +// +// token, err := jwks.ParseJWT(authSplit[1]) +// if err != nil { +// helper.ResponseError(c, http.StatusUnauthorized, ErrJWTFormatError) +// return nil +// } +// sub, err = token.Claims.GetSubject() +// if err != nil { +// 
helper.ResponseError(c, http.StatusUnauthorized, ErrNotValidToken) +// return nil +// } +// +// err = mapstructure.Decode(token.Claims, &jwtIdToken.Token) +// if err != nil { +// facade.Logger.Error("Failed to map token claims to JwtIDToken struct.\nError: " + err.Error()) +// helper.ResponseError(c, http.StatusUnauthorized, ErrNotValidToken) +// return nil +// } +//} diff --git a/internal/middleware/grpc/log.go b/internal/middleware/grpc/log.go index 0bc4a4b..a274232 100644 --- a/internal/middleware/grpc/log.go +++ b/internal/middleware/grpc/log.go @@ -3,7 +3,6 @@ package grpc import ( "context" "fmt" - "framework_v2/internal/providers/logger" "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" "go.uber.org/zap" ) @@ -28,7 +27,7 @@ func ZapLogInterceptor() logging.Logger { } } - log := logger.Logger.WithOptions(zap.AddCallerSkip(1)).With(f...) + log := logger.WithOptions(zap.AddCallerSkip(1)).With(f...) switch lvl { case logging.LevelDebug: diff --git a/internal/middleware/http/init.go b/internal/middleware/http/init.go new file mode 100644 index 0000000..4c79614 --- /dev/null +++ b/internal/middleware/http/init.go @@ -0,0 +1,53 @@ +package http + +import ( + "framework_v2/internal/providers" + "go.uber.org/zap" +) + +var logger *zap.Logger +var config *providers.GlobalConfig + +func init() { + config = providers.MustGet[providers.GlobalConfig]() + logger = providers.MustGet[zap.Logger]() +} + +//else { +// // get authorization header +// authorization := c.Request.Header.Get("Authorization") +// +// if authorization == "" { +// helper.ResponseError(c, http.StatusUnauthorized, ErrJWTFormatError) +// return nil +// } +// +// authSplit := strings.Split(authorization, " ") +// if len(authSplit) != 2 { +// helper.ResponseError(c, http.StatusUnauthorized, ErrJWTFormatError) +// return nil +// } +// +// if authSplit[0] != "Bearer" { +// helper.ResponseError(c, http.StatusUnauthorized, ErrNotBearerType) +// return nil +// } +// +// token, err := jwks.ParseJWT(authSplit[1]) +// if err != nil { +// helper.ResponseError(c, http.StatusUnauthorized, ErrJWTFormatError) +// return nil +// } +// sub, err = token.Claims.GetSubject() +// if err != nil { +// helper.ResponseError(c, http.StatusUnauthorized, ErrNotValidToken) +// return nil +// } +// +// err = mapstructure.Decode(token.Claims, &jwtIdToken.Token) +// if err != nil { +// facade.Logger.Error("Failed to map token claims to JwtIDToken struct.\nError: " + err.Error()) +// helper.ResponseError(c, http.StatusUnauthorized, ErrNotValidToken) +// return nil +// } +//} diff --git a/internal/middleware/http/jwt.go b/internal/middleware/http/jwt.go index 59bac59..2d0ff9f 100644 --- a/internal/middleware/http/jwt.go +++ b/internal/middleware/http/jwt.go @@ -1,72 +1,23 @@ package http import ( - "errors" - "framework_v2/internal/providers/auth" - "framework_v2/internal/providers/config" - "framework_v2/internal/providers/facade" + "framework_v2/consts" + "framework_v2/internal/logic" "framework_v2/internal/providers/helper" - "framework_v2/internal/providers/jwks" "github.com/gin-gonic/gin" - "github.com/mitchellh/mapstructure" "net/http" - "strings" ) -const AnonymousUser = "anonymous" +func ValidateUser(c *gin.Context) { + auth := logic.NewAuthLogic() + user, err := auth.GinMiddlewareAuth(c) -var ( - ErrNotValidToken = errors.New("无效的 JWT 令牌。") - ErrJWTFormatError = errors.New("JWT 格式错误。") - ErrNotBearerType = errors.New("不是 Bearer 类型。") - ErrEmptyResponse = errors.New("我们的服务器返回了空请求,可能某些环节出了问题。") -) - -// DIJWTAuth 
用于注入到方法签名中。我觉得下面的代码以后可以优化。 -func DIJWTAuth(c *gin.Context) *auth.User { - var sub = AnonymousUser - var jwtIdToken = &auth.User{} - - if config.Config.DebugMode.Enable { - jwtIdToken.Token.Sub = sub - } else { - // get authorization header - authorization := c.Request.Header.Get("Authorization") - - if authorization == "" { - helper.ResponseError(c, http.StatusUnauthorized, ErrJWTFormatError) - return nil - } - - authSplit := strings.Split(authorization, " ") - if len(authSplit) != 2 { - helper.ResponseError(c, http.StatusUnauthorized, ErrJWTFormatError) - return nil - } - - if authSplit[0] != "Bearer" { - helper.ResponseError(c, http.StatusUnauthorized, ErrNotBearerType) - return nil - } - - token, err := jwks.ParseJWT(authSplit[1]) - if err != nil { - helper.ResponseError(c, http.StatusUnauthorized, ErrJWTFormatError) - return nil - } - sub, err = token.Claims.GetSubject() - if err != nil { - helper.ResponseError(c, http.StatusUnauthorized, ErrNotValidToken) - return nil - } - - err = mapstructure.Decode(token.Claims, &jwtIdToken.Token) - if err != nil { - facade.Logger.Error("Failed to map token claims to JwtIDToken struct.\nError: " + err.Error()) - helper.ResponseError(c, http.StatusUnauthorized, ErrNotValidToken) - return nil - } + if err != nil { + c.Abort() + helper.ResponseError(c, http.StatusUnauthorized, err) + return } - return jwtIdToken + c.Set(consts.AuthMiddlewareKey, user) + c.Next() } diff --git a/internal/providers/config.go b/internal/providers/config.go new file mode 100644 index 0000000..a993ad0 --- /dev/null +++ b/internal/providers/config.go @@ -0,0 +1,112 @@ +package providers + +import ( + "github.com/joho/godotenv" + "github.com/kos-v/dsnparser" + "os" + "strings" +) + +// depth 是 .env 文件的搜索深度 +var depth = 8 + +type GlobalConfig struct { + Redis struct { + Addr string + Pass string + } + + DB struct { + DSN string + DSN2 string + Driver string + } + + ListenAddr struct { + GRPC string + HTTP string + } + + JWKS struct { + Url string + } + + DebugMode struct { + Enable bool + } + + S3 struct { + Endpoint string + Bucket string + AccessKeyID string + SecretAccessKey string + UseSSL bool + } +} + +func GetEnv(key string, defaultValue ...string) string { + r := os.Getenv(key) + if len(r) == 0 && len(defaultValue) > 0 { + return defaultValue[0] + } else { + return r + } +} +func GetEnvFilePath() string { + var path string + if os.Getenv("ENV_PATH") != "" { + path = os.Getenv("ENV_PATH") + return path + } + var pathOptions []string + for i := 0; i <= depth; i++ { + pathOptions = append(pathOptions, strings.Repeat("../", i)+".env") + } + for _, p := range pathOptions { + if _, err := os.Stat(p); err == nil { + path = p + break + } + } + return path +} + +func provideConfig() *GlobalConfig { + envPath := GetEnvFilePath() + _ = godotenv.Load(envPath) + + var Config = &GlobalConfig{} + + Config.DB.DSN = GetEnv("DB_DSN") + Config.DB.Driver = "postgres" + Config.Redis.Addr = GetEnv("REDIS_ADDR") + Config.Redis.Pass = GetEnv("REDIS_PASS") + Config.ListenAddr.GRPC = GetEnv("GRPC_LISTEN_ADDR") + Config.ListenAddr.HTTP = GetEnv("HTTP_LISTEN_ADDR") + Config.JWKS.Url = GetEnv("JWKS_URL") + Config.DebugMode.Enable = GetEnv("DEBUG", "false") == "true" + Config.S3.Endpoint = GetEnv("S3_ENDPOINT") + Config.S3.Bucket = GetEnv("S3_BUCKET") + Config.S3.AccessKeyID = GetEnv("S3_ACCESS_KEY_ID") + Config.S3.SecretAccessKey = GetEnv("S3_SECRET_ACCESS_KEY") + Config.S3.UseSSL = GetEnv("S3_USE_SSL", "true") == "true" + + dsn := dsnparser.Parse(Config.DB.DSN) + var dsn2 = "" + + dsn2 += "user=" 
+ dsn.GetUser() + dsn2 += " password=" + dsn.GetPassword() + dsn2 += " dbname=" + dsn.GetPath() + + if dsn.HasParam("sslmode") { + dsn2 += " sslmode=" + dsn.GetParam("sslmode") + } + + Config.DB.DSN2 = dsn2 + + return Config +} + +func init() { + Must(Container.Provide(provideConfig)) +} diff --git a/internal/providers/config/config.go b/internal/providers/config/config.go deleted file mode 100644 index c55b388..0000000 --- a/internal/providers/config/config.go +++ /dev/null @@ -1,72 +0,0 @@ -package config - -import ( - "os" - "strings" -) - -// depth 是 .env 文件的搜索深度 -var depth = 8 - -type defaultConfig struct { - Redis struct { - Addr string - Pass string - } - - DB struct { - DSN string - DSN2 string - Driver string - } - - ListenAddr struct { - GRPC string - HTTP string - } - - JWKS struct { - Url string - } - - DebugMode struct { - Enable bool - } - - S3 struct { - Endpoint string - Bucket string - AccessKeyID string - SecretAccessKey string - UseSSL bool - } -} - -var Config = defaultConfig{} - -func GetEnv(key string, defaultValue ...string) string { - r := os.Getenv(key) - if len(r) == 0 && len(defaultValue) > 0 { - return defaultValue[0] - } else { - return r - } -} -func GetEnvFilePath() string { - var path string - if os.Getenv("ENV_PATH") != "" { - path = os.Getenv("ENV_PATH") - return path - } - var pathOptions []string - for i := 0; i <= depth; i++ { - pathOptions = append(pathOptions, strings.Repeat("../", i)+".env") - } - for _, p := range pathOptions { - if _, err := os.Stat(p); err == nil { - path = p - break - } - } - return path -} diff --git a/internal/providers/config/init.go b/internal/providers/config/init.go deleted file mode 100644 index 8212fe1..0000000 --- a/internal/providers/config/init.go +++ /dev/null @@ -1,38 +0,0 @@ -package config - -import ( - "github.com/joho/godotenv" - "github.com/kos-v/dsnparser" -) - -func InitConfig() { - envPath := GetEnvFilePath() - _ = godotenv.Load(envPath) - - Config.DB.DSN = GetEnv("DB_DSN") - Config.DB.Driver = "postgres" - Config.Redis.Addr = GetEnv("REDIS_ADDR") - Config.Redis.Pass = GetEnv("REDIS_PASS") - Config.ListenAddr.GRPC = GetEnv("GRPC_LISTEN_ADDR") - Config.ListenAddr.HTTP = GetEnv("HTTP_LISTEN_ADDR") - Config.JWKS.Url = GetEnv("JWKS_URL") - Config.DebugMode.Enable = GetEnv("DEBUG", "false") == "true" - Config.S3.Endpoint = GetEnv("S3_ENDPOINT") - Config.S3.Bucket = GetEnv("S3_BUCKET") - Config.S3.AccessKeyID = GetEnv("S3_ACCESS_KEY_ID") - Config.S3.SecretAccessKey = GetEnv("S3_SECRET_ACCESS_KEY") - Config.S3.UseSSL = GetEnv("S3_USE_SSL", "true") == "true" - - dsn := dsnparser.Parse(Config.DB.DSN) - var dsn2 = "" - - dsn2 += "user=" + dsn.GetUser() - dsn2 += " password=" + dsn.GetPassword() - dsn2 += " dbname=" + dsn.GetPath() - - if dsn.HasParam("sslmode") { - dsn2 += " sslmode=" + dsn.GetParam("sslmode") - } - - Config.DB.DSN2 = dsn2 -} diff --git a/internal/providers/container.go b/internal/providers/container.go new file mode 100644 index 0000000..5c5e7c4 --- /dev/null +++ b/internal/providers/container.go @@ -0,0 +1,20 @@ +package providers + +import "go.uber.org/dig" + +var Container = dig.New() + +func Must(err error) { + if err != nil { + panic(err) + } +} + +// MustGet 方法,用于封装 Invoke 函数,然后将结果返回,类型是 T +func MustGet[T any]() *T { + var t T + Must(Container.Invoke(func(e *T) { + t = *e + })) + return &t +} diff --git a/internal/providers/ent.go b/internal/providers/ent.go new file mode 100644 index 0000000..0231f51 --- /dev/null +++ b/internal/providers/ent.go @@ -0,0 +1,26 @@ +package providers + +import ( + 
"framework_v2/internal/ent" + _ "github.com/lib/pq" +) + +func providerEnt() (*ent.Client, error) { + var config = MustGet[GlobalConfig]() + return ent.Open( + config.DB.Driver, + config.DB.DSN2, + ) +} +func init() { + Must(Container.Provide(providerEnt)) + + // + //if err := Ent.Schema.Create( + // context.Background(), + // migrate.WithDropIndex(true), + // migrate.WithDropColumn(true), + //); err != nil { + // Logger.Fatal("failed creating schema resources", zap.Error(err)) + //} +} diff --git a/internal/providers/ent/ent.go b/internal/providers/ent/ent.go deleted file mode 100644 index 08bd458..0000000 --- a/internal/providers/ent/ent.go +++ /dev/null @@ -1,31 +0,0 @@ -package ent - -import ( - "errors" - "framework_v2/internal/providers/logger" - _ "github.com/lib/pq" - "go.uber.org/zap" -) - -func InitEnt() { - var err error - // uncomment after run make ent - //facade.Ent, err = ent.Open( - // config.Config.DB.Driver, - // config.Config.DB.DSN2, - //) - // remove after run make ent - err = errors.New("ent not implemented") - - if err != nil { - logger.Logger.Fatal("failed opening connection to db", zap.Error(err)) - } - // - //if err := Ent.Schema.Create( - // context.Background(), - // migrate.WithDropIndex(true), - // migrate.WithDropColumn(true), - //); err != nil { - // Logger.Fatal("failed creating schema resources", zap.Error(err)) - //} -} diff --git a/internal/providers/facade/bind.go b/internal/providers/facade/bind.go deleted file mode 100644 index 46c72ab..0000000 --- a/internal/providers/facade/bind.go +++ /dev/null @@ -1,21 +0,0 @@ -package facade - -import ( - "github.com/gin-gonic/gin" - "github.com/minio/minio-go/v7" - "github.com/redis/go-redis/v9" - "go.uber.org/zap" -) - -var ( - // uncommit it after run make ent - //Ent *ent.Client - Redis *redis.Client - Logger *zap.Logger - Router *gin.Engine - S3 *minio.Client -) - -func init() { - //panic("ent not impl, if impl, uncommit this line") -} diff --git a/internal/providers/gin.go b/internal/providers/gin.go new file mode 100644 index 0000000..fceeab4 --- /dev/null +++ b/internal/providers/gin.go @@ -0,0 +1,100 @@ +package providers + +import ( + "github.com/gin-gonic/gin" +) + +func providerGin() *gin.Engine { + var config = MustGet[GlobalConfig]() + if config.DebugMode.Enable { + gin.SetMode(gin.DebugMode) + } else { + gin.SetMode(gin.ReleaseMode) + } + + return gin.Default() +} + +func init() { + Must(Container.Provide(providerGin)) +} + +// +//func GET(relativePath string, handlers ...interface{}) { +// router.GET(relativePath, func(c *gin.Context) { +// doHandler(c, handlers...) +// }) +//} +// +//func POST(relativePath string, handlers ...interface{}) { +// router.POST(relativePath, func(c *gin.Context) { +// doHandler(c, handlers...) +// }) +//} +// +//func PUT(relativePath string, handlers ...interface{}) { +// router.PUT(relativePath, func(c *gin.Context) { +// doHandler(c, handlers...) +// }) +//} +// +//func PATCH(relativePath string, handlers ...interface{}) { +// router.PATCH(relativePath, func(c *gin.Context) { +// doHandler(c, handlers...) +// }) +//} +// +//func DELETE(relativePath string, handlers ...interface{}) { +// router.DELETE(relativePath, func(c *gin.Context) { +// doHandler(c, handlers...) 
+// }) +//} +// +//func doHandler(c *gin.Context, handlers ...interface{}) { +// for _, handler := range handlers { +// if c.IsAborted() { +// // 是否已经响应 +// if c.Writer.Written() { +// return +// } else { +// helper.ResponseError(c, http.StatusBadRequest, http2.ErrEmptyResponse) +// } +// +// return +// } +// +// wrapHandler(c, handler) +// } +//} + +// +//func wrapHandler(c *gin.Context, f interface{}) { +// fnValue := reflect.ValueOf(f) +// fnType := fnValue.Type() +// +// var args []reflect.Value +// +// for i := 0; i < fnType.NumIn(); i++ { +// argType := fnType.In(i) +// +// var argValue reflect.Value +// switch argType { +// case reflect.TypeOf((*gin.Context)(nil)): +// argValue = reflect.ValueOf(c) +// case reflect.TypeOf((*types.User)(nil)): +// userInfo := http2.DIJWTAuth(c) +// if userInfo == nil { +// helper.ResponseError(c, http.StatusUnauthorized, http2.ErrNotValidToken) +// return +// } +// argValue = reflect.ValueOf(userInfo) +// default: +// helper.ResponseError(c, http.StatusBadRequest, fmt.Errorf("invalid argument type: %s", argType.String())) +// return +// } +// +// args = append(args, argValue) +// } +// +// fnValue.Call(args) +//} diff --git a/internal/providers/gin/gin.go b/internal/providers/gin/gin.go deleted file mode 100644 index b7213ae..0000000 --- a/internal/providers/gin/gin.go +++ /dev/null @@ -1,90 +0,0 @@ -package gin - -import ( - "fmt" - http2 "framework_v2/internal/middleware/http" - "framework_v2/internal/providers/auth" - "framework_v2/internal/providers/facade" - "framework_v2/internal/providers/helper" - "github.com/gin-gonic/gin" - "net/http" - "reflect" -) - -func GET(relativePath string, handlers ...interface{}) { - facade.Router.GET(relativePath, func(c *gin.Context) { - doHandler(c, handlers...) - }) -} - -func POST(relativePath string, handlers ...interface{}) { - facade.Router.POST(relativePath, func(c *gin.Context) { - doHandler(c, handlers...) - }) -} - -func PUT(relativePath string, handlers ...interface{}) { - facade.Router.PUT(relativePath, func(c *gin.Context) { - doHandler(c, handlers...) - }) -} - -func PATCH(relativePath string, handlers ...interface{}) { - facade.Router.PATCH(relativePath, func(c *gin.Context) { - doHandler(c, handlers...) - }) -} - -func DELETE(relativePath string, handlers ...interface{}) { - facade.Router.DELETE(relativePath, func(c *gin.Context) { - doHandler(c, handlers...) 
- }) -} - -func doHandler(c *gin.Context, handlers ...interface{}) { - for _, handler := range handlers { - if c.IsAborted() { - // 是否已经响应 - if c.Writer.Written() { - return - } else { - helper.ResponseError(c, http.StatusBadRequest, http2.ErrEmptyResponse) - } - - return - } - - wrapHandler(c, handler) - } -} - -func wrapHandler(c *gin.Context, f interface{}) { - fnValue := reflect.ValueOf(f) - fnType := fnValue.Type() - - var args []reflect.Value - - for i := 0; i < fnType.NumIn(); i++ { - argType := fnType.In(i) - - var argValue reflect.Value - switch argType { - case reflect.TypeOf((*gin.Context)(nil)): - argValue = reflect.ValueOf(c) - case reflect.TypeOf((*auth.User)(nil)): - userInfo := http2.DIJWTAuth(c) - if userInfo == nil { - helper.ResponseError(c, http.StatusUnauthorized, http2.ErrNotValidToken) - return - } - argValue = reflect.ValueOf(userInfo) - default: - helper.ResponseError(c, http.StatusBadRequest, fmt.Errorf("invalid argument type: %s", argType.String())) - return - } - - args = append(args, argValue) - } - - fnValue.Call(args) -} diff --git a/internal/providers/gin/init.go b/internal/providers/gin/init.go deleted file mode 100644 index 1285e9e..0000000 --- a/internal/providers/gin/init.go +++ /dev/null @@ -1,24 +0,0 @@ -package gin - -import ( - "framework_v2/internal/handlers/controllers/user" - "framework_v2/internal/middleware/http" - "framework_v2/internal/providers/facade" - ginzap "github.com/gin-contrib/zap" - "github.com/gin-gonic/gin" - "time" -) - -func InitGin() { - gin.SetMode(gin.ReleaseMode) - - facade.Router = gin.New() - - facade.Router.Use(ginzap.Ginzap(facade.Logger, time.RFC3339, true)) - - InitApiRoutes() -} - -func InitApiRoutes() { - GET("/", http.MiddlewareJSONResponse, user.CurrentUser) -} diff --git a/internal/providers/jobs.go b/internal/providers/jobs.go new file mode 100644 index 0000000..15b4b8b --- /dev/null +++ b/internal/providers/jobs.go @@ -0,0 +1,27 @@ +package providers + +import ( + "github.com/hibiken/asynq" +) + +func getAsynQRedisOpt() asynq.RedisClientOpt { + var config = MustGet[GlobalConfig]() + return asynq.RedisClientOpt{ + Addr: config.Redis.Addr, + Password: config.Redis.Pass, + DB: 0, + } +} + +func ProvideAsynQClient() *asynq.Client { + return asynq.NewClient(getAsynQRedisOpt()) +} + +func ProvideAsynQServer() *asynq.Server { + return asynq.NewServer(getAsynQRedisOpt(), asynq.Config{Concurrency: 10}) +} + +func init() { + Must(Container.Provide(ProvideAsynQClient)) + Must(Container.Provide(ProvideAsynQServer)) +} diff --git a/internal/providers/jobs/init.go b/internal/providers/jobs/init.go deleted file mode 100644 index e6a4c30..0000000 --- a/internal/providers/jobs/init.go +++ /dev/null @@ -1,25 +0,0 @@ -package jobs - -import ( - "framework_v2/internal/providers/config" - "github.com/hibiken/asynq" -) - -var AsynQClient *asynq.Client -var AsynQServer *asynq.Server - -func getAsynQRedisOpt() asynq.RedisClientOpt { - return asynq.RedisClientOpt{ - Addr: config.Config.Redis.Addr, - Password: config.Config.Redis.Pass, - DB: 0, - } -} - -func InitAsynQClient() { - AsynQClient = asynq.NewClient(getAsynQRedisOpt()) -} - -func InitAsynQServer() { - AsynQServer = asynq.NewServer(getAsynQRedisOpt(), asynq.Config{Concurrency: 10}) -} diff --git a/internal/providers/jwks/jwks.go b/internal/providers/jwks/jwks.go index 9601a54..6b42f38 100644 --- a/internal/providers/jwks/jwks.go +++ b/internal/providers/jwks/jwks.go @@ -2,10 +2,10 @@ package jwks import ( "errors" - "framework_v2/internal/providers/config" - 
"framework_v2/internal/providers/logger" + "framework_v2/internal/providers" "github.com/MicahParks/keyfunc/v3" "github.com/golang-jwt/jwt/v5" + "go.uber.org/zap" "time" ) @@ -17,17 +17,21 @@ var ( ErrJWKSNotInitialized = errors.New("JWKS is not initialized") ) +var logger = providers.MustGet[zap.Logger]() +var config = providers.MustGet[providers.GlobalConfig]() + func RefreshJWKS() { - logger.Logger.Info("Refreshing JWKS...") + + logger.Info("Refreshing JWKS...") var err error - Jwks, err = keyfunc.NewDefault([]string{config.Config.JWKS.Url}) + Jwks, err = keyfunc.NewDefault([]string{config.JWKS.Url}) if err != nil { - logger.Logger.Error("Failed to create JWK Set from resource at the given URL.\nError: " + err.Error()) + logger.Error("Failed to create JWK Set from resource at the given URL.\nError: " + err.Error()) } - logger.Logger.Info("JWKS refreshed.") + logger.Info("JWKS refreshed.") } func ParseJWT(jwtB64 string) (*jwt.Token, error) { diff --git a/internal/providers/logger.go b/internal/providers/logger.go new file mode 100644 index 0000000..50adb3a --- /dev/null +++ b/internal/providers/logger.go @@ -0,0 +1,13 @@ +package providers + +import ( + "go.uber.org/zap" +) + +func providerLogger() (*zap.Logger, error) { + return zap.NewProduction() +} + +func init() { + Must(Container.Provide(providerLogger)) +} diff --git a/internal/providers/logger/init.go b/internal/providers/logger/init.go deleted file mode 100644 index eec79ba..0000000 --- a/internal/providers/logger/init.go +++ /dev/null @@ -1,23 +0,0 @@ -package logger - -import ( - "framework_v2/internal/providers/facade" - "go.uber.org/zap" -) - -var Logger *zap.Logger - -func InitLogger() { - if Logger != nil { - return - } - - var err error - Logger, err = zap.NewProduction() - - if err != nil { - panic(err) - } - - facade.Logger = Logger -} diff --git a/internal/providers/redis.go b/internal/providers/redis.go new file mode 100644 index 0000000..e99d23d --- /dev/null +++ b/internal/providers/redis.go @@ -0,0 +1,19 @@ +package providers + +import ( + "github.com/redis/go-redis/v9" +) + +func provideRedis() *redis.Client { + var config = MustGet[GlobalConfig]() + + return redis.NewClient(&redis.Options{ + Addr: config.Redis.Addr, + Password: config.Redis.Pass, + DB: 0, // use default DB + }) +} + +func init() { + Must(Container.Provide(provideRedis)) +} diff --git a/internal/providers/redis/init.go b/internal/providers/redis/init.go deleted file mode 100644 index f36a004..0000000 --- a/internal/providers/redis/init.go +++ /dev/null @@ -1,29 +0,0 @@ -package redis - -import "C" -import ( - "framework_v2/internal/providers/config" - "framework_v2/internal/providers/facade" - "github.com/redis/go-redis/v9" - "golang.org/x/net/context" -) - -var Redis *redis.Client - -func InitRedis() { - Redis = redis.NewClient(&redis.Options{ - Addr: config.Config.Redis.Addr, - Password: config.Config.Redis.Pass, - DB: 0, // use default DB - }) - - ctx := context.Background() - - err := Redis.Ping(ctx).Err() - - if err != nil { - panic(err) - } - - facade.Redis = Redis -} diff --git a/internal/providers/s3.go b/internal/providers/s3.go new file mode 100644 index 0000000..32d5671 --- /dev/null +++ b/internal/providers/s3.go @@ -0,0 +1,18 @@ +package providers + +import ( + "github.com/minio/minio-go/v7" + "github.com/minio/minio-go/v7/pkg/credentials" +) + +func ProvideS3Driver() (*minio.Client, error) { + var config = MustGet[GlobalConfig]() + return minio.New(config.S3.Endpoint, &minio.Options{ + Creds: 
credentials.NewStaticV4(config.S3.AccessKeyID, config.S3.SecretAccessKey, ""), + Secure: config.S3.UseSSL, + }) +} + +func init() { + Must(Container.Provide(ProvideS3Driver)) +} diff --git a/internal/providers/s3/init.go b/internal/providers/s3/init.go deleted file mode 100644 index c50ba33..0000000 --- a/internal/providers/s3/init.go +++ /dev/null @@ -1,21 +0,0 @@ -package s3 - -import ( - "framework_v2/internal/providers/config" - "framework_v2/internal/providers/facade" - "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" -) - -func InitS3Driver() { - var err error - facade.S3, err = minio.New(config.Config.S3.Endpoint, &minio.Options{ - Creds: credentials.NewStaticV4(config.Config.S3.AccessKeyID, config.Config.S3.SecretAccessKey, ""), - Secure: config.Config.S3.UseSSL, - }) - - if err != nil { - panic(err) - } - -} diff --git a/internal/providers/auth/user.go b/types/user.go similarity index 97% rename from internal/providers/auth/user.go rename to types/user.go index a822ce1..fca2bb9 100644 --- a/internal/providers/auth/user.go +++ b/types/user.go @@ -1,4 +1,4 @@ -package auth +package types type UserTokenInfo struct { Exp int `json:"exp"` @@ -25,4 +25,5 @@ type UserTokenInfo struct { type User struct { Token UserTokenInfo + Valid bool }
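Usage sketch (illustrative only, not part of the applied diff). The core technique of this patch is the dig-based provider container added in internal/providers/container.go: each provider file registers a constructor via Container.Provide (usually from init()), and consumers resolve a dependency with the generic MustGet[T]() helper, which Invokes the container and returns a pointer to a copy of the resolved value. The sketch below mirrors Container, Must and MustGet from the patch; exampleConfig and provideExampleConfig are hypothetical stand-ins for providers.GlobalConfig and provideConfig.

package main

import (
	"fmt"

	"go.uber.org/dig"
)

// container mirrors providers.Container.
var container = dig.New()

// must mirrors providers.Must: panic on provide/invoke errors.
func must(err error) {
	if err != nil {
		panic(err)
	}
}

// mustGet mirrors providers.MustGet: it asks the container for a *T,
// copies the value, and returns a pointer to that copy.
func mustGet[T any]() *T {
	var t T
	must(container.Invoke(func(e *T) {
		t = *e
	}))
	return &t
}

// exampleConfig and provideExampleConfig are hypothetical stand-ins for the
// patch's GlobalConfig and provideConfig.
type exampleConfig struct {
	HTTPListenAddr string
}

func provideExampleConfig() *exampleConfig {
	return &exampleConfig{HTTPListenAddr: "0.0.0.0:8080"}
}

func main() {
	// In the patch each provider file registers its constructor in init();
	// here we register one explicitly.
	must(container.Provide(provideExampleConfig))

	// Consumers resolve dependencies on demand, in the same way the patch
	// calls providers.MustGet[providers.GlobalConfig]().
	cfg := mustGet[exampleConfig]()
	fmt.Println("HTTP listen address:", cfg.HTTPListenAddr)
}

One caveat, based on a reading of the container.go added by this patch: because MustGet copies the resolved value before returning a pointer to it, callers receive a pointer to a fresh copy rather than to the container-managed instance, which matters for stateful dependencies.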