jschaf / pggen

Generate type-safe Go for any Postgres query. If Postgres can run the query, pggen can generate code for it.
MIT License
281 stars 26 forks source link

Type override to *time.Time missing time package import #70

Open jaohurtas opened 2 years ago

jaohurtas commented 2 years ago

Using this flag: --go-type 'date=*time.Time'

The type is replaced in the struct, but the time package is no longer imported and it will not compile.

jschaf commented 2 years ago

Thanks for reporting. I can provide a fix faster if you're able to provide:

This might have been fixed in https://github.com/jschaf/pggen/issues/67 relased at https://github.com/jschaf/pggen/releases/tag/2022-07-16

jaohurtas commented 2 years ago

I just installed the latest version with go install ... @latest. It just returns this:

pggen version                              
pggen version dev, commit head

Query:

-- name: FindEvents2 :many
SELECT * FROM event2  LIMIT pggen.arg('limit');

Command:

pggen gen go --query-glob events2.sql  --go-type 'varchar=*string' --go-type 'date=*time.Time'  -postgres-connection $DB_CONN

Table

CREATE TABLE IF NOT EXISTS "test".event2
(
    id_event numeric NOT NULL,
    event_date date NOT NULL,
    CONSTRAINT event_pkey PRIMARY KEY (id_event)
)

Generated code:

// Code generated by pggen. DO NOT EDIT.

package db

import (
    "context"
    "fmt"
    "github.com/jackc/pgconn"
    "github.com/jackc/pgtype"
    "github.com/jackc/pgx/v4"
)

// Querier is a typesafe Go interface backed by SQL queries.
//
// Methods ending with Batch enqueue a query to run later in a pgx.Batch. After
// calling SendBatch on pgx.Conn, pgxpool.Pool, or pgx.Tx, use the Scan methods
// to parse the results.
type Querier interface {
    FindEvents2(ctx context.Context, limit int) ([]FindEvents2Row, error)
    // FindEvents2Batch enqueues a FindEvents2 query into batch to be executed
    // later by the batch.
    FindEvents2Batch(batch genericBatch, limit int)
    // FindEvents2Scan scans the result of an executed FindEvents2Batch query.
    FindEvents2Scan(results pgx.BatchResults) ([]FindEvents2Row, error)
}

type DBQuerier struct {
    conn  genericConn   // underlying Postgres transport to use
    types *typeResolver // resolve types by name
}

var _ Querier = &DBQuerier{}

// genericConn is a connection to a Postgres database. This is usually backed by
// *pgx.Conn, pgx.Tx, or *pgxpool.Pool.
type genericConn interface {
    // Query executes sql with args. If there is an error the returned Rows will
    // be returned in an error state. So it is allowed to ignore the error
    // returned from Query and handle it in Rows.
    Query(ctx context.Context, sql string, args ...interface{}) (pgx.Rows, error)

    // QueryRow is a convenience wrapper over Query. Any error that occurs while
    // querying is deferred until calling Scan on the returned Row. That Row will
    // error with pgx.ErrNoRows if no rows are returned.
    QueryRow(ctx context.Context, sql string, args ...interface{}) pgx.Row

    // Exec executes sql. sql can be either a prepared statement name or an SQL
    // string. arguments should be referenced positionally from the sql string
    // as $1, $2, etc.
    Exec(ctx context.Context, sql string, arguments ...interface{}) (pgconn.CommandTag, error)
}

// genericBatch batches queries to send in a single network request to a
// Postgres server. This is usually backed by *pgx.Batch.
type genericBatch interface {
    // Queue queues a query to batch b. query can be an SQL query or the name of a
    // prepared statement. See Queue on *pgx.Batch.
    Queue(query string, arguments ...interface{})
}

// NewQuerier creates a DBQuerier that implements Querier. conn is typically
// *pgx.Conn, pgx.Tx, or *pgxpool.Pool.
func NewQuerier(conn genericConn) *DBQuerier {
    return NewQuerierConfig(conn, QuerierConfig{})
}

type QuerierConfig struct {
    // DataTypes contains pgtype.Value to use for encoding and decoding instead
    // of pggen-generated pgtype.ValueTranscoder.
    //
    // If OIDs are available for an input parameter type and all of its
    // transitive dependencies, pggen will use the binary encoding format for
    // the input parameter.
    DataTypes []pgtype.DataType
}

// NewQuerierConfig creates a DBQuerier that implements Querier with the given
// config. conn is typically *pgx.Conn, pgx.Tx, or *pgxpool.Pool.
func NewQuerierConfig(conn genericConn, cfg QuerierConfig) *DBQuerier {
    return &DBQuerier{conn: conn, types: newTypeResolver(cfg.DataTypes)}
}

// WithTx creates a new DBQuerier that uses the transaction to run all queries.
func (q *DBQuerier) WithTx(tx pgx.Tx) (*DBQuerier, error) {
    return &DBQuerier{conn: tx}, nil
}

// preparer is any Postgres connection transport that provides a way to prepare
// a statement, most commonly *pgx.Conn.
type preparer interface {
    Prepare(ctx context.Context, name, sql string) (sd *pgconn.StatementDescription, err error)
}

// PrepareAllQueries executes a PREPARE statement for all pggen generated SQL
// queries in querier files. Typical usage is as the AfterConnect callback
// for pgxpool.Config
//
// pgx will use the prepared statement if available. Calling PrepareAllQueries
// is an optional optimization to avoid a network round-trip the first time pgx
// runs a query if pgx statement caching is enabled.
func PrepareAllQueries(ctx context.Context, p preparer) error {
    if _, err := p.Prepare(ctx, findEvents2SQL, findEvents2SQL); err != nil {
        return fmt.Errorf("prepare query 'FindEvents2': %w", err)
    }
    return nil
}

// typeResolver looks up the pgtype.ValueTranscoder by Postgres type name.
type typeResolver struct {
    connInfo *pgtype.ConnInfo // types by Postgres type name
}

func newTypeResolver(types []pgtype.DataType) *typeResolver {
    ci := pgtype.NewConnInfo()
    for _, typ := range types {
        if txt, ok := typ.Value.(textPreferrer); ok && typ.OID != unknownOID {
            typ.Value = txt.ValueTranscoder
        }
        ci.RegisterDataType(typ)
    }
    return &typeResolver{connInfo: ci}
}

// findValue find the OID, and pgtype.ValueTranscoder for a Postgres type name.
func (tr *typeResolver) findValue(name string) (uint32, pgtype.ValueTranscoder, bool) {
    typ, ok := tr.connInfo.DataTypeForName(name)
    if !ok {
        return 0, nil, false
    }
    v := pgtype.NewValue(typ.Value)
    return typ.OID, v.(pgtype.ValueTranscoder), true
}

// setValue sets the value of a ValueTranscoder to a value that should always
// work and panics if it fails.
func (tr *typeResolver) setValue(vt pgtype.ValueTranscoder, val interface{}) pgtype.ValueTranscoder {
    if err := vt.Set(val); err != nil {
        panic(fmt.Sprintf("set ValueTranscoder %T to %+v: %s", vt, val, err))
    }
    return vt
}

const findEvents2SQL = `SELECT * FROM event2  LIMIT $1;`

type FindEvents2Row struct {
    IDEvent   pgtype.Numeric `json:"id_event"`
    EventDate *time.Time     `json:"event_date"`
}

// FindEvents2 implements Querier.FindEvents2.
func (q *DBQuerier) FindEvents2(ctx context.Context, limit int) ([]FindEvents2Row, error) {
    ctx = context.WithValue(ctx, "pggen_query_name", "FindEvents2")
    rows, err := q.conn.Query(ctx, findEvents2SQL, limit)
    if err != nil {
        return nil, fmt.Errorf("query FindEvents2: %w", err)
    }
    defer rows.Close()
    items := []FindEvents2Row{}
    for rows.Next() {
        var item FindEvents2Row
        if err := rows.Scan(&item.IDEvent, &item.EventDate); err != nil {
            return nil, fmt.Errorf("scan FindEvents2 row: %w", err)
        }
        items = append(items, item)
    }
    if err := rows.Err(); err != nil {
        return nil, fmt.Errorf("close FindEvents2 rows: %w", err)
    }
    return items, err
}

// FindEvents2Batch implements Querier.FindEvents2Batch.
func (q *DBQuerier) FindEvents2Batch(batch genericBatch, limit int) {
    batch.Queue(findEvents2SQL, limit)
}

// FindEvents2Scan implements Querier.FindEvents2Scan.
func (q *DBQuerier) FindEvents2Scan(results pgx.BatchResults) ([]FindEvents2Row, error) {
    rows, err := results.Query()
    if err != nil {
        return nil, fmt.Errorf("query FindEvents2Batch: %w", err)
    }
    defer rows.Close()
    items := []FindEvents2Row{}
    for rows.Next() {
        var item FindEvents2Row
        if err := rows.Scan(&item.IDEvent, &item.EventDate); err != nil {
            return nil, fmt.Errorf("scan FindEvents2Batch row: %w", err)
        }
        items = append(items, item)
    }
    if err := rows.Err(); err != nil {
        return nil, fmt.Errorf("close FindEvents2Batch rows: %w", err)
    }
    return items, err
}

// textPreferrer wraps a pgtype.ValueTranscoder and sets the preferred encoding
// format to text instead binary (the default). pggen uses the text format
// when the OID is unknownOID because the binary format requires the OID.
// Typically occurs if the results from QueryAllDataTypes aren't passed to
// NewQuerierConfig.
type textPreferrer struct {
    pgtype.ValueTranscoder
    typeName string
}

// PreferredParamFormat implements pgtype.ParamFormatPreferrer.
func (t textPreferrer) PreferredParamFormat() int16 { return pgtype.TextFormatCode }

func (t textPreferrer) NewTypeValue() pgtype.Value {
    return textPreferrer{pgtype.NewValue(t.ValueTranscoder).(pgtype.ValueTranscoder), t.typeName}
}

func (t textPreferrer) TypeName() string {
    return t.typeName
}

// unknownOID means we don't know the OID for a type. This is okay for decoding
// because pgx call DecodeText or DecodeBinary without requiring the OID. For
// encoding parameters, pggen uses textPreferrer if the OID is unknown.
const unknownOID = 0
offlinehacker commented 2 years ago

Problem seems to be if type is a pointer. The following command imports time correctly:

pggen gen go
    --query-glob 'internal/postgres/queries/*.sql' \
    --postgres-connection ${DB_URL} \
    --go-type "timestamptz=time.Time"

But this does not work:

pggen gen go
    --query-glob 'internal/postgres/queries/*.sql' \
    --postgres-connection ${DB_URL} \
    --go-type "timestamptz=*time.Time"

The same problem is present also with other pointer types.

0xjac commented 2 years ago

I just stumbled on this issue as well.

One fix is to run goimports on the generated files, such as:

goimports -w <pggen_output_dir>/*.sql.go

This will ensure all generated files have all imports, nicely organized.