I want to retrieve an array of app IDs from a MySQL database. I used http://go-database-sql.org's example code:
func QueryAppList() *[]int64 {
	var (
		appList []int64
		appid   int64
	)
	qry := "SELECT a.appid FROM app a WHERE a.app_status IN (1, 2);"
	// cfg.GetDb() supplies the database connection already established
	rows, err := cfg.GetDb().Query(qry)
	if err != nil {
		logg.Error(err)
		return &appList
	}
	defer func(rows *sql.Rows) {
		// a simple deferred Close does not catch every error:
		// https://www.joeshaw.org/dont-defer-close-on-writable-files/
		err := rows.Close()
		if err != nil {
			logg.Error(err)
		}
	}(rows)
	for rows.Next() {
		err := rows.Scan(&appid)
		if err != nil {
			logg.Error(err)
			return &appList
		}
		appList = append(appList, appid)
	}
	err = rows.Err()
	if err != nil {
		logg.Error(err)
		return &appList
	}
	return &appList
}
My program will be littered with queries like this. All the boilerplate for collecting the result list and guarding against failure makes it hard to see what this small query is actually doing.
Is there a way to make queries more concise?
These are my thoughts on making the code less verbose:
Use functions to handle the errors, reducing the error handling to one line.
If it's a one-column array I want, I could pass the query and column name as parameters and reuse the query function (see the sketch below). I'd rather just rewrite a query function than deal with complicated abstractions.
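For illustration, here is a rough, untested sketch of the kind of single-column helper I have in mind (queryInt64s is just a made-up name; cfg.GetDb() and logg are from my code above):
// queryInt64s runs a query expected to return a single int64 column
// and collects the values, logging errors instead of propagating them.
func queryInt64s(qry string, args ...interface{}) []int64 {
	var list []int64
	rows, err := cfg.GetDb().Query(qry, args...)
	if err != nil {
		logg.Error(err)
		return list
	}
	defer rows.Close()
	for rows.Next() {
		var v int64
		if err := rows.Scan(&v); err != nil {
			logg.Error(err)
			return list
		}
		list = append(list, v)
	}
	if err := rows.Err(); err != nil {
		logg.Error(err)
	}
	return list
}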
Are there packages I missed that help reduce the clutter?
Using an ORM like gorm is NOT an option.
I just started Go programming so I am lacking experience with the language.
Below is the same query in Node.js with the same result. It is 9 lines to Go's 34, i.e. roughly a quarter of the length. That's where I'd ideally like to get to.
import {query} from "../db/pool"; // connection pool query from https://github.com/sidorares/node-mysql2

export const queryAppList = async () => {
    try {
        const qry = "SELECT a.appid FROM app a WHERE a.app_status IN (1, 2);";
        const [appList] = await query(qry);
        return appList;
    } catch (err) {
        console.error(err);
        return [];
    }
};
You can make a Query struct which has reusable methods for doing such things.
Something like this:
type Query struct {
	conn *sql.DB
	rows *sql.Rows
	// ...
}

func NewQuery(conn *sql.DB) *Query {
	return &Query{
		conn: conn,
		rows: nil,
	}
}

// OpenSQL needs a pointer receiver so the rows it opens are stored
// back into the struct rather than into a copy.
func (q *Query) OpenSQL(sql string) error {
	var err error
	q.rows, err = q.conn.Query(sql)
	if err != nil {
		log.Error("SQL error during query (" + sql + "). " + err.Error())
		return err
	}
	return nil
}

func (q *Query) Close() error {
	err := q.rows.Close()
	if err != nil {
		log.Error("Error closing rows. " + err.Error())
		return err
	}
	return nil
}

// You can use generic functions to make the code even smaller.
func FetchToSlice[T any](q *Query) ([]T, error) {
	result := make([]T, 0)
	var value T
	for q.rows.Next() {
		err := q.rows.Scan(&value)
		if err != nil {
			log.Error("Error during fetching. " + err.Error())
			return nil, err
		}
		result = append(result, value)
	}
	return result, nil
}
With this, your code will look something like this:
qry := NewQuery(cfg.GetDb())
err := qry.OpenSQL("SELECT a.appid FROM app a WHERE a.app_status IN (1, 2);")
if err != nil {
	return err
}
defer qry.Close()

appidList, err := FetchToSlice[int64](qry)
if err != nil {
	return err
}
You can later add more methods to your Query to handle more complex cases; you can even use a sync.Pool to cache your query structs, and so on.
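For instance, here is a minimal sync.Pool sketch on top of the Query struct above (illustrative only; pooling only pays off if you issue queries at a very high rate):
// queryPool caches Query structs so hot paths don't allocate a new
// one per statement.
var queryPool = sync.Pool{
	New: func() interface{} { return new(Query) },
}

// GetQuery fetches a pooled Query and binds it to a connection.
func GetQuery(conn *sql.DB) *Query {
	q := queryPool.Get().(*Query)
	q.conn = conn
	q.rows = nil
	return q
}

// PutQuery returns a Query to the pool after Close has been called.
func PutQuery(q *Query) {
	q.conn, q.rows = nil, nil
	queryPool.Put(q)
}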
Related
I'm working on a program that makes a query to MySQL and then, for each row, changes something with that row and updates it.
The problem is that sometimes when performing an update I get a deadlock, I'm not sure if it's because the query isn't releasing the lock by the time I update or if it's something else.
Example of what I'm doing:
const (
	selectQuery = `select user_id, original_transaction_id, max(payment_id) as max_payment_id from Payment_Receipt
		where auto_renew_status = 1 group by user_id, original_transaction_id having count(*) > 1`

	updateQuery = `update Payment_Receipt set auto_renew_status = 0, changed_by = "payment_receipt_condenser",
		changed_time = ? where user_id = ? and original_transaction_id = ? and payment_id != ? and auto_renew_status = 1`
)
mysql.go:
func New(db *sql.DB, driver string) (database.Database, error) {
	sqlDB := sqlx.NewDb(db, driver)
	if err := db.Ping(); err != nil {
		return nil, errors.Wrap(err, "connecting to database")
	}
	selectStmt, err := sqlDB.Preparex(selectQuery)
	if err != nil {
		return nil, errors.Wrap(err, "preparing select query")
	}
	updateStmt, err := sqlDB.Preparex(updateQuery)
	if err != nil {
		return nil, errors.Wrap(err, "preparing update query")
	}
	return &mysql{
		db:         sqlDB,
		selectStmt: selectStmt,
		updateStmt: updateStmt,
	}, nil
}
func (m *mysql) Query() (<-chan *database.Row, error) {
	rowsChan := make(chan *database.Row)
	rows, err := m.selectStmt.Queryx()
	if err != nil {
		return nil, errors.Wrap(err, "making query")
	}
	go func() {
		defer rows.Close()
		defer close(rowsChan)
		for rows.Next() {
			row := &database.Row{}
			if err := rows.StructScan(row); err != nil {
				log.WithError(err).WithField("user_id", row.UserID.Int32).Error("scanning row")
			}
			// change some of the data here
			// and put into channel for worker to consume
			rowsChan <- row
		}
	}()
	return rowsChan, nil
}
func (m *mysql) Update(row *database.Row) error {
	tx, err := m.db.Beginx()
	if err != nil {
		return errors.Wrap(err, "beginning transaction")
	}
	// updateQuery has four placeholders, so all four values must be
	// passed to Exec (field names here follow the query's columns)
	if _, err := tx.Stmtx(m.updateStmt).Exec(row.ChangedTime, row.UserID, row.OriginalTransactionID, row.PaymentID); err != nil {
		return errors.Wrap(err, "executing update")
	}
	if err := tx.Commit(); err != nil {
		return errors.Wrap(err, "committing transaction")
	}
	return nil
}
worker.go
func (w *worker) Run(wg *sync.WaitGroup) {
	rowsChan, err := w.db.Query()
	if err != nil {
		log.WithError(err).Fatal("failed making query")
	}
	for i := 0; i < w.config.Count(); i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for row := range rowsChan {
				if err := w.db.Update(row); err != nil {
					log.WithError(err).WithField("user_id", row.UserID.Int32).Error("updating row")
				}
			}
		}()
	}
}
You could make the results (row) channel from a Query() buffered:
func (m *mysql) Query() (<-chan *database.Row, error) {
	rowsChan := make(chan *database.Row, 1000) // <- band-aid fix
	// ...
}
This will let the row-collector goroutine write many results without waiting for your worker goroutines to read them. The query operation will complete (provided there are 1000 rows or fewer), and the update goroutines can begin their parallel work.
If this fixes things, then consider putting an SQL limit on your queries (e.g. LIMIT 1000) to ensure you don't hit the deadlock again (if 1000+ records is a real possibility).
You can then craft "pagination"-style queries that grab the next batch of, say, 1000 rows, using row-ID markers to ensure full coverage of the results, all while avoiding locking out any of your update operations; a sketch follows.
I am working with Gorm and GraphQL. I hadn't run into problems querying data until I tried to connect two existing items using their foreign-key relationships. Here are my two models:
type Report struct {
	db.Base
	OwnerID   string
	Patient   patients.Patient `gorm:"association_name:Patient;"`
	PatientID string
}

type Patient struct {
	db.Base
	ReportID string
}
I have a function to save the relationship to the database:
func (s *Store) AddPatientToReport(ctx context.Context, id string, patient *patients.Patient) (*Report, error) {
	// check against user using context
	report, err := s.Report(ctx, id)
	if err != nil {
		log.Error("Could not find report.")
		return nil, err
	}
	report.PatientID = patient.ID
	if err := s.db.Save(&report).Association("Patient").Append(patient).Error; err != nil {
		log.WithError(err).Error("add patient failed")
		return nil, err
	}
	return report, nil
}
After the above function I can query the Report and see the patient_id. I can also query the Patient and see the report_id. But the following query to get the whole Patient from the Report just returns empty.
query {
  report(id: "report_id") {
    id
    patientID # this will return the correct patient_id
    patient {
      id # this field always comes back as an empty string
    }
  }
}
Here is how the DB is setup:
// NewConn creates a new database connection
func NewConn(cc ConnConf) (*Conn, error) {
	db, err := gorm.Open("mysql", cc.getCtxStr())
	if err != nil {
		return nil, err
	}
	// Models are loaded from each package. Patients is created before Reports.
	if err := db.AutoMigrate(Models...).Error; err != nil {
		log.Fatal(err)
	}
	db.LogMode(cc.Logger)
	return &Conn{db}, err
}
I can't figure out how to get the whole patient back. Any suggestions?
OK, so as it turns out, I just needed to ask and then I figured it out on my own a few minutes later.
I'd read about Preload() in the Gorm docs but didn't know where to use it. I first tried it when the DB fired up, thinking it would load the associations there. But I really needed to use Preload() as I run the query:
result := &Report{}
if err = s.db.Preload("Patient").Where(&query).First(&result).Error; err != nil {
	log.WithField("id", id).WithError(err).
		Error("could not find report")
	return nil, err
}
Now, the GraphQL query:
query {
  report(id: "some_id") {
    id
    patient {
      id # now it returns the id
      birthyear # now it returns other fields, too
      ...
    }
  }
}
I'm trying to create a very simple Bolt database called "ledger.db" containing one bucket, "Users", which maps usernames (keys) to balances (values) and lets users transfer their balance to one another. I am using bolter to view the database from the command line.
There are two problems, both in the transfer function.
The first: inside the transfer function is an if/else. If the condition is true, it executes as it should. If it's false, nothing happens. There are no syntax errors, and the program runs as though nothing is wrong; it just doesn't execute the else branch.
The second: even when the condition is true, it doesn't update BOTH balance values in the database. It updates the balance of the receiver but not that of the sender. The mathematical operations are completed and the values are marshaled into a JSON-compatible format; the problem is that the sender's balance is never written to the database.
Everything from the second "Success!" fmt.Println() call onward is not executed.
I've tried changing db.Update() to db.Batch(). I've tried changing the order of the Put() calls. I've tried messing with goroutines and defer, but I have no clue how to use those, as I am rather new to Go.
func (from *User) transfer(to User, amount int) error {
	var fbalance int = 0
	var tbalance int = 0
	db, err := bolt.Open("ledger.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	return db.Update(func(tx *bolt.Tx) error {
		uBuck := tx.Bucket([]byte("Users"))
		json.Unmarshal(uBuck.Get([]byte(from.username)), &fbalance)
		json.Unmarshal(uBuck.Get([]byte(to.username)), &tbalance)
		if amount <= fbalance {
			fbalance = fbalance - amount
			encoded, err := json.Marshal(fbalance)
			if err != nil {
				return err
			}
			tbalance = tbalance + amount
			encoded2, err := json.Marshal(tbalance)
			if err != nil {
				return err
			}
			fmt.Println("Success!")
			c := uBuck
			err = c.Put([]byte(to.username), encoded2)
			return err
			fmt.Println("Success!")
			err = c.Put([]byte(from.username), encoded)
			return err
			fmt.Println("Success!")
		} else {
			return fmt.Errorf("Not enough in balance!", amount)
		}
		return nil
	})
	return nil
}
func main() {
	/*
		db, err := bolt.Open("ledger.db", 0600, nil)
		if err != nil {
			log.Fatal(err)
		}
		defer db.Close()
	*/
	var b User = User{"Big", "jig", 50000, 0}
	var t User = User{"Trig", "pig", 40000, 0}
	// These two functions add each User to the database; they aren't
	// the problem
	b.createUser()
	t.createUser()
	/*
		db.View(func(tx *bolt.Tx) error {
			c := tx.Bucket([]byte("Users"))
			get := c.Get([]byte(b.username))
			fmt.Printf("The return value %v", get)
			return nil
		})
	*/
	t.transfer(b, 40000)
}
I expect the database to show Big:90000 Trig:0 from the beginning values of Big:50000 Trig:40000
Instead, the program outputs Big:90000 Trig:40000
You return unconditionally:
c := uBuck
err = c.Put([]byte(to.username), encoded2)
return err // <- execution stops here; everything below is unreachable
fmt.Println("Success!")
err = c.Put([]byte(from.username), encoded)
return err
fmt.Println("Success!")
You are also ignoring returned errors in places like:
json.Unmarshal(uBuck.Get([]byte(from.username)), &fbalance)
json.Unmarshal(uBuck.Get([]byte(to.username)), &tbalance)
t.transfer(b, 40000)
And so on.
Debug your code statement by statement.
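For reference, the body of the db.Update closure could look like this once the unreachable code is removed (a sketch based on your code; both Put calls now happen before any return):
return db.Update(func(tx *bolt.Tx) error {
	uBuck := tx.Bucket([]byte("Users"))
	if err := json.Unmarshal(uBuck.Get([]byte(from.username)), &fbalance); err != nil {
		return err
	}
	if err := json.Unmarshal(uBuck.Get([]byte(to.username)), &tbalance); err != nil {
		return err
	}
	if amount > fbalance {
		return fmt.Errorf("not enough in balance: %d", amount)
	}
	encodedFrom, err := json.Marshal(fbalance - amount)
	if err != nil {
		return err
	}
	encodedTo, err := json.Marshal(tbalance + amount)
	if err != nil {
		return err
	}
	// write BOTH balances before returning; the original returned
	// right after the first Put, so the sender was never updated
	if err := uBuck.Put([]byte(to.username), encodedTo); err != nil {
		return err
	}
	return uBuck.Put([]byte(from.username), encodedFrom)
})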
I have a MySQL database with one value in it, a string: "192.168.0.1"
Here is my code:
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/go-sql-driver/mysql"
)

func checkErr(err error) {
	if err != nil {
		panic(err)
	}
}

func main() {
	db, err := sql.Open("mysql", "be:me@tcp(127.0.0.1:3306)/ipdb?charset=utf8")
	checkErr(err)
	ip := "'192.168.0.1'"
	rows, err := db.Query("SELECT * FROM Ip_ipdata WHERE ipHost=" + ip)
	fmt.Println("insert")
	if rows != nil {
		for rows.Next() {
			var id int
			var ip string
			err = rows.Scan(&id, &ip)
			checkErr(err)
			fmt.Println(id)
			fmt.Println(ip)
		}
	} else {
		fmt.Println("insert2")
		stmt, err2 := db.Prepare("INSERT Ip_ipdata SET ipHost=?")
		checkErr(err2)
		_, err3 := stmt.Exec(ip)
		checkErr(err3)
	}
	fmt.Println("end")
}
When I put "'192.168.0.1'" in ip it works and shows as expected.
But when I put "'192.168.0.2'" in ip the else statement isn't run and it just exits.
It didn't print "insert2"
You should get used to using ? placeholders in your SQL to allow for proper escaping and to prevent any potential SQL injection attacks.
You should also always check the error in Go before using the returned value:
ip := "192.168.0.1"
rows, err := db.Query("SELECT * FROM Ip_ipdata WHERE ipHost=?", ip)
if err != nil {
// handle error
}
// this will ensure that the DB connection gets put back into the pool
defer rows.Close()
for rows.Next() {
// scan here
}
The Rows value returned by Query will not be nil when there are no results; it will be empty. Try something like this:
func main() {
	...
	fmt.Println("insert")
	checkErr(err)
	defer rows.Close()
	var found bool
	for rows.Next() {
		found = true
		...
	}
	if !found {
		fmt.Println("insert2")
		...
	}
	fmt.Println("end")
}
Note that, as @jmaloney said, more robust error handling is a must, as is closing your Rows pointer.
Recently I've been learning Go. I'm trying to make a simple web service using Martini and jwt-go. I didn't find any difficulty retrieving a single row of data and putting it in the JSON response, but dealing with multiple rows is a whole different story. Basically, I'm referring to the accepted answer here.
Here is the piece of my code:
m.Get("/users", func(params martini.Params, r render.Render) {
db, err := sql.Open("mysql", "root:#/sirat_v2")
if err != nil {
panic(err.Error())
}
defer db.Close()
rows, err := db.Query("SELECT user_id, nama FROM `users` WHERE password = ?", "asdfasdf")
defer rows.Close()
cols, err := rows.Columns()
if err != nil {
panic(err.Error())
}
partages := make([]*Partage, 0, 10)
var user_id int
var nama string
for rows.Next() {
err = rows.Scan(&user_id, &nama)
if err != nil { /* error handling */
}
partages = append(partages, &Partage{user_id, nama})
}
})
When trying to build, there's an error saying that Partage is undefined.
The error shows up because you use the struct Partage to create an object, but you haven't declared it:
type Partage struct {
	user_id int
	nama    string
}
But how do I display the result as a JSON response? I've tried r.JSON(200, partages) but the results aren't displayed.
In Martini you can use r.JSON() to render the scanned rows as JSON:
m.Get("/users", func(params martini.Params, r render.Render) {
	// ... scan the rows into partages as above ...
	r.JSON(200, map[string]interface{}{
		"data": partages,
	})
})
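Note that encoding/json, which Martini's render package uses under the hood, only marshals exported struct fields, so the Partage declared above will always serialize as an empty object. A minimal sketch of a version that renders (the json tag names are just examples):
// Fields must be exported (capitalized) for encoding/json to see
// them; the json tags control the key names in the output.
type Partage struct {
	UserID int    `json:"user_id"`
	Nama   string `json:"nama"`
}
With exported fields, r.JSON(200, partages) returns the populated list.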