Gorm mariadb cache query select in transaction - mysql

I use GORM with MariaDB.
There is a function that updates the terminal table and inserts into the terminalLog table, wrapped in a transaction (Begin/Commit). Before inserting, I check the terminalLog table by calling the GetByUniqueTerminalLog function.
If GetByUniqueTerminalLog is called before the transaction begins, it returns the query result data, but if it is called between Begin and Commit, it does not return the data that was previously inserted into the terminalLog table.
What causes this?
What should I do if GetByUniqueTerminalLog has to be called between Begin and Commit?
This is the source code snippet:
func (r *TerminalRepository) GetByUniqueTerminalLog(ctx *gin.Context, id int64, terminal_id int64, version string, device_id string, payment_id int64) (res *model.TerminalLog, err error) {
    log.Debug("TerminalRepository - GetByUniqueTerminalLog() - starting...")
    db := r.DbContext.DB
    if id > 0 {
        db = db.Where("id = ?", id)
    }
    if terminal_id > 0 {
        db = db.Where("terminal_id = ? ", terminal_id)
    }
    if payment_id > 0 {
        db = db.Where("payment_id = ? ", payment_id)
    }
    if device_id != "" {
        db = db.Where("device_id = ? ", device_id)
    }
    if version != "" {
        db = db.Where("version = ? ", version)
    }
    result := db.Debug().Find(&res)
    if result.Error != nil {
        return res, result.Error
    }
    log.Debug("TerminalRepository - GetByUniqueTerminalLog() - finished.")
    return res, nil
}
func (r *TerminalRepository) UpdateTerminalVersion(ctx *gin.Context, data *model.Terminal) (err error) {
    log.Debug("TerminalRepository - UpdateTerminalVersion() - starting...")
    if err = data.Validate(); err != nil {
        return err
    }
    dataUpdate, err := r.GetByUnique(ctx, data.ID, "", 0, "", "")
    if err != nil {
        return err
    }
    if dataUpdate.ID < 1 {
        return custom.ErrorNotFoundDB(data.TableName(), "id", data.ID)
    }
    TerminalLog, err := r.GetByUniqueTerminalLog(ctx, 0, dataUpdate.ID, data.Version, "", 0)
    dataUpdate.Version = data.Version
    dataUpdate.InitAudit(constant.OPERATION_SQL_UPDATE, data.UpdatedUser)
    tx := r.DbContext.DB.Begin()
    result := tx.Updates(&dataUpdate)
    if result.Error != nil {
        tx.Rollback()
        return result.Error
    }
    if result.RowsAffected < 1 {
        tx.Rollback()
        return custom.ErrorOperationDB(dataUpdate.TableName(), "update")
    }
    //TerminalLog, err := r.GetByUniqueTerminalLog(ctx, 0, dataUpdate.ID, data.Version, "", 0)
    fmt.Printf("TerminalLog %+v\n", TerminalLog)
    if TerminalLog.ID < 1 {
        DataTerminalLog := model.TerminalLog{
            Version:    data.Version,
            Activity:   "UPDATE_VERSION",
            TerminalId: &dataUpdate.ID,
            MerchantId: &dataUpdate.MerchantId,
            PaymentId:  dataUpdate.PaymentId,
            DeviceId:   dataUpdate.DeviceId,
        }
        DataTerminalLog.InitAudit(constant.OPERATION_SQL_INSERT, 1)
        resultInsertLog := tx.Create(&DataTerminalLog)
        if resultInsertLog.Error != nil {
            tx.Rollback()
            return resultInsertLog.Error
        }
        if resultInsertLog.RowsAffected < 1 {
            tx.Rollback()
            return custom.ErrorOperationDB(dataUpdate.TableName(), "insert")
        }
    }
    tx.Commit()
    log.Debug("TerminalRepository - UpdateTerminalVersion() - finished.")
    return nil
}
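One commonly suggested direction (an assumption on my part, not something stated in the post): run the lookup on the transaction handle instead of r.DbContext.DB, so the SELECT shares the transaction's connection and snapshot and can see rows written inside the same Begin/Commit. A minimal sketch, with a hypothetical tx-aware variant of the lookup:

func (r *TerminalRepository) getByUniqueTerminalLogTx(tx *gorm.DB, terminalID int64, version string) (*model.TerminalLog, error) {
    // Sketch only: "tx" is the handle returned by r.DbContext.DB.Begin(),
    // so this query runs inside the open transaction rather than on a separate connection.
    var res model.TerminalLog
    db := tx
    if terminalID > 0 {
        db = db.Where("terminal_id = ?", terminalID)
    }
    if version != "" {
        db = db.Where("version = ?", version)
    }
    if err := db.Find(&res).Error; err != nil {
        return nil, err
    }
    return &res, nil
}

Inside UpdateTerminalVersion it would then be called after tx := r.DbContext.DB.Begin(), for example TerminalLog, err := r.getByUniqueTerminalLogTx(tx, dataUpdate.ID, data.Version).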

Related

How to edit only some fields in the database instead of all?

I have an application in Go with an endpoint for editing user information. The problem is that when the JSON contains blank fields (say the user only wants to edit the name and leaves the other fields blank), those blank values overwrite the existing values in the DB, even though the user didn't want to edit them.
How can I make only the fields that actually arrive in the JSON be updated in the DB? Or is there a better way?
I'm looking forward to knowing!
My controller
func EditUser(w http.ResponseWriter, r *http.Request) {
    params := mux.Vars(r)
    userID, err := strconv.ParseUint(params["userID"], 10, 64)
    if err != nil {
        returns.ERROR(w, http.StatusInternalServerError, err)
        return
    }
    userIDInToken, err := auth.ExtractUserID(r)
    if err != nil {
        returns.ERROR(w, http.StatusInternalServerError, err)
        return
    }
    if userIDInToken != userID {
        returns.ERROR(w, http.StatusForbidden, errors.New("you can't update other user"))
        return
    }
    bodyRequest, err := ioutil.ReadAll(r.Body)
    if err != nil {
        returns.ERROR(w, http.StatusBadRequest, err)
        return
    }
    var user models.User
    if err := json.Unmarshal(bodyRequest, &user); err != nil {
        returns.ERROR(w, http.StatusUnprocessableEntity, err)
        return
    }
    db, err := db.ConnectToDB()
    if err != nil {
        returns.ERROR(w, http.StatusInternalServerError, err)
        return
    }
    defer db.Close()
    repository := repositories.NewUsersRepository(db)
    if err := repository.UpdateUserInfo(userID, user); err != nil {
        returns.ERROR(w, http.StatusInternalServerError, err)
        return
    }
    returns.JSON_RESPONSE(w, http.StatusOK, nil)
}
My repository (that accesses the DB)
func (repository Users) UpdateUserInfo(userID uint64, user models.User) error {
    stmt, err := repository.db.Prepare(
        "UPDATE user SET name = ?, cpf = ?, email = ?, password = ?, city = ?, state = ? WHERE id = ?")
    if err != nil {
        return err
    }
    defer stmt.Close()
    if _, err := stmt.Exec(
        user.Name,
        user.CPF,
        user.Email,
        user.Password,
        user.City,
        user.State,
        userID,
    ); err != nil {
        return err
    }
    return nil
}
Model for user
type User struct {
    ID       uint64 `json:"id,omitempty"`
    Name     string `json:"name,omitempty"`
    CPF      string `json:"cpf,omitempty"`
    Email    string `json:"email,omitempty"`
    Password string `json:"password,omitempty"`
    City     string `json:"city,omitempty"`
    State    string `json:"state,omitempty"`
}
I would dynamically construct the UPDATE statement as well as a slice of the fields that need to be edited by checking if the field is not empty before adding it to the slice.
Something like this:
func (repository Users) UpdateUserInfo(userID uint64, user User) error {
    fields := make([]string, 0)
    values := make([]interface{}, 0)
    if user.Name != "" {
        values = append(values, user.Name)
        fields = append(fields, "name = ?")
    }
    if user.CPF != "" {
        values = append(values, user.CPF)
        fields = append(fields, "cpf = ?")
    }
    if user.Email != "" {
        values = append(values, user.Email)
        fields = append(fields, "email = ?")
    }
    if user.Password != "" {
        values = append(values, user.Password)
        fields = append(fields, "password = ?")
    }
    if user.City != "" {
        values = append(values, user.City)
        fields = append(fields, "city = ?")
    }
    if user.State != "" {
        values = append(values, user.State)
        fields = append(fields, "state = ?")
    }
    if len(fields) == 0 {
        return errors.New("no fields to update")
    }
    updateString := fmt.Sprintf("UPDATE user SET %s WHERE id = ?", strings.Join(fields, ","))
    stmt, err := repository.db.Prepare(updateString)
    if err != nil {
        return err
    }
    defer stmt.Close()
    values = append(values, userID) // the id is always the last placeholder
    if _, err := stmt.Exec(values...); err != nil {
        return err
    }
    return nil
}
For cleaner code, I'd recommend extracting the "if statements"/validation into a separate function, something like the sketch below.
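A minimal illustration of that extraction (the helper name appendIfSet is hypothetical, not part of the answer above):

// appendIfSet adds a "column = ?" clause and its value only when the value is non-empty.
func appendIfSet(fields []string, values []interface{}, column, value string) ([]string, []interface{}) {
    if value == "" {
        return fields, values
    }
    return append(fields, column+" = ?"), append(values, value)
}

The repeated if blocks then collapse to calls like fields, values = appendIfSet(fields, values, "name", user.Name).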

sql.Query truncated or incomplete results

I've the following code:
const qInstances = `
    SELECT
        i.uuid,
        i.host,
        i.hostname
    FROM
        db.instances AS i
    WHERE
        i.deleted_at IS NULL
    GROUP BY i.uuid;
`
...
instancesMap := make(map[string]*models.InstanceModel)
instances := []models.Instance{}
instancesCount := 0
instancesRow, err := db.Query(qInstances)
if err != nil {
    panic(err.Error())
}
defer instancesRow.Close()
for instancesRow.Next() {
    i := models.Instance{}
    err = instancesRow.Scan(&i.UUID, &i.Host, &i.Hostname)
    if err != nil {
        log.Printf("[Error] - While Scanning Instances Rows, error msg: %s\n", err)
        panic(err.Error())
    } else {
        if i.UUID.String != "" {
            instancesCount++
        }
        if _, ok := instancesMap[i.UUID.String]; !ok {
            instancesMap[i.UUID.String] = &models.InstanceModel{}
            inst := instancesMap[i.UUID.String]
            inst.UUID = i.UUID.String
            inst.Host = i.Host.String
            inst.Hostname = i.Hostname.String
        } else {
            inst := instancesMap[i.UUID.String]
            inst.UUID = i.UUID.String
            inst.Host = i.Host.String
            inst.Hostname = i.Hostname.String
        }
        instances = append(instances, i)
    }
}
log.Printf("[Instances] - Total Count: %d\n", instancesCount)
The problem I'm facing is that if I run the SQL query directly against the database (MariaDB) it returns 7150 records, but the total count inside the program outputs 5196 records. I also checked the SetConnMaxLifetime parameter for the db connection and set it to 240 seconds, and it doesn't show any errors or broken connectivity between the db and the program. I also tried paginating (a LIMIT of 5000 records each) across two different queries; the first one returns the 5000 records, but the second one only 196. I'm using the "github.com/go-sql-driver/mysql" package. Any ideas?
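Not an answer from the original thread, but one detail worth checking with database/sql in a case like this: rows.Next() returns false both at end-of-data and when iteration fails midway (for example a dropped connection), and the underlying error is only surfaced by rows.Err(). A minimal sketch of the check, added after the loop above:

for instancesRow.Next() {
    // ... scan and count each row as above ...
}
// Next() returning false does not distinguish "no more rows" from "iteration failed";
// only Err() does, so an early stop would otherwise go unnoticed.
if err := instancesRow.Err(); err != nil {
    log.Printf("[Error] - Row iteration stopped early: %s\n", err)
}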

Does gorm share an ID from a previous transaction Create

I have a query that requires the ID from the previous insert. Does gorm give back the ID in transaction mode?
Example:
err := db.Transaction(func(tx *gorm.DB) error {
    if err := tx.Model(&model.user).Create(&user).Error; err != nil {
        tx.Rollback()
        return err
    }
    // if user inserted get the user ID from previous user insert of DB
    for _, v := range profiles {
        v.UserID = user.ID // Can I get this ID from previous Create operation?
    }
    if err := tx.Model(&model.profile).Create(&profiles).Error; err != nil {
        tx.Rollback()
        return err
    }
    return nil
})
Yes, you can use the Find function to get the data that you created.
if err := tx.Model(&model.profile).Create(&profiles).Find(&profiles).Error; err != nil {
    tx.Rollback()
}
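As a side note (based on gorm's documented behavior, not on the answer above): after a successful Create, gorm writes the auto-generated primary key back into the struct, so the ID is usually available without an extra query. A minimal sketch inside the transaction closure:

// gorm backfills the auto-increment primary key after the INSERT succeeds.
if err := tx.Create(&user).Error; err != nil {
    return err
}
fmt.Println(user.ID) // already populated here; usable for the profiles below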

Multiple rows.Next() at the same time golang database/sql

I am working on a function which gets users. My problem is that I have to return the results of two queries, iterated with rows.Next() and rows1.Next(), as one result like user_id, subject, phone.
Here is what I have, but it doesn't work. Could you please help me out with some suggestions:
func GetUsers() (users []Users, err error) {
    users = make([]Users, 0)
    rows, err := db1.Query("SELECT user_id, subject,phone FROM users limit 11")
    rows1, err := db1.Query("Select body from users limit 11")
    defer rows.Close()
    if err != nil {
        // handle this error better than this
        log.Fatal(err)
        //return
    }
    var user Users
    for rows.Next() {
        rows.Scan(&user.ID, &user.Subject, &user.Phone)
        users = append(users, user)
    }
    for rows1.Next() {
        rows1.Scan(&user.Body)
        users = append(users, user)
    }
    return
}
I have a table called users which has no primary key, basically:
user_id | name  | body
--------+-------+-----
1       | model | 2
1       | brand | 1
1       | fuel  | 3
1       | date  | 1
1       | year  | 1
I need the result to look like this:
userid | model | brand | fuel | date | year
-------+-------+-------+------+------+-----
1      | 2     | 1     | 3    | 1    | 1
so the values from the name column become the columns of the result (see the illustrative sketch below). I have already defined my struct, which contains all the fields needed.
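For illustration only (this is not from the question or the answers that follow): one way to pivot such key/value rows into a single struct per user is to switch on the name column while scanning. The struct and field names here are assumptions based on the sample table:

// Hypothetical pivot of (user_id, name, body) rows into one struct per user_id.
type Car struct {
    UserID, Model, Brand, Fuel, Date, Year int
}

rows, err := db1.Query("SELECT user_id, name, body FROM users")
if err != nil {
    log.Fatal(err)
}
defer rows.Close()

cars := map[int]*Car{}
for rows.Next() {
    var userID, body int
    var name string
    if err := rows.Scan(&userID, &name, &body); err != nil {
        log.Fatal(err)
    }
    c, ok := cars[userID]
    if !ok {
        c = &Car{UserID: userID}
        cars[userID] = c
    }
    switch name { // each distinct name value becomes a column in the result
    case "model":
        c.Model = body
    case "brand":
        c.Brand = body
    case "fuel":
        c.Fuel = body
    case "date":
        c.Date = body
    case "year":
        c.Year = body
    }
}
if err := rows.Err(); err != nil {
    log.Fatal(err)
}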
Looking at the example code it's clear that you should do whatever you're doing in one query instead of two. Even if your real code is a little different, say you need to query more than one table, you should still probably do only one query using JOINs.
users := make([]*User, 0)
rows, err := db1.Query("SELECT user_id, subject,phone, body FROM users limit 11")
if err != nil {
    log.Fatal(err)
}
defer rows.Close()
for rows.Next() {
    user := new(User)
    if err := rows.Scan(&user.ID, &user.Subject, &user.Phone, &user.Body); err != nil {
        panic(err)
    }
    users = append(users, user)
}
if err := rows.Err(); err != nil {
    panic(err)
}
That said, if you want to do it the way you illustrated in your question, you can do that like this.
(this is probably not what you should do)
users := make([]*User, 0)
rows, err := db1.Query("SELECT user_id, subject,phone FROM users limit 11")
if err != nil {
    log.Fatal(err)
}
defer rows.Close()
rows1, err := db1.Query("Select body from users limit 11")
if err != nil {
    panic(err)
}
defer rows1.Close()
for rows.Next() {
    user := new(User)
    if err := rows.Scan(&user.ID, &user.Subject, &user.Phone); err != nil {
        panic(err)
    }
    if !rows1.Next() {
        panic("no next body row")
    }
    if err := rows1.Scan(&user.Body); err != nil {
        panic(err)
    }
    users = append(users, user)
}
if err := rows.Err(); err != nil {
    panic(err)
}
if err := rows1.Err(); err != nil {
    panic(err)
}
const maxPerPage = 100

type Scanable interface {
    GetScans() []interface{}
}

type User struct {
    ID      int
    Subject string
    Phone   string
    Body    string
}

func (s *User) GetScans() []interface{} {
    return []interface{}{&s.ID, &s.Subject, &s.Phone, &s.Body}
}

func getNewUserList() []*User {
    users := make([]*User, maxPerPage)
    for i := 0; i < maxPerPage; i++ {
        users[i] = new(User)
    }
    return users
}

var usersPool = sync.Pool{
    New: func() interface{} {
        return getNewUserList()
    },
}

func getUsers() (b []*User) {
    ifc := usersPool.Get()
    if ifc != nil {
        b = ifc.([]*User)
    } else {
        b = getNewUserList()
    }
    return
}

func putUsers(b []*User) {
    // if cap(b) <= maxPerPage {
    //     b = b[:0] // reset
    //     usersPool.Put(b)
    // }
    usersPool.Put(b)
}

func TestUsers(t *testing.T) {
    users := getUsers()
    rows, err := PostgresConnection.Query("SELECT user_id, subject,phone, body FROM users limit 11")
    if err != nil {
        log.Fatal(err)
    }
    defer rows.Close()
    length := 0
    for rows.Next() {
        if err := rows.Scan(users[length].GetScans()...); err != nil {
            panic(err)
        }
        if err := rows.Err(); err != nil {
            panic(err)
        }
        length++
    }
    result := users[:length]
    fmt.Println(result)
    putUsers(users)
}
This is a sample function to get multiple documents from the db using the golang sqlx library.
func GetDocuments() ([]SupportedDocument, error) {
    var documents []SupportedDocument
    query := `SELECT * FROM documents limit 10`
    err := database.Select(&documents, query)
    return documents, err
}
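For completeness, a minimal sketch of the setup this snippet assumes (the DSN and the SupportedDocument fields are placeholders, not taken from the answer):

import (
    _ "github.com/go-sql-driver/mysql"
    "github.com/jmoiron/sqlx"
)

// Placeholder struct; the db tags must match the columns of the documents table.
type SupportedDocument struct {
    ID   int64  `db:"id"`
    Name string `db:"name"`
}

var database *sqlx.DB

func init() {
    var err error
    database, err = sqlx.Connect("mysql", "user:password@tcp(127.0.0.1:3306)/mydb") // placeholder DSN
    if err != nil {
        panic(err)
    }
}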

Dumping MySQL tables to JSON with Golang

I was putting together a quick dumper from MySQL to JSON in Go. However, I find that everything I retrieve from the database is a []byte array, so instead of native JSON integers or booleans, I'm getting everything encoded as strings.
Subset of the code:
import (
    "database/sql"
    "encoding/json"
    "fmt"
    "io"
    "reflect"
    "time"

    _ "github.com/go-sql-driver/mysql"
)

func dumpTable(w io.Writer, table string) {
    // ... (db, Query and checkError come from code omitted here)
    rows, err := Query(db, fmt.Sprintf("SELECT * FROM %s", table))
    checkError(err)
    columns, err := rows.Columns()
    checkError(err)
    scanArgs := make([]interface{}, len(columns))
    values := make([]interface{}, len(columns))
    for i := range values {
        scanArgs[i] = &values[i]
    }
    for rows.Next() {
        err = rows.Scan(scanArgs...)
        checkError(err)
        record := make(map[string]interface{})
        for i, col := range values {
            if col != nil {
                fmt.Printf("\n%s: type= %s\n", columns[i], reflect.TypeOf(col))
                switch t := col.(type) {
                default:
                    fmt.Printf("Unexpected type %T\n", t)
                case bool:
                    fmt.Printf("bool\n")
                    record[columns[i]] = col.(bool)
                case int:
                    fmt.Printf("int\n")
                    record[columns[i]] = col.(int)
                case int64:
                    fmt.Printf("int64\n")
                    record[columns[i]] = col.(int64)
                case float64:
                    fmt.Printf("float64\n")
                    record[columns[i]] = col.(float64)
                case string:
                    fmt.Printf("string\n")
                    record[columns[i]] = col.(string)
                case []byte: // -- all cases go HERE!
                    fmt.Printf("[]byte\n")
                    record[columns[i]] = string(col.([]byte))
                case time.Time:
                    // record[columns[i]] = col.(string)
                }
            }
        }
        s, _ := json.Marshal(record)
        w.Write(s)
        io.WriteString(w, "\n")
    }
}
I also needed to dump database tables to JSON, and here is how I achieved it. Unlike another answer in this topic, not everything ends up as a string; thanks to this answer (https://stackoverflow.com/a/17885636/4124416) I could get integer fields correctly:
func getJSON(sqlString string) (string, error) {
    rows, err := db.Query(sqlString)
    if err != nil {
        return "", err
    }
    defer rows.Close()
    columns, err := rows.Columns()
    if err != nil {
        return "", err
    }
    count := len(columns)
    tableData := make([]map[string]interface{}, 0)
    values := make([]interface{}, count)
    valuePtrs := make([]interface{}, count)
    for rows.Next() {
        for i := 0; i < count; i++ {
            valuePtrs[i] = &values[i]
        }
        rows.Scan(valuePtrs...)
        entry := make(map[string]interface{})
        for i, col := range columns {
            var v interface{}
            val := values[i]
            b, ok := val.([]byte)
            if ok {
                v = string(b)
            } else {
                v = val
            }
            entry[col] = v
        }
        tableData = append(tableData, entry)
    }
    jsonData, err := json.Marshal(tableData)
    if err != nil {
        return "", err
    }
    fmt.Println(string(jsonData))
    return string(jsonData), nil
}
Here is a sample output:
[{"ID":0,"Text":"Zero"},{"ID":1,"Text":"One"},{"ID":2,"Text":"Two"}]
You need to use prepared statements to get the native types. MySQL has two protocols: one transmits everything as text, the other as the "real" types, and the binary protocol is only used when you use prepared statements. See https://github.com/go-sql-driver/mysql/issues/407
The function getJSON below is correct:
func getJSON(sqlString string) (string, error) {
    stmt, err := db.Prepare(sqlString)
    if err != nil {
        return "", err
    }
    defer stmt.Close()
    rows, err := stmt.Query()
    if err != nil {
        return "", err
    }
    defer rows.Close()
    columns, err := rows.Columns()
    if err != nil {
        return "", err
    }
    tableData := make([]map[string]interface{}, 0)
    count := len(columns)
    values := make([]interface{}, count)
    scanArgs := make([]interface{}, count)
    for i := range values {
        scanArgs[i] = &values[i]
    }
    for rows.Next() {
        err := rows.Scan(scanArgs...)
        if err != nil {
            return "", err
        }
        entry := make(map[string]interface{})
        for i, col := range columns {
            v := values[i]
            b, ok := v.([]byte)
            if ok {
                entry[col] = string(b)
            } else {
                entry[col] = v
            }
        }
        tableData = append(tableData, entry)
    }
    jsonData, err := json.Marshal(tableData)
    if err != nil {
        return "", err
    }
    return string(jsonData), nil
}
There's not much you can do because the driver - database/sql interaction is pretty much a one way street and the driver can't help you with anything when the data is handed over to database/sql.
You could try your luck with http://godoc.org/github.com/arnehormann/sqlinternals/mysqlinternals:
1. Query the database.
2. Retrieve the Column slice with cols, err := mysqlinternals.Columns(rows).
3. Create a new var values := make([]interface{}, len(cols)) and iterate over cols.
4. Get the closest matching Go type per column with refType, err := cols[i].ReflectGoType().
5. Create type placeholders with values[i] = reflect.Zero(refType).Interface().
6. rows.Next() and err = rows.Scan(values...). Don't recreate values; copy and reuse it.
I guess this will still be pretty slow, but you should be able to get somewhere with it.
If you encounter problems, please file an issue - I'll get to it as soon as I can.
I have a table named users inside the practice_db database. I have included the table structure and data in the following program, which converts the users table into JSON format.
You can also check the source code at https://gist.github.com/hygull/1725442b0f121a5fc17b28e04796714d.
/**
{
"created_on": "26 may 2017",
"todos": [
"go get github.com/go-sql-driver/mysql",
"postman(optional)",
"browser(optional)",
],
"aim": "Reading fname column into []string(slice of strings)"
}
*/
/*
mysql> select * from users;
+----+-----------+----------+----------+-------------------------------+--------------+-------------------------------------------------------------------------------------------------+
| id | fname | lname | uname | email | contact | profile_pic |
+----+-----------+----------+----------+-------------------------------+--------------+-------------------------------------------------------------------------------------------------+
| 1 | Rishikesh | Agrawani | hygull | rishikesh0014051992#gmail.com | 917353787704 | https://cdn4.iconfinder.com/data/icons/rcons-user/32/user_group_users_accounts_contacts-512.png |
| 2 | Sandeep | E | sandeep | sandeepeswar8#gmail.com | 919739040038 | https://cdn4.iconfinder.com/data/icons/eldorado-user/40/user-512.png |
| 3 | Darshan | Sidar | darshan | sidardarshan#gmail.com | 917996917565 | https://cdn4.iconfinder.com/data/icons/rcons-user/32/child_boy-512.png |
| 4 | Surendra | Prajapat | surendra | surendrakgadwal#gmail.com | 918385894407 | https://cdn4.iconfinder.com/data/icons/rcons-user/32/account_male-512.png |
| 5 | Mukesh | Jakhar | mukesh | mjakhar.kjakhar#gmail.com | 919772254140 | https://cdn2.iconfinder.com/data/icons/rcons-user/32/male-circle-512.png |
+----+-----------+----------+----------+-------------------------------+--------------+-------------------------------------------------------------------------------------------------+
5 rows in set (0.00 sec)
mysql>
*/
package main

import "log"
import "net/http"
import "encoding/json"
import (
    _ "github.com/go-sql-driver/mysql"
    "database/sql"
)

func users(w http.ResponseWriter, r *http.Request) {
    // db, err := sql.Open("mysql", "<username>:<password>@tcp(127.0.0.1:<port>)/<dbname>?charset=utf8")
    db, err := sql.Open("mysql", "hygull:admin#67@tcp(127.0.0.1:3306)/practice_db?charset=utf8")
    w.Header().Set("Content-Type", "application/json")
    if err != nil {
        log.Fatal(err)
    }
    rows, err := db.Query("select id, fname, lname, uname, email, contact, profile_pic from users")
    if err != nil {
        log.Fatal(err)
    }
    type User struct {
        Id         int    `json:"id"`
        Fname      string `json:"firstname"`
        Lname      string `json:"lastname"`
        Uname      string `json:"username"`
        Email      string `json:"email"`
        Contact    int    `json:"contact"`
        ProfilePic string `json:"profile_pic"`
    }
    var users []User
    for rows.Next() {
        var id, contact int
        var fname string
        var lname string
        var uname, email, profile_pic string
        rows.Scan(&id, &fname, &lname, &uname, &email, &contact, &profile_pic)
        users = append(users, User{id, fname, lname, uname, email, contact, profile_pic})
    }
    usersBytes, _ := json.Marshal(&users)
    w.Write(usersBytes)
    db.Close()
}

func main() {
    http.HandleFunc("/users/", users)
    http.ListenAndServe(":8080", nil)
}
/* REQUEST
http://127.0.0.1:8080/users/
*/
/* RESPONSE
[
{
"id": 1,
"firstname": "Rishikesh",
"lastname": "Agrawani",
"username": "hygull",
"email": "rishikesh0014051992#gmail.com",
"contact": 917353787704,
"profile_pic": "https://cdn4.iconfinder.com/data/icons/rcons-user/32/user_group_users_accounts_contacts-512.png"
},
{
"id": 2,
"firstname": "Sandeep",
"lastname": "E",
"username": "sandeep",
"email": "sandeepeswar8#gmail.com",
"contact": 919739040038,
"profile_pic": "https://cdn4.iconfinder.com/data/icons/eldorado-user/40/user-512.png"
},
{
"id": 3,
"firstname": "Darshan",
"lastname": "Sidar",
"username": "darshan",
"email": "sidardarshan#gmail.com",
"contact": 917996917565,
"profile_pic": "https://cdn4.iconfinder.com/data/icons/rcons-user/32/child_boy-512.png"
},
{
"id": 4,
"firstname": "Surendra",
"lastname": "Prajapat",
"username": "surendra",
"email": "surendrakgadwal#gmail.com",
"contact": 918385894407,
"profile_pic": "https://cdn4.iconfinder.com/data/icons/rcons-user/32/account_male-512.png"
},
{
"id": 5,
"firstname": "Mukesh",
"lastname": "Jakhar",
"username": "mukesh",
"email": "mjakhar.kjakhar#gmail.com",
"contact": 919772254140,
"profile_pic": "https://cdn2.iconfinder.com/data/icons/rcons-user/32/male-circle-512.png"
}
]
*/
Based on the answers here, this is the most efficient code I could come up with. Note that this is outputting each row as a separate JSON array to save key name repetition.
// OutputJSONMysqlRowsStream outputs rows as a JSON array stream to save ram & output size due to key name repetition
func OutputJSONMysqlRowsStream(writer http.ResponseWriter, rows *sql.Rows) {
    defer rows.Close()
    columns, err := rows.Columns()
    if err != nil {
        OutputJSONError(writer, "Failed to get column names")
        return
    }
    jsonColumns, err := json.Marshal(columns)
    if err != nil {
        OutputJSONError(writer, "Failed to encode json of column names")
        return
    }
    writer.Header().Set("Content-Type", "application/cal-json-stream; charset=utf-8")
    fmt.Fprintln(writer, "{\"status\": \"done\", \"data\":{ \"json_stream_fields\":"+string(jsonColumns)+"}}")
    columnCount := len(columns)
    rowDataHolder := make([]interface{}, columnCount)
    rowDataHolderPointers := make([]interface{}, columnCount)
    if err != nil {
        log.Println(err)
    }
    for rows.Next() {
        for i := 0; i < columnCount; i++ {
            rowDataHolderPointers[i] = &rowDataHolder[i]
        }
        err := rows.Scan(rowDataHolderPointers...)
        if err != nil {
            log.Println(err)
        } else {
            for i, value := range rowDataHolder {
                tempValue, ok := value.([]byte)
                if ok {
                    rowDataHolder[i] = string(tempValue)
                }
            }
            jsonEncoder := json.NewEncoder(writer)
            err = jsonEncoder.Encode(rowDataHolder)
            if err != nil {
                log.Println(err)
            }
        }
    }
}
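For context, a hypothetical way to wire the streaming helper above into an HTTP handler (the query and the package-level db handle are assumptions, not part of the answer):

http.HandleFunc("/rows", func(w http.ResponseWriter, r *http.Request) {
    rows, err := db.Query("SELECT id, name FROM mytable") // placeholder query
    if err != nil {
        http.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    // OutputJSONMysqlRowsStream closes rows itself via defer rows.Close().
    OutputJSONMysqlRowsStream(w, rows)
})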
You can dump the table into JSON just fine, however everything will be a string :(
q := "select * from table"
debug("SQL: %s", q)
rows, err := db.Query(q)
checkError(err)
defer rows.Close()
columns, err := rows.Columns()
checkError(err)
scanArgs := make([]interface{}, len(columns))
values := make([]interface{}, len(columns))
for i := range values {
scanArgs[i] = &values[i]
}
for rows.Next() {
err = rows.Scan(scanArgs...)
checkError(err)
record := make(map[string]interface{})
for i, col := range values {
if col != nil {
record[columns[i]] = fmt.Sprintf("%s", string(col.([]byte)))
}
}
s, _ := json.Marshal(record)
fmt.Printf("%s\n", s)
}