Cause of unconnected node warnings in verilog code - configuration

I am writing code that performs the trapezoidal integration method. The code has the FPGA clock (I'm using the Mimas Spartan 6), SIGNAL (the new point to be accounted for in the integration), x (the interval between points), and SUM (the result of the past integrations) for inputs, and the output is OUT. Since there must be two inputs to the trapezoidal method, there are two registers yregone and yregtwo, so that SIGNAL is set to yregone and yregtwo is set to the old yregone (the past SIGNAL). The two are added, then bit-shifted so that the sum is divided by two, multiplied by x, and then SUM is added (OUT is mapped to SUM on the board).
The code compiles with the warnings given. I had read elsewhere that warnings could be ignored. The code compiled despite the errors so I attempted to download the code onto the board and it said that configuration failed. Therefore I assumed that the warnings must point out some error in the code that must be fixed. What is wrong?
Code
// Trapezoidal integration step: OUT = (y[n] + y[n-1]) * x / 2 + SUM,
// where OUT's pins are wired back to SUM's pins on the board so the
// result accumulates across clock cycles.
module trapverilog(
input CLK,
input signed [7:0] SIGNAL,               // new sample y[n], two's complement
input [7:0] x,                           // step size h, treated as unsigned
input signed [20:0] SUM,                 // OUT pins are mapped to SUM pins on board
output reg OUT1,
output reg OUT2,
output reg OUT3,
output reg OUT4,
output reg OUT5,
output reg OUT6,
output reg OUT7,
output reg OUT8,
output reg OUT9,
output reg OUT10,
output reg OUT11,
output reg OUT12,
output reg OUT13,
output reg OUT14,
output reg OUT15,
output reg OUT16,
output reg OUT17,
output reg OUT18,
output reg OUT19,
output reg OUT20
);
reg signed [7:0] yregone;                // y[n]   (current sample)
reg signed [7:0] yregtwo;                // y[n-1] (previous sample)
reg signed [20:0] innerSumOutput;        // (y[n]+y[n-1])*x/2 + SUM
reg signed [20:0] innerSum;              // (y[n]+y[n-1])*x

// Shift-add multiply of a signed 8-bit value by an unsigned 8-bit value.
// Each partial product SIGN-extends `a`: the original zero-extended it
// ({8'b0, a}), which produces wrong results for negative signals, and its
// 16-bit partials left innerSum[20:16] constant -- that is what XST's
// "FF/Latch has a constant value of 0 ... will be trimmed" warnings meant.
function signed [20:0] multiply;
input signed [7:0] a;
input [7:0] b;                           // unsigned scale factor
reg signed [20:0] acc;
integer i;
begin
acc = 21'sd0;
for (i = 0; i < 8; i = i + 1)
if (b[i])
acc = acc + (a <<< i);                   // a sign-extends to 21 bits before the shift
multiply = acc;
end
endfunction

// The original used `always #(posedge CLK)`: `#` is a delay control, not an
// event control, so nothing was clocked. An edge-sensitive process needs `@`.
always @(posedge CLK)
begin
yregtwo <= yregone;
yregone <= SIGNAL;
if (yregone != 0)
begin
// NOTE(review): yregone + yregtwo can overflow 8 bits before the call --
// widen the operands if full-scale inputs are expected; confirm with a sim.
innerSum <= multiply((yregone + yregtwo), x); // treats x as plain h
// `>>>` is the ARITHMETIC (sign-preserving) right shift, i.e. /2.
// The original used `<<<`, which multiplies by 2 instead of dividing.
innerSumOutput <= (innerSum >>> 1) + SUM;
// Only bits [20:1] are brought out (20 OUT pins for a 21-bit value), so
// innerSumOutput[0] has no load -- that is the PAR "loadless signal" DRC.
OUT20 <= innerSumOutput[20];
OUT1 <= innerSumOutput[1];               // OUT is two's complement
OUT2 <= innerSumOutput[2];
OUT3 <= innerSumOutput[3];
OUT4 <= innerSumOutput[4];
OUT5 <= innerSumOutput[5];
OUT6 <= innerSumOutput[6];
OUT7 <= innerSumOutput[7];
OUT8 <= innerSumOutput[8];
OUT9 <= innerSumOutput[9];
OUT10 <= innerSumOutput[10];
OUT11 <= innerSumOutput[11];
OUT12 <= innerSumOutput[12];
OUT13 <= innerSumOutput[13];
OUT14 <= innerSumOutput[14];
OUT15 <= innerSumOutput[15];
OUT16 <= innerSumOutput[16];
OUT17 <= innerSumOutput[17];
OUT18 <= innerSumOutput[18];
OUT19 <= innerSumOutput[19];
end
end
endmodule
UCF
NET "CLK" LOC = P126;
NET "SIGNAL[0]" LOC = P35 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "SIGNAL[1]" LOC = P34 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "SIGNAL[2]" LOC = P33 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "SIGNAL[3]" LOC = P32 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "SIGNAL[4]" LOC = P30 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "SIGNAL[5]" LOC = P29 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "SIGNAL[6]" LOC = P27 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "SIGNAL[7]" LOC = P26 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "x[0]" LOC = P24 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "x[1]" LOC = P23 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "x[2]" LOC = P22 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "x[3]" LOC = P21 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "x[4]" LOC = P17 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "x[5]" LOC = P16 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "x[6]" LOC = P15 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "x[7]" LOC = P14 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "SUM[0]" LOC = P12 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "SUM[1]" LOC = P11 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST;
NET "SUM[2]" LOC = P10 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[3]" LOC = P9 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[4]" LOC = P8 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[5]" LOC = P7 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[6]" LOC = P6 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[7]" LOC = P5 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[8]" LOC = P2 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[9]" LOC = P1 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[10]" LOC = P142 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[11]" LOC = P141 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[12]" LOC = P140 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[13]" LOC = P139 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[14]" LOC = P138 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[15]" LOC = P137 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[16]" LOC = P134 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[17]" LOC = P133 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[18]" LOC = P132 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[19]" LOC = P131 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "SUM[20]" LOC = P43 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT1" LOC = P44 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT2" LOC = P45 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT3" LOC = P46 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT4" LOC = P47 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT5" LOC = P48 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT6" LOC = P50 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT7" LOC = P51 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT8" LOC = P55 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT9" LOC = P56 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT10" LOC = P74 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT11" LOC = P75 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT12" LOC = P78 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT13" LOC = P79 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT14" LOC = P80 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT15" LOC = P81 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT16" LOC = P82 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT17" LOC = P83 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT18" LOC = P84 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT19" LOC = P85 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
NET "OUT20" LOC = P87 | IOSTANDARD = LVCMOS33 | DRIVE = 8 | SLEW = FAST ;
Warnings
WARNING:Xst:2677 - Node <innerSum_20> of sequential type is unconnected in block <trapverilog>.
WARNING:Xst:2677 - Node <innerSumOutput_0> of sequential type is unconnected in block <trapverilog>.
WARNING:Xst:2677 - Node <innerSum_20> of sequential type is unconnected in block <trapverilog>.
WARNING:Xst:2677 - Node <innerSumOutput_0> of sequential type is unconnected in block <trapverilog>.
WARNING:Xst:1710 - FF/Latch <innerSum_19> (without init value) has a constant value of 0 in block <trapverilog>. This FF/Latch will be trimmed during the optimization process.
WARNING:Xst:1710 - FF/Latch <innerSum_16> (without init value) has a constant value of 0 in block <trapverilog>. This FF/Latch will be trimmed during the optimization process.
WARNING:Xst:1895 - Due to other FF/Latch trimming, FF/Latch <innerSum_17> (without init value) has a constant value of 0 in block <trapverilog>. This FF/Latch will be trimmed during the optimization process.
WARNING:Xst:1895 - Due to other FF/Latch trimming, FF/Latch <innerSum_18> (without init value) has a constant value of 0 in block <trapverilog>. This FF/Latch will be trimmed during the optimization process.
WARNING:Security:42 - Your software subscription period has lapsed. Your current
WARNING:Security:42 - Your software subscription period has lapsed. Your current version of Xilinx tools will continue
WARNING:Par:288 - The signal SUM<0>_IBUF has no load. PAR will not attempt to route this signal.
WARNING:Par:283 - There are 1 loadless signals in this design. This design will cause Bitgen to issue DRC warnings.

You need to understand the warnings that the tools give you, not just ignore them. It looks like you have a lot of unconnected signals and FF/Latches with constant values in your design--that probably means it won't do what you want it to do even if you do manage to download it. The last PAR warning indicates a DRC error, which may be the source of your problems because Bitgen isn't sure what to do with your loadless signal. Try cleaning up the warnings (or at least make sure you understand all of them and know which ones are safe to disregard) and then try again.
Does your design simulate OK? If the answer is "What simulation?" I'd seriously suggest you make sure you can run a simple simulation of your design before you synthesize and PAR it.

Related

How to import CSV file into Octave and keep the column headers

I am trying to import a CSV file so that I can use it with the k-means clustering algorithm. The file contains 6 columns and over 400 rows. Here is a picture of the excel document I used (before exporting it into a CSV file). In essence, I want to be able to use the column header names in my code so that I can use the column names when plotting the data, as well as clustering it.
I looked into some other documentation and came up with this code but nothing came as an output when I just put it into the command window:
[Player BA OPS RBI OBP] = CSVIMPORT( 'MLBdata.csv', 'columns', {'Player', 'BA', 'OPS', 'RBI', 'OBP'}
The only thing that has worked for me so far is the dlm read function, but it returns 0 when there is a String of words
N = dlmread('MLBdata.csv')
Octave
Given file data.csv with the following contents:
Player,Year,BA,OPS,RBI,OBP
SandyAlcantara,2019,0.086,0.22,4,0.117
PeteAlonso,2019,0.26,0.941,120,0.358
BrandonLowe,2019,0.27,0.85,51,0.336
MikeSoroka,2019,0.077,0.22,3,0.143
Open an octave terminal and type:
pkg load io                  % csv2cell lives in the io package
C = csv2cell( 'data.csv' )   % headers + data land in one cell array; row 1 holds the column names
resulting in the following cell array:
C =
{
[1,1] = Player
[2,1] = SandyAlcantara
[3,1] = PeteAlonso
[4,1] = BrandonLowe
[5,1] = MikeSoroka
[1,2] = Year
[2,2] = 2019
[3,2] = 2019
[4,2] = 2019
[5,2] = 2019
[1,3] = BA
[2,3] = 0.086000
[3,3] = 0.2600
[4,3] = 0.2700
[5,3] = 0.077000
[1,4] = OPS
[2,4] = 0.2200
[3,4] = 0.9410
[4,4] = 0.8500
[5,4] = 0.2200
[1,5] = RBI
[2,5] = 4
[3,5] = 120
[4,5] = 51
[5,5] = 3
[1,6] = OBP
[2,6] = 0.1170
[3,6] = 0.3580
[4,6] = 0.3360
[5,6] = 0.1430
}
From there on, you can collect that data into arrays or structs as you like and continue working. One nice option is Andrew Janke's nice 'tablicious' package:
octave:13> pkg load tablicious
octave:14> T = cell2table( C(2:end,:), 'VariableNames', C(1,:) );
octave:15> prettyprint(T)
-------------------------------------------------------
| Player | Year | BA | OPS | RBI | OBP |
-------------------------------------------------------
| SandyAlcantara | 2019 | 0.086 | 0.22 | 4 | 0.117 |
| PeteAlonso | 2019 | 0.26 | 0.941 | 120 | 0.358 |
| BrandonLowe | 2019 | 0.27 | 0.85 | 51 | 0.336 |
| MikeSoroka | 2019 | 0.077 | 0.22 | 3 | 0.143 |
-------------------------------------------------------

Mysql / ignore duplicates on multiple columns

Afternoon folks
Following on from a previous question,
I have created a small python app that will scrape job postings for me from a website.
I have them saved in a small sql db
Columns are
Job_id / job_title / job company / job_salary / job_location / job_post_date
I was planning on running my script once per day and hoping to ignore duplicate entries.
What sort of query can check against two columns, i.e. title and company, to make sure the row doesn't get inserted again? The date posted will always increment by 1 per day.
Python Code
import re

import mysql.connector
import requests
from bs4 import BeautifulSoup

# Scrape Indeed UK search results for a job/location/radius query and insert
# each job card into job_tbl (title, company, salary, location, days posted).
my_db = mysql.connector.connect(
    host="192.168.1.1",
    user='job_user',
    password='job1',
    database='job_db'
)
my_cursor = my_db.cursor()

radius = "10"  # default shown in the prompt below
val1 = input("Enter Job: ")
val2 = input("Enter Location: ")
val3 = input("Enter Radius default:(10): ")

url = "https://www.indeed.co.uk/jobs?q={}&l={}&radius={}".format(val1, val2, val3)
page = requests.get(url)
soup = BeautifulSoup(page.content, 'html.parser')
result1 = soup.find_all(class_="jobsearch-SerpJobCard")

for results in result1:
    job = results.find('a', attrs={'data-tn-element': 'jobTitle'})
    company = results.find('span', attrs={'class': 'company'})
    location = results.find('span', attrs={'class': 'location accessible-contrast-color-location'})
    salary = results.find('span', attrs={'class': 'salaryText'})
    date_pos = results.find('span', attrs={'class': 'date'})
    if job is None or company is None or date_pos is None:
        # Skip malformed cards instead of crashing on `.text` of None.
        continue
    i1 = job.text.strip()
    i2 = company.text.strip()
    i3 = location.text.strip() if location is not None else "N/A"
    i4 = salary.text.strip() if salary is not None else "N/A"
    # The date label looks like "3 days ago" / "30+ days ago" / "Today".
    # The original `[:1]` kept only the FIRST character, so "30+ days ago"
    # was stored as "3"; extract the whole run of digits instead
    # (0 when there are none, e.g. "Today").
    day_match = re.search(r'\d+', date_pos.text)
    i5 = day_match.group(0) if day_match else "0"
    print(i1)
    print(i2)
    print(i3)
    print(i4)
    print("\n")
    # Parameterized query: values are escaped by the driver, no SQL injection.
    sql = "INSERT INTO job_tbl (job_title, job_company, job_salary, job_location, job_posted) \
           VALUES (%s, %s, %s, %s, %s)"
    val = (i1, i2, i3, i4, i5)
    my_cursor.execute(sql, val)

my_db.commit()
SQL Query
+--------+-------------------------------------------------------------------+-------------------------------+----------------+----------------------------+------------+
| job_id | job_title | job_company | job_salary | job_location | job_posted |
+--------+-------------------------------------------------------------------+-------------------------------+----------------+----------------------------+------------+
| 1 | IT Technician | Strathallan School | N/A | £19,000 - £23,000 a year | 3 |
| 2 | English into Romanian IT/Technical Translator (relocation to... | Alpha CRC Ltd. | N/A | £22,000 - £25,000 a year | 7 |
| 3 | IT/Trainee IT Support Analyst | CJ Lang & Son Limited | Dundee DD4 8JU | N/A | 3 |
| 4 | IT Technical Support Apprentice | GP Strategies Training Ltd | Dundee | £10,000 - £12,000 a year | 1 |
| 5 | IT Operations Manager - IRC84524 | Scottish Government | Dundee DD1 | £48,930 - £61,006 a year | 3 |
| 6 | Temporary IT Buyer | brightsolid | Dundee | N/A | 7 |
| 7 | IT Site Support Analyst | Thermo Fisher Scientific | Perth | N/A | 6 |
| 8 | Network and System Administrator | Solutions Driven | Forfar | £30,000 - £35,000 a year | 3 |
| 9 | IT Service Desk Team Leader | Cross Resourcing | Dundee | N/A | 3 |
| 10 | Senior Network Engineer | Raytheon Intelligence & Space | Glenrothes | N/A | 3 |
| 11 | Solutions Architect | NCR | Dundee | N/A | 3 |
| 12 | Technical Support Specialist | The Army | N/A | £15,985 - £20,400 a year | 3 |
| 13 | Pre-Sales Solutions Architect – 12 Month Graduate Internship... | DELL | N/A | N/A | 3 |
+--------+-------------------------------------------------------------------+-------------------------------+----------------+----------------------------+------------+
13 rows in set (0.002 sec)
If I run the same application again it will add the same results, What I want is to match BOTH a Title & Company and check if it has already been added
You would typically put a unique constraint on that tuple of columns, and use `insert ... on duplicate key update` when inserting, so duplicates are not inserted; instead, the date of the existing row is updated.
So something like:
-- Jobs table: the UNIQUE(title, company) constraint means re-running the
-- scraper cannot insert the same posting twice; the upsert below refreshes
-- the date instead.
create table mytable (
id int primary key auto_increment,
title varchar(50), -- adjust the size as needed
company varchar(50),
salary varchar(50), -- the scraped data is text ('N/A', '£30,000 - £35,000 a year'), not a number
location varchar(50),
post_date datetime
default current_timestamp, -- not mandatory, but maybe helpful?
unique (title, company)
);
Then:
-- On a (title, company) collision, bump the date instead of inserting.
insert into mytable (title, company, salary, location)
values (?, ?, ?, ?)
on duplicate key update post_date = current_timestamp;

Sphinxsearch can not match arabic words

I have sphinxsearch and use real time index and this is my config of rt table
# Real-time index holding offer titles; min_infix_len=1 plus enable_star
# allow wildcard ("word*") matching.
# NOTE(review): an index definition needs the `index` keyword before its name.
index mc_offers
{
    type = rt
    path = /var/lib/sphinxsearch/mc_offers
    rt_mem_limit = 16M
    rt_field = title
    rt_attr_string = title
    min_word_len = 1
    min_infix_len = 1
    enable_star = 1
    dict = keywords
    charset_type = utf-8
    # Every charset_table mapping must use the ASCII "->" operator. The
    # original config used the Unicode arrow "→" for all of the Arabic fold
    # rules, so none of those characters were mapped and Arabic words were
    # never indexed -- which is why MATCH('(انضم*)') returned nothing while
    # the Armenian range (written with "->") worked.
    # NOTE(review): the list below still contains duplicate source characters
    # (e.g. U+067B, U+0680, U+06C0..U+06C3 each mapped twice); keep only one
    # mapping per character -- confirm which target is intended.
    charset_table = 0..9, A..Z->a..z, _, !, /, +, a..z, U+410..U+42F->U+430..U+44F, U+430..U+44F,\
        U+0531..U+0556->U+0561..U+0586, U+0561..U+0586, U+0587, U+2116,\
        U+0626,U+0627..U+063A,U+0641..U+064A,U+0679,U+067E,U+0686,U+0688,U+0691,U+0698,U+06AF,U+06BA, U+06BB,U+0660..U+0669->0..9,U+06F0..U+06F9->0..9, U+0622->U+0627, U+0623->U+0627, U+0625->U+0627, U+0671->U+0627, U+0672->U+0627, U+0673->U+0627, U+0675->U+0627, U+066E->U+0628, U+067B->U+0628, U+0680->U+0628, U+06C0->U+0629, U+06C1->U+0629, U+06C2->U+0629, U+06C3->U+0629, U+067A->U+062A, U+067B->U+062A, U+067C->U+062A, U+067D->U+062A, U+067F->U+062A, U+0680->U+062A, U+0681->U+062D, U+0682->U+062D, U+0683->U+062D, U+0684->U+062D, U+0685->U+062D, U+0687->U+0686, U+06BF->U+0686, U+0689->U+062F, U+068A->U+062F, U+068C->U+062F, U+068D->U+062F, U+068E->U+062F, U+068F->U+062F, U+0690->U+062F, U+06EE->U+062F, U+068B->U+0688, U+0692->U+0631, U+0693->U+0631, U+0694->U+0631, U+0695->U+0631, U+0696->U+0631, U+0697->U+0631, U+0699->U+0631, U+06EF->U+0631, U+069A->U+0633, U+069B->U+0633, U+069C->U+0633, U+06FA->U+0633, U+069D->U+0635, U+069E->U+0635, U+06FB->U+0635, U+069F->U+0637, U+06A0->U+0639, U+06FC->U+0639, U+06A1->U+0641, U+06A2->U+0641, U+06A3->U+0641, U+06A4->U+0641, U+06A5->U+0641, U+06A6->U+0641, U+066F->U+0642, U+06A7->U+0642, U+06A8->U+0642, U+063B->U+0643, U+063C->U+0643, U+06A9->U+0643, U+06AA->U+0643, U+06AB->U+0643, U+06AC->U+0643, U+06AD->U+0643, U+06AE->U+0643, U+06B0->U+06AF, U+06B1->U+06AF, U+06B2->U+06AF, U+06B3->U+06AF, U+06B4->U+06AF, U+06B5->U+0644, U+06B6->U+0644, U+06B7->U+0644, U+06B8->U+0644, U+06FE->U+0645, U+06B9->U+0646, U+06BC->U+0646, U+06BD->U+0646, U+06BE->U+0647, U+06C0->U+0647, U+06C1->U+0647, U+06C2->U+0647, U+06C3->U+0647, U+06D5->U+0647, U+06FF->U+0647, U+06C4->U+0648, U+06C5->U+0648, U+06C6->U+0648, U+06C7->U+0648, U+06C8->U+0648, U+06C9->U+0648, U+06CA->U+0648, U+06CB->U+0648, U+06CF->U+0648, U+063D->U+064A, U+063E->U+064A, U+063F->U+064A, U+06CC->U+064A, U+06CD->U+064A, U+06CE->U+064A, U+06D0->U+064A, U+06D1->U+064A, U+06D2->U+064A, U+06D3->U+064A
    docinfo = extern
    morphology = none
    # Strip tatweel and combining diacritics so they never block a match.
    ignore_chars = U+0640,U+064B..U+065F,U+06D6..U+06DC,U+06DF..U+06E8,U+06EA..U+06ED
}
and I have row like this one
| id | weight | partner_offer_id | section_id | location_id | place_id|price_aed | price_usd | label_id | lat | lng | end_date |title | description | short_description | tags | type | owner_type |sub_section | user_residency | available_lng_id |
| 405 | 1 | 0 | 1 | 1 | 0 | 123 | 19 | 0 | 25.269428 | 55.279106 | 1893441600 | test offer asd քաք | nknkn انضم | knkjnk انضم | | regular | partner | 4 | visitor resident | 1 8 |
which contains arabic and armenian words.
arabic - انضم
armenian - քաք
and when I run this query it works fine
SELECT id, sub_section, WEIGHT() as relevance FROM mc_offers WHERE MATCH('(քաք*)');
it return result
but when I run same query to match arabic it return empty result
SELECT id, sub_section, WEIGHT() as relevance FROM mc_offers WHERE MATCH('(انضم*)');
Empty set (0.00 sec)
Did you try adding `sql_query_pre = SET NAMES utf8` to the source config?

Google sheets, sort function

I want to auto-sort through the script editor in Google Sheets. I have in my Google Sheets several columns:
+--------+------------+-------+--------+------------+
| Region | Mag | Comp | Region | Mag |
+--------+------------+-------+--------+------------+
| A | MIKA | TRUE | A | MIKA |
| B | KALO | FALSE | B | NOKA |
| C | MINA | FALSE | C | South-East |
| D | North | TRUE | D | North |
| B | NOKA | FALSE | B | KALO |
| C | South-East | FALSE | C | MINA |
+--------+------------+-------+--------+------------+
I would like to match the two columns (Region, Mag) on the left with the two columns (Region, Mag) on the right, so in the end I would have my comparison column (which has a formula like =exact(string1,string2)) with TRUEs only.
I want to have a kind of button so that my two columns (Region, Mag) on the right of Comp could sort themselves.
I had this script, thanks to #JPV
// Adds a custom "Sort" menu to the spreadsheet UI on open.
function onOpen() {
SpreadsheetApp.getUi().createMenu('Sort').addItem('Sort Col D and E', 'sort').addToUi();
}
// Reorders D2:E7 so each row lines up with the matching row of A2:B7.
// NOTE(review): returnarr[j] is assigned only when a match is found, so any
// source row with no matching target row leaves a hole (undefined) in the
// array; Range.setValues() requires a full rectangular Object[][] and throws
// "Cannot convert Array to Object[][]" -- the error reported above.
function sort() {
var ss = SpreadsheetApp.getActiveSheet();
var srange = ss.getRange('A2:B7').getValues();
var trange = ss.getRange('D2:E7');
var trangeVal = trange.getValues();
var returnarr = [];
for (var i = 0, ilen = trangeVal.length; i < ilen; i++) {
for (var j = 0, jlen = srange.length; j < jlen; j++) {
// Match on both Region (column 0) and Mag (column 1).
if (trangeVal[i][0] == srange[j][0] && trangeVal[i][1] == srange[j][1]) {
returnarr[j] = trangeVal[i];
}
}
}
trange.setValues(returnarr);
}
But seems not working and throwing an error like "Cannot convert Array to Object[][]"
Any help please!
Again thanks to #JPV
Ok. Maybe this script will help you ?
/**
 * Adds a custom "Sort" menu to the spreadsheet UI on open.
 */
function onOpen() {
  SpreadsheetApp.getUi().createMenu('Sort').addItem('Sort Col D and E', 'sort').addToUi();
}

/**
 * Reorders D2:E7 so each row lines up with the row of A2:B7 that has the
 * same Region (column 0) and Mag (column 1).
 *
 * Slots with no match keep their current D&E values, so setValues() always
 * receives a full rectangular array. (The original left holes in returnarr
 * when a row had no match, which is what made setValues() throw
 * "Cannot convert Array to Object[][]".)
 */
function sort() {
  var sheet = SpreadsheetApp.getActiveSheet();
  var source = sheet.getRange('A2:B7').getValues();
  var target = sheet.getRange('D2:E7');
  var targetVal = target.getValues();
  // Start from a copy of the current target values; matched rows overwrite.
  var result = targetVal.map(function (row) { return row.slice(); });
  for (var i = 0; i < targetVal.length; i++) {
    for (var j = 0; j < source.length; j++) {
      if (targetVal[i][0] == source[j][0] && targetVal[i][1] == source[j][1]) {
        result[j] = targetVal[i];
      }
    }
  }
  target.setValues(result);
}
Note: this will only work if the values in D&E are somewhere to be found in A&B. Also be aware of the fact that his will overwrite the values in D&E.
Test sheet here

How to extract data from Mysql column stored in serialized JSON column?

I have column in mysql DB which contains serialized data (used PHP JSON encode to serialize them).
Here is an example:
{"name":"Group ltd","email":"support#domain.org","auth":"Andrey Ucholnik"}
Is there a built in function in mysql to extract these values without PHP ?
I mean to build query that will unserialize data.
Of course its possible to use combination of LOCATE and SUBSTR function to do that but I prefer something built in if possible.
There is no built-in function in MySQL, but with a small piece of PHP code you can easily do it as follows:
<?php
// Decode a JSON document and insert each game into MySQL.
// NOTE(review): the original snippet's $json literal was malformed (its
// braces/brackets did not balance and it had no "games" key despite the
// loop reading $out["games"]); this is a minimal valid example of the
// shape the loop expects.
$json = '{"games": [{"name": "Tetword Pro", "description": "Puzzle game"}]}';
$out = json_decode($json, true);
foreach ($out["games"] as $game) {
    // Array keys must be quoted strings; bareword keys ($game[name]) rely on
    // a deprecated constant-name fallback and raise warnings/errors.
    $name = addslashes($game['name']);
    $description = addslashes($game['description']);
    // WARNING: the mysql_* API was removed in PHP 7; prefer mysqli/PDO with
    // prepared statements instead of addslashes() + string-built SQL.
    mysql_query("INSERT INTO games (name, description) VALUES('$name', '$description')") or die (mysql_error());
}
?>
There are no built-in MySQL functions to work with JSON, but here is a very simple stored function to extract values from JSON:
DELIMITER $$
-- Extract the value for key `name` from a flat JSON object held in `json`.
-- Limitations: no nested objects/arrays, and key names must not also occur
-- inside values.
CREATE FUNCTION JSON_EXTRACT(json TEXT, name CHAR(64))
RETURNS CHAR(64) DETERMINISTIC
BEGIN
    -- User variables use '@'. The '#' shown in the original post is a
    -- comment starter in MySQL, so '#namePos' etc. would not even parse
    -- (an artifact of the site's formatting mangling '@').
    SET @namePos = LOCATE(name, json);
    IF @namePos = 0 THEN RETURN ''; END IF;
    SET @valuePos = LOCATE(':', json, @namePos) + 1;
    IF SUBSTR(json, @valuePos, 1) = '"' THEN
        -- Quoted string value: return the text between the quotes.
        SET @valuePos = @valuePos + 1;
        RETURN SUBSTR(json, @valuePos, LOCATE('"', json, @valuePos) - @valuePos);
    ELSE
        -- Bare value (number/bool): it ends at the first space, comma or
        -- closing brace; 64 caps the scan at the declared return width.
        SET @valueBegin = TRIM(SUBSTR(json, @valuePos));
        SET @delim1 = LOCATE(' ', @valueBegin); SET @delim1 = IF(@delim1 = 0, 64, @delim1);
        SET @delim2 = LOCATE(',', @valueBegin); SET @delim2 = IF(@delim2 = 0, 64, @delim2);
        SET @delim3 = LOCATE('}', @valueBegin); SET @delim3 = IF(@delim3 = 0, 64, @delim3);
        RETURN LEFT(@valueBegin, LEAST(@delim1, @delim2, @delim3) - 1);
    END IF;
END$$
Usage example:
SELECT JSON_EXTRACT('{"a":"aa","b" : 1, "c": 3}', 'b') AS test;
Note that the function has many limitations. For example, it doesn't handle nested classes and key names shouldn't be included in the values.
Yes, you can definitely do it using the JSON_EXTRACT() function in MySQL (5.7+).
lets take a table that contains JSON (table client_services here) :
+-----+-----------+--------------------------------------+
| id | client_id | service_values |
+-----+-----------+------------+-------------------------+
| 100 | 1000 | { "quota": 1,"data_transfer":160000} |
| 101 | 1000 | { "quota": 2,"data_transfer":800000} |
| 102 | 1000 | { "quota": 3,"data_transfer":70000} |
| 103 | 1001 | { "quota": 1,"data_transfer":97000} |
| 104 | 1001 | { "quota": 2,"data_transfer":1760} |
| 105 | 1002 | { "quota": 2,"data_transfer":1060} |
+-----+-----------+--------------------------------------+
To Select each JSON fields , run this query :
-- Pull each JSON key out of service_values as its own column
-- ('$.key' is a JSON path; requires MySQL 5.7+).
SELECT
id, client_id,
json_extract(service_values, '$.quota') AS quota,
json_extract(service_values, '$.data_transfer') AS data_transfer
FROM client_services;
So the output will be :
+-----+-----------+----------------------+
| id | client_id | quota | data_transfer|
+-----+-----------+----------------------+
| 100 | 1000 | 1 | 160000 |
| 101 | 1000 | 2 | 800000 |
| 102 | 1000 | 3 | 70000 |
| 103 | 1001 | 1 | 97000 |
| 104 | 1001 | 2 | 1760 |
| 105 | 1002 | 2 | 1060 |
+-----+-----------+----------------------+
Hope this helps!