Could anyone please help with the following query? (180,352 rows)
SELECT COUNT(p.stock_id) AS num_products,
p.master_photo, p.product_photo, p.stock_id, p.master, p.title, p.price, p.stock_level, p.on_order, p.location, p.supplier, p.category, p.sub_category, p.reorder
FROM products AS p
WHERE p.sub_category != 'Subscriptions'
GROUP BY p.master
ORDER BY p.stock_id ASC
LIMIT 0, 20
It runs in about 6 seconds.
When I remove the ORDER BY it runs in 0.0023 seconds, and the same happens when I remove the GROUP BY.
The stock_id (unique) and sub_category columns are indexed.
I can't think of another way to approach a query like this: it is vital that I group by master to get the count of product variations, and also vital that the results can be ordered (not necessarily by stock_id, but that's the default).
Thank you
As requested by e4c5, below is the EXPLAIN output with the ORDER BY:
id: 1
select_type: SIMPLE
table: p
type: range
possible_keys: sub_category
key: sub_category
key_len: 52
ref: NULL
rows: 181691
Extra: Using where; Using temporary; Using filesort
and here it is without the ORDER BY:
id: 1
select_type: SIMPLE
table: p
type: index
possible_keys: sub_category
key: master
key_len: 52
ref: NULL
rows: 21
Extra: Using where
and below is the CREATE TABLE:
CREATE TABLE IF NOT EXISTS `products` (
`stock_id` varchar(50) NOT NULL,
`conv_stock_id` varchar(100) NOT NULL,
`conv_quantity` decimal(10,2) NOT NULL,
`master` varchar(50) NOT NULL,
`master_photo` varchar(255) NOT NULL,
`free_guide_photo` varchar(255) NOT NULL,
`product_var_photo` varchar(255) NOT NULL,
`master_title` varchar(255) NOT NULL,
`master_slug` varchar(255) NOT NULL,
`master_page_title` varchar(255) NOT NULL,
`product_photo` varchar(255) NOT NULL,
`original_product_photo` varchar(255) NOT NULL,
`title` varchar(255) NOT NULL,
`orig_title` varchar(255) NOT NULL,
`page_title` varchar(255) NOT NULL,
`description` longtext NOT NULL,
`slug` varchar(255) NOT NULL,
`custom_url` varchar(255) NOT NULL,
`location` varchar(255) NOT NULL,
`supplier` varchar(50) NOT NULL,
`supplier_stock_id` varchar(50) NOT NULL,
`supplier_discount` int(11) NOT NULL,
`category` varchar(50) NOT NULL,
`sub_category` varchar(50) NOT NULL,
`cost_price` decimal(10,2) NOT NULL,
`discount_cost_price` decimal(10,2) NOT NULL,
`price` decimal(10,2) NOT NULL,
`sale_price` decimal(10,2) NOT NULL,
`sale_price_startdate` date NOT NULL,
`sale_price_enddate` date NOT NULL,
`orig_price_trail` int(3) NOT NULL,
`price_trail` varchar(50) NOT NULL,
`price_rule` int(1) NOT NULL,
`pack_size` int(11) NOT NULL,
`parcel_size` int(1) NOT NULL,
`packaging_rule` int(11) NOT NULL,
`cut_tear` int(1) NOT NULL,
`oversized_parcel` int(1) NOT NULL,
`print_label` int(1) NOT NULL,
`stock_level` decimal(10,1) NOT NULL,
`stock_level_group` varchar(50) NOT NULL,
`stock_level_increment` decimal(10,2) NOT NULL,
`stock_check_date` datetime NOT NULL,
`reorder` int(1) NOT NULL,
`reorder_level` decimal(10,1) NOT NULL,
`reorder_quantity` decimal(10,1) NOT NULL,
`reorder_attempts` int(1) NOT NULL,
`unit_size` decimal(10,1) NOT NULL,
`on_order` decimal(10,1) NOT NULL,
`date_ordered` datetime NOT NULL,
`back_order` decimal(10,1) NOT NULL,
`uom` decimal(10,1) NOT NULL,
`uom_value` varchar(100) NOT NULL,
`stock_estimate` int(1) NOT NULL,
`due_date` datetime NOT NULL,
`quantity` varchar(255) NOT NULL,
`colour` varchar(255) NOT NULL,
`colour_family` varchar(255) NOT NULL,
`type` varchar(255) NOT NULL,
`style` varchar(255) NOT NULL,
`pattern` varchar(255) NOT NULL,
`shape` varchar(255) NOT NULL,
`design` varchar(255) NOT NULL,
`fibre` varchar(255) NOT NULL,
`material` varchar(255) NOT NULL,
`pattern_for` varchar(255) NOT NULL,
`difficulty` varchar(255) NOT NULL,
`fabric_count` varchar(255) NOT NULL,
`yarn_thickness` varchar(255) NOT NULL,
`suggested_needle_size` varchar(255) NOT NULL,
`tension` varchar(255) NOT NULL,
`collections` varchar(255) NOT NULL,
`product_features` varchar(255) NOT NULL,
`size` varchar(255) NOT NULL,
`actual_size` varchar(255) NOT NULL,
`length` varchar(255) NOT NULL,
`width` varchar(255) NOT NULL,
`weight` varchar(255) NOT NULL,
`weight_gsm` varchar(255) NOT NULL,
`brand` varchar(255) NOT NULL,
`designer` varchar(255) NOT NULL,
`composition` varchar(255) NOT NULL,
`washing_instructions` varchar(255) NOT NULL,
`matching_thread` varchar(50) NOT NULL,
`sample` varchar(50) NOT NULL,
`fat_quarter` varchar(50) NOT NULL,
`barcode` varchar(13) NOT NULL,
`list_international` int(1) NOT NULL,
`token` varchar(50) NOT NULL,
`create_sample` int(1) NOT NULL,
`create_fatquarter` int(1) NOT NULL,
`create_listing_type` int(1) NOT NULL,
`create_listing_size` int(11) NOT NULL,
`create_listing_price` decimal(10,2) NOT NULL,
`create_listing_price_rule` int(11) NOT NULL,
`create_listing_sale_price` decimal(10,2) NOT NULL,
`create_listing_parcelsize` int(1) NOT NULL,
`create_listing_barcode` varchar(13) NOT NULL,
`auto_listing` int(1) NOT NULL,
`custom_bridal` int(1) NOT NULL,
`pickwave_assign` int(1) NOT NULL,
`kit_product` int(11) NOT NULL,
`fatquarter_product` int(1) NOT NULL,
`sample_product` int(1) NOT NULL,
`grouped_product` int(1) NOT NULL,
`grouped_product_quantity` decimal(10,1) NOT NULL,
`multiple_product` int(1) NOT NULL,
`freepost_product` int(1) NOT NULL,
`status` int(1) NOT NULL,
`update_stock_level` int(1) NOT NULL,
`force_product_photo` int(1) NOT NULL,
`created_master_photo` int(1) NOT NULL,
`force_master_photo` int(1) NOT NULL,
`created_free_guide_photo` int(1) NOT NULL,
`force_free_guide_photo` int(1) NOT NULL,
`created_product_var_photo` int(1) NOT NULL,
`force_product_var_photo` int(1) NOT NULL,
`force_additional_photo` int(1) NOT NULL,
`created_price_levelling` int(1) NOT NULL,
`created_grouped_product` int(1) NOT NULL,
`updated_stock_level` int(1) NOT NULL,
`create_multiple_listing` int(1) NOT NULL,
`create_freepost_listing` int(1) NOT NULL,
`create_freeguide_info` int(1) NOT NULL,
`created_by` int(11) NOT NULL,
`date_created` datetime NOT NULL,
UNIQUE KEY `stock_id` (`stock_id`),
KEY `token` (`token`),
KEY `title` (`title`),
KEY `stock_level_group` (`stock_level_group`),
KEY `sub_category` (`sub_category`),
KEY `stock_level` (`stock_level`),
KEY `category` (`category`),
KEY `conv_stock_id` (`conv_stock_id`),
KEY `conv_quantity` (`conv_quantity`),
KEY `created_price_levelling` (`created_price_levelling`),
KEY `master` (`master`),
KEY `colour` (`colour`),
KEY `auto_listing` (`auto_listing`),
KEY `multiple_product` (`multiple_product`),
KEY `status` (`status`),
KEY `ebay_master` (`ebay_master`),
KEY `parcel_size` (`parcel_size`),
KEY `grouped_product` (`grouped_product`),
KEY `sample_product` (`sample_product`),
KEY `fatquarter_product` (`fatquarter_product`),
KEY `created_grouped_product` (`created_grouped_product`),
KEY `price` (`price`),
KEY `freepost_product` (`freepost_product`),
KEY `master_title` (`master_title`),
KEY `c_sub_category_master` (`sub_category`,`master`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
You haven't provided the output from EXPLAIN; however, based on your query it would seem that the ORDER BY forces a full table scan, which would make the query very slow.
When you don't use the ORDER BY, the db reads the rows for the first 20 master values (there may be quite a few of them), groups them together and returns the result.
When you order by stock_id, the whole table needs to be looked at to find which masters are associated with the lowest stock_id values.
It may be possible to improve performance with a composite index on (sub_category, master), but no conclusion can be made unless you share your SHOW CREATE TABLE and EXPLAIN output.
Update
Based on your CREATE TABLE statement, I see that your database isn't normalized. For example, why do I get the feeling that the following columns should be in a table of their own?
supplier varchar(50) NOT NULL,
supplier_stock_id varchar(50) NOT NULL,
supplier_discount int(11) NOT NULL,
You should only keep a supplier reference in your products table (a foreign key to the suppliers table). There are similar sets of columns which really should be moved out.
When you do so, you can create leaner and meaner indexes on this table. But that's not all: the table also becomes narrower, which in turn means the worst-case scenario of a full table scan becomes a lot faster.
I also noticed that the table does not have a primary key, which is a big no-no. If stock_id is numeric it should be the primary key; if it's not numeric it may still be the best candidate, but that's something you need to decide.
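For illustration, here is a minimal sketch of both suggestions. The suppliers table and its columns are hypothetical; only the ALTER on products refers to your actual schema.

-- Hypothetical suppliers table; products would then reference it by key
-- instead of repeating supplier data on every row.
CREATE TABLE suppliers (
  supplier_id      int NOT NULL AUTO_INCREMENT,
  name             varchar(50) NOT NULL,
  default_discount int NOT NULL,
  PRIMARY KEY (supplier_id)
) ENGINE=InnoDB;

-- Promote the existing unique key on stock_id to the primary key.
ALTER TABLE products
  DROP INDEX stock_id,
  ADD PRIMARY KEY (stock_id);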
Try adding an Index on stock_id in the products table... that should help.
Related
Please help me optimize the query below:
SELECT
`dti`.`CompanyId`,
`dti`.`Samiti`,
`dti`.`toll_date`,
`dti`.`MajorFee`,
`dti`.`MinorFee`,
`dti`.`SawalFee`,
SUM(dti.Tmwt) as Tmwt,
SUM(dti.Localminor) as Localminor,
SUM(dti.Swt) as Swt,
SUM(dti.Twt) as Twt,
SUM(((dti.Tmwt * dti.MajorFee) + (dti.Localminor * dti.MinorFee) + (dti.Swt * dti.SawalFee))) as total_wages,
SUM((dti.Twt * dti.govt_charges)) as govt_deduction,
SUM((((dti.Tmwt * dti.MajorFee) + (dti.Localminor * dti.MinorFee) + (dti.Swt * dti.SawalFee)) - (dti.Twt * dti.govt_charges))) as net_amount,
(SELECT (SUM(ld.amount) + SUM(ld.advance_deduction))
FROM psac_liability_deduction ld
WHERE ld.status = "Active" AND
ld.from_date >="2017-08-24" AND
ld.to_date <="2017-08-31" AND
ld.deducted_for = dti.CompanyId
) as group_liability_deduction,
(SELECT CONCAT(SUM(wi.GroupLiabilityDeduction), "|", SUM(wi.AdvanceWagesDeduction))
FROM psac_wagesitem wi
WHERE wi.status="Active" AND
wi.from_date >= "2017-08-24" AND
wi.to_date <= "2017-08-31" AND
wi.MainGroup=dti.Samiti AND
wi.FishermanId=dti.CompanyId
) as wages_deduction,
(SELECT CONCAT(SUM(cdp.product_liability), "|", SUM(cdp.wages_liability))
FROM psac_cash_deposited_payment cdp
WHERE cdp.status="Active" AND
cdp.deposit_date >= "2017-08-24" AND
cdp.deposit_date <= "2017-08-31" AND
cdp.maingroup_id=dti.Samiti AND
cdp.fisherman_id=dti.CompanyId
) as cash_deposited,
`fm`.`Name` as `fishername`,
`fm`.`Code` as `fishername_code`,
`fm`.`Bank`,
`fm`.`IfscCode`,
`fm`.`AccountNo`
FROM `psac_dailytollinfo` `dti`
LEFT JOIN `psac_fisherman` `fm` ON `fm`.`ID`=`dti`.`CompanyId`
WHERE
`dti`.`status` = 'Active' AND
`dti`.`toll_date` >= '2017-08-24' AND
`dti`.`toll_date` <= '2017-08-31'
GROUP BY `dti`.`toll_date`, `dti`.`CompanyId`
ORDER BY `dti`.`toll_date` ASC
Please help me optimize this query. If I remove the subqueries it runs quickly, but with the subqueries it takes too much time.
Below are the table structures.
psac_dailytollinfo table
CREATE TABLE `psac_dailytollinfo` (
`ID` int(11) NOT NULL,
`toll_date` date NOT NULL,
`Point` int(11) NOT NULL,
`group_type` int(11) NOT NULL,
`Samiti` int(11) NOT NULL,
`DailytollId` int(11) NOT NULL,
`CompanyId` int(11) NOT NULL,
`Name` varchar(250) NOT NULL,
`govt_charges` float(15,2) NOT NULL,
`MajorFee` float(15,2) NOT NULL,
`MinorFee` float(15,2) NOT NULL,
`SawalFee` float(15,2) NOT NULL,
`Cqty` varchar(150) NOT NULL,
`Cwt` varchar(150) NOT NULL,
`Rqty` varchar(150) NOT NULL,
`Rwt` varchar(150) NOT NULL,
`Mqty` varchar(150) NOT NULL,
`Mwt` varchar(150) NOT NULL,
`Kqty` varchar(150) NOT NULL,
`Kwt` varchar(150) NOT NULL,
`Aqty` varchar(150) NOT NULL,
`Awt` varchar(150) NOT NULL,
`Sqty` varchar(11) NOT NULL,
`Swt` varchar(11) NOT NULL,
`Lqty` varchar(150) NOT NULL,
`Lwt` varchar(150) NOT NULL,
`Localminor` varchar(150) NOT NULL,
`Tmqty` varchar(150) NOT NULL,
`Tmwt` varchar(150) NOT NULL,
`Tqty` varchar(150) NOT NULL,
`Twt` varchar(150) NOT NULL,
`added_by` int(11) NOT NULL,
`updated_by` int(11) NOT NULL,
`added_date` datetime NOT NULL,
`updated_date` datetime NOT NULL,
`action_microtime` varchar(20) NOT NULL,
`status` enum('Active','Inactive','Deleted') NOT NULL DEFAULT 'Active'
) ENGINE=InnoDB DEFAULT CHARSET=latin1;
psac_liability_deduction table
CREATE TABLE `psac_liability_deduction` (
`ID` bigint(20) NOT NULL,
`wages_id` int(11) NOT NULL,
`wages_item_id` int(11) NOT NULL,
`amount` float(15,2) NOT NULL,
`advance_deduction` float(11,2) NOT NULL,
`group_type_id` int(11) NOT NULL,
`maingroup_id` int(11) NOT NULL,
`deducted_by` int(11) NOT NULL,
`deducted_for` int(11) NOT NULL,
`from_date` date NOT NULL,
`to_date` date NOT NULL,
`status` enum('Active','Inactive','Deleted') COLLATE utf8_unicode_ci NOT NULL DEFAULT 'Active',
`added_by` int(11) NOT NULL,
`updated_by` int(11) NOT NULL,
`added_date` datetime NOT NULL,
`updated_date` datetime NOT NULL,
`action_microtime` varchar(50) COLLATE utf8_unicode_ci NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
psac_wagesitem table
CREATE TABLE `psac_wagesitem` (
`ID` int(11) NOT NULL,
`wages_for` enum('Fisherman','Group') NOT NULL DEFAULT 'Fisherman',
`wages_id` int(11) NOT NULL,
`from_date` date NOT NULL,
`to_date` date NOT NULL,
`group_type_id` int(11) NOT NULL,
`MainGroup` int(11) NOT NULL,
`FishermanId` int(11) NOT NULL,
`MajorFee` float(15,2) NOT NULL,
`MinorFee` float(15,2) NOT NULL,
`SawalFee` float(15,2) NOT NULL,
`major_wt` float(15,2) NOT NULL,
`minor_wt` float(15,2) NOT NULL,
`sawal_wt` float(15,2) NOT NULL,
`major_wage` float(15,2) NOT NULL,
`minor_wage` float(15,2) NOT NULL,
`sawal_wage` float(15,2) NOT NULL,
`TotalWage` float(15,2) NOT NULL,
`group_liability` float(15,2) NOT NULL,
`advance_wages` float(15,2) NOT NULL,
`GovDeduction` float(15,2) NOT NULL,
`GroupLiabilityDeduction` float(15,2) NOT NULL,
`AdvanceWagesDeduction` float(15,2) NOT NULL,
`final_wages` float(15,2) NOT NULL,
`added_by` int(11) NOT NULL,
`updated_by` int(11) NOT NULL,
`added_date` datetime NOT NULL,
`updated_date` datetime NOT NULL,
`action_microtime` varchar(20) NOT NULL,
`status` enum('Active','Inactive','Deleted') NOT NULL DEFAULT 'Active',
`editable` enum('Lock','Unlock') NOT NULL DEFAULT 'Unlock'
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
psac_cash_deposited_payment table
CREATE TABLE `psac_cash_deposited_payment` (
`deposit_id` int(11) NOT NULL,
`deposited_by` enum('Fisherman','Group') COLLATE utf8_unicode_ci NOT NULL DEFAULT 'Fisherman',
`deposit_date` date NOT NULL,
`group_type_id` int(11) NOT NULL,
`maingroup_id` int(11) NOT NULL,
`fisherman_id` int(11) NOT NULL,
`product_liability` float(11,2) NOT NULL,
`wages_liability` float(11,2) NOT NULL,
`receipt_number` varchar(50) COLLATE utf8_unicode_ci NOT NULL,
`remark` text COLLATE utf8_unicode_ci NOT NULL,
`status` enum('Active','Inactive','Deleted') COLLATE utf8_unicode_ci NOT NULL DEFAULT 'Active',
`editable` enum('Lock','Unlock') COLLATE utf8_unicode_ci NOT NULL DEFAULT 'Unlock',
`added_by` int(11) NOT NULL,
`added_date` datetime NOT NULL,
`updated_by` int(11) NOT NULL,
`updated_date` datetime NOT NULL,
`action_microtime` varchar(20) COLLATE utf8_unicode_ci NOT NULL
) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
You understand that you are looking at a full 8 days? If you only wanted one week, change <= to <.
Switch all tables to InnoDB.
Have a PRIMARY KEY for every table -- either a 'natural' PK (composed of one or more columns that, together, uniquely define each row), or an AUTO_INCREMENT.
dti needs INDEX(status, toll_date)
Don't use (m,n) on FLOAT, it leads to an extra roundoff.
Don't use FLOAT for money, it leads to a roundoff.
FLOAT (with or without (m,n)) contains no more than 7 significant digits.
Consider DECIMAL(11,2) instead of float(11,2).
Be cautious about using latin1 in one table and utf8 in another -- if you need to JOIN on a VARCHAR, it must have the same charset and collation in order to use an index.
Where practical, make the ORDER BY identical to the GROUP BY.
These composite indexes are likely to help performance:
dti: INDEX(status, toll_date)
ld: INDEX(status, deducted_for, from_date)
ld: INDEX(status, deducted_for, to_date)
wi: INDEX(status, MainGroup, FishermanId, from_date)
wi: INDEX(status, MainGroup, FishermanId, to_date)
cdp: INDEX(status, maingroup_id, fisherman_id, deposit_date)
(The date must be last; the other column(s) can be in any order.)
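Expressed as ALTER TABLE statements, that would be something like this (a sketch only; the index names are illustrative):
ALTER TABLE psac_dailytollinfo
  ADD INDEX idx_status_toll_date (status, toll_date);

ALTER TABLE psac_liability_deduction
  ADD INDEX idx_status_for_from (status, deducted_for, from_date),
  ADD INDEX idx_status_for_to   (status, deducted_for, to_date);

ALTER TABLE psac_wagesitem
  ADD INDEX idx_status_grp_fid_from (status, MainGroup, FishermanId, from_date),
  ADD INDEX idx_status_grp_fid_to   (status, MainGroup, FishermanId, to_date);

ALTER TABLE psac_cash_deposited_payment
  ADD INDEX idx_status_grp_fid_date (status, maingroup_id, fisherman_id, deposit_date);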
If you still have performance problems with the subqueries after you have added those indexes, let's see EXPLAIN SELECT ... so we can look again.
Don't splay an array across columns:
`Cqty` varchar(150) NOT NULL,
`Cwt` varchar(150) NOT NULL,
etc
Consider having qty and wt as two columns in another table.
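A sketch of what that could look like (the table and column names here are hypothetical, not taken from your schema):
-- One row per (toll entry, item type) instead of Cqty/Cwt, Rqty/Rwt, ... columns.
CREATE TABLE psac_dailytoll_item (
  dailytollinfo_id int(11) NOT NULL,       -- references psac_dailytollinfo.ID
  item_type        char(1) NOT NULL,       -- 'C', 'R', 'M', 'K', 'A', 'S', 'L', ...
  qty              decimal(11,2) NOT NULL,
  wt               decimal(11,2) NOT NULL,
  PRIMARY KEY (dailytollinfo_id, item_type)
) ENGINE=InnoDB;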
Could there be two different values for MajorFee in a single day for a single company? That, and other things, suggests the GROUP BY is improperly formed.
I have this MySQL query that I want to optimize:
SELECT r.WarehouseLocation,sum(sir.qty)
FROM repairableissue as r
INNER JOIN SIR ON r.sirno=sir.sirno
AND r.region=sir.region
AND r.ItemName=sir.Itemdesc
AND r.SerialNo=sir.Serialno
WHERE r.status='Pending'
GROUP BY r.warehouseLocation
How do I optimize this query? I read about optimization and found that indexes might help, but I still could not achieve the desired performance.
Which indexes should be used and which should be removed?
Below are the table definitions:
Repairableissue
CREATE TABLE `repairableissue` (
`Vendor` varchar(40) NOT NULL,
`ItemName` varchar(200) NOT NULL,
`SerialNo` varchar(50) NOT NULL,
`person` varchar(200) NOT NULL,
`siteid` varchar(10) NOT NULL,
`invuser` varchar(50) NOT NULL,
`region` varchar(50) NOT NULL,
`id` int(11) NOT NULL AUTO_INCREMENT,
`Dated` date NOT NULL,
`Sirno` varchar(50) NOT NULL,
`status` varchar(30) NOT NULL DEFAULT 'Pending',
`trackthrough` varchar(30) NOT NULL,
`reason` varchar(100) NOT NULL,
`ckh` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`WarehouseType` varchar(20) NOT NULL,
`WarehouseLocation` varchar(20) NOT NULL,
PRIMARY KEY (`id`),
UNIQUE KEY `id` (`id`),
KEY `I1` (`status`),
KEY `ind2` (`ItemName`),
KEY `ind3` (`region`),
KEY `ind5` (`SerialNo`),
KEY `ind4` (`Sirno`)
) ENGINE=MyISAM AUTO_INCREMENT=63029 DEFAULT CHARSET=latin1
sir
CREATE TABLE `sir` (
`SirNo` varchar(50) NOT NULL,
`SiteId` varchar(80) NOT NULL,
`Vendor` varchar(70) NOT NULL,
`Type` varchar(15) NOT NULL,
`ItemDesc` varchar(200) NOT NULL,
`ItemCode` varchar(25) NOT NULL,
`SerialNo` varchar(50) NOT NULL,
`Unit` varchar(15) NOT NULL,
`AssetCode` varchar(50) NOT NULL,
`Qty` decimal(11,0) NOT NULL,
`Region` varchar(15) NOT NULL,
`Status` varchar(20) NOT NULL DEFAULT 'Installed',
`FaultInfo` varchar(100) NOT NULL DEFAULT 'date()',
`chk` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`Phase` varchar(15) NOT NULL,
`Category` varchar(200) NOT NULL,
`Issue_Vendor` varchar(30) NOT NULL,
`AssetName` varchar(150) NOT NULL,
`Ownership` varchar(20) NOT NULL,
`Dated` date NOT NULL,
`PersonName` varchar(150) NOT NULL,
`Remarks` varchar(300) NOT NULL,
`po` varchar(100) NOT NULL,
`invuser` varchar(50) NOT NULL,
`id` int(11) NOT NULL AUTO_INCREMENT,
`grnno` varchar(30) NOT NULL,
`WarehouseType` varchar(20) NOT NULL,
`WarehouseLocation` varchar(20) NOT NULL,
`mainpartserial` varchar(200) NOT NULL,
PRIMARY KEY (`Vendor`,`Type`,`ItemCode`,`ItemDesc`,`SerialNo`,`Ownership`,`SirNo`,`Region`,`WarehouseType`,`WarehouseLocation`,`po`,`Qty`,`id`),
KEY `id` (`id`),
KEY `ind4` (`ItemDesc`),
KEY `ind6` (`SerialNo`),
KEY `ind7` (`SerialNo`)
) ENGINE=MyISAM AUTO_INCREMENT=228007 DEFAULT CHARSET=latin1
One multi-column index on r.status + r.warehouseLocation, in that order.
One multi-column index on sir.sirno + sir.region + sir.Itemdesc + sir.Serialno, in order of most cardinality to least cardinality, with sir.qty tacked on the end.
This assumes the fields are small enough to fit (combined) into an index.
Still, join seeks are unavoidable. The number of records that match r.status='Pending' is going to dictate the speed of this query.
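As a sketch (the index names are illustrative, and you should order the first four columns of the second index by the cardinality of your own data):
ALTER TABLE repairableissue
  ADD INDEX idx_status_location (status, WarehouseLocation);

ALTER TABLE sir
  -- Qty is appended so the index also covers SUM(sir.qty).
  ADD INDEX idx_join_cover (SerialNo, SirNo, ItemDesc, Region, Qty);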
I have the following structure, with indexes that help us retrieve results faster:
CREATE TABLE IF NOT EXISTS `index_site` ( `id_building` char(32) NOT NULL, `id_client` char(32) NOT NULL, `id_broker` smallint(5) unsigned NOT NULL, `kind_client` char(1) NOT NULL, `city` smallint(6) unsigned NOT NULL, `lat` float(10,6) NOT NULL, `lng` float(10,6) NOT NULL, `zone` smallint(2) unsigned NOT NULL, `sector` smallint(4) unsigned NOT NULL, `subregion` smallint(6) unsigned NOT NULL, `country` char(2) NOT NULL, `habs` smallint(5) unsigned NOT NULL, `bath` smallint(5) unsigned NOT NULL, `persons` smallint(5) unsigned NOT NULL, `include_elevator` enum('1','0') NOT NULL, `build_level` varchar(20) NOT NULL, `area` mediumint(8) unsigned NOT NULL, `area_um` enum('1','2','3','4','5') NOT NULL, `area_str` varchar(10) NOT NULL, `code` char(10) NOT NULL, `title` tinytext NOT NULL, `type_offer` varchar(50) NOT NULL, `offer_name` varchar(20) NOT NULL, `comments` text NOT NULL, `type_building` varchar(50) NOT NULL, `address` tinytext NOT NULL, `sector_name` tinytext NOT NULL, `city_name` varchar(50) NOT NULL, `subregion_name` varchar(50) NOT NULL, `area_terrain` varchar(10) NOT NULL, `area_um_terrain` tinyint(4) NOT NULL, `image` varchar(70) NOT NULL, `image_total` tinyint(2) unsigned NOT NULL, `build_status` tinyint(3) unsigned NOT NULL, `tags` text NOT NULL, `url` varchar(200) NOT NULL, `include_offer_value` enum('1','0') NOT NULL, `offer_value` varchar(15) NOT NULL, `offer_value_format` varchar(20) NOT NULL, `prc_comission` varchar(5) NOT NULL, `date_added` datetime NOT NULL, `date_updated` datetime NOT NULL, `date_expire` datetime NOT NULL, `date_suspended` date NOT NULL, `visits` int(11) NOT NULL, `kind_offer` tinyint(4) NOT NULL, `kind_building` tinyint(5) unsigned NOT NULL, `kind_building_type` tinyint(5) unsigned NOT NULL, `mark_bld` tinyint(3) unsigned NOT NULL, `mark_bld_color` char(7) NOT NULL, `status` tinyint(1) unsigned NOT NULL, `is_made` enum('0','1') NOT NULL, `is_project` enum('0','1') NOT NULL, `is_bm` enum('0','1') NOT NULL, `is_demo` enum('0','1') NOT NULL, `is_leading` enum('0','1') NOT NULL, `visible_in_metasearch` mediumtext NOT NULL, `visible_in_web` mediumtext NOT NULL, `seller_image` varchar(150) NOT NULL, `seller_name` varchar(50) NOT NULL,
KEY `id_broker` (`id_broker`), KEY `id_client` (`id_client`), KEY `kind_building` (`kind_building`), KEY `city` (`city`), KEY `offer_value` (`offer_value`), KEY `is_bm` (`is_bm`), KEY `status` (`status`), KEY `sector` (`sector`), KEY `zone` (`zone`), KEY `area` (`area`), KEY `prc_comission` (`prc_comission`), KEY `is_made` (`is_made`), KEY `is_leading` (`is_leading`), KEY `id_building` (`id_building`), KEY `date_added` (`date_added`), KEY `code` (`code`), KEY `country` (`country`), KEY `habs` (`habs`), KEY `kind_offer` (`kind_offer`), FULLTEXT KEY `tags` (`tags`) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 ROW_FORMAT=FIXED;
Yes, it's too big!!! :)
Okay, the point is that with this structure I use several of the keys when searching for results. This is normal, and I execute the following query:
SELECT * FROM `index_site` WHERE kind_building='1' AND kind_offer='1' AND city='1'
This query took 0.0179 seconds, great, but I add EXPLAIN to my query:
EXPLAIN SELECT * FROM `index_site` WHERE kind_building='1' AND kind_offer='1' AND city='1'
I got the following result:
+----+-------------+------------+-------------+-------------------------------+-------------------------------+---------+------+------+-------------------------------------------------------------+
| id | select_type | table      | type        | possible_keys                 | key                           | key_len | ref  | rows | Extra                                                       |
+----+-------------+------------+-------------+-------------------------------+-------------------------------+---------+------+------+-------------------------------------------------------------+
|  1 | SIMPLE      | index_site | index_merge | kind_building,city,kind_offer | kind_offer,city,kind_building | 1,2,1   | NULL |  184 | Using intersect(kind_offer,city,kind_building); Using where |
+----+-------------+------------+-------------+-------------------------------+-------------------------------+---------+------+------+-------------------------------------------------------------+
So I am using the right keys, but MySQL still reports "Using where" in the Extra column, and I've read that when you see that, something is wrong.
My question is: if I have a correct query with indexes, why do I still get "Using where"? What's wrong?
Thanks for your help!
From the docs:
If the Extra column also says Using where, it means the index is being used to perform lookups of key values.
You are selecting all fields (*) from the table.
Since not all fields are covered by indexes used in the merge intersect, the fields need to be looked up in the table itself.
Try running this:
SELECT kind_building, kind_offer, city
FROM index_site
WHERE kind_building = '1'
AND kind_offer = '1'
AND city = '1'
and the Using where should go away.
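For what it's worth (this is a general alternative, not something from the quoted documentation): a single composite index on the three filter columns lets MySQL use one index instead of an index_merge intersect, which is usually faster:
ALTER TABLE index_site
  ADD INDEX idx_building_offer_city (kind_building, kind_offer, city);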
Below is my MySQL query:
SELECT
opensalesorder.so_number,
items.VendorName,
opensalesorder.item_number,
items_custom_fields.FieldValue AS `Stock Item`,
vendor_custom_fields.FieldValue AS `Paid Freight Allowance`,
items.QuantityOnHand,
items.ReorderPoint,
items.MaxQty,
SUM(opensalesorder.quantity_on_order),
items.PurchaseCost,
items.VendorName,
items.VendorName,
items.PurchaseCost,
opensalesorder.status,
items.ItemType
FROM
vendor,
`opensalesorder`
inner join items
on opensalesorder.item_number = items.ItemName
JOIN items_custom_fields
ON items_custom_fields.ItemName = items.ItemName
JOIN vendor_custom_fields
ON vendor_custom_fields.VName = vendor.VName
WHERE opensalesorder.item_number = items.ItemName
and items_custom_fields.FieldName ='Stock Item'
and vendor_custom_fields.FieldName ='Paid Freight Allowance'
and opensalesorder.status NOT LIKE 'on po'
AND opensalesorder.so_number NOT IN ('2','3')
AND items.VendorName NOT IN ('Access')
AND opensalesorder.item_number NOT IN ('018-0001')
group by opensalesorder.item_number
LIMIT 100
On executing this query I get the following error:
#1054 - Unknown column 'vendor.VName' in 'on clause'
But I have included the vendor table in the FROM clause.
Is this the right way to include a table in a JOIN?
So what's wrong with this query?
EDIT:
SHOW CREATE TABLE FOR opensalesorder
CREATE TABLE `opensalesorder` (
`so_number` decimal(10,0) NOT NULL,
`item_number` varchar(20) NOT NULL,
`quantity_on_order` int(11) NOT NULL,
`quantity_to_order` int(11) NOT NULL,
`status` varchar(20) NOT NULL,
`editsequence` text NOT NULL,
`TxnLineID` text NOT NULL,
`TxnID` text NOT NULL,
`dateCreated` date NOT NULL,
`shipDate` date NOT NULL,
`customer` text NOT NULL,
`itemclass` text NOT NULL,
UNIQUE KEY `unique_mapping` (`so_number`,`item_number`),
KEY `so_number` (`so_number`),
KEY `item_number` (`item_number`),
KEY `status` (`status`)
)
SHOW CREATE TABLE FOR items
CREATE TABLE `items` (
`ItemName` varchar(30) NOT NULL,
`VendorName` varchar(40) DEFAULT NULL,
`QuantityOnHand` int(11) DEFAULT NULL,
`QuantityOnSalesOrder` int(11) DEFAULT NULL,
`ReorderPoint` int(11) DEFAULT NULL,
`PurchaseCost` double DEFAULT NULL,
`AverageCost` double DEFAULT NULL,
`SalesPrice` double DEFAULT NULL,
`PurchaseDesc` varchar(200) DEFAULT NULL,
`SalesDesc` varchar(200) DEFAULT NULL,
`ItemType` varchar(30) DEFAULT NULL,
`FreeCode` int(11) DEFAULT NULL,
`SubGroup` varchar(10) DEFAULT NULL,
`DateNewItem` date DEFAULT NULL,
`Notes` text,
`MaxQty` int(11) DEFAULT NULL,
`QuantityOnPO` int(11) DEFAULT NULL,
PRIMARY KEY (`ItemName`),
KEY `ItemName` (`ItemName`),
KEY `VendorName` (`VendorName`)
)
SHOW CREATE TABLE FOR vendor_custom_fields
CREATE TABLE `vendor_custom_fields` (
`VName` text NOT NULL,
`FieldName` text NOT NULL,
`FieldValue` text NOT NULL,
`FieldType` text NOT NULL,
PRIMARY KEY (`VName`(120),`FieldName`(120)),
FULLTEXT KEY `VName_index` (`VName`)
)
SHOW CREATE TABLE FOR vendor
CREATE TABLE `vendor` (
`VName` varchar(60) NOT NULL,
`CompanyName` varchar(100) NOT NULL,
`Address1` varchar(120) NOT NULL,
`Address2` varchar(120) NOT NULL,
`City` varchar(40) NOT NULL,
`State` varchar(50) NOT NULL,
`PostalCode` varchar(13) NOT NULL,
`Phone` varchar(13) NOT NULL,
`Fax` varchar(13) NOT NULL,
`AlternatePhone` varchar(13) NOT NULL,
`AlternateContact` varchar(30) NOT NULL,
`Email` varchar(40) NOT NULL,
`AccountNumber` varchar(30) NOT NULL,
`Balance` double NOT NULL,
`RepEmail` varchar(40) NOT NULL,
`FreightAllowance` double DEFAULT NULL,
`MinimumPOLimit` double DEFAULT NULL,
`Notes` text NOT NULL,
PRIMARY KEY (`VName`)
)
I think the problem is mixing the implicit (comma-style) join with the explicit JOINs: the comma operator has lower precedence than JOIN, so vendor isn't visible yet in the ON clauses of the explicit joins. When I put vendor in as a regular JOIN, the query was fine.
(I had to comment out the references to items_custom_fields as you didn't include that table's definition.)
Here's a fiddle.
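For reference, here is a sketch of the query with vendor pulled into an explicit JOIN and the select list abbreviated. I'm assuming items.VendorName is what relates items to vendor.VName; adjust that join condition if your relationship is different.

SELECT
    opensalesorder.so_number,
    items.VendorName,
    opensalesorder.item_number,
    items_custom_fields.FieldValue AS `Stock Item`,
    vendor_custom_fields.FieldValue AS `Paid Freight Allowance`,
    SUM(opensalesorder.quantity_on_order) AS qty_on_order
FROM opensalesorder
INNER JOIN items
        ON opensalesorder.item_number = items.ItemName
INNER JOIN items_custom_fields
        ON items_custom_fields.ItemName = items.ItemName
INNER JOIN vendor
        ON vendor.VName = items.VendorName          -- assumed relationship
INNER JOIN vendor_custom_fields
        ON vendor_custom_fields.VName = vendor.VName
WHERE items_custom_fields.FieldName = 'Stock Item'
  AND vendor_custom_fields.FieldName = 'Paid Freight Allowance'
  AND opensalesorder.status NOT LIKE 'on po'
GROUP BY opensalesorder.item_number
LIMIT 100;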
I have an SQL statement that correctly fetches data from one table. However, I need to fetch in the same way from n tables named table_n. All of these tables share a 3-column primary key of projid, docid and revnr. I also need to return n as docType, to differentiate the tables. The result should be sorted by projid and/or docid.
I tried sorting the combined output of the separate queries in PHP, but it was way too slow (at least a few seconds on a 3 MB database). I'm convinced MySQL/MSSQL will do it faster.
This is my current query:
SELECT a.* FROM `table_1` a
INNER JOIN (SELECT docid,
Max(revnr) max_val
FROM `table_1`
WHERE ( projid = something )
GROUP BY docid) b
ON a.docid = b.docid
AND a.revnr = b.max_val ORDER BY docid DESC
My current query gets the rows with the highest revnr for each docid and projid.
I'm developing on MySQL but I need it to work on MSSQL as well. A general SQL solution would be great.
Thanks!
EDIT: Table schemas of the tables I currently have:
CREATE TABLE IF NOT EXISTS `table_1` (
`projid` int(11) NOT NULL,
`docid` int(11) NOT NULL,
`revnr` int(11) NOT NULL,
`revname` varchar(64) NOT NULL,
`signedOn` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`sign` int(11) NOT NULL,
`ritningsnr` varchar(128) NOT NULL,
`moment` varchar(256) NOT NULL,
`omrade` varchar(256) NOT NULL,
`start` datetime NOT NULL,
`stop` datetime NOT NULL,
`extTodo` int(11) NOT NULL,
PRIMARY KEY (`projid`,`docid`,`revnr`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='egenkontroll';
CREATE TABLE IF NOT EXISTS `table_2` (
`projid` int(11) NOT NULL,
`docid` int(11) NOT NULL,
`revnr` int(11) NOT NULL,
`revname` varchar(64) NOT NULL,
`signedOn` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`sign` int(11) NOT NULL,
`extWater` int(11) NOT NULL,
`extRisk` int(11) NOT NULL,
`extSystem` int(11) NOT NULL,
`extHelp` int(11) NOT NULL,
`extProvtryck` int(11) NOT NULL,
`extDoc` int(11) NOT NULL,
`extEgenkontroll` int(11) NOT NULL COMMENT 'exttabell',
`extOther` int(11) NOT NULL,
`extMontorer` int(11) NOT NULL,
PRIMARY KEY (`projid`,`docid`,`revnr`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COMMENT='arbetsberedning';
CREATE TABLE IF NOT EXISTS `table_3` (
`projid` int(11) NOT NULL,
`docid` int(11) NOT NULL,
`revnr` int(11) NOT NULL,
`revname` varchar(64) NOT NULL,
`adress` varchar(256) NOT NULL,
`pipesMark` tinyint(1) NOT NULL,
`pipesKulvert` tinyint(1) NOT NULL,
`pipesBasement` tinyint(1) NOT NULL,
`pipesVaning` tinyint(1) NOT NULL,
`pipesIngjutna` tinyint(1) NOT NULL,
`ledningTappvatten` tinyint(1) NOT NULL,
`ledningVarmevatten` tinyint(1) NOT NULL,
`ledningHetvatten` tinyint(1) NOT NULL,
`ledningKylaPrim` tinyint(1) NOT NULL,
`ledningKylaSek` tinyint(1) NOT NULL,
`ledningGas` tinyint(1) NOT NULL,
`ledningLuft` tinyint(1) NOT NULL,
`ledningAvlopp` tinyint(1) NOT NULL,
`ledningOther` varchar(512) NOT NULL,
`materialGjutjarn` tinyint(1) NOT NULL,
`materialSteel` tinyint(1) NOT NULL,
`materialKoppar` tinyint(1) NOT NULL,
`materialPlast` tinyint(1) NOT NULL,
`materialRostfritt` tinyint(1) NOT NULL,
`materialOther` varchar(512) NOT NULL,
`omfattningLength` int(11) NOT NULL COMMENT 'meter',
`omfattningDimension` varchar(16) NOT NULL,
`omfattningRitningnr` varchar(128) NOT NULL,
`doneWithPump` tinyint(1) NOT NULL,
`doneWithVattenledning` tinyint(1) NOT NULL,
`doneWithKompressor` tinyint(1) NOT NULL,
`doneWithTathetsprovare` tinyint(1) NOT NULL,
`tryckmedieVatten` tinyint(1) NOT NULL,
`tryckmedieLuft` tinyint(1) NOT NULL,
`tryckmedieOther` varchar(128) NOT NULL,
`manometerDiameter` int(11) NOT NULL COMMENT 'mm',
`manometerGradering` int(11) NOT NULL COMMENT 'kPa',
`manometerReadPressure` int(11) NOT NULL,
`manometerTid` int(11) NOT NULL COMMENT 'sekunder',
`testedOn` datetime NOT NULL,
`testedBy` varchar(128) NOT NULL COMMENT '"id_" + (userid)',
`comments` varchar(1024) NOT NULL,
`commentsBy` varchar(128) NOT NULL COMMENT '"id_" + (userid)',
`signedOn` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
`sign` int(11) NOT NULL,
PRIMARY KEY (`projid`,`docid`,`revnr`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
The fields I need are projid, docid, revnr, revname, signedOn and sign; they are present in all current and future tables.
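For reference, a sketch of how the existing per-table pattern could be combined across tables with UNION ALL, selecting only the shared columns plus a literal docType per table. The projid value 1 is a placeholder, the outer queries are also filtered on projid (which I'm assuming is what's intended), and backticks are omitted so the same text should run on MSSQL as well.

SELECT a.projid, a.docid, a.revnr, a.revname, a.signedOn, a.sign, 1 AS docType
FROM table_1 a
INNER JOIN (SELECT docid, MAX(revnr) AS max_val
            FROM table_1
            WHERE projid = 1
            GROUP BY docid) b
        ON a.docid = b.docid AND a.revnr = b.max_val
WHERE a.projid = 1

UNION ALL

SELECT a.projid, a.docid, a.revnr, a.revname, a.signedOn, a.sign, 2 AS docType
FROM table_2 a
INNER JOIN (SELECT docid, MAX(revnr) AS max_val
            FROM table_2
            WHERE projid = 1
            GROUP BY docid) b
        ON a.docid = b.docid AND a.revnr = b.max_val
WHERE a.projid = 1

-- ... one more branch per table_n, with n AS docType ...

ORDER BY projid, docid DESC;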