Range access on multiple columns using an index - mysql

Per my understanding of the MySQL documentation, which states:
The optimizer attempts to use additional key parts to determine the interval as long as the comparison operator is =, <=>, or IS NULL. If the operator is >, <, >=, <=, !=, <>, BETWEEN, or LIKE, the optimizer uses it but considers no more key parts.
https://dev.mysql.com/doc/refman/5.6/en/range-optimization.html#range-access-multi-part
I was trying to optimize the following query:
SELECT col3 FROM tbl2 WHERE col3 = 'fHI' AND col1 > 20 AND col4 > 0.5;
where tbl2 is defined as:
CREATE TABLE `tbl2` (
`col1` int NOT NULL,
`col2` varchar(512) NOT NULL DEFAULT '',
`col3` varchar(512) DEFAULT NULL,
`col4` double DEFAULT NULL,
PRIMARY KEY (`col2`),
KEY `cd1` (`col3`,`col1`,`col4`)
) ENGINE=InnoDB DEFAULT CHARSET=latin1
I was under the impression that only the key parts col3 and col1 would be used, since the predicate on col1 uses the > operator. However, the optimizer plan below reports that all three index key columns are used.
{
"query_block": {
"select_id": 1,
"cost_info": {
"query_cost": "92.10"
},
"table": {
"table_name": "tbl2",
"access_type": "index",
"possible_keys": [
"cd1"
],
"key": "cd1",
"used_key_parts": [
"col3",
"col1",
"col4"
],
"key_length": "528",
"rows_examined_per_scan": 901,
"rows_produced_per_join": 881,
"filtered": "97.78",
"using_index": true,
"cost_info": {
"read_cost": "4.00",
"eval_cost": "88.10",
"prefix_cost": "92.10",
"data_read_per_join": "901K"
},
"used_columns": [
"col1",
"col3",
"col4"
],
"attached_condition": "((`test`.`tbl2`.`col3` = 'fHI') and (`test`.`tbl2`.`col1` > 20) and (`test`.`tbl2`.`col4` > 0.5))"
}
}
}
Is the documentation wrong or is col4 being used in a different way?

mysql> explain SELECT col3 FROM tbl2 WHERE col3 = 'fHI' AND col1 > 20 AND col4 > 0.5;
+----+-------------+-------+------------+-------+---------------+------+---------+------+------+----------+--------------------------+
| id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra |
+----+-------------+-------+------------+-------+---------------+------+---------+------+------+----------+--------------------------+
| 1 | SIMPLE | tbl2 | NULL | index | cd1 | cd1 | 528 | NULL | 1 | 100.00 | Using where; Using index |
+----+-------------+-------+------------+-------+---------------+------+---------+------+------+----------+--------------------------+
I think the answer is that EXPLAIN is sloppy in multiple ways. The "528" is simply the full key length of cd1: 515 bytes for col3 (512 latin1 bytes + 2 length bytes + 1 NULL flag) + 4 bytes for col1 + 9 bytes for col4 (8 bytes + 1 NULL flag). Combined with access_type "index", this says the optimizer chose a full scan of the covering index rather than the range scan, which the trace below rejects with "chosen": false, "cause": "cost" (0.36 versus 0.35 for the covering index scan on this tiny table).
Anyway, note that the Optimizer Trace mentions only
"ranges": [
"fHI <= col3 <= fHI AND 20 < col1"
],
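For anyone wanting to reproduce this, the trace below was captured in roughly this way (a minimal sketch; optimizer_trace_max_mem_size may need raising if your trace comes out truncated):
SET optimizer_trace = 'enabled=on';
SET optimizer_trace_max_mem_size = 1000000; -- raise if the trace is truncated
SELECT col3 FROM tbl2 WHERE col3 = 'fHI' AND col1 > 20 AND col4 > 0.5;
SELECT TRACE FROM information_schema.OPTIMIZER_TRACE;
SET optimizer_trace = 'enabled=off';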
Optimizer Trace:
{
"steps": [
{
"join_preparation": {
"select#": 1,
"steps": [
{
"expanded_query": "/* select#1 */ select `tbl2`.`col3` AS `col3` from `tbl2` where ((`tbl2`.`col3` = 'fHI') and (`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5)) limit 33"
}
]
}
},
{
"join_optimization": {
"select#": 1,
"steps": [
{
"condition_processing": {
"condition": "WHERE",
"original_condition": "((`tbl2`.`col3` = 'fHI') and (`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5))",
"steps": [
{
"transformation": "equality_propagation",
"resulting_condition": "((`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5) and multiple equal('fHI', `tbl2`.`col3`))"
},
{
"transformation": "constant_propagation",
"resulting_condition": "((`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5) and multiple equal('fHI', `tbl2`.`col3`))"
},
{
"transformation": "trivial_condition_removal",
"resulting_condition": "((`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5) and multiple equal('fHI', `tbl2`.`col3`))"
}
]
}
},
{
"substitute_generated_columns": {
}
},
{
"table_dependencies": [
{
"table": "`tbl2`",
"row_may_be_null": false,
"map_bit": 0,
"depends_on_map_bits": [
]
}
]
},
{
"ref_optimizer_key_uses": [
{
"table": "`tbl2`",
"field": "col3",
"equals": "'fHI'",
"null_rejecting": true
}
]
},
{
"rows_estimation": [
{
"table": "`tbl2`",
"range_analysis": {
"table_scan": {
"rows": 1,
"cost": 2.45
},
"potential_range_indexes": [
{
"index": "PRIMARY",
"usable": false,
"cause": "not_applicable"
},
{
"index": "cd1",
"usable": true,
"key_parts": [
"col3",
"col1",
"col4",
"col2"
]
}
],
"best_covering_index_scan": {
"index": "cd1",
"cost": 0.35,
"chosen": true
},
"setup_range_conditions": [
],
"group_index_range": {
"chosen": false,
"cause": "not_group_by_or_distinct"
},
"skip_scan_range": {
"potential_skip_scan_indexes": [
{
"index": "cd1",
"usable": false,
"cause": "prefix_not_const_equality"
}
]
},
"analyzing_range_alternatives": {
"range_scan_alternatives": [
{
"index": "cd1",
"ranges": [
"fHI <= col3 <= fHI AND 20 < col1"
],
"index_dives_for_eq_ranges": true,
"rowid_ordered": false,
"using_mrr": false,
"index_only": true,
"rows": 1,
"cost": 0.36,
"chosen": false,
"cause": "cost"
}
],
"analyzing_roworder_intersect": {
"usable": false,
"cause": "too_few_roworder_scans"
}
}
}
}
]
},
{
"considered_execution_plans": [
{
"plan_prefix": [
],
"table": "`tbl2`",
"best_access_path": {
"considered_access_paths": [
{
"access_type": "ref",
"index": "cd1",
"chosen": false,
"cause": "range_uses_more_keyparts"
},
{
"rows_to_scan": 1,
"access_type": "scan",
"resulting_rows": 1,
"cost": 0.35,
"chosen": true
}
]
},
"condition_filtering_pct": 100,
"rows_for_plan": 1,
"cost_for_plan": 0.35,
"chosen": true
}
]
},
{
"attaching_conditions_to_tables": {
"original_condition": "((`tbl2`.`col3` = 'fHI') and (`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5))",
"attached_conditions_computation": [
],
"attached_conditions_summary": [
{
"table": "`tbl2`",
"attached": "((`tbl2`.`col3` = 'fHI') and (`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5))"
}
]
}
},
{
"finalizing_table_conditions": [
{
"table": "`tbl2`",
"original_table_condition": "((`tbl2`.`col3` = 'fHI') and (`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5))",
"final_table_condition ": "((`tbl2`.`col3` = 'fHI') and (`tbl2`.`col1` > 20) and (`tbl2`.`col4` > 0.5))"
}
]
},
{
"refine_plan": [
{
"table": "`tbl2`"
}
]
}
]
}
},
{
"join_execution": {
"select#": 1,
"steps": [
]
}
}
]
}

Related

How to flatten and split this JSON in Groovy?

I could use some help with writing a Groovy script to flatten and split JSON into multiple JSON-s based on nested array elements. Here is the original JSON:
{
"input_query": {
"discount_guid": "3afeb169-7969-4f6f-8928-d801692848b1",
"user_uid": 5467890,
"shopping_list": [
{
"article_id": 311729,
"current_price_without_promo": 7.69,
"promo_discount": 0,
"count": 1,
"apply_discount": true
},
{
"article_id": 229752,
"current_price_without_promo": 11.29,
"promo_discount": 0,
"count": 1,
"apply_discount": true
},
{
"article_id": 193672,
"current_price_without_promo": 79.99,
"promo_discount": 0,
"count": 1,
"apply_discount": true
},
{
"article_id": 261657,
"current_price_without_promo": 16.99,
"promo_discount": 0,
"count": 1,
"apply_discount": true
},
{
"article_id": 318153,
"current_price_without_promo": 13.99,
"promo_discount": 0,
"count": 1,
"apply_discount": true
}
],
"discount_params_per_article": [
{
"article_id": 311729,
"min_discount": 0,
"max_discount": 4.12,
"imposed_discount": null,
"article_target_probability_increase": 1.15,
"discount_downscale_factor": 1
},
{
"article_id": 229752,
"min_discount": 0,
"max_discount": 7.52,
"imposed_discount": null,
"article_target_probability_increase": 1.15,
"discount_downscale_factor": 1
},
{
"article_id": 193672,
"min_discount": 0,
"max_discount": 60,
"imposed_discount": null,
"article_target_probability_increase": 1.15,
"discount_downscale_factor": 1
},
{
"article_id": 261657,
"min_discount": 0,
"max_discount": 12.4,
"imposed_discount": null,
"article_target_probability_increase": 1.15,
"discount_downscale_factor": 1
},
{
"article_id": 318153,
"min_discount": 0,
"max_discount": 8,
"imposed_discount": null,
"article_target_probability_increase": 1.15,
"discount_downscale_factor": 1
}
],
"target_probability_increase": null,
"request_time": "2019-12-21T21:32:13.018635"
},
"total_discount": 0.94,
"article_discounts": [
{
"article_id": 311729,
"discount": 0.04
},
{
"article_id": 229752,
"discount": 0.08
},
{
"article_id": 193672,
"discount": 0.61
},
{
"article_id": 261657,
"discount": 0.13
},
{
"article_id": 318153,
"discount": 0.08
}
]
}
What I would like to do is flatten the original JSON to an array of JSON-s like this:
[{
"discount_guid": "3afeb169-7969-4f6f-8928-d801692848b1",
"user_uid": 5467890,
"article_id": 318153,
"current_price_without_promo": 13.99,
"promo_discount": 0,
"count": 1,
"apply_discount": true,
"min_discount": 0,
"max_discount": 8,
"imposed_discount": null,
"article_target_probability_increase": 1.15,
"discount_downscale_factor": 1,
"target_probability_increase": null,
"request_time": "2019-12-21T21:32:13.018635",
"total_discount": 0.94,
"discount": 0.08
},
{
"discount_guid": ...
},
...
]
I've managed to get a single flatten JSON this way:
import groovy.json.JsonOutput as jo
def content = new File('response.json')
def slurper = new groovy.json.JsonSlurper()
def object = slurper.parse(content) // parse(File); parseText() expects a String, not a File
def flattenMap(Map map) {
    def result = [:]
    map.each { k, v ->
        if (v instanceof Map) {
            result << flattenMap(v)
        } else if (v instanceof Collection && v.every { it instanceof Map }) {
            v.each {
                result << flattenMap(it)
            }
        } else {
            result[k] = v
        }
    }
    result
}
println(jo.prettyPrint(jo.toJson(flattenMap(object))))
But I don't have a clue how to get a full array of JSON-s. I am sure that there is an easy way to accomplish this, but I'm quite new to Groovy and so far I didn't find a solution. Any help would be greatly appreciated.
One way of solving this would be something like this:
import groovy.json.*
def str = '''
{
"input_query": {
"discount_guid": "3afeb169-7969-4f6f-8928-d801692848b1",
"user_uid": 5467890,
"shopping_list": [
{
"article_id": 311729,
"current_price_without_promo": 7.69,
"promo_discount": 0,
"count": 1,
"apply_discount": true
},
{
"article_id": 229752,
"current_price_without_promo": 11.29,
"promo_discount": 0,
"count": 1,
"apply_discount": true
},
{
"article_id": 193672,
"current_price_without_promo": 79.99,
"promo_discount": 0,
"count": 1,
"apply_discount": true
},
{
"article_id": 261657,
"current_price_without_promo": 16.99,
"promo_discount": 0,
"count": 1,
"apply_discount": true
},
{
"article_id": 318153,
"current_price_without_promo": 13.99,
"promo_discount": 0,
"count": 1,
"apply_discount": true
}
],
"discount_params_per_article": [
{
"article_id": 311729,
"min_discount": 0,
"max_discount": 4.12,
"imposed_discount": null,
"article_target_probability_increase": 1.15,
"discount_downscale_factor": 1
},
{
"article_id": 229752,
"min_discount": 0,
"max_discount": 7.52,
"imposed_discount": null,
"article_target_probability_increase": 1.15,
"discount_downscale_factor": 1
},
{
"article_id": 193672,
"min_discount": 0,
"max_discount": 60,
"imposed_discount": null,
"article_target_probability_increase": 1.15,
"discount_downscale_factor": 1
},
{
"article_id": 261657,
"min_discount": 0,
"max_discount": 12.4,
"imposed_discount": null,
"article_target_probability_increase": 1.15,
"discount_downscale_factor": 1
},
{
"article_id": 318153,
"min_discount": 0,
"max_discount": 8,
"imposed_discount": null,
"article_target_probability_increase": 1.15,
"discount_downscale_factor": 1
}
],
"target_probability_increase": null,
"request_time": "2019-12-21T21:32:13.018635"
},
"total_discount": 0.94,
"article_discounts": [
{
"article_id": 311729,
"discount": 0.04
},
{
"article_id": 229752,
"discount": 0.08
},
{
"article_id": 193672,
"discount": 0.61
},
{
"article_id": 261657,
"discount": 0.13
},
{
"article_id": 318153,
"discount": 0.08
}
]
}
'''
def json = new JsonSlurper().parseText(str)
// a predicate to check if a value is a plain value vs map or list
def isPlain = { v -> !(v instanceof Map) && !(v instanceof List) }
// for the two maps json.input_query and the root level json map,
// find all plain values
def plainValues = json.input_query.findAll { k, v -> isPlain(v) } +
        json.findAll { k, v -> isPlain(v) }
// find the three lists of maps, group by article_id and add the
// values for each article id to a cumulative map and finally
// add the plain values collected above to each cumulative map
def result = (json.input_query.shopping_list +
        json.input_query.discount_params_per_article +
        json.article_discounts).groupBy {
    it.article_id
}.values().collect { listOfMaps ->
    listOfMaps.sum() + plainValues
}
// print result
result.each { m ->
    println "-----"
    m.sort().each { k, v ->
        println "${k.padLeft(35)} -> $v"
    }
}
Executing the above produces:
─➤ groovy solution.groovy
-----
apply_discount -> true
article_id -> 311729
article_target_probability_increase -> 1.15
count -> 1
current_price_without_promo -> 7.69
discount -> 0.04
discount_downscale_factor -> 1
discount_guid -> 3afeb169-7969-4f6f-8928-d801692848b1
imposed_discount -> null
max_discount -> 4.12
min_discount -> 0
promo_discount -> 0
request_time -> 2019-12-21T21:32:13.018635
target_probability_increase -> null
total_discount -> 0.94
user_uid -> 5467890
-----
apply_discount -> true
article_id -> 229752
article_target_probability_increase -> 1.15
count -> 1
current_price_without_promo -> 11.29
discount -> 0.08
discount_downscale_factor -> 1
discount_guid -> 3afeb169-7969-4f6f-8928-d801692848b1
imposed_discount -> null
max_discount -> 7.52
min_discount -> 0
promo_discount -> 0
request_time -> 2019-12-21T21:32:13.018635
target_probability_increase -> null
total_discount -> 0.94
user_uid -> 5467890
-----
...
the printout at the end sorts by keys and does some indentation for readability.
You can then get the output json using something like this:
def outputJson = JsonOutput.toJson(result)
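If you need the output pretty-printed or written to disk, JsonOutput handles that as well (a small sketch; the file name is just an example):
println JsonOutput.prettyPrint(outputJson)
new File('flattened.json').text = JsonOutput.prettyPrint(outputJson)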

Mysql slow performance with group by order by

I am using MySQL 5.7. I have a table with 7,006,500 rows. My query performs a GROUP BY and, for each group, fetches the row with the maximum count, on a column that is already indexed, but it still takes a long time to execute. Below are my query, the execution plan, and the table schema.
Table Schema
CREATE TABLE templog (
id bigint(20) unsigned NOT NULL AUTO_INCREMENT,
userid bigint(12) unsigned NOT NULL,
type tinyint(3) NOT NULL DEFAULT '0',
os tinyint(4) NOT NULL DEFAULT '0',
day date DEFAULT NULL,
activetime smallint(5) unsigned NOT NULL DEFAULT '0',
createdat datetime NOT NULL DEFAULT CURRENT_TIMESTAMP,
timegroupid tinyint(4) NOT NULL DEFAULT '0',
PRIMARY KEY (`id`),
KEY templog_type_IDX (`type`,`day`,`userid`,`timegroupid`) USING BTREE
) ENGINE=InnoDB AUTO_INCREMENT=7006500 DEFAULT CHARSET=utf8;
My query:
SELECT SQL_NO_CACHE y.userid, y.timegroupid as besttime,y.cnt
FROM (
SELECT @row_number := CASE WHEN @userid = x.userid THEN @row_number + 1 ELSE 1 END AS row_number,
@userid := x.userid AS userid, x.cnt, x.timegroupid
FROM (
SELECT userid, timegroupid ,COUNT(userid) as cnt
from templog
where type = 3
AND day BETWEEN '2020-01-01' AND '2020-01-20'
AND userid < 771267
GROUP by userid, timegroupid
ORDER by userid DESC ,cnt DESC
) x,
( SELECT @row_number := 0, @userid := '') AS t
) y
where y.row_number = 1
ORDER by y.userid DESC
LIMIT 1000;
Query Explain format:
{
"query_block": {
"select_id": 1,
"cost_info": {
"query_cost": "12.00"
},
"ordering_operation": {
"using_filesort": true,
"table": {
"table_name": "y",
"access_type": "ref",
"possible_keys": [
"<auto_key0>"
],
"key": "<auto_key0>",
"used_key_parts": [
"row_number"
],
"key_length": "9",
"ref": [
"const"
],
"rows_examined_per_scan": 10,
"rows_produced_per_join": 10,
"filtered": "100.00",
"cost_info": {
"read_cost": "10.00",
"eval_cost": "2.00",
"prefix_cost": "12.00",
"data_read_per_join": "320"
},
"used_columns": [
"row_number",
"userid",
"cnt",
"timegroupid"
],
"attached_condition": "((`y`.`row_number` <=> 1))",
"materialized_from_subquery": {
"using_temporary_table": true,
"dependent": false,
"cacheable": true,
"query_block": {
"select_id": 2,
"cost_info": {
"query_cost": "6441.25"
},
"nested_loop": [
{
"table": {
"table_name": "t",
"access_type": "system",
"rows_examined_per_scan": 1,
"rows_produced_per_join": 1,
"filtered": "100.00",
"cost_info": {
"read_cost": "0.00",
"eval_cost": "0.20",
"prefix_cost": "0.00",
"data_read_per_join": "16"
},
"used_columns": [
"#row_number:=0",
"#userid:=''"
],
"materialized_from_subquery": {
"using_temporary_table": true,
"dependent": false,
"cacheable": true,
"query_block": {
"select_id": 4,
"message": "No tables used"
}
}
}
},
{
"table": {
"table_name": "x",
"access_type": "ALL",
"rows_examined_per_scan": 25725,
"rows_produced_per_join": 25725,
"filtered": "100.00",
"cost_info": {
"read_cost": "1296.25",
"eval_cost": "5145.00",
"prefix_cost": "6441.25",
"data_read_per_join": "602K"
},
"used_columns": [
"userid",
"timegroupid",
"cnt"
],
"materialized_from_subquery": {
"using_temporary_table": true,
"dependent": false,
"cacheable": true,
"query_block": {
"select_id": 3,
"cost_info": {
"query_cost": "140807.11"
},
"ordering_operation": {
"using_filesort": true,
"grouping_operation": {
"using_temporary_table": true,
"using_filesort": false,
"table": {
"table_name": "templog",
"access_type": "range",
"possible_keys": [
"templog_type_IDX"
],
"key": "templog_type_IDX",
"used_key_parts": [
"type",
"day"
],
"key_length": "13",
"rows_examined_per_scan": 694718,
"rows_pr
oduced_per_join": 25725,
"filtered": "33.33",
"using_index": true,
"cost_info": {
"read_cost": "1863.51",
"eval_cost": "5145.03",
"prefix_cost": "140807.11",
"data_read_per_join": "803K"
},
"used_columns": [
"id",
"userid",
"type",
"day",
"timegroupid"
],
"attached_condition": "((`templog`.`type` = 3) and (`templog`.`day` between '2020-01-01' and '2020-01-20') and (`templog`.`userid` < 771267))"
}
}
}
}
}
}
}
]
}
}
}
}
}
}
Is there any other way to optimize this query, change the index order, or rewrite the query in another way for better performance?
Do not count on @variables working like you would expect them to. I think the next version is beginning to disallow them.
The optimizer is free to throw away the ORDER BY in the derived table. This will lead to wrong results. Tacking on a large LIMIT to the subquery may prevent that.
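For example, on the innermost derived table (the large LIMIT value below is arbitrary; it exists only to keep the ORDER BY from being optimized away):
SELECT userid, timegroupid, COUNT(userid) AS cnt
FROM templog
WHERE type = 3
  AND day BETWEEN '2020-01-01' AND '2020-01-20'
  AND userid < 771267
GROUP BY userid, timegroupid
ORDER BY userid DESC, cnt DESC
LIMIT 999999999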
Build and maintain a "summary table". This can significantly speed up this and similar queries.
CREATE TABLE Summary (
userid ...,
timegroupid ...,
type ...,
day ...,
cnt SMALLINT UNSIGNED NOT NULL, -- COUNT(*)
tottime INT UNSIGNED NOT NULL, -- SUM(activetime)
PRIMARY KEY(timegroupid, userid, type, day)
);
However, without understanding the data better, I cannot predict whether this table will be noticeably smaller than the original. If it is not significantly smaller, the summary table will not be practical.
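If you do build it, the nightly maintenance could be sketched like this (assuming the elided column types match templog; the date literal is only an example):
INSERT INTO Summary (userid, timegroupid, type, day, cnt, tottime)
    SELECT userid, timegroupid, type, day, COUNT(*), SUM(activetime)
    FROM templog
    WHERE day = '2020-01-20' -- the day being summarized
    GROUP BY timegroupid, userid, type, day
ON DUPLICATE KEY UPDATE
    cnt = cnt + VALUES(cnt),
    tottime = tottime + VALUES(tottime);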
I added another tag -- follow it for more discussion of groupwise-max.

How to index nested array

How do I index the document below (queried with N1QL in Couchbase) to speed up searching by the SerialNumber field in a nested array (doc => groups => items => item.SerialNumber)?
Sample:
{
"Id": "0012ed6e-41af-4e45-b53f-bac3b2eb0b82",
"Machine": "Machine2",
"Groups": [
{
"Id": "0fed9b14-fa38-e511-893a-001125665867",
"Name": "Name",
"Items": [
{
"Id": "64e69b14-fa38-e511-893a-001125665867",
"SerialNumber": "1504H365",
"Position": 73
},
{
"Id": "7be69b14-fa38-e511-893a-001125665867",
"SerialNumber": "1504H364",
"Position": 72
}
]
},
{
"Id": "0fed9b14-fa38-e511-893a-001125665867",
"Name": "Name",
"Items": [
{
"Id": "64e69b14-fa38-e511-893a-001125665867",
"SerialNumber": "1504H365",
"Position": 73
},
{
"Id": "7be69b14-fa38-e511-893a-001125665867",
"SerialNumber": "1504H364",
"Position": 72
}
]
}
]
}
My queries (note that both CREATE INDEX statements use the same name, idx_serial, so only one of them can exist at a time):
CREATE INDEX idx_serial ON `aplikomp-bucket`
(ALL ARRAY(ALL ARRAY i.SerialNumber FOR i IN g.Items END ) FOR g In Groups END);
CREATE INDEX idx_serial ON `aplikomp-bucket` (DISTINCT ARRAY(DISTINCT ARRAY i.SerialNumber FOR i IN g.Items END ) FOR g In Groups END);
SELECT META().id FROM `aplikomp-bucket` AS a
WHERE ANY g IN a.Groups SATISFIES (ANY i IN g.Items SATISFIES i.SerialNumber > 123 END) END;
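One quick way to check whether either index is actually picked up is to run the SELECT under EXPLAIN (a sketch; the serial number literal is just an example):
EXPLAIN SELECT META().id FROM `aplikomp-bucket` AS a
WHERE ANY g IN a.Groups SATISFIES
    (ANY i IN g.Items SATISFIES i.SerialNumber = "1504H365" END) END;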

MySQL SELECT very slow, which index is lost?

Hello, I have a question: why can the uid column not use the index for certain integer values? Can someone please explain? Thanks!
The table item_sort_20170525 has 222,466,057 rows.
SHOW CREATE TABLE gives:
CREATE TABLE `item_sort_20170525` (
  `id` int(10) NOT NULL AUTO_INCREMENT,
  `iid` bigint(20) NOT NULL DEFAULT '0',
  `uid` bigint(20) NOT NULL DEFAULT '0',
  `kw_id` int(10) NOT NULL DEFAULT '0',
  `platform` tinyint(2) NOT NULL DEFAULT '0',
  `is_p4p` tinyint(1) NOT NULL DEFAULT '0',
  `page` tinyint(2) NOT NULL DEFAULT '1',
  `pos` smallint(4) NOT NULL DEFAULT '0',
  `real_pos` char(6) NOT NULL DEFAULT '',
  `created` int(10) NOT NULL DEFAULT '0',
  PRIMARY KEY (`id`),
  KEY `idx_keyword` (`kw_id`) USING BTREE,
  KEY `idx_iid` (`iid`,`platform`) USING BTREE,
  KEY `idx_uid` (`uid`,`platform`) USING BTREE
) ENGINE=InnoDB DEFAULT CHARSET=utf8
With uid = 896588234:
SELECT `kw_id`, COUNT(kw_id) AS `count` FROM `item_sort_20170525`
WHERE `uid` = 896588234 AND `platform` IN (12, 11) GROUP BY `kw_id` ORDER BY `kw_id` DESC LIMIT 21;
EXPLAIN shows:
select_type : SIMPLE
table : item_sort_20170525
type : range
possible_keys : idx_keyword,idx_uid
key : idx_uid
key_len : 9
ref :
rows : 585
Extra : Using index condition; Using temporary; Using filesort
With uid = 2259613579:
SELECT `kw_id`, COUNT(kw_id) AS `count` FROM `item_sort_20170525` force index(`idx_uid`)
WHERE `uid` = 2259613579 AND `platform` IN (12, 11) GROUP BY `kw_id` ORDER BY `kw_id` DESC LIMIT 21;
EXPLAIN shows:
select_type : SIMPLE
table : item_sort_20170525
type : ALL
possible_keys : idx_keyword,idx_uid
key :
key_len :
ref :
rows : 225015710
Extra : Using where; Using temporary; Using filesort
The idx_uid index is lost when uid equals a large integer like 2259613579, and FORCE INDEX(idx_uid) fails in just the same way!
Here are the MySQL optimizer traces:
{
"steps": [
{
"join_preparation": {
"select#": 1,
"steps": [
{
"expanded_query": "/* select#1 */ select `tem_sort_20170525`.`kw_id` AS `kw_id`,count(`tem_sort_20170525`.`kw_id`) AS `count` from `tem_sort_20170525` where ((`tem_sort_20170525`.`uid` = 2259613579) and (`tem_sort_20170525`.`platform` in (12,11))) group by `tem_sort_20170525`.`kw_id` order by `tem_sort_20170525`.`kw_id` desc limit 21"
}
] /* steps */
} /* join_preparation */
},
{
"join_optimization": {
"select#": 1,
"steps": [
{
"condition_processing": {
"condition": "WHERE",
"original_condition": "((`tem_sort_20170525`.`uid` = 2259613579) and (`tem_sort_20170525`.`platform` in (12,11)))",
"steps": [
{
"transformation": "equality_propagation",
"resulting_condition": "((`tem_sort_20170525`.`platform` in (12,11)) and multiple equal(2259613579, `tem_sort_20170525`.`uid`))"
},
{
"transformation": "constant_propagation",
"resulting_condition": "((`tem_sort_20170525`.`platform` in (12,11)) and multiple equal(2259613579, `tem_sort_20170525`.`uid`))"
},
{
"transformation": "trivial_condition_removal",
"resulting_condition": "((`tem_sort_20170525`.`platform` in (12,11)) and multiple equal(2259613579, `tem_sort_20170525`.`uid`))"
}
] /* steps */
} /* condition_processing */
},
{
"table_dependencies": [
{
"table": "`tem_sort_20170525`",
"row_may_be_null": false,
"map_bit": 0,
"depends_on_map_bits": [
] /* depends_on_map_bits */
}
] /* table_dependencies */
},
{
"ref_optimizer_key_uses": [
{
"table": "`tem_sort_20170525`",
"field": "uid",
"equals": "2259613579",
"null_rejecting": false
}
] /* ref_optimizer_key_uses */
},
{
"rows_estimation": [
{
"table": "`tem_sort_20170525`",
"const_keys_added": {
"keys": [
"idx_keyword"
] /* keys */,
"cause": "group_by"
} /* const_keys_added */,
"range_analysis": {
"table_scan": {
"rows": 225015710,
"cost": 4.61e7
} /* table_scan */,
"potential_range_indices": [
{
"index": "PRIMARY",
"usable": false,
"cause": "not_applicable"
},
{
"index": "idx_keyword",
"usable": true,
"key_parts": [
"kw_id",
"id"
] /* key_parts */
},
{
"index": "idx_iid",
"usable": false,
"cause": "not_applicable"
},
{
"index": "idx_uid",
"usable": true,
"key_parts": [
"uid",
"platform",
"id"
] /* key_parts */
}
] /* potential_range_indices */,
"setup_range_conditions": [
] /* setup_range_conditions */,
"group_index_range": {
"chosen": false,
"cause": "not_applicable_aggregate_function"
} /* group_index_range */,
"analyzing_range_alternatives": {
"range_scan_alternatives": [
{
"index": "idx_uid",
"ranges": [
"2259613579 <= uid <= 2259613579 AND 11 <= platform <= 11",
"2259613579 <= uid <= 2259613579 AND 12 <= platform <= 12"
] /* ranges */,
"index_dives_for_eq_ranges": true,
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
"rows": 29,
"cost": 36.81,
"chosen": true
}
] /* range_scan_alternatives */,
"analyzing_roworder_intersect": {
"usable": false,
"cause": "too_few_roworder_scans"
} /* analyzing_roworder_intersect */
} /* analyzing_range_alternatives */,
"chosen_range_access_summary": {
"range_access_plan": {
"type": "range_scan",
"index": "idx_uid",
"rows": 29,
"ranges": [
"2259613579 <= uid <= 2259613579 AND 11 <= platform <= 11",
"2259613579 <= uid <= 2259613579 AND 12 <= platform <= 12"
] /* ranges */
} /* range_access_plan */,
"rows_for_plan": 29,
"cost_for_plan": 36.81,
"chosen": true
} /* chosen_range_access_summary */
} /* range_analysis */
}
] /* rows_estimation */
},
{
"considered_execution_plans": [
{
"plan_prefix": [
] /* plan_prefix */,
"table": "`tem_sort_20170525`",
"best_access_path": {
"considered_access_paths": [
{
"access_type": "ref",
"index": "idx_uid",
"rows": 36,
"cost": 43.2,
"chosen": true
},
{
"access_type": "range",
"rows": 22,
"cost": 42.61,
"chosen": true
}
] /* considered_access_paths */
} /* best_access_path */,
"cost_for_plan": 42.61,
"rows_for_plan": 22,
"chosen": true
}
] /* considered_execution_plans */
},
{
"attaching_conditions_to_tables": {
"original_condition": "((`tem_sort_20170525`.`uid` = 2259613579) and (`tem_sort_20170525`.`platform` in (12,11)))",
"attached_conditions_computation": [
{
"table": "`tem_sort_20170525`",
"rechecking_index_usage": {
"recheck_reason": "low_limit",
"limit": 21,
"row_estimate": 22,
"range_analysis": {
"table_scan": {
"rows": 225015710,
"cost": 2.7e8
} /* table_scan */,
"potential_range_indices": [
{
"index": "PRIMARY",
"usable": false,
"cause": "not_applicable"
},
{
"index": "idx_keyword",
"usable": true,
"key_parts": [
"kw_id",
"id"
] /* key_parts */
},
{
"index": "idx_iid",
"usable": false,
"cause": "not_applicable"
},
{
"index": "idx_uid",
"usable": false,
"cause": "not_applicable"
}
] /* potential_range_indices */,
"setup_range_conditions": [
] /* setup_range_conditions */,
"group_index_range": {
"chosen": false,
"cause": "cannot_do_reverse_ordering"
} /* group_index_range */
} /* range_analysis */
} /* rechecking_index_usage */
}
] /* attached_conditions_computation */,
"attached_conditions_summary": [
{
"table": "`tem_sort_20170525`",
"attached": "((`tem_sort_20170525`.`uid` = 2259613579) and (`tem_sort_20170525`.`platform` in (12,11)))"
}
] /* attached_conditions_summary */
} /* attaching_conditions_to_tables */
},
{
"clause_processing": {
"clause": "ORDER BY",
"original_clause": "`tem_sort_20170525`.`kw_id` desc",
"items": [
{
"item": "`tem_sort_20170525`.`kw_id`"
}
] /* items */,
"resulting_clause_is_simple": true,
"resulting_clause": "`tem_sort_20170525`.`kw_id` desc"
} /* clause_processing */
},
{
"clause_processing": {
"clause": "GROUP BY",
"original_clause": "`tem_sort_20170525`.`kw_id`",
"items": [
{
"item": "`tem_sort_20170525`.`kw_id`"
}
] /* items */,
"resulting_clause_is_simple": true,
"resulting_clause": "`tem_sort_20170525`.`kw_id`"
} /* clause_processing */
},
{
"refine_plan": [
{
"table": "`tem_sort_20170525`",
"access_type": "table_scan"
}
] /* refine_plan */
},
{
"reconsidering_access_paths_for_index_ordering": {
"clause": "GROUP BY",
"index_order_summary": {
"table": "`tem_sort_20170525`",
"index_provides_order": true,
"order_direction": "desc",
"index": "idx_keyword",
"plan_changed": true,
"access_type": "index_scan"
} /* index_order_summary */
} /* reconsidering_access_paths_for_index_ordering */
}
] /* steps */
} /* join_optimization */
},
{
"join_execution": {
"select#": 1,
"steps": [
] /* steps */
} /* join_execution */
}
] /* steps */
}
{
"steps": [
{
"join_preparation": {
"select#": 1,
"steps": [
{
"expanded_query": "/* select#1 */ select `item_sort_20170525`.`kw_id` AS `kw_id`,count(`item_sort_20170525`.`kw_id`) AS `count` from `item_sort_20170525` FORCE INDEX (`idx_uid`) where ((`item_sort_20170525`.`uid` = 896588234) and (`item_sort_20170525`.`platform` in (12,11))) group by `item_sort_20170525`.`kw_id` order by `item_sort_20170525`.`kw_id` desc limit 21"
}
] /* steps */
} /* join_preparation */
},
{
"join_optimization": {
"select#": 1,
"steps": [
{
"condition_processing": {
"condition": "WHERE",
"original_condition": "((`item_sort_20170525`.`uid` = 896588234) and (`item_sort_20170525`.`platform` in (12,11)))",
"steps": [
{
"transformation": "equality_propagation",
"resulting_condition": "((`item_sort_20170525`.`platform` in (12,11)) and multiple equal(896588234, `item_sort_20170525`.`uid`))"
},
{
"transformation": "constant_propagation",
"resulting_condition": "((`item_sort_20170525`.`platform` in (12,11)) and multiple equal(896588234, `item_sort_20170525`.`uid`))"
},
{
"transformation": "trivial_condition_removal",
"resulting_condition": "((`item_sort_20170525`.`platform` in (12,11)) and multiple equal(896588234, `item_sort_20170525`.`uid`))"
}
] /* steps */
} /* condition_processing */
},
{
"table_dependencies": [
{
"table": "`item_sort_20170525` FORCE INDEX (`idx_uid`)",
"row_may_be_null": false,
"map_bit": 0,
"depends_on_map_bits": [
] /* depends_on_map_bits */
}
] /* table_dependencies */
},
{
"ref_optimizer_key_uses": [
{
"table": "`item_sort_20170525` FORCE INDEX (`idx_uid`)",
"field": "uid",
"equals": "896588234",
"null_rejecting": false
}
] /* ref_optimizer_key_uses */
},
{
"rows_estimation": [
{
"table": "`item_sort_20170525` FORCE INDEX (`idx_uid`)",
"const_keys_added": {
"keys": [
"idx_keyword"
] /* keys */,
"cause": "group_by"
} /* const_keys_added */,
"range_analysis": {
"table_scan": {
"rows": 225015710,
"cost": 2e308
} /* table_scan */,
"potential_range_indices": [
{
"index": "PRIMARY",
"usable": false,
"cause": "not_applicable"
},
{
"index": "idx_keyword",
"usable": false,
"cause": "not_applicable"
},
{
"index": "idx_iid",
"usable": false,
"cause": "not_applicable"
},
{
"index": "idx_uid",
"usable": true,
"key_parts": [
"uid",
"platform",
"id"
] /* key_parts */
}
] /* potential_range_indices */,
"setup_range_conditions": [
] /* setup_range_conditions */,
"group_index_range": {
"chosen": false,
"cause": "not_applicable_aggregate_function"
} /* group_index_range */,
"analyzing_range_alternatives": {
"range_scan_alternatives": [
{
"index": "idx_uid",
"ranges": [
"896588234 <= uid <= 896588234 AND 11 <= platform <= 11",
"896588234 <= uid <= 896588234 AND 12 <= platform <= 12"
] /* ranges */,
"index_dives_for_eq_ranges": true,
"rowid_ordered": false,
"using_mrr": false,
"index_only": false,
"rows": 585,
"cost": 704.01,
"chosen": true
}
] /* range_scan_alternatives */,
"analyzing_roworder_intersect": {
"usable": false,
"cause": "too_few_roworder_scans"
} /* analyzing_roworder_intersect */
} /* analyzing_range_alternatives */,
"chosen_range_access_summary": {
"range_access_plan": {
"type": "range_scan",
"index": "idx_uid",
"rows": 585,
"ranges": [
"896588234 <= uid <= 896588234 AND 11 <= platform <= 11",
"896588234 <= uid <= 896588234 AND 12 <= platform <= 12"
] /* ranges */
} /* range_access_plan */,
"rows_for_plan": 585,
"cost_for_plan": 704.01,
"chosen": true
} /* chosen_range_access_summary */
} /* range_analysis */
}
] /* rows_estimation */
},
{
"considered_execution_plans": [
{
"plan_prefix": [
] /* plan_prefix */,
"table": "`item_sort_20170525` FORCE INDEX (`idx_uid`)",
"best_access_path": {
"considered_access_paths": [
{
"access_type": "ref",
"index": "idx_uid",
"rows": 585,
"cost": 702,
"chosen": true
},
{
"access_type": "range",
"rows": 439,
"cost": 821.01,
"chosen": false
}
] /* considered_access_paths */
} /* best_access_path */,
"cost_for_plan": 702,
"rows_for_plan": 585,
"chosen": true
}
] /* considered_execution_plans */
},
{
"attaching_conditions_to_tables": {
"original_condition": "((`item_sort_20170525`.`uid` = 896588234) and (`item_sort_20170525`.`platform` in (12,11)))",
"attached_conditions_computation": [
{
"access_type_changed": {
"table": "`item_sort_20170525` FORCE INDEX (`idx_uid`)",
"index": "idx_uid",
"old_type": "ref",
"new_type": "range",
"cause": "uses_more_keyparts"
} /* access_type_changed */
}
] /* attached_conditions_computation */,
"attached_conditions_summary": [
{
"table": "`item_sort_20170525` FORCE INDEX (`idx_uid`)",
"attached": "((`item_sort_20170525`.`uid` = 896588234) and (`item_sort_20170525`.`platform` in (12,11)))"
}
] /* attached_conditions_summary */
} /* attaching_conditions_to_tables */
},
{
"clause_processing": {
"clause": "ORDER BY",
"original_clause": "`item_sort_20170525`.`kw_id` desc",
"items": [
{
"item": "`item_sort_20170525`.`kw_id`"
}
] /* items */,
"resulting_clause_is_simple": true,
"resulting_clause": "`item_sort_20170525`.`kw_id` desc"
} /* clause_processing */
},
{
"clause_processing": {
"clause": "GROUP BY",
"original_clause": "`item_sort_20170525`.`kw_id`",
"items": [
{
"item": "`item_sort_20170525`.`kw_id`"
}
] /* items */,
"resulting_clause_is_simple": true,
"resulting_clause": "`item_sort_20170525`.`kw_id`"
} /* clause_processing */
},
{
"refine_plan": [
{
"table": "`item_sort_20170525` FORCE INDEX (`idx_uid`)",
"pushed_index_condition": "((`item_sort_20170525`.`uid` = 896588234) and (`item_sort_20170525`.`platform` in (12,11)))",
"table_condition_attached": null,
"access_type": "range"
}
] /* refine_plan */
}
] /* steps */
} /* join_optimization */
},
{
"join_execution": {
"select#": 1,
"steps": [
{
"creating_tmp_table": {
"tmp_table_info": {
"table": "intermediate_tmp_table",
"row_length": 13,
"key_length": 4,
"unique_constraint": false,
"location": "memory (heap)",
"row_limit_estimate": 161319
} /* tmp_table_info */
} /* creating_tmp_table */
},
{
"filesort_information": [
{
"direction": "desc",
"table": "intermediate_tmp_table",
"field": "kw_id"
}
] /* filesort_information */,
"filesort_priority_queue_optimization": {
"limit": 21,
"rows_estimate": 540,
"row_size": 12,
"memory_available": 720896,
"chosen": true
} /* filesort_priority_queue_optimization */,
"filesort_execution": [
] /* filesort_execution */,
"filesort_summary": {
"rows": 22,
"examined_rows": 530,
"number_of_tmp_files": 0,
"sort_buffer_size": 440,
"sort_mode": "<sort_key, rowid>"
} /* filesort_summary */
}
] /* steps */
} /* join_execution */
}
] /* steps */
}
Your query:
SELECT `kw_id`, COUNT(kw_id) AS `count`
FROM `item_sort_20170525`
WHERE `uid` = 896588234
AND `platform` IN (12, 11)
GROUP BY `kw_id`
ORDER BY `kw_id` DESC
LIMIT 21;
Here you have two filtering criteria: uid equality and platform in a set.
Then you have a grouping criterion that's also a reverse ordering criterion.
Can you change the platform criterion from a set to a range? If so, do it. platform BETWEEN 11 AND 12. It does look like the query planner figured that one out on its own, though.
Then try a compound index that starts with equality criteria, then has range criteria, then has grouping criteria. In this case:
(uid, platform, kw_id)
That should allow your query to be satisfied from an index range scan. Adding kw_id to the index makes it a covering index, meaning everything required by the query can be satisfied by the index. It also may allow a reverse range scan to produce the DESC ordering.
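Concretely, that index could be added like so (the index name is arbitrary):
ALTER TABLE item_sort_20170525
  ADD KEY idx_uid_platform_kw (uid, platform, kw_id);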
Also, because you have declared kw_id as NOT NULL, you can use COUNT(*) in place of COUNT(kw_id). That may help, but probably not much.
Pro tip: Always format your queries so your selection, filtering, grouping, and ordering criteria jump out at you when you look at them. The more rows you have in your tables, the more important this becomes.
O.Jones's answer is very good. There are two more things you can try:
SELECT kw_id, SUM(cnt)
FROM ((SELECT `kw_id`, COUNT(*) AS cnt
FROM `item_sort_20170525`
WHERE `uid` = 896588234 AND `platform` = 11
) UNION ALL
(SELECT `kw_id`, COUNT(*) AS cnt
FROM `item_sort_20170525`
WHERE `uid` = 896588234 AND `platform` = 12
)
) i
GROUP BY `kw_id`
ORDER BY `kw_id` DESC
LIMIT 21;
You want the same index, item_sort_20170525(uid, platform, kw_id). MySQL should be able to remove the file sort for the inner group by. So, if there are not very many kw_ids, then the outer group by should not be very expensive.
Another option is to use a correlated subquery. This assumes that you have a list of kw_ids somewhere. The query looks like
select kw_id,
(select count(*)
from `item_sort_20170525` i
where i.kw_id = k.kw_id and `uid` = 896588234 and
`platform` in (11, 12)
) as cnt
from kw
order by kw.kw_id desc;
This version will work well if most/all kw_ids have at least one matching row in the items table. For this query, you want an index on item_sort_20170525(kw_id, uid, platform).
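Again, as a sketch (the index name is arbitrary):
ALTER TABLE item_sort_20170525
  ADD KEY idx_kw_uid_platform (kw_id, uid, platform);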

Powershell convertto-json without labels

How do I correctly convert a PowerShell array of object properties to a JSON array of values, that is, without the objects' property labels in the array?
For example:
I want to make two JSON arrays for Chart.js.
I will group some process objects:
$processgroup = get-process | group -property name
$processgroup.gettype()
IsPublic IsSerial Name     BaseType
-------- -------- ----     --------
True     True     Object[] System.Array
$chartlabels = $processgroup.name | convertto-json
$chartlabels
[
"ApMsgFwd",
"ApntEx",
"Apoint",
"ApplicationFrameHost",
"armsvc",
"BtwRSupportService",
"chrome",
"com.docker.proxy",
"com.docker.service",
"concentr",
"conhost",
"csrss",
"dllhost",
"Docker for Windows",
"dockerd",
"dwm",
"Everything",
"EXCEL",
"explorer",
"fontdrvhost",
"GROOVE",
"hidfind",
"HidMonitorSvc",
"Idle",
"iexplore",
"IpOverUsbSvc",
"jucheck",
"jusched",
"LicensingUI",
"lsass",
"mDNSResponder",
"Memory Compression",
"mqsvc",
"MSASCuiL",
"MsMpEng",
"MSOIDSVC",
"MSOIDSVCM",
"MySQLNotifier",
"NisSrv",
"notepad",
"notepad++",
"nvSCPAPISvr",
"nvvsvc",
"nvwmi64",
"nvxdsync",
"OfficeClickToRun",
"OneDrive",
"OUTLOOK",
"powershell",
"powershell_ise",
"prevhost",
"Receiver",
"redirector",
"rundll32",
"RuntimeBroker",
"SearchIndexer",
"SearchUI",
"Secure System",
"SecurityHealthService",
"SelfServicePlugin",
"services",
"SettingSyncHost",
"ShellExperienceHost",
"sihost",
"SkypeHost",
"smss",
"SMSvcHost",
"spiceworks",
"spiceworks-httpd",
"spoolsv",
"SppExtComObj",
"sppsvc",
"sqlwriter",
"svchost",
"System",
"SystemSettings",
"taskhostw",
"TSVNCache",
"vmcompute",
"vmms",
"vmnat",
"vmnetdhcp",
"vmware-authd",
"vmware-tray",
"vmware-usbarbitrator64",
"wfcrun32",
"wininit",
"winlogon",
"WINWORD",
"WmiPrvSE",
"WUDFHost"
]
# This is the array I want for the chart labels; now for the chart values array
$chartvalues = $processgroup | select count | convertto-json
$chartvalues
[
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 30
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 5
},
{
"Count": 2
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 2
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 2
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 4
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 30
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 2
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 2
},
{
"Count": 2
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 2
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 2
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 2
},
{
"Count": 1
},
{
"Count": 2
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 75
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 2
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 1
},
{
"Count": 2
},
{
"Count": 1
},
{
"Count": 1
}
]
How do I omit the "Count" label so PowerShell creates a single JSON array of the values only, as in the process name array?
I have tried
$chartvalues = $processgroup.count
which results in the count of the number of groups,
and I have tried
$chartvalues = $processgroup | select count -expandproperty count | convertto-json
with the same result as the above example.
You want a list rather than an object with key/value pairs, so this should work*:
$groups | ForEach-Object { $_.Count } | ConvertTo-Json
When you use select in the pipeline it creates an object, and thus pushes the object's property name as well as its value, as a key/value pair, into the JSON conversion process.
i.e. if you had used select for the Name property, you would get the name/value pairs in the JSON too:
$groups | Select-Object Name | ConvertTo-Json
*there may be better solutions out there... but it works.
Silly me: I only needed to select and expand the property, like so:
$chartvalues = $processgroup | select -expandproperty count | convertto-json
$chartvalues
[
1,
1,
1,
1,
1,
1,
30,
1,
1,
1,
5,
2,
1,
1,
1,
1,
2,
1,
1,
2,
1,
1,
1,
1,
4,
1,
1,
1,
30,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
1,
1,
2,
2,
1,
1,
1,
1,
1,
2,
1,
1,
1,
2,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
1,
2,
1,
1,
1,
1,
75,
1,
1,
1,
2,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
1,
1
]
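As a follow-up sketch, both arrays can also be emitted as one Chart.js-friendly payload by building a hashtable and letting ConvertTo-Json nest it (the property names labels/data are just examples of what a chart config might expect):
$processgroup = Get-Process | Group-Object -Property Name
$chartdata = [ordered]@{
    labels = @($processgroup.Name)                                   # the name array
    data   = @($processgroup | Select-Object -ExpandProperty Count)  # the count array
} | ConvertTo-Json
$chartdata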
All you have to do is use the -ExpandProperty parameter of Select-Object to achieve your stated desire. But is there any reason not to preserve the property/value association inside a single JSON array? e.g.:
$processgroup = get-process | group -property name | select name,count | convertto-json
yields:
$processgroup
[
{
"Name": "acevents",
"Count": 1
},
{
"Name": "acrotray",
"Count": 1
},
{
"Name": "AGSService",
"Count": 1
},
{
"Name": "aiCOMMAPI",
"Count": 1
},
{
"Name": "armsvc",
"Count": 1
},
{
"Name": "audiodg",
"Count": 1
},
{
"Name": "AuditManagerService",
"Count": 1
},
{
"Name": "CcmExec",
"Count": 1
},
{
"Name": "chrome",
"Count": 9
},
{
"Name": "conhost",
"Count": 2
},
{
"Name": "csrss",
"Count": 2
},
{
"Name": "dllhost",
"Count": 2
},
{
"Name": "dwm",
"Count": 1
},
{
"Name": "explorer",
"Count": 1
},
{
"Name": "Idle",
"Count": 1
},
{
"Name": "lsass",
"Count": 1
},
{
"Name": "lync",
"Count": 1
},
{
"Name": "msdtc",
"Count": 1
}
]