caffe: What is ReLU split - deep-learning

Here are my blob shapes:
data 4096 4.10e+03 (1, 2, 1, 2048)
Convolution1 130944 1.31e+05 (1, 64, 1, 2046)
ReLU1 130944 1.31e+05 (1, 64, 1, 2046)
Convolution2 130816 1.31e+05 (1, 64, 1, 2044)
ReLU2 130816 1.31e+05 (1, 64, 1, 2044)
ReLU2_ReLU2_0_split_0 130816 1.31e+05 (1, 64, 1, 2044)
ReLU2_ReLU2_0_split_1 130816 1.31e+05 (1, 64, 1, 2044)
Pooling1 65408 6.54e+04 (1, 64, 1, 1022)
Convolution3 130560 1.31e+05 (1, 128, 1, 1020)
ReLU3 130560 1.31e+05 (1, 128, 1, 1020)
Convolution4 130304 1.30e+05 (1, 128, 1, 1018)
ReLU4 130304 1.30e+05 (1, 128, 1, 1018)
ReLU4_ReLU4_0_split_0 130304 1.30e+05 (1, 128, 1, 1018)
ReLU4_ReLU4_0_split_1 130304 1.30e+05 (1, 128, 1, 1018)
Pooling2 65152 6.52e+04 (1, 128, 1, 509)
What are the two lines "ReLU2_ReLU2_0_split_0" and "ReLU2_ReLU2_0_split_1"? Where do they come from?

Your ReLU layer's output is used as a "bottom" by two different layers. Therefore, Caffe automatically adds a "Split" layer that creates two copies of the ReLU output and feeds each copy to one of those layers. These two copies are the ReLU2_ReLU2_0_split_0 and ReLU2_ReLU2_0_split_1 blobs you see in the listing.
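For illustration, the automatically inserted layer would look roughly like this if you wrote it out in the prototxt yourself (a sketch; the layer and its tops are named after the split blob, the producing layer, and the top index):
layer {
  name: "ReLU2_ReLU2_0_split"
  type: "Split"
  bottom: "ReLU2"
  top: "ReLU2_ReLU2_0_split_0"
  top: "ReLU2_ReLU2_0_split_1"
}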

Related

PyTorch - RuntimeError: Sizes of tensors must match except in dimension 2. Got 55 and 54 (The offending index is 0)

I used a 3DUnet with resblock to segment a CT image with input torch size of [1, 1, 96, 176, 176], but it throws the following error:
RuntimeError: Sizes of tensors must match except in dimension 2. Got 55 and 54 (The offending index is 0)
So I traced back and found that the error comes from
outputs = self.decoder_stage2(torch.cat([short_range6, long_range3], dim=1)) + short_range6
short_range6 has torch.Size([1, 64, 24, 55, 40]) while long_range3 has torch.Size([1, 128, 24, 54, 40]). I think this is because some dimension is not a power of 2, but I cannot find where to modify it.
Below is the complete structure of the network; thanks a lot for any help!
import torch
import torch.nn as nn
import torch.nn.functional as F

class ResUNet(nn.Module):
    def __init__(self, in_channel=1, out_channel=2, training=True):
        super().__init__()
        self.training = training
        self.dorp_rate = 0.2
        self.encoder_stage1 = nn.Sequential(
            nn.Conv3d(in_channel, 16, 3, 1, padding=1),
            nn.PReLU(16),
            nn.Conv3d(16, 16, 3, 1, padding=1),
            nn.PReLU(16),
        )
        self.encoder_stage2 = nn.Sequential(
            nn.Conv3d(32, 32, 3, 1, padding=1),
            nn.PReLU(32),
            nn.Conv3d(32, 32, 3, 1, padding=1),
            nn.PReLU(32),
            nn.Conv3d(32, 32, 3, 1, padding=1),
            nn.PReLU(32),
        )
        self.encoder_stage3 = nn.Sequential(
            nn.Conv3d(64, 64, 3, 1, padding=1),
            nn.PReLU(64),
            nn.Conv3d(64, 64, 3, 1, padding=2, dilation=2),
            nn.PReLU(64),
            nn.Conv3d(64, 64, 3, 1, padding=4, dilation=4),
            nn.PReLU(64),
        )
        self.encoder_stage4 = nn.Sequential(
            nn.Conv3d(128, 128, 3, 1, padding=3, dilation=3),
            nn.PReLU(128),
            nn.Conv3d(128, 128, 3, 1, padding=4, dilation=4),
            nn.PReLU(128),
            nn.Conv3d(128, 128, 3, 1, padding=5, dilation=5),
            nn.PReLU(128),
        )
        self.decoder_stage1 = nn.Sequential(
            nn.Conv3d(128, 256, 3, 1, padding=1),
            nn.PReLU(256),
            nn.Conv3d(256, 256, 3, 1, padding=1),
            nn.PReLU(256),
            nn.Conv3d(256, 256, 3, 1, padding=1),
            nn.PReLU(256),
        )
        self.decoder_stage2 = nn.Sequential(
            nn.Conv3d(128 + 64, 128, 3, 1, padding=1),
            nn.PReLU(128),
            nn.Conv3d(128, 128, 3, 1, padding=1),
            nn.PReLU(128),
            nn.Conv3d(128, 128, 3, 1, padding=1),
            nn.PReLU(128),
        )
        self.decoder_stage3 = nn.Sequential(
            nn.Conv3d(64 + 32, 64, 3, 1, padding=1),
            nn.PReLU(64),
            nn.Conv3d(64, 64, 3, 1, padding=1),
            nn.PReLU(64),
            nn.Conv3d(64, 64, 3, 1, padding=1),
            nn.PReLU(64),
        )
        self.decoder_stage4 = nn.Sequential(
            nn.Conv3d(32 + 16, 32, 3, 1, padding=1),
            nn.PReLU(32),
            nn.Conv3d(32, 32, 3, 1, padding=1),
            nn.PReLU(32),
        )
        self.down_conv1 = nn.Sequential(
            nn.Conv3d(16, 32, 2, 2),
            nn.PReLU(32)
        )
        self.down_conv2 = nn.Sequential(
            nn.Conv3d(32, 64, 2, 2),
            nn.PReLU(64)
        )
        self.down_conv3 = nn.Sequential(
            nn.Conv3d(64, 128, 2, 2),
            nn.PReLU(128)
        )
        self.down_conv4 = nn.Sequential(
            nn.Conv3d(128, 256, 3, 1, padding=1),
            nn.PReLU(256)
        )
        self.up_conv2 = nn.Sequential(
            nn.ConvTranspose3d(256, 128, 2, 2),
            nn.PReLU(128)
        )
        self.up_conv3 = nn.Sequential(
            nn.ConvTranspose3d(128, 64, 2, 2),
            nn.PReLU(64)
        )
        self.up_conv4 = nn.Sequential(
            nn.ConvTranspose3d(64, 32, 2, 2),
            nn.PReLU(32)
        )
        # 256*256
        self.map4 = nn.Sequential(
            nn.Conv3d(32, out_channel, 1, 1),
            nn.Upsample(scale_factor=(1, 1, 1), mode='trilinear', align_corners=False),
            nn.Softmax(dim=1)
        )
        # 128*128
        self.map3 = nn.Sequential(
            nn.Conv3d(64, out_channel, 1, 1),
            nn.Upsample(scale_factor=(2, 2, 2), mode='trilinear', align_corners=False),
            nn.Softmax(dim=1)
        )
        # 64*64
        self.map2 = nn.Sequential(
            nn.Conv3d(128, out_channel, 1, 1),
            nn.Upsample(scale_factor=(4, 4, 4), mode='trilinear', align_corners=False),
            nn.Softmax(dim=1)
        )
        # 32*32
        self.map1 = nn.Sequential(
            nn.Conv3d(256, out_channel, 1, 1),
            nn.Upsample(scale_factor=(8, 8, 8), mode='trilinear', align_corners=False),
            nn.Softmax(dim=1)
        )

    def forward(self, inputs):
        long_range1 = self.encoder_stage1(inputs) + inputs
        short_range1 = self.down_conv1(long_range1)
        long_range2 = self.encoder_stage2(short_range1) + short_range1
        long_range2 = F.dropout(long_range2, self.dorp_rate, self.training)
        short_range2 = self.down_conv2(long_range2)
        long_range3 = self.encoder_stage3(short_range2) + short_range2
        long_range3 = F.dropout(long_range3, self.dorp_rate, self.training)
        short_range3 = self.down_conv3(long_range3)
        long_range4 = self.encoder_stage4(short_range3) + short_range3
        long_range4 = F.dropout(long_range4, self.dorp_rate, self.training)
        short_range4 = self.down_conv4(long_range4)
        outputs = self.decoder_stage1(long_range4) + short_range4
        outputs = F.dropout(outputs, self.dorp_rate, self.training)
        output1 = self.map1(outputs)
        short_range6 = self.up_conv2(outputs)
        outputs = self.decoder_stage2(torch.cat([short_range6, long_range3], dim=1)) + short_range6
        outputs = F.dropout(outputs, self.dorp_rate, self.training)
        output2 = self.map2(outputs)
        short_range7 = self.up_conv3(outputs)
        outputs = self.decoder_stage3(torch.cat([short_range7, long_range2], dim=1)) + short_range7
        outputs = F.dropout(outputs, self.dorp_rate, self.training)
        output3 = self.map3(outputs)
        short_range8 = self.up_conv4(outputs)
        outputs = self.decoder_stage4(torch.cat([short_range8, long_range1], dim=1)) + short_range8
        output4 = self.map4(outputs)
        if self.training is True:
            return output1, output2, output3, output4
        else:
            return output4
You can pad your image's dimensions to be multiples of 32. By doing this, you won't have to change the 3DUnet's parameters.
Here is a simple snippet to show you the way.
import math
import torch.nn as nn

# I assume that you named your input image as img
padding1_mult = math.floor(img.shape[3] / 32) + 1
padding2_mult = math.floor(img.shape[4] / 32) + 1
pad1 = (32 * padding1_mult) - img.shape[3]
pad2 = (32 * padding2_mult) - img.shape[4]
# ReplicationPad3d takes a 5-D input and a 6-element padding tuple:
# (W_left, W_right, H_top, H_bottom, D_front, D_back)
padding = nn.ReplicationPad3d((0, pad2, pad1, 0, 0, 0))
img = padding(img)
After this operation, your image shape should be torch.Size([1, 1, 96, 192, 192]).
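If you prefer the functional form, the same padding can be done in a single call (a sketch, reusing pad1 and pad2 from above on the 5-D tensor img):
import torch.nn.functional as F

img = F.pad(img, (0, pad2, pad1, 0, 0, 0), mode='replicate')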

AWK: filtering of the multi-column data

I am dealing with the post-processing of CSV log files arranged in a multi-column format. Usually the first column corresponds to the line number (ID), the second one contains its population (POP, the number of samples that fell into this ID), and the third column (dG) represents some inherent value of this ID (which is always negative):
ID, POP, dG
1, 7, -6.9700
2, 2, -6.9500
3, 2, -6.8500
4, 6, -6.7200
5, 14, -6.7100
6, 5, -6.7000
7, 10, -6.5600
8, 10, -6.4800
9, 7, -6.4500
10, 3, -6.4400
11, 8, -6.4300
12, 10, -6.4200
13, 3, -6.3300
14, 7, -6.2200
15, 1, -6.2000
16, 3, -6.2000
17, 4, -6.1700
18, 1, -6.0500
19, 9, -6.0200
20, 1, -6.0100
21, 1, -6.0000
22, 3, -5.9900
23, 4, -5.9800
24, 3, -5.9200
25, 2, -5.9100
26, 1, -5.8900
27, 1, -5.8500
28, 1, -5.8200
29, 1, -5.7900
30, 8, -5.7800
31, 1, -5.7800
32, 1, -5.7200
33, 3, -5.7100
34, 2, -5.7100
35, 1, -5.6900
36, 4, -5.6800
37, 2, -5.6500
38, 4, -5.6100
39, 1, -5.5900
40, 1, -5.5600
41, 1, -5.5500
42, 2, -5.5500
43, 1, -5.5200
44, 1, -5.5100
45, 2, -5.5000
46, 1, -5.5000
47, 3, -5.4700
48, 2, -5.4500
49, 1, -5.4500
50, 4, -5.4300
51, 1, -5.4300
52, 1, -5.3800
53, 2, -5.3800
54, 1, -5.3500
55, 1, -5.2800
56, 1, -5.2500
57, 2, -5.2500
58, 2, -5.2400
59, 2, -5.2300
60, 1, -5.1400
61, 1, -5.1100
62, 1, -5.1000
63, 2, -5.0300
64, 2, -5.0100
65, 2, -5.0100
66, 1, -4.9700
67, 1, -4.9200
68, 1, -4.9000
69, 2, -4.9000
70, 1, -4.8900
71, 1, -4.8600
72, 3, -4.7900
73, 2, -4.7900
74, 1, -4.7900
75, 1, -4.7700
76, 2, -4.7600
77, 1, -4.7500
78, 1, -4.7400
79, 1, -4.7300
80, 1, -4.7200
81, 2, -4.7100
82, 1, -4.6800
83, 2, -4.6300
84, 1, -4.5500
85, 1, -4.5000
86, 1, -4.4800
87, 2, -4.4500
88, 1, -4.4300
89, 1, -4.3900
90, 1, -4.3000
91, 1, -4.2500
92, 1, -4.2300
93, 1, -4.2200
94, 2, -4.1600
95, 1, -4.1500
96, 1, -4.1100
97, 1, -4.0300
98, 1, -4.0100
I need to reduce the total number of these lines, keeping in the output CSV only the lines from the first one up to the line with the biggest population (POP, the value of the second column) observed in the whole dataset. So in my example the expected output should be the first 5 lines, since the 5th ID has the biggest value of the second column (POP) compared to all 98 lines:
ID, POP, dG
1, 7, -6.9700
2, 2, -6.9500
3, 2, -6.8500
4, 6, -6.7200
5, 14, -6.7100
Could you suggest an AWK solution that would accept my CSV file and produce a new one after such filtering based on the values in the second column?
You could try this awk command:
awk -F "," 'a < $2 {for(idx=0; idx < i; idx++) {print arr[idx]} print $0; a=int($2); i=0} a > $2 && NR > 1 {arr[i]=$0; i++}' input
It keeps a running maximum a of the second column: lines with a smaller value are buffered in arr, and whenever a line with a larger value appears, the buffered lines followed by that line are printed.
See demo at: https://awk.js.org/?gist=c8751cc25e444fb2e2b1a8f29849f127
This approach processes the file twice: once to find the max, and again to print the lines up to the max. I've incorporated your request to print a minimum number of lines.
awk -F ', ' -v min_lines=5 '
NR == FNR {
if ($2 > max) max=$2
next
}
{print}
$2 == max {
for (i = FNR; i <= min_lines; i++) {
getline
print
}
exit
}
' file.csv file.csv
$ awk -F, -v minlines=5 'NR==FNR { if($2>=max && NR>1) {max=$2; maxi=NR} next }
FNR<=minlines+1 || FNR<=maxi' file{,}
ID, POP, dG
1, 7, -6.9700
2, 2, -6.9500
3, 2, -6.8500
4, 6, -6.7200
5, 14, -6.7100
This will print until the last occurrence of the max value. If you want the first occurrence instead, change $2>=max to $2>max.
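For completeness, a single-pass variant is also possible if the file fits in memory (a sketch, using the same comma-space field separator as above; like the $2>max form, it keeps the first occurrence of the maximum):
awk -F ', ' '
NR == 1 { print; next }                            # pass the header through
{ buf[NR] = $0 }                                   # buffer every data line
$2 > max { max = $2; maxnr = NR }                  # remember where the first maximum of POP occurs
END { for (n = 2; n <= maxnr; n++) print buf[n] }  # print everything up to that line
' file.csv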

how do i rewrite the correct statement for COUNT under this condition

Bring back the count of all the batsmen who have played a certain number of innings only if there are more than one batsman who have played that number of innings. 4 innings – 2 batsmen, 5 innings – 3 batsmen etc.
SELECT COUNT(Innings) AS TotalInnings, COUNT(player) AS Totalplayer
FROM Batting
GROUP BY Player
Player is the field name for the batsman's name. I am kind of stuck on how to write the statement for the last requirement:
4 innings – 2 batsmen, 5 innings – 3 batsmen, etc.
This is the table design
CREATE TABLE Batting (
Player CHAR(25),
CarrerSpan VARCHAR(20),
Matches INT,
Innings INT,
Playing INT,
Runs INT,
HighestScore CHAR(10),
AverageScore NUMERIC,
BallsFaced INT,
StrikeRate NUMERIC,
Hundreds INT,
Fifties INT,
Zeros INT,
Fours INT,
Sixs INT
);
INSERT INTO Batting
VALUES ('JP Duminy', '2007-2018', 77, 71, 22, 1825, '96*', 37.24, 1468, 124.31, 0, 11, 6, 130, 65);
INSERT INTO Batting
VALUES ('AB de Villiers', '2006-2017', 78, 75, 11, 1672, '79*', 26.12, 1237, 135.16, 0, 10, 5, 140, 60);
INSERT INTO Batting
VALUES ('HM Amla', '2009-2018', 41, 41, 5, 1158, '97*', 32.16, 883, 131.14, 0, 7, 2, 133, 23);
INSERT INTO Batting
VALUES ('F du Plessis', '2012-2017', 36, 36, 6, 1129, 119, 37.63, 849, 132.97, 1, 7, 0, 102, 35);
INSERT INTO Batting
VALUES('DA Miller', '2010-2018', 58, 51, 15, 1043, '101*', 28.97, 745, 140.00, 1, 1, 0, 71, 46);
INSERT INTO Batting
VALUES('GC Smith', '2005-2011', 33, 33, 2, 982, '89*', 31.67, 770, 127.53, 0, 5, 1, 123, 26);
INSERT INTO Batting
VALUES('Q de Kock','2012-2018', 32, 32, 4, 821, 59, 29.32, 637, 128.88, 0, 2, 3, 97, 24);
INSERT INTO Batting
VALUES('JH Kallis', '2005-2012', 25, 23, 4, 666, 73, 35.05, 558, 119.35, 0, 5, 0, 56, 20);
INSERT INTO Batting
VALUES('JA Morkel', '2005-2015', 50, 38, 11, 572, 43, 21.18, 402, 142.28, 0, 0, 1, 29, 39);
INSERT INTO Batting
VALUES('F Behardien', '2012-2018', 37, 29, 13, 515, '64*', 32.18, 402, 128.10, 0, 1, 1, 37, 16);
INSERT INTO Batting
VALUES('HH Gibbs', '2005-2010', 23, 23, 1, 400, '90*', 18.18, 318, 125.78, 0, 3, 4, 45, 12);
INSERT INTO Batting
VALUES('RR Rossouw', '2014-2016', 15, 14, 3, 327, 78, 29.72, 237, 137.97, 0, 2, 2, 29, 12);
INSERT INTO Batting
VALUES('LE Bosman', '2006-2010', 14, 14, 1, 323, 94, 24.84, 219, 147.48, 0, 3, 2, 27, 20);
INSERT INTO Batting
VALUES('RR Hendricks', '2014-2018', 13, 13, 0, 294, 70, 22.61, 262, 112.21, 0, 1, 2, 33, 3);
INSERT INTO Batting
VALUES('MV Boucher', '2005-2010', 25, 21, 6, 268, '36*', 17.86, 275, 97.45, 0, 0, 0, 22, 2);
INSERT INTO Batting
VALUES('RE Levi', '2012-2012', 13 ,13, 2, 236, '117*', 21.45, 167, 141.31, 1, 1, 3, 20, 15);
INSERT INTO Batting
VALUES('MN van Wyk', '2007-2015', 8, 7, 1, 225, '114*', 37.50, 157, 143.31, 1, 1, 0, 19, 14);
INSERT INTO Batting
VALUES('CA Ingram', '2010-2012', 9, 9, 1, 210, 78, 26.25, 162, 129.62, 0, 1, 1, 23, 7);
INSERT INTO Batting
VALUES('JM Kemp', '2005-2007', 8, 7, 3, 203, '89*', 50.75, 160, 126.87, 0, 1, 0, 17, 10);
INSERT INTO Batting
VALUES('J Botha', '2006-2012', 40, 0, 9, 201, 34, 18.27, 165, 121.81, 0, 0, 1, 15, 9);
INSERT INTO Batting
VALUES('H Davids', '2012-2013', 9, 9, 0, 161, 68, 17.88, 134, 120.14, 0, 2, 2, 18, 4);
INSERT INTO Batting
VALUES('JL Ontong', '2008-2015', 14, 10, 0, 158, 48, 15.80, 109, 144.95, 0, 0, 1, 6, 11);
INSERT INTO Batting
VALUES('JT Smuts', '2017-2018', 8, 8, 0, 126, 45, 15.75, 114, 110.52, 0, 0, 1, 14, 5);
INSERT INTO Batting
VALUES('RJ Peterson', '2006-2014', 21, 12, 4, 124, 34, 15.50, 113, 109.73, 0, 0, 1, 13, 2);
INSERT INTO Batting
VALUES('WD Parnell', '2009-2017', 40, 13, 9, 114, '29*', 28.50, 96, 118.75, 0, 0, 0, 10, 3);
INSERT INTO Batting
VALUES( 'H Klaasen', '2018-2018', 4, 4, 0, 110, '69', 27.50, 64, 171.87, 0, 1, 0, 5, 9);
INSERT INTO Batting
VALUES( 'M Mosehle', '2017-2017', 7, 6, 1, 105, '36', 21.00, 65, 161.53, 0, 0, 0, 6, 9);
INSERT INTO Batting
VALUES( 'D Wiese', '2013-2016', 20, 11, 4, 92, '28', 13.14, 75, 122.66, 0, 0, 1, 4, 3);
INSERT INTO Batting
VALUES( 'SM Pollock', '2005-2008', 12, 9, 2, 86, '36*', 12.28, 70, 122.85, 0, 0, 3, 4, 4);
INSERT INTO Batting
VALUES( 'CH Morris', '2012-2018', 17, 10, 3, 77, '17*', 11.00, 70, 110.00, 0, 0, 2, 7, 2);
INSERT INTO Batting
VALUES( 'RE van der Merwe', '2009-2010', 13, 6, 3, 57, '48', 19.00, 50, 114.00, 0, 0, 1, 2, 4);
INSERT INTO Batting
VALUES( 'C Jonker', '2018-2018', 1, 1, 0, 49, '49', 49.00, 24, 204.16, 0, 0, 0, 5, 2);
INSERT INTO Batting
VALUES( 'HG Kuhn', '2009-2017', 7, 6, 2, 49, '29', 12.25, 42, 116.66, 0, 0, 0, 3, 2);
INSERT INTO Batting
VALUES( 'JJ van der Wath', '2006-2007', 8, 4, 1, 46, '21', 15.33, 39, 117.94, 0, 0, 0, 3, 1);
INSERT INTO Batting
I think this could be the solution to your problem:
Query1
SELECT Innings,
COUNT(player) AS Totalplayer
FROM Batting
GROUP BY Innings
HAVING COUNT(player) > 1;
OR
Query2
SELECT Innings,
COUNT(player) AS Totalplayer
FROM Batting
GROUP BY Innings
HAVING COUNT(Innings) > 1;
Sample output:
Innings  Totalplayer
4        2
6        3
7        2
9        3
10       2
13       3
14       2
23       2
View on DB Fiddle
Alternatively, you can count the number of rows for which the Innings column has a certain value, save this number in a temporary table, and then query that table. See the following SQL:
CREATE TEMPORARY TABLE BatsmenCount (SELECT count(*) as total FROM Batting WHERE Innings=71);
SELECT * FROM BatsmenCount WHERE total > 1;
The first statement counts the batsmen that have played 71 innings and saves the result in a temporary table; that number is then fetched with the SELECT query. If the query returns no rows, the number of batsmen that have played 71 innings is 1 or less. If it returns a row, that value is the number of batsmen that have played 71 innings.
See the SQL code on dbfiddle.
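As a hedged alternative sketch, the same check can also be written without a temporary table by combining it with the GROUP BY/HAVING form from the first answer (71 hard-coded here, exactly as in the query above):
SELECT Innings, COUNT(Player) AS Totalplayer
FROM Batting
WHERE Innings = 71
GROUP BY Innings
HAVING COUNT(Player) > 1;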

how to reduce line space for <pre>

reproducible code
Hello, I used the following code in markdown
<pre><span style="color:blue">Text = ' Sooo SAD I will miss you here in San Diego!!!'</span>, <span style="color:blue">Selected Text='Sooo SAD'</span>, <span style="color:blue">Sentiment = 'negative'</span></pre>
<pre>tokens =</pre>
<pre>input_ids = [0, 2430, 98, 3036, 5074, 939, 40, 2649, 47, 259, 11, 15610, 1597, 2977, 16506, 2, 1, 1...1]</pre>
<pre>attention_masks = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0...0]</pre>
<pre>start_tokens = [0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...0]</pre>
<pre>end_tokens = [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...0]</pre>
<pre>Edit Text = ' Sooo SAD I will miss you here in San Diego!!!' (len=46), Edit Seletected Text='Sooo SAD' (len=8)</pre>
<pre>char = [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0...0] (len=46, num_1=9)</pre>
<pre>offset = [(0, 3), (3, 5), (5, 9), (9, 11), (11, 16), (16, 21), (21, 25), (25, 30), (30, 33), (33, 37), (37, 41), (41, 43), (43, 46)]</pre>
undesired effect
and it generates the following effect.
But the overly wide line spacing makes the result kind of ugly. Do you know how to reduce the line spacing?
Tried method
I tried this method
<pre style='display:inline'>tokens =</pre> <br>
<pre>input_ids = [0, 2430, 98, 3036, 5074, 939, 40, 2649, 47, 259, 11, 15610, 1597, 2977, 16506, 2, 1, 1...1]</pre>
But it only works for one line. If I want to apply it to other lines, such as this:
<pre style='display:inline'>tokens =</pre> <br>
<pre style='display:inline'>input_ids = [0, 2430, 98, 3036, 5074, 939, 40, 2649, 47, 259, 11, 15610, 1597, 2977, 16506, 2, 1, 1...1]</pre> <br>
<pre>attention_masks = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0...0]</pre>
it covers part of the line content.
Question
How can I reduce the line spacing? Thank you.
You could use the CSS display and line-height properties on the pre elements and adjust the line-height to the value you want.
pre {
display: inline;
line-height: 0.8em;
}
<pre><span style="color:blue">Text = ' Sooo SAD I will miss you here in San Diego!!!'</span>, <span style="color:blue">Selected Text='Sooo SAD'</span>, <span style="color:blue">Sentiment = 'negative'</span></pre>
<pre>tokens =</pre>
<pre>input_ids = [0, 2430, 98, 3036, 5074, 939, 40, 2649, 47, 259, 11, 15610, 1597, 2977, 16506, 2, 1, 1...1]</pre>
<pre>attention_masks = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0...0]</pre>
<pre>start_tokens = [0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...0]</pre>
<pre>end_tokens = [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...0]</pre>
<pre>Edit Text = ' Sooo SAD I will miss you here in San Diego!!!' (len=46), Edit Seletected Text='Sooo SAD' (len=8)</pre>
<pre>char = [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0...0] (len=46, num_1=9)</pre>
<pre>offset = [(0, 3), (3, 5), (5, 9), (9, 11), (11, 16), (16, 21), (21, 25), (25, 30), (30, 33), (33, 37), (37, 41), (41, 43), (43, 46)]</pre>
Or reset the default margin.
You can use margin: 0 (or any other value) for pre in CSS:
pre {
margin: 0;
}
<pre><span style="color:blue">Text = ' Sooo SAD I will miss you here in San Diego!!!'</span>, <span style="color:blue">Selected Text='Sooo SAD'</span>, <span style="color:blue">Sentiment = 'negative'</span></pre>
<pre>tokens =</pre>
<pre>input_ids = [0, 2430, 98, 3036, 5074, 939, 40, 2649, 47, 259, 11, 15610, 1597, 2977, 16506, 2, 1, 1...1]</pre>
<pre>attention_masks = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0...0]</pre>
<pre>start_tokens = [0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...0]</pre>
<pre>end_tokens = [0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0...0]</pre>
<pre>Edit Text = ' Sooo SAD I will miss you here in San Diego!!!' (len=46), Edit Seletected Text='Sooo SAD' (len=8)</pre>
<pre>char = [1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0...0] (len=46, num_1=9)</pre>
<pre>offset = [(0, 3), (3, 5), (5, 9), (9, 11), (11, 16), (16, 21), (21, 25), (25, 30), (30, 33), (33, 37), (37, 41), (41, 43), (43, 46)]</pre>
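Another option, not covered by the answers above but worth noting as a sketch: since <pre> preserves newlines, you can put all the lines into a single <pre> element, so there are no per-element margins between lines at all:
<pre>
tokens =
input_ids = [0, 2430, 98, 3036, 5074, 939, 40, 2649, 47, 259, 11, 15610, 1597, 2977, 16506, 2, 1, 1...1]
attention_masks = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0...0]
</pre>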

MySQL - Subtract two columns and get the result

I am trying to write a query that counts the number of students and tells me the remaining seats available in a vehicle.
I have managed to identify which student is associated with which bus, but I am stuck on finding the seats remaining.
Below is the data:
vehnum  route  seats  student id
23      2      45     2345
33      3      46     6789
Below is the query:
SELECT deveh.vehicle_reg_no AS vehnum
, veh.route_code AS route
, deveh.seating_capacity AS vehseat
, class.fk_stu_id
FROM tbl_stu_class AS class
JOIN tbl_stu_route AS route
ON route.fk_stu_cls_id = class.pk_stu_cls_id
JOIN list_routes AS veh
ON route.fk_route_id = veh.pk_route_id
JOIN list_vehicles AS deveh
ON deveh.pk_vehicle_id = veh.fk_vehicle_id
WHERE class.fk_year_id = 62
AND class.current_yr = 'Y'
Added sample data:
INSERT INTO `list_vehicles` (`pk_vehicle_id`, `vehicle_reg_no`, `vehicle_type`, `regd_owner_name`, `seating_capacity`, `brand_model`, `type_of_body`, `reg_address`, `fuel_type`, `chasis_no`, `reg_authority`, `engine_no`, `color`, `reg_date`, `reg_valid_date`, `month_yr_mfg`, `fk_user_id`, `timestamp`) VALUES
(46, 'J58987', 'Bus', 'M', 30, 'VOlvo', 'Steel', 'FBD', 'Petrol', '565', 'M1', '5689', 'blue', '2016-10-02', '2016-10-02', '2014-12-31', 1, '2018-07-11 18:01:06'),
(53, 'J1234', 'Bus', 'der', 45, 'Volvo', 'Metal', 'Indirapuram', 'Petrol', '123456', 'det', '2365', 'blue', '2010-12-12', '2020-12-12', '2009-12-11', 1, '2018-07-12 06:54:50'),
(54, 'J1234er', 'Van', 'der', 46, 'Volvo', 'Metal', 'Indirapuram', 'Petrol', '12345634', 'det', '236534', 'blue', '2020-02-03', '2020-02-03', '2008-11-11', 1, '2018-07-12 06:57:59');
INSERT INTO `tbl_stu_class` (`pk_stu_cls_id`, `fk_stu_id`, `fk_year_id`, `fk_class_id`, `fk_section_id`, `current_yr`, `fk_user_id`, `timestamp`) VALUES
(1, 56, 50, 22, 10, 'N', 1, '2018-06-08 06:57:34'),
(3, 123, 50, 24, 7, 'N', 1, '2018-06-12 07:54:46'),
(4, 126, 50, 24, 7, 'N', 56, '2018-06-12 07:54:46'),
(5, 123, 52, 25, 7, 'Y', 1, '2018-06-12 17:30:32'),
(6, 126, 52, 25, 7, 'Y', 1, '2018-06-12 17:30:32'),
(7, 132, 50, 22, 9, 'Y', 1, '2018-06-24 10:27:57'),
(8, 133, 51, 23, NULL, 'Y', 1, '2018-06-24 18:22:33'),
(10, 127, 51, 23, NULL, 'Y', 0, '2018-07-11 17:47:05'),
(11, 134, 62, 22, NULL, 'Y', 0, '2018-07-13 08:11:16'),
(12, 135, 62, 21, 7, 'Y', 1, '2018-07-13 11:12:08'),
(13, 136, 62, 21, 9, 'Y', 1, '2018-07-13 14:59:04');
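One way to get the remaining seats is to group by vehicle and subtract the student count from the seating capacity. Below is a minimal sketch built on the query above (it assumes every joined row represents one student assigned to that bus; untested against the full schema):
SELECT deveh.vehicle_reg_no AS vehnum
     , veh.route_code AS route
     , deveh.seating_capacity AS vehseat
     , deveh.seating_capacity - COUNT(class.fk_stu_id) AS seats_remaining
FROM tbl_stu_class AS class
JOIN tbl_stu_route AS route
  ON route.fk_stu_cls_id = class.pk_stu_cls_id
JOIN list_routes AS veh
  ON route.fk_route_id = veh.pk_route_id
JOIN list_vehicles AS deveh
  ON deveh.pk_vehicle_id = veh.fk_vehicle_id
WHERE class.fk_year_id = 62
  AND class.current_yr = 'Y'
GROUP BY deveh.vehicle_reg_no, veh.route_code, deveh.seating_capacity;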