mirror of https://github.com/MariaDB/server.git
MDEV-34954 Add JSON flag for mysqldumpslow.sh output
Using the Perl JSON module, this adds a -j/--json flag that outputs the summarized slow-log data in JSON format.
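
The JSON output is an array with one object per aggregated statement; each object carries count, avg_time, total_time, avg_lock, total_lock, avg_rows_sent, total_rows_sent, avg_examined, total_examined, avg_affected, total_affected, user, host, query, plus optional engine and explain structures. A minimal sketch of consuming that output from another Perl script follows; the file name slow.json and the choice of printed fields are illustrative, not part of the patch:

    # Illustrative consumer of mysqldumpslow --json output; 'slow.json' is an assumed file name.
    use strict;
    use warnings;
    use JSON;

    my $file = shift // 'slow.json';
    open my $fh, '<', $file or die "cannot open $file: $!\n";
    my $entries = JSON->new->decode(do { local $/; <$fh> });

    # print the first three aggregated statements
    for my $e (@{$entries}[0 .. 2]) {
        last unless defined $e;
        printf "count=%d avg_time=%.2fs %s\@%s\n%s\n\n",
            $e->{count}, $e->{avg_time}, $e->{user}, $e->{host}, $e->{query};
    }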
@@ -50,10 +50,22 @@ GetOptions(\%opt,
     'h=s',      # hostname/basename of db server for *-slow.log filename (can be wildcard)
     'i=s',      # name of server instance (if using mysql.server startup script)
     'l!',       # don't subtract lock time from total time
+    'json|j!',  # print as a JSON-formatted string
     ) or usage("bad option");
 
 $opt{'help'} and usage();
 
+# check if JSON module is available
+if ($opt{json}) {
+    eval {
+        require JSON;
+        JSON->import();
+        1;
+    } or do {
+        die "JSON module not found. Please install the JSON module to use --json option.\n";
+    };
+}
+
 unless (@ARGV) {
     my $defaults = `my_print_defaults --mysqld`;
 
@@ -99,14 +111,16 @@ warn "\nReading mysql slow query log from @ARGV\n";
 my @pending;
 my %stmt;
 $/ = ";\n#";    # read entire statements using paragraph mode
-while ( defined($_ = shift @pending) or defined($_ = <>) ) {
+while (<>) {
     warn "[[$_]]\n" if $opt{d};  # show raw paragraph being read
 
-    my @chunks = split /^\/.*Version.*started with[\000-\377]*?Time.*Id.*Command.*Argument.*\n/m;
-    if (@chunks > 1) {
-        unshift @pending, map { length($_) ? $_ : () } @chunks;
-        warn "<<".join(">>\n<<",@chunks).">>" if $opt{d};
-        next;
-    }
+    # remove fluff that mysqld writes to log when it (re)starts:
+    s!^.*Version.*started with:.*\n!!mg;
+    s!^Tcp port: \d+ Unix socket: \S+\n!!mg;
+    s!^Time.*Id.*Command.*Argument.*\n!!mg;
+    # if there is only header info, skip
+    if ($_ eq '') {
+        next;
+    }
 
     s/^#? Time: \d{6}\s+\d+:\d+:\d+.*\n//;
@@ -120,18 +134,13 @@ while ( defined($_ = shift @pending) or defined($_ = <>) ) {
 
     $t -= $l unless $opt{l};
 
-    # remove fluff that mysqld writes to log when it (re)starts:
-    s!^/.*Version.*started with:.*\n!!mg;
-    s!^Tcp port: \d+ Unix socket: \S+\n!!mg;
-    s!^Time.*Id.*Command.*Argument.*\n!!mg;
-
     # Remove optimizer info
     s!^# QC_Hit: \S+\s+Full_scan: \S+\s+Full_join: \S+\s+Tmp_table: \S+\s+Tmp_table_on_disk: \S+[^\n]+\n!!mg;
     s!^# Filesort: \S+\s+Filesort_on_disk: \S+[^\n]+\n!!mg;
     s!^# Full_scan: \S+\s+Full_join: \S+[^\n]+\n!!mg;
 
-    s/^use \w+;\n//;   # not consistently added
-    s/^SET timestamp=\d+;\n//;
+    s!^SET timestamp=\d+;\n!!m;  # remove the redundant timestamp that is always added to each query
+    s!^use \w+;\n!!m;  # not consistently added
 
     s/^[ ]*\n//mg;  # delete blank lines
     s/^[ ]*/ /mg;   # normalize leading whitespace
@@ -181,15 +190,86 @@ my @sorted = sort { $stmt{$b}->{$opt{s}} <=> $stmt{$a}->{$opt{s}} } keys %stmt;
 @sorted = @sorted[0 .. $opt{t}-1] if $opt{t};
 @sorted = reverse @sorted if $opt{r};
 
-foreach (@sorted) {
-    my $v = $stmt{$_} || die;
-    my ($c, $t, $at, $l, $al, $r, $ar, $e, $ae, $a, $aa) = @{ $v }{qw(c t at l al r ar e ae a aa)};
-    my @users = keys %{$v->{users}};
-    my $user = (@users==1) ? $users[0] : sprintf "%dusers",scalar @users;
-    my @hosts = keys %{$v->{hosts}};
-    my $host = (@hosts==1) ? $hosts[0] : sprintf "%dhosts",scalar @hosts;
-    printf "Count: %d Time=%.2fs (%ds) Lock=%.2fs (%ds) Rows_sent=%.1f (%d), Rows_examined=%.1f (%d), Rows_affected=%.1f (%d), $user\@$host\n%s\n\n",
-        $c, $at,$t, $al,$l, $ar,$r, $ae, $e, $aa, $a, $_;
+if(!$opt{json}) {
+    foreach (@sorted) {
+        my $v = $stmt{$_} || die;
+        my ($c, $t, $at, $l, $al, $r, $ar, $e, $ae, $a, $aa) = @{ $v }{qw(c t at l al r ar e ae a aa)};
+        my @users = keys %{$v->{users}};
+        my $user = (@users==1) ? $users[0] : sprintf "%dusers",scalar @users;
+        my @hosts = keys %{$v->{hosts}};
+        my $host = (@hosts==1) ? $hosts[0] : sprintf "%dhosts",scalar @hosts;
+        printf "Count: %d Time=%.2fs (%ds) Lock=%.2fs (%ds) Rows_sent=%.1f (%d), Rows_examined=%.1f (%d), Rows_affected=%.1f (%d), $user\@$host\n%s\n\n",
+            $c, $at,$t, $al,$l, $ar,$r, $ae, $e, $aa, $a, $_;
+    }
+} else {
+    my @json_output;
+    foreach (@sorted) {
+        my $v = $stmt{$_} || die;
+        my ($c, $t, $at, $l, $al, $r, $ar, $e, $ae, $a, $aa) = @{ $v }{qw(c t at l al r ar e ae a aa)};
+        my @users = keys %{$v->{users}};
+        my $user = (@users==1) ? $users[0] : sprintf "%dusers",scalar @users;
+        my @hosts = keys %{$v->{hosts}};
+        my $host = (@hosts==1) ? $hosts[0] : sprintf "%dhosts",scalar @hosts;
+
+        # parse the engine data
+        my %engine;
+        if ($_ =~ /^\s*#\s*Pages_accessed:\s*(\S+)\s+Pages_read:\s*(\S+)\s+Pages_prefetched:\s*(\S+)\s+Pages_updated:\s*(\S+)\s+Old_rows_read:\s*(\S+)/m) {
+            @engine{qw(Pages_accessed Pages_read Pages_prefetched Pages_updated Old_rows_read)} = ($1, $2, $3, $4, $5);
+        }
+        if ($_ =~ /^\s*#\s*Pages_read_time:\s*(\S+)\s+Engine_time:\s*(\S+)/m) {
+            @engine{qw(Pages_read_time Engine_time)} = ($1, $2);
+        }
+        # convert engine data to numbers
+        map { $engine{$_} += 0 } keys %engine if $opt{a};
+
+        # build a structured explain output
+        my @explain_lines = ($_ =~ /^\s*# explain: (.+)$/mg);
+        my $explain;
+        if (@explain_lines >= 2) {
+            my @headers = split /\s+/, shift @explain_lines;
+            $explain = [
+                map {
+                    my @values = split /\s+/, $_;
+                    my %row;
+                    @row{@headers} = @values;
+                    \%row;
+                } @explain_lines
+            ];
+            # normalize the explain data
+            foreach my $row (@$explain) {
+                foreach my $key (keys %$row) {
+                    my $val = $row->{$key};
+                    $row->{$key} = undef if $val eq 'NULL';
+                    $row->{$key} = $val + 0 if $opt{a} and $val =~ /^\d+(?:\.\d+)?$/;
+                }
+            }
+        }
+
+        # get the query string
+        (my $query = $_) =~ s/^\s*#.*\n//mg;
+        $query =~ s/^\s+|\s+$//g; # trim leading/trailing whitespace
+
+        # output the data as JSON
+        push @json_output, {
+            count => $c,
+            avg_time => $at,
+            total_time => $t,
+            avg_lock => $al,
+            total_lock => $l,
+            avg_rows_sent => $ar,
+            total_rows_sent => $r,
+            avg_examined => $ae,
+            total_examined => $e,
+            avg_affected => $aa,
+            total_affected => $a,
+            user => $user,
+            host => $host,
+            query => $query,
+            engine => (%engine ? \%engine : undef),
+            explain => ($explain ? $explain : undef),
+        };
+    }
+    print JSON->new->canonical(1)->pretty->encode(\@json_output);
 }
 
 sub usage {
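As a rough illustration of the explain restructuring added above: the first "# explain:" line supplies the column headers, and each later line becomes a hash keyed by those headers. The sample lines below are made up for demonstration, not taken from a real slow log:

    # Illustrative only: made-up explain lines showing the header-to-hash mapping.
    use strict;
    use warnings;
    use Data::Dumper;

    my @explain_lines = (
        'id select_type table type rows',
        '1 SIMPLE t1 ALL 1000',
    );
    my @headers = split /\s+/, shift @explain_lines;
    my $explain = [ map { my %row; @row{@headers} = split /\s+/, $_; \%row } @explain_lines ];
    print Dumper($explain);   # e.g. $explain->[0]{rows} is '1000'
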
@@ -202,6 +282,7 @@ Parse and summarize the MySQL slow query log. Options are
   --verbose    verbose
   --debug      debug
   --help       write this text to standard output
+  --json       print as a JSON-formatted string
 
   -v           verbose
   -d           debug
@@ -226,6 +307,7 @@ Parse and summarize the MySQL slow query log. Options are
                default is '*', i.e. match all
   -i NAME      name of server instance (if using mysql.server startup script)
   -l           don't subtract lock time from total time
+  -j           print as a JSON-formatted string
 
 HERE
 if ($str) {