upload android base code part8
This commit is contained in:
parent 841ae54672
commit 5425409085
57075 changed files with 9846578 additions and 0 deletions
android/packages/experimental/procstatlog/Android.mk (new file, 27 lines)
@@ -0,0 +1,27 @@
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

LOCAL_PATH := $(call my-dir)

# Build a static binary for maximum portability
# (we want to run this on oddball partner-integrated devices).

include $(CLEAR_VARS)
LOCAL_FORCE_STATIC_EXECUTABLE := true
LOCAL_MODULE := procstatlog
LOCAL_MODULE_PATH := $(TARGET_OUT_OPTIONAL_EXECUTABLES)
LOCAL_SRC_FILES := procstatlog.c
LOCAL_STATIC_LIBRARIES := libc

include $(BUILD_EXECUTABLE)
android/packages/experimental/procstatlog/README (new file, 7 lines)
@@ -0,0 +1,7 @@
procstatlog:

A statically linked binary that polls and dumps /proc/*/stat files
on an ongoing basis, to measure CPU and other activity as a function
of time during system operation.

Owner: Dan Egnor <egnor@google.com>
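For orientation, a minimal capture-and-report sketch. Only the procstatlog and
procstatreport.py command lines come from the usage strings in the sources in this
change; the adb steps, the /data/local/tmp location, and the "system_server" filter
(the process-name filter is optional) are assumptions borrowed from diskload.sh below,
not something this commit prescribes:

    # Assumed: the binary is pushed by hand (the build installs it as an
    # optional module; see Android.mk above).
    adb push procstatlog /data/local/tmp/
    adb shell /data/local/tmp/procstatlog 0.1 system_server > procstat.log

    # On the host, turn the captured log into an HTML report (procstatreport.py below).
    python procstatreport.py procstat.log report.html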
android/packages/experimental/procstatlog/diskload.sh (new executable file, 45 lines)
@@ -0,0 +1,45 @@
#!/bin/sh
#
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# This is designed to be run on the device to create I/O load.
#
#   adb push diskload.sh /data/local/tmp
#   adb shell su root sh /data/local/tmp/diskload.sh /data/testfile

dd if=/dev/zero bs=65536 of=$1.0 & pid0=$!; echo start $pid0; sleep 2
dd if=/dev/zero bs=65536 of=$1.1 & pid1=$!; echo start $pid1; sleep 2
dd if=/dev/zero bs=65536 of=$1.2 & pid2=$!; echo start $pid2; sleep 2
dd if=/dev/zero bs=65536 of=$1.3 & pid3=$!; echo start $pid3; sleep 2
dd if=/dev/zero bs=65536 of=$1.4 & pid4=$!; echo start $pid4; sleep 2
dd if=/dev/zero bs=65536 of=$1.5 & pid5=$!; echo start $pid5; sleep 2
dd if=/dev/zero bs=65536 of=$1.6 & pid6=$!; echo start $pid6; sleep 2
dd if=/dev/zero bs=65536 of=$1.7 & pid7=$!; echo start $pid7; sleep 2
dd if=/dev/zero bs=65536 of=$1.8 & pid8=$!; echo start $pid8; sleep 2
dd if=/dev/zero bs=65536 of=$1.9 & pid9=$!; echo start $pid9; sleep 2

kill $pid0; echo kill $pid0; sleep 2
kill $pid1; echo kill $pid1; sleep 2
kill $pid2; echo kill $pid2; sleep 2
kill $pid3; echo kill $pid3; sleep 2
kill $pid4; echo kill $pid4; sleep 2
kill $pid5; echo kill $pid5; sleep 2
kill $pid6; echo kill $pid6; sleep 2
kill $pid7; echo kill $pid7; sleep 2
kill $pid8; echo kill $pid8; sleep 2
kill $pid9; echo kill $pid9; sleep 2

ls -l $1.0 $1.1 $1.2 $1.3 $1.4 $1.5 $1.6 $1.7 $1.8 $1.9
rm $1.0 $1.1 $1.2 $1.3 $1.4 $1.5 $1.6 $1.7 $1.8 $1.9
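diskload.sh above is only a load generator. A sketch of how it might be combined with
procstatlog so the disk and per-process charts in the report have something to show;
the paths and the background/foreground arrangement are assumptions, not part of this
commit:

    # Assumed workflow: start a capture in the background, generate disk traffic,
    # then stop the capture.
    adb shell /data/local/tmp/procstatlog 0.1 > procstat.log &
    adb shell su root sh /data/local/tmp/diskload.sh /data/testfile
    kill %1    # ends the backgrounded capture started above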
android/packages/experimental/procstatlog/procstatlog.c (new file, 406 lines)
@@ -0,0 +1,406 @@
/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

// This program is as dumb as possible -- it reads a whole bunch of data
// from /proc and reports when it changes.  It's up to analysis tools to
// actually parse the data.  This program only does enough parsing to split
// large files (/proc/stat, /proc/yaffs) into individual values.
//
// The output format is a repeating series of observed differences:
//
//   T + <beforetime.stamp>
//   /proc/<new_filename> + <contents of newly discovered file>
//   /proc/<changed_filename> = <contents of changed file>
//   /proc/<deleted_filename> -
//   /proc/<filename>:<label> = <part of a multiline file>
//   T - <aftertime.stamp>
//
//
// Files read:
//
//   /proc/*/stat       - for all running/selected processes
//   /proc/*/wchan      - for all running/selected processes
//   /proc/binder/stats - per line: "/proc/binder/stats:BC_REPLY"
//   /proc/diskstats    - per device: "/proc/diskstats:mmcblk0"
//   /proc/net/dev      - per interface: "/proc/net/dev:rmnet0"
//   /proc/stat         - per line: "/proc/stat:intr"
//   /proc/yaffs        - per device/line: "/proc/yaffs:userdata:nBlockErasures"
//   /sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state
//                      - per line: "/sys/.../time_in_state:245000"

struct data {
    char *name;   // filename, plus ":var" for many-valued files
    char *value;  // text to be reported when it changes
};

// Like memcpy, but replaces spaces and unprintables with '_'.
static void unspace(char *dest, const char *src, int len) {
    while (len-- > 0) {
        char ch = *src++;
        *dest++ = isgraph(ch) ? ch : '_';
    }
}

// Set data->name and data->value to malloc'd strings with the
// filename and contents of the file.  Trims trailing whitespace.
static void read_data(struct data *data, const char *filename) {
    char buf[4096];
    data->name = strdup(filename);
    int fd = open(filename, O_RDONLY);
    if (fd < 0) {
        data->value = NULL;
        return;
    }

    int len = read(fd, buf, sizeof(buf));
    if (len < 0) {
        perror(filename);
        close(fd);
        data->value = NULL;
        return;
    }

    close(fd);
    while (len > 0 && isspace(buf[len - 1])) --len;
    data->value = malloc(len + 1);
    memcpy(data->value, buf, len);
    data->value[len] = '\0';
}

// Read a name/value file and write data entries for each line.
// Returns the number of entries written (always <= stats_count).
//
// delimiter: used to split each line into name and value
// terminator: if non-NULL, processing stops after this string
// skip_words: skip this many words at the start of each line
static int read_lines(
        const char *filename,
        char delimiter, const char *terminator, int skip_words,
        struct data *stats, int stats_count) {
    char buf[8192];
    int fd = open(filename, O_RDONLY);
    if (fd < 0) return 0;

    int len = read(fd, buf, sizeof(buf) - 1);
    if (len < 0) {
        perror(filename);
        close(fd);
        return 0;
    }
    buf[len] = '\0';
    close(fd);

    if (terminator != NULL) {
        char *end = strstr(buf, terminator);
        if (end != NULL) *end = '\0';
    }

    int filename_len = strlen(filename);
    int num = 0;
    char *line;
    for (line = strtok(buf, "\n");
         line != NULL && num < stats_count;
         line = strtok(NULL, "\n")) {
        // Line format: <sp>name<delim><sp>value

        int i;
        while (isspace(*line)) ++line;
        for (i = 0; i < skip_words; ++i) {
            while (isgraph(*line)) ++line;
            while (isspace(*line)) ++line;
        }

        char *name_end = strchr(line, delimiter);
        if (name_end == NULL) continue;

        // Key format: <filename>:<name>
        struct data *data = &stats[num++];
        data->name = malloc(filename_len + 1 + (name_end - line) + 1);
        unspace(data->name, filename, filename_len);
        data->name[filename_len] = ':';
        unspace(data->name + filename_len + 1, line, name_end - line);
        data->name[filename_len + 1 + (name_end - line)] = '\0';

        char *value = name_end + 1;
        while (isspace(*value)) ++value;
        data->value = strdup(value);
    }

    return num;
}

// Read /proc/yaffs and write data entries for each line.
// Returns the number of entries written (always <= stats_count).
static int read_proc_yaffs(struct data *stats, int stats_count) {
    char buf[8192];
    int fd = open("/proc/yaffs", O_RDONLY);
    if (fd < 0) return 0;

    int len = read(fd, buf, sizeof(buf) - 1);
    if (len < 0) {
        perror("/proc/yaffs");
        close(fd);
        return 0;
    }
    buf[len] = '\0';
    close(fd);

    int num = 0, device_len = 0;
    char *line, *device = NULL;
    for (line = strtok(buf, "\n");
         line != NULL && num < stats_count;
         line = strtok(NULL, "\n")) {
        if (strncmp(line, "Device ", 7) == 0) {
            device = strchr(line, '"');
            if (device != NULL) {
                char *end = strchr(++device, '"');
                if (end != NULL) *end = '\0';
                device_len = strlen(device);
            }
            continue;
        }
        if (device == NULL) continue;

        char *name_end = line + strcspn(line, " .");
        if (name_end == line || *name_end == '\0') continue;

        struct data *data = &stats[num++];
        data->name = malloc(12 + device_len + 1 + (name_end - line) + 1);
        memcpy(data->name, "/proc/yaffs:", 12);
        unspace(data->name + 12, device, device_len);
        data->name[12 + device_len] = ':';
        unspace(data->name + 12 + device_len + 1, line, name_end - line);
        data->name[12 + device_len + 1 + (name_end - line)] = '\0';

        char *value = name_end;
        while (*value == '.' || isspace(*value)) ++value;
        data->value = strdup(value);
    }

    return num;
}

// Compare two "struct data" records by their name.
static int compare_data(const void *a, const void *b) {
    const struct data *data_a = (const struct data *) a;
    const struct data *data_b = (const struct data *) b;
    return strcmp(data_a->name, data_b->name);
}

// Return a malloc'd array of "struct data" read from all over /proc.
// The array is sorted by name and terminated by a record with name == NULL.
static struct data *read_stats(char *names[], int name_count) {
    static int bad[4096];  // Cache pids known not to match patterns
    static size_t bad_count = 0;

    int pids[4096];
    size_t pid_count = 0;

    DIR *proc_dir = opendir("/proc");
    if (proc_dir == NULL) {
        perror("Can't scan /proc");
        exit(1);
    }

    size_t bad_pos = 0;
    char filename[1024];
    struct dirent *proc_entry;
    while ((proc_entry = readdir(proc_dir))) {
        int pid = atoi(proc_entry->d_name);
        if (pid <= 0) continue;

        if (name_count > 0) {
            while (bad_pos < bad_count && bad[bad_pos] < pid) ++bad_pos;
            if (bad_pos < bad_count && bad[bad_pos] == pid) continue;

            char cmdline[4096];
            sprintf(filename, "/proc/%d/cmdline", pid);
            int fd = open(filename, O_RDONLY);
            if (fd < 0) {
                perror(filename);
                continue;
            }

            int len = read(fd, cmdline, sizeof(cmdline) - 1);
            if (len < 0) {
                perror(filename);
                close(fd);
                continue;
            }

            close(fd);
            cmdline[len] = '\0';
            int n;
            for (n = 0; n < name_count && !strstr(cmdline, names[n]); ++n);

            if (n == name_count) {
                // Insertion sort -- pids mostly increase so this makes sense
                if (bad_count < sizeof(bad) / sizeof(bad[0])) {
                    int pos = bad_count++;
                    while (pos > 0 && bad[pos - 1] > pid) {
                        bad[pos] = bad[pos - 1];
                        --pos;
                    }
                    bad[pos] = pid;
                }
                continue;
            }
        }

        if (pid_count >= sizeof(pids) / sizeof(pids[0])) {
            fprintf(stderr, "warning: >%zu processes\n", pid_count);
        } else {
            pids[pid_count++] = pid;
        }
    }
    closedir(proc_dir);

    size_t i, stats_count = pid_count * 2 + 200;  // 200 for stat, yaffs, etc.
    struct data *stats = malloc((stats_count + 1) * sizeof(struct data));
    struct data *next = stats;
    for (i = 0; i < pid_count; i++) {
        assert(pids[i] > 0);
        sprintf(filename, "/proc/%d/stat", pids[i]);
        read_data(next++, filename);
        sprintf(filename, "/proc/%d/wchan", pids[i]);
        read_data(next++, filename);
    }

    struct data *end = stats + stats_count;
    next += read_proc_yaffs(next, stats + stats_count - next);
    next += read_lines("/proc/net/dev", ':', NULL, 0, next, end - next);
    next += read_lines("/proc/stat", ' ', NULL, 0, next, end - next);
    next += read_lines("/proc/binder/stats", ':', "\nproc ", 0, next, end - next);
    next += read_lines("/proc/diskstats", ' ', NULL, 2, next, end - next);
    next += read_lines(
            "/sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state",
            ' ', NULL, 0, next, end - next);

    assert(next < stats + stats_count);
    next->name = NULL;
    next->value = NULL;
    qsort(stats, next - stats, sizeof(struct data), compare_data);
    return stats;
}

// Print stats which have changed from one sorted array to the next.
static void diff_stats(struct data *old_stats, struct data *new_stats) {
    while (old_stats->name != NULL || new_stats->name != NULL) {
        int compare;
        if (old_stats->name == NULL) {
            compare = 1;
        } else if (new_stats->name == NULL) {
            compare = -1;
        } else {
            compare = compare_data(old_stats, new_stats);
        }

        if (compare < 0) {
            // old_stats no longer present
            if (old_stats->value != NULL) {
                printf("%s -\n", old_stats->name);
            }
            ++old_stats;
        } else if (compare > 0) {
            // new_stats is new
            if (new_stats->value != NULL) {
                printf("%s + %s\n", new_stats->name, new_stats->value);
            }
            ++new_stats;
        } else {
            // changed
            if (new_stats->value == NULL) {
                if (old_stats->value != NULL) {
                    printf("%s -\n", old_stats->name);
                }
            } else if (old_stats->value == NULL) {
                printf("%s + %s\n", new_stats->name, new_stats->value);
            } else if (strcmp(old_stats->value, new_stats->value)) {
                printf("%s = %s\n", new_stats->name, new_stats->value);
            }
            ++old_stats;
            ++new_stats;
        }
    }
}

// Free a "struct data" array and all the strings within it.
static void free_stats(struct data *stats) {
    int i;
    for (i = 0; stats[i].name != NULL; ++i) {
        free(stats[i].name);
        free(stats[i].value);
    }
    free(stats);
}

int main(int argc, char *argv[]) {
    if (argc < 2) {
        fprintf(stderr,
                "usage: procstatlog poll_interval [procname ...] > procstat.log\n\n"
                "\n"
                "Scans process status every poll_interval seconds (e.g. 0.1)\n"
                "and writes data from /proc/stat, /proc/*/stat files, and\n"
                "other /proc status files every time something changes.\n"
                "\n"
                "Scans all processes by default.  Listing some process name\n"
                "substrings will limit scanning and reduce overhead.\n"
                "\n"
                "Data is logged continuously until the program is killed.\n");
        return 2;
    }

    long poll_usec = (long) (atof(argv[1]) * 1000000l);
    if (poll_usec <= 0) {
        fprintf(stderr, "illegal poll interval: %s\n", argv[1]);
        return 2;
    }

    struct data *old_stats = malloc(sizeof(struct data));
    old_stats->name = NULL;
    old_stats->value = NULL;
    while (1) {
        struct timeval before, after;
        gettimeofday(&before, NULL);
        printf("T + %ld.%06ld\n", before.tv_sec, before.tv_usec);

        struct data *new_stats = read_stats(argv + 2, argc - 2);
        diff_stats(old_stats, new_stats);
        free_stats(old_stats);
        old_stats = new_stats;
        gettimeofday(&after, NULL);
        printf("T - %ld.%06ld\n", after.tv_sec, after.tv_usec);

        long elapsed_usec = (long) after.tv_usec - before.tv_usec;
        elapsed_usec += 1000000l * (after.tv_sec - before.tv_sec);
        if (poll_usec > elapsed_usec) usleep(poll_usec - elapsed_usec);
    }

    return 0;
}
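Before moving on to procstatreport.py, here is roughly what one poll cycle of the
output looks like, following the format comment and printf calls above; the process
names and numbers are invented for illustration only:

    T + 1280000000.000000
    /proc/1234/stat + 1234 (system_server) S 1 ...
    /proc/1234/wchan + futex_wait_queue_me
    /proc/stat:cpu = 3645 683 2115 26007 804 0 0 0
    /proc/5678/stat -
    T - 1280000000.012345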
android/packages/experimental/procstatlog/procstatreport.py (new executable file, 676 lines)
@@ -0,0 +1,676 @@
#!/usr/bin/python
#
# Copyright (C) 2010 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import cgi
import csv
import json
import math
import os
import re
import sys
import time
import urllib

"""Interpret output from procstatlog and write an HTML report file."""


# TODO: Rethink dygraph-combined.js source URL?
PAGE_BEGIN = """
<html><head>
<title>%(filename)s</title>
<script type="text/javascript" src="http://www.corp.google.com/~egnor/no_crawl/dygraph-combined.js"></script>
<script>
var allCharts = [];
var inDrawCallback = false;

OnDraw = function(me, initial) {
  if (inDrawCallback || initial) return;
  inDrawCallback = true;
  var range = me.xAxisRange();
  for (var j = 0; j < allCharts.length; j++) {
    if (allCharts[j] == me) continue;
    allCharts[j].updateOptions({dateWindow: range});
  }
  inDrawCallback = false;
}

MakeChart = function(id, filename, options) {
  options.width = "75%%";
  options.xTicker = Dygraph.dateTicker;
  options.xValueFormatter = Dygraph.dateString_;
  options.xAxisLabelFormatter = Dygraph.dateAxisFormatter;
  options.drawCallback = OnDraw;
  allCharts.push(new Dygraph(document.getElementById(id), filename, options));
}
</script>
</head><body>
<p>
<span style="font-size: 150%%">%(filename)s</span>
- stat report generated by %(user)s on %(date)s</p>
<table cellpadding=0 cellspacing=0 margin=0 border=0>
"""

CHART = """
<tr>
<td valign=top width=25%%>%(label_html)s</td>
<td id="%(id)s"> </td>
</tr>
<script>
MakeChart(%(id_js)s, %(filename_js)s, %(options_js)s)

</script>
"""

SPACER = """
<tr><td colspan=2 height=20> </td></tr>
"""

TOTAL_CPU_LABEL = """
<b style="font-size: 150%%">Total CPU</b><br>
jiffies: <nobr>%(sys)d sys</nobr>, <nobr>%(user)d user</nobr>
"""

CPU_SPEED_LABEL = """
<nobr>average CPU speed</nobr>
"""

CONTEXT_LABEL = """
context: <nobr>%(switches)d switches</nobr>
"""

FAULTS_LABEL = """
<nobr>page faults:</nobr> <nobr>%(major)d major</nobr>
"""

BINDER_LABEL = """
binder: <nobr>%(calls)d calls</nobr>
"""

PROC_CPU_LABEL = """
<span style="font-size: 150%%">%(process)s</span> (%(pid)d)<br>
jiffies: <nobr>%(sys)d sys</nobr>, <nobr>%(user)d user</nobr>
</div>
"""

YAFFS_LABEL = """
<span style="font-size: 150%%">yaffs: %(partition)s</span><br>
pages: <nobr>%(nPageReads)d read</nobr>,
<nobr>%(nPageWrites)d written</nobr><br>
blocks: <nobr>%(nBlockErasures)d erased</nobr>
"""

DISK_LABEL = """
<span style="font-size: 150%%">disk: %(device)s</span><br>
sectors: <nobr>%(reads)d read</nobr>, <nobr>%(writes)d written</nobr>
"""

DISK_TIME_LABEL = """
msec: <nobr>%(msec)d waiting</nobr>
"""

NET_LABEL = """
<span style="font-size: 150%%">net: %(interface)s</span><br>
bytes: <nobr>%(tx)d tx</nobr>,
<nobr>%(rx)d rx</nobr>
"""

PAGE_END = """
</table></body></html>
"""


def WriteChartData(titles, datasets, filename):
    writer = csv.writer(file(filename, "w"))
    writer.writerow(["Time"] + titles)

    merged_rows = {}
    for set_num, data in enumerate(datasets):
        for when, datum in data.iteritems():
            if type(datum) == tuple: datum = "%d/%d" % datum
            merged_rows.setdefault(when, {})[set_num] = datum

    num_cols = len(datasets)
    for when, values in sorted(merged_rows.iteritems()):
        msec = "%d" % (when * 1000)
        writer.writerow([msec] + [values.get(n, "") for n in range(num_cols)])


def WriteOutput(history, log_filename, filename):
    out = []

    out.append(PAGE_BEGIN % {
        "filename": cgi.escape(log_filename),
        "user": cgi.escape(os.environ.get("USER", "unknown")),
        "date": cgi.escape(time.ctime()),
    })

    files_dir = "%s_files" % os.path.splitext(filename)[0]
    files_url = os.path.basename(files_dir)
    if not os.path.isdir(files_dir): os.makedirs(files_dir)

    sorted_history = sorted(history.iteritems())
    date_window = [1000 * sorted_history[1][0], 1000 * sorted_history[-1][0]]

    #
    # Output total CPU statistics
    #

    sys_jiffies = {}
    sys_user_jiffies = {}
    all_jiffies = {}
    total_sys = total_user = 0

    last_state = {}
    for when, state in sorted_history:
        last = last_state.get("/proc/stat:cpu", "").split()
        next = state.get("/proc/stat:cpu", "").split()
        if last and next:
            stime = sum([int(next[x]) - int(last[x]) for x in [2, 5, 6]])
            utime = sum([int(next[x]) - int(last[x]) for x in [0, 1]])
            idle = sum([int(next[x]) - int(last[x]) for x in [3, 4]])
            all = stime + utime + idle
            total_sys += stime
            total_user += utime

            sys_jiffies[when] = (stime, all)
            sys_user_jiffies[when] = (stime + utime, all)
            all_jiffies[when] = all

        last_state = state

    WriteChartData(
        ["sys", "sys+user"],
        [sys_jiffies, sys_user_jiffies],
        os.path.join(files_dir, "total_cpu.csv"))

    out.append(CHART % {
        "id": cgi.escape("total_cpu"),
        "id_js": json.write("total_cpu"),
        "label_html": TOTAL_CPU_LABEL % {"sys": total_sys, "user": total_user},
        "filename_js": json.write(files_url + "/total_cpu.csv"),
        "options_js": json.write({
            "colors": ["blue", "green"],
            "dateWindow": date_window,
            "fillGraph": True,
            "fractions": True,
            "height": 100,
            "valueRange": [0, 110],
        }),
    })

    #
    # Output CPU speed statistics
    #

    cpu_speed = {}
    speed_key = "/sys/devices/system/cpu/cpu0/cpufreq/stats/time_in_state:"

    last_state = {}
    for when, state in sorted_history:
        total_time = total_cycles = 0
        for key in state:
            if not key.startswith(speed_key): continue

            last = int(last_state.get(key, -1))
            next = int(state.get(key, -1))
            if last != -1 and next != -1:
                speed = int(key[len(speed_key):])
                total_time += next - last
                total_cycles += (next - last) * speed

        if total_time > 0: cpu_speed[when] = total_cycles / total_time
        last_state = state

    WriteChartData(
        ["kHz"], [cpu_speed],
        os.path.join(files_dir, "cpu_speed.csv"))

    out.append(CHART % {
        "id": cgi.escape("cpu_speed"),
        "id_js": json.write("cpu_speed"),
        "label_html": CPU_SPEED_LABEL,
        "filename_js": json.write(files_url + "/cpu_speed.csv"),
        "options_js": json.write({
            "colors": ["navy"],
            "dateWindow": date_window,
            "fillGraph": True,
            "height": 50,
            "includeZero": True,
        }),
    })

    #
    # Output total context switch statistics
    #

    context_switches = {}

    last_state = {}
    for when, state in sorted_history:
        last = int(last_state.get("/proc/stat:ctxt", -1))
        next = int(state.get("/proc/stat:ctxt", -1))
        if last != -1 and next != -1: context_switches[when] = next - last
        last_state = state

    WriteChartData(
        ["switches"], [context_switches],
        os.path.join(files_dir, "context_switches.csv"))

    total_switches = sum(context_switches.values())
    out.append(CHART % {
        "id": cgi.escape("context_switches"),
        "id_js": json.write("context_switches"),
        "label_html": CONTEXT_LABEL % {"switches": total_switches},
        "filename_js": json.write(files_url + "/context_switches.csv"),
        "options_js": json.write({
            "colors": ["blue"],
            "dateWindow": date_window,
            "fillGraph": True,
            "height": 50,
            "includeZero": True,
        }),
    })

    #
    # Collect (no output yet) per-process CPU and major faults
    #

    process_name = {}
    process_start = {}
    process_sys = {}
    process_sys_user = {}

    process_faults = {}
    total_faults = {}
    max_faults = 0

    last_state = {}
    zero_stat = "0 (zero) Z 0 0 0 0 0 0 0 0 0 0 0 0"
    for when, state in sorted_history:
        for key in state:
            if not key.endswith("/stat"): continue

            last = last_state.get(key, zero_stat).split()
            next = state.get(key, "").split()
            if not next: continue

            pid = int(next[0])
            process_start.setdefault(pid, when)
            process_name[pid] = next[1][1:-1]

            all = all_jiffies.get(when, 0)
            if not all: continue

            faults = int(next[11]) - int(last[11])
            process_faults.setdefault(pid, {})[when] = faults
            tf = total_faults[when] = total_faults.get(when, 0) + faults
            max_faults = max(max_faults, tf)

            stime = int(next[14]) - int(last[14])
            utime = int(next[13]) - int(last[13])
            process_sys.setdefault(pid, {})[when] = (stime, all)
            process_sys_user.setdefault(pid, {})[when] = (stime + utime, all)

        last_state = state

    #
    # Output total major faults (sum over all processes)
    #

    WriteChartData(
        ["major"], [total_faults],
        os.path.join(files_dir, "total_faults.csv"))

    out.append(CHART % {
        "id": cgi.escape("total_faults"),
        "id_js": json.write("total_faults"),
        "label_html": FAULTS_LABEL % {"major": sum(total_faults.values())},
        "filename_js": json.write(files_url + "/total_faults.csv"),
        "options_js": json.write({
            "colors": ["gray"],
            "dateWindow": date_window,
            "fillGraph": True,
            "height": 50,
            "valueRange": [0, max_faults * 11 / 10],
        }),
    })

    #
    # Output binder transactions
    #

    binder_calls = {}

    last_state = {}
    for when, state in sorted_history:
        last = int(last_state.get("/proc/binder/stats:BC_TRANSACTION", -1))
        next = int(state.get("/proc/binder/stats:BC_TRANSACTION", -1))
        if last != -1 and next != -1: binder_calls[when] = next - last
        last_state = state

    WriteChartData(
        ["calls"], [binder_calls],
        os.path.join(files_dir, "binder_calls.csv"))

    out.append(CHART % {
        "id": cgi.escape("binder_calls"),
        "id_js": json.write("binder_calls"),
        "label_html": BINDER_LABEL % {"calls": sum(binder_calls.values())},
        "filename_js": json.write(files_url + "/binder_calls.csv"),
        "options_js": json.write({
            "colors": ["green"],
            "dateWindow": date_window,
            "fillGraph": True,
            "height": 50,
            "includeZero": True,
        })
    })

    #
    # Output network interface statistics
    #

    if out[-1] != SPACER: out.append(SPACER)

    interface_rx = {}
    interface_tx = {}
    max_bytes = 0

    last_state = {}
    for when, state in sorted_history:
        for key in state:
            if not key.startswith("/proc/net/dev:"): continue

            last = last_state.get(key, "").split()
            next = state.get(key, "").split()
            if not (last and next): continue

            rx = int(next[0]) - int(last[0])
            tx = int(next[8]) - int(last[8])
            max_bytes = max(max_bytes, rx, tx)

            net, interface = key.split(":", 1)
            interface_rx.setdefault(interface, {})[when] = rx
            interface_tx.setdefault(interface, {})[when] = tx

        last_state = state

    for num, interface in enumerate(sorted(interface_rx.keys())):
        rx, tx = interface_rx[interface], interface_tx[interface]
        total_rx, total_tx = sum(rx.values()), sum(tx.values())
        if not (total_rx or total_tx): continue

        WriteChartData(
            ["rx", "tx"], [rx, tx],
            os.path.join(files_dir, "net%d.csv" % num))

        out.append(CHART % {
            "id": cgi.escape("net%d" % num),
            "id_js": json.write("net%d" % num),
            "label_html": NET_LABEL % {
                "interface": cgi.escape(interface),
                "rx": total_rx,
                "tx": total_tx
            },
            "filename_js": json.write("%s/net%d.csv" % (files_url, num)),
            "options_js": json.write({
                "colors": ["black", "purple"],
                "dateWindow": date_window,
                "fillGraph": True,
                "height": 75,
                "valueRange": [0, max_bytes * 11 / 10],
            })
        })

    #
    # Output YAFFS statistics
    #

    if out[-1] != SPACER: out.append(SPACER)

    yaffs_vars = ["nBlockErasures", "nPageReads", "nPageWrites"]
    partition_ops = {}

    last_state = {}
    for when, state in sorted_history:
        for key in state:
            if not key.startswith("/proc/yaffs:"): continue

            last = int(last_state.get(key, -1))
            next = int(state.get(key, -1))
            if last == -1 or next == -1: continue

            value = next - last
            yaffs, partition, var = key.split(":", 2)
            ops = partition_ops.setdefault(partition, {})
            if var in yaffs_vars:
                ops.setdefault(var, {})[when] = value

        last_state = state

    for num, (partition, ops) in enumerate(sorted(partition_ops.iteritems())):
        totals = [sum(ops.get(var, {}).values()) for var in yaffs_vars]
        if not sum(totals): continue

        WriteChartData(
            yaffs_vars,
            [ops.get(var, {}) for var in yaffs_vars],
            os.path.join(files_dir, "yaffs%d.csv" % num))

        values = {"partition": partition}
        values.update(zip(yaffs_vars, totals))
        out.append(CHART % {
            "id": cgi.escape("yaffs%d" % num),
            "id_js": json.write("yaffs%d" % num),
            "label_html": YAFFS_LABEL % values,
            "filename_js": json.write("%s/yaffs%d.csv" % (files_url, num)),
            "options_js": json.write({
                "colors": ["maroon", "gray", "teal"],
                "dateWindow": date_window,
                "fillGraph": True,
                "height": 75,
                "includeZero": True,
            })
        })

    #
    # Output non-YAFFS statistics
    #

    disk_reads = {}
    disk_writes = {}
    disk_msec = {}
    total_io = max_io = max_msec = 0

    last_state = {}
    for when, state in sorted_history:
        for key in state:
            if not key.startswith("/proc/diskstats:"): continue

            last = last_state.get(key, "").split()
            next = state.get(key, "").split()
            if not (last and next): continue

            reads = int(next[2]) - int(last[2])
            writes = int(next[6]) - int(last[6])
            msec = int(next[10]) - int(last[10])
            total_io += reads + writes
            max_io = max(max_io, reads, writes)
            max_msec = max(max_msec, msec)

            diskstats, device = key.split(":", 1)
            disk_reads.setdefault(device, {})[when] = reads
            disk_writes.setdefault(device, {})[when] = writes
            disk_msec.setdefault(device, {})[when] = msec

        last_state = state

    io_cutoff = total_io / 100
    for num, device in enumerate(sorted(disk_reads.keys())):
        if [d for d in disk_reads.keys()
            if d.startswith(device) and d != device]: continue

        reads, writes = disk_reads[device], disk_writes[device]
        total_reads, total_writes = sum(reads.values()), sum(writes.values())
        if total_reads + total_writes <= io_cutoff: continue

        WriteChartData(
            ["reads", "writes"], [reads, writes],
            os.path.join(files_dir, "disk%d.csv" % num))

        out.append(CHART % {
            "id": cgi.escape("disk%d" % num),
            "id_js": json.write("disk%d" % num),
            "label_html": DISK_LABEL % {
                "device": cgi.escape(device),
                "reads": total_reads,
                "writes": total_writes,
            },
            "filename_js": json.write("%s/disk%d.csv" % (files_url, num)),
            "options_js": json.write({
                "colors": ["gray", "teal"],
                "dateWindow": date_window,
                "fillGraph": True,
                "height": 75,
                "valueRange": [0, max_io * 11 / 10],
            }),
        })

        msec = disk_msec[device]

        WriteChartData(
            ["msec"], [msec],
            os.path.join(files_dir, "disk%d_time.csv" % num))

        out.append(CHART % {
            "id": cgi.escape("disk%d_time" % num),
            "id_js": json.write("disk%d_time" % num),
            "label_html": DISK_TIME_LABEL % {"msec": sum(msec.values())},
            "filename_js": json.write("%s/disk%d_time.csv" % (files_url, num)),
            "options_js": json.write({
                "colors": ["blue"],
                "dateWindow": date_window,
                "fillGraph": True,
                "height": 50,
                "valueRange": [0, max_msec * 11 / 10],
            }),
        })

    #
    # Output per-process CPU and page faults collected earlier
    #

    cpu_cutoff = (total_sys + total_user) / 200
    faults_cutoff = sum(total_faults.values()) / 100
    for start, pid in sorted([(s, p) for p, s in process_start.iteritems()]):
        sys = sum([n for n, d in process_sys.get(pid, {}).values()])
        sys_user = sum([n for n, d in process_sys_user.get(pid, {}).values()])
        if sys_user <= cpu_cutoff: continue

        if out[-1] != SPACER: out.append(SPACER)

        WriteChartData(
            ["sys", "sys+user"],
            [process_sys.get(pid, {}), process_sys_user.get(pid, {})],
            os.path.join(files_dir, "proc%d.csv" % pid))

        out.append(CHART % {
            "id": cgi.escape("proc%d" % pid),
            "id_js": json.write("proc%d" % pid),
            "label_html": PROC_CPU_LABEL % {
                "pid": pid,
                "process": cgi.escape(process_name.get(pid, "(unknown)")),
                "sys": sys,
                "user": sys_user - sys,
            },
            "filename_js": json.write("%s/proc%d.csv" % (files_url, pid)),
            "options_js": json.write({
                "colors": ["blue", "green"],
                "dateWindow": date_window,
                "fillGraph": True,
                "fractions": True,
                "height": 75,
                "valueRange": [0, 110],
            }),
        })

        faults = sum(process_faults.get(pid, {}).values())
        if faults <= faults_cutoff: continue

        WriteChartData(
            ["major"], [process_faults.get(pid, {})],
            os.path.join(files_dir, "proc%d_faults.csv" % pid))

        out.append(CHART % {
            "id": cgi.escape("proc%d_faults" % pid),
            "id_js": json.write("proc%d_faults" % pid),
            "label_html": FAULTS_LABEL % {"major": faults},
            "filename_js": json.write("%s/proc%d_faults.csv" % (files_url, pid)),
            "options_js": json.write({
                "colors": ["gray"],
                "dateWindow": date_window,
                "fillGraph": True,
                "height": 50,
                "valueRange": [0, max_faults * 11 / 10],
            }),
        })

    out.append(PAGE_END)
    file(filename, "w").write("\n".join(out))


def main(argv):
    if len(argv) != 3:
        print >>sys.stderr, "usage: procstatreport.py procstat.log output.html"
        return 2

    history = {}
    current_state = {}
    scan_time = 0.0

    for line in file(argv[1]):
        if not line.endswith("\n"): continue

        parts = line.split(None, 2)
        if len(parts) < 2 or parts[1] not in "+-=":
            print >>sys.stderr, "Invalid input:", line
            sys.exit(1)

        name, op = parts[:2]

        if name == "T" and op == "+":  # timestamp: scan about to begin
            scan_time = float(line[4:])
            continue

        if name == "T" and op == "-":  # timestamp: scan complete
            time = (scan_time + float(line[4:])) / 2.0
            history[time] = dict(current_state)

        elif op == "-":
            if name in current_state: del current_state[name]

        else:
            current_state[name] = "".join(parts[2:]).strip()

    if len(history) < 2:
        print >>sys.stderr, "error: insufficient history to chart"
        return 1

    WriteOutput(history, argv[1], argv[2])


if __name__ == "__main__":
    sys.exit(main(sys.argv))
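One layout detail worth knowing when running the report generator above: WriteOutput()
writes the chart data as CSV files into a "<name>_files" directory next to the HTML
page, so the report and its companion directory have to stay together. A small sketch
(output names assumed from the code, not documented elsewhere in this commit):

    python procstatreport.py procstat.log report.html
    ls report_files/   # total_cpu.csv, cpu_speed.csv, context_switches.csv, ...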