--- /dev/null
+//
+// CPPUtil.h
+// CPPUtil
+//
+// Created by James McIlree on 4/7/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef CPPUtil_CPPUtil_h
+#define CPPUtil_CPPUtil_h
+
+#include <unistd.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+
+#include <stdio.h>
+#include <string.h>
+
+#include <cstdlib>
+#include <cstdint>
+#include <string>
+#include <sstream>
+#include <exception>
+#include <vector>
+#include <algorithm>
+#include <memory>
+#include <utility>
+#include <asl.h>
+
+#include <math.h>
+
+#include <mach/mach_time.h>
+
+namespace util {
+
+#include "UtilBase.hpp"
+#include "UtilAssert.hpp"
+#include "UtilException.hpp"
+#include "UtilMakeUnique.hpp"
+
+#include "UtilPath.hpp"
+
+#include "UtilTRange.hpp"
+#include "UtilTRangeValue.hpp"
+
+#include "UtilPrettyPrinting.hpp"
+#include "UtilTime.hpp"
+#include "UtilAbsTime.hpp"
+#include "UtilNanoTime.hpp"
+#include "UtilAbsInterval.hpp"
+#include "UtilNanoInterval.hpp"
+#include "UtilTimer.hpp"
+
+#include "UtilLog.hpp"
+
+#include "UtilFileDescriptor.hpp"
+#include "UtilString.hpp"
+
+#include "UtilMappedFile.hpp"
+#include "UtilMemoryBuffer.hpp"
+
+#include "UtilTerminalColor.hpp"
+
+}
+
+#endif
--- /dev/null
+//
+// UtilAbsInterval.cpp
+// CPPUtil
+//
+// Created by James McIlree on 9/8/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "CPPUtil.h"
+
+BEGIN_UTIL_NAMESPACE
+
+const AbsInterval* interval_beginning_timespan(const std::vector<AbsInterval>& intervals, AbsInterval timespan) {
+ auto it = std::upper_bound(intervals.begin(), intervals.end(), timespan.location(), AbsIntervalMaxVsAbsTimeComparator());
+
+ //
+ // For a beginning interval, there is no possible match if timespan.location() > intervals.back().max()
+ //
+ if (it != intervals.end()) {
+ //
+ // We found something. Does it contain the search point?
+ //
+ if (it->contains(timespan.location())) {
+ return &*it;
+ }
+
+		//
+		// If the AbsInterval found intersects the timespan, it's still the first valid
+		// interval in the given timespan, so return it anyway.
+		//
+ if (it->intersects(timespan)) {
+ return &*it;
+ }
+ }
+
+ return NULL;
+}
+
+const AbsInterval* interval_ending_timespan(const std::vector<AbsInterval>& intervals, AbsInterval timespan) {
+
+ // We could do this as timespan.max() and use lower_bound(...) to save the subtraction.
+ // But we need the max()-1 value later for the contains() test anyway, so might as well calculate
+ // it here.
+ AbsTime max = timespan.max() - AbsTime(1);
+ auto it = std::upper_bound(intervals.begin(), intervals.end(), max, AbsIntervalMaxVsAbsTimeComparator());
+
+ // Did we find something?
+ if (it != intervals.end()) {
+
+ if (it->contains(max)) {
+ return &*it;
+ }
+
+ // Okay, the matched interval is to the "right" of us on the
+ // timeline. Is there a previous interval that might work?
+ if (it != intervals.begin()) {
+ if ((--it)->intersects(timespan)) {
+ return &*it;
+ }
+ }
+ } else {
+ // Okay, we're off the end of the timeline. There still might
+ // be a previous interval that would match.
+ if (!intervals.empty()) {
+ if ((--it)->intersects(timespan)) {
+ return &*it;
+ }
+ }
+ }
+
+ return NULL;
+}
+
+END_UTIL_NAMESPACE
--- /dev/null
+//
+// UtilAbsInterval.h
+// CPPUtil
+//
+// Created by James McIlree on 4/14/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef __CPPUtil__UtilAbsInterval__
+#define __CPPUtil__UtilAbsInterval__
+
+typedef TRange<AbsTime> AbsInterval;
+
+struct AbsIntervalLocationVsAbsTimeComparator {
+ bool operator()(const AbsInterval& activity, const AbsTime& time) const {
+ return activity.location() < time;
+ }
+
+ bool operator()(const AbsTime& time, const AbsInterval& activity) const {
+ return time < activity.location();
+ }
+};
+
+struct AbsIntervalMaxVsAbsTimeComparator {
+ bool operator()(const AbsInterval& activity, const AbsTime& time) const {
+ return activity.max() < time;
+ }
+
+ bool operator()(const AbsTime& time, const AbsInterval& activity) const {
+ return time < activity.max();
+ }
+};
+
+//
+// Takes a vector of sorted, non-overlapping AbsInterval(s), and a timespan. Returns a pointer to the
+// first (oldest) AbsInterval that intersects the timespan. Returns NULL if no interval intersects.
+//
+// vec:    XXXXXXX  XXXXXXXX  XXXXX  XXXXXX
+// ts:         MMMMMMMMMMMMMMM
+// ret:    XXXXXXX
+//
+// ----------------------------------
+//
+// vec:             XXXXXXXX  XXXXX  XXXXXX
+// ts:         MMMMMMMMMMMMMMM
+// ret:             XXXXXXXX
+//
+// ----------------------------------
+//
+// vec:                       XXXXX  XXXXXX
+// ts:            MMMMMMMMMMMMMMM
+// ret:                       XXXXX
+//
+// ----------------------------------
+//
+// vec:                              XXXXXX
+// ts:         MMMMMMMMMMMMMMM
+// ret:    NULL
+//
+
+const AbsInterval* interval_beginning_timespan(const std::vector<AbsInterval>& intervals, AbsInterval timespan);
+
+//
+// Takes a vector of sorted, non-overlapping AbsInterval(s), and a timespan. Returns a pointer to the
+// last (youngest) AbsInterval that intersects the timespan. Returns NULL if no interval intersects.
+//
+// vec:    XXXXXXX  XXXXXXXX  XXXXX  XXXXXX
+// ts:            MMMMMMMMMMMMMMM
+// ret:                       XXXXX
+//
+// ----------------------------------
+//
+// vec:    XXXXXXX  XXXXXXXX         XXXXXX
+// ts:            MMMMMMMMMMMMMMM
+// ret:             XXXXXXXX
+//
+// ----------------------------------
+//
+// vec:    XXXXXXX                   XXXXXX
+// ts:         MMMMMMMMMMMMMMM
+// ret:    XXXXXXX
+//
+// ----------------------------------
+//
+// vec:                              XXXXXX
+// ts:         MMMMMMMMMMMMMMM
+// ret:    NULL
+//
+
+const AbsInterval* interval_ending_timespan(const std::vector<AbsInterval>& intervals, AbsInterval timespan);
+
+#endif /* defined(__CPPUtil__UtilAbsInterval__) */
--- /dev/null
+//
+// UtilAbsTime.cpp
+// CPPUtil
+//
+// Created by James McIlree on 4/14/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "CPPUtil.h"
+
+BEGIN_UTIL_NAMESPACE
+
+const AbsTime AbsTime::BEGINNING_OF_TIME = AbsTime(0ULL);
+const AbsTime AbsTime::END_OF_TIME = AbsTime(UINT64_MAX);
+
+AbsTime AbsTime::now() {
+ return AbsTime(mach_absolute_time());
+}
+
+NanoTime AbsTime::nano_time() const {
+ mach_timebase_info_data_t timebase_info;
+ mach_timebase_info(&timebase_info);
+ return NanoTime(_time * timebase_info.numer / timebase_info.denom);
+}
+
+NanoTime AbsTime::nano_time(mach_timebase_info_data_t timebase_info) const {
+ return NanoTime(_time * timebase_info.numer / timebase_info.denom);
+}
+
+END_UTIL_NAMESPACE
--- /dev/null
+//
+// UtilAbsTime.hpp
+// CPPUtil
+//
+// Created by James McIlree on 4/14/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef __CPPUtil__UtilAbsTime__
+#define __CPPUtil__UtilAbsTime__
+
+class NanoTime;
+
+class AbsTime {
+ protected:
+ uint64_t _time;
+
+ public:
+ // Minimum and Maximum possible values
+ static const AbsTime BEGINNING_OF_TIME;
+ static const AbsTime END_OF_TIME;
+
+ static AbsTime now();
+
+ AbsTime() : _time(0ULL) {}
+ explicit AbsTime(uint64_t t) : _time(t) {}
+
+ bool operator==(const AbsTime& rhs) const { return this->_time == rhs._time; }
+ bool operator!=(const AbsTime &rhs) const { return !(*this == rhs); }
+
+ bool operator<(const AbsTime& rhs) const { return this->_time < rhs._time; }
+ bool operator<=(const AbsTime& rhs) const { return this->_time <= rhs._time; }
+ bool operator>(const AbsTime& rhs) const { return this->_time > rhs._time; }
+ bool operator>=(const AbsTime& rhs) const { return this->_time >= rhs._time; }
+
+ // We do not want to be able to mutate AbsTime(s)
+ // without type enforcement, but it is useful to be able
+ // to say "if (time == 0) {}", so we have value based
+ // operators for comparison
+ bool operator==(uint64_t value) const { return this->_time == value; }
+ bool operator!=(uint64_t value) const { return !(*this == value); }
+
+ bool operator<(uint64_t value) const { return this->_time < value; }
+ bool operator<=(uint64_t value) const { return this->_time <= value; }
+ bool operator>(uint64_t value) const { return this->_time > value; }
+ bool operator>=(uint64_t value) const { return this->_time >= value; }
+
+ AbsTime operator+(const AbsTime& rhs) const { return AbsTime(_time + rhs._time); }
+ AbsTime operator-(const AbsTime& rhs) const { return AbsTime(_time - rhs._time); }
+ AbsTime operator*(const AbsTime& rhs) const { return AbsTime(_time * rhs._time); }
+ AbsTime operator/(const AbsTime& rhs) const { return AbsTime(_time / rhs._time); }
+
+ AbsTime& operator+=(const AbsTime& rhs) { _time += rhs._time; return *this; }
+
+ NanoTime nano_time() const; // NOTE! Uses system mach_timebase_info, potentially expensive conversion costs.
+ NanoTime nano_time(mach_timebase_info_data_t timebase_info) const;
+
+ uint64_t value() const { return _time; }
+ double double_value() const { return (double)_time; }
+};
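+
+// Illustrative usage (editor's sketch, not part of the library): timing a
+// block of work with the API above. do_work() is a hypothetical function.
+//
+//	AbsTime start = AbsTime::now();
+//	do_work();
+//	NanoTime elapsed = (AbsTime::now() - start).nano_time();
+//	printf("%llu ns\n", (unsigned long long)elapsed.value());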
+
+#endif /* defined(__CPPUtil__UtilAbsTime__) */
--- /dev/null
+//
+// Assert.hpp
+// CPPUtil
+//
+// Created by James McIlree on 4/7/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef CPPUtil_Assert_hpp
+#define CPPUtil_Assert_hpp
+
+#if !defined(NDEBUG) && !defined(NS_BLOCK_ASSERTIONS)
+
+ #define DEBUG_ONLY( statement ) statement
+
+ #define ASSERT(e, d) \
+ { \
+ if (__builtin_expect(!(e), 0)) { \
+ ::printf("ASSERT(%s) %s %d, %s\n", #e, util::Path::basename((char*)__FILE__).c_str(), __LINE__, d); \
+ std::abort(); \
+ } \
+ }
+
+ #define SHOULD_NOT_REACH_HERE(d) \
+ { \
+ ::printf("SHOULD_NOT_REACH_HERE %s %d, %s\n", util::Path::basename((char*)__FILE__).c_str(), __LINE__, d); \
+ std::abort(); \
+ }
+
+ #define TrueInDebug true
+
+#else
+
+ #define DEBUG_ONLY( statement )
+ #define ASSERT(e, d)
+ #define SHOULD_NOT_REACH_HERE(d)
+
+ #define TrueInDebug false
+
+#endif
+
+#define GUARANTEE(e) \
+{ \
+ if (__builtin_expect(!(e), 0)) { \
+ ::printf("ASSERT(%s) %s %d\n", #e, util::Path::basename((char*)__FILE__).c_str(), __LINE__); \
+ std::abort(); \
+ } \
+}
+
+#endif
--- /dev/null
+//
+// UtilBase.hpp
+// CPPUtil
+//
+// Created by James McIlree on 4/7/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef CPPUtil_UtilBase_hpp
+#define CPPUtil_UtilBase_hpp
+
+#define BEGIN_UTIL_NAMESPACE namespace util {
+#define END_UTIL_NAMESPACE }
+
+#endif
--- /dev/null
+//
+// Exception.hpp
+// CPPUtil
+//
+// Created by James McIlree on 4/7/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef CPPUtil_Exception_hpp
+#define CPPUtil_Exception_hpp
+
+class Exception : public std::exception {
+ protected:
+ std::string _what;
+
+ public:
+	Exception(const std::string& what) : _what(what) {}
+	virtual ~Exception() throw() {}
+
+ virtual char const* what() const throw() { return _what.c_str(); }
+};
+
+#define THROW(e) \
+{ \
+ std::ostringstream s; \
+ s << e; \
+ std::string str = s.str(); \
+ Exception exp(str); \
+ throw exp; \
+}
+
+#define UNIMPLEMENTED() THROW("Unimplemented: " << Path::basename((char*)__FILE__) << ":" << __LINE__ )
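+
+// Illustrative usage (editor's sketch): THROW builds its message in an
+// ostringstream, so anything with an operator<< may be streamed. The names
+// len and max_len are hypothetical.
+//
+//	if (len > max_len)
+//		THROW("len too large: " << len << " > " << max_len);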
+
+#endif
--- /dev/null
+//
+// UtilFileDescriptor.hpp
+// CPPUtil
+//
+// Created by James McIlree on 4/16/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef CPPUtil_UtilFileDescriptor_hpp
+#define CPPUtil_UtilFileDescriptor_hpp
+
+class FileDescriptor {
+ protected:
+ int _fd;
+
+	// FDs aren't reference counted; we allow move semantics but
+	// not copy semantics. Disable the copy constructor and copy
+	// assignment.
+ FileDescriptor(const FileDescriptor& that) = delete;
+ FileDescriptor& operator=(const FileDescriptor& other) = delete;
+
+ public:
+
+ FileDescriptor() : _fd(-1) {}
+ FileDescriptor(int fd) : _fd(fd) {}
+
+ template <typename... Args>
+ FileDescriptor(Args&& ... args) :
+ _fd(open(static_cast<Args &&>(args)...))
+ {
+ }
+
+ FileDescriptor (FileDescriptor&& rhs) noexcept :
+ _fd(rhs._fd)
+ {
+ rhs._fd = -1;
+ }
+
+ ~FileDescriptor() { close(); }
+
+ FileDescriptor& operator=(int fd) { close(); _fd = fd; return *this; }
+ FileDescriptor& operator=(FileDescriptor&& rhs) { std::swap(_fd, rhs._fd); return *this; }
+
+	bool is_open() const { return _fd > -1; }
+ void close() { if (is_open()) { ::close(_fd); _fd = -1; } }
+
+ explicit operator bool() const { return is_open(); }
+ operator int() const { return _fd; }
+};
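+
+// Illustrative usage (editor's sketch, not part of the library): the variadic
+// constructor forwards its arguments to open(2), and the descriptor is closed
+// when the owning object goes out of scope. "/tmp/data" is a hypothetical path.
+//
+//	FileDescriptor fd("/tmp/data", O_RDONLY);
+//	if (fd) {
+//		// The int conversion operator supplies the raw fd:
+//		// ::read(fd, buf, sizeof(buf));
+//	}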
+
+
+#endif
--- /dev/null
+//
+// UtilLog.cpp
+// CPPUtil
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "CPPUtil.h"
+
+BEGIN_UTIL_NAMESPACE
+
+void log_msg(int level, const char* format, ...) {
+ va_list list;
+ va_start(list, format);
+ asl_vlog(NULL, NULL, level, format, list);
+ va_end(list);
+
+ va_start(list, format);
+ vfprintf(stderr, format, list);
+ va_end(list);
+}
+
+END_UTIL_NAMESPACE
--- /dev/null
+//
+// UtilLog.h
+// CPPUtil
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef __CPPUtil__UtilLog__
+#define __CPPUtil__UtilLog__
+
+void log_msg(int level, const char* format, ...) __attribute__((format(printf, 2, 3)));
+
+#endif /* defined(__CPPUtil__UtilLog__) */
--- /dev/null
+//
+// UtilMakeUnique.hpp
+// CPPUtil
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef CPPUtil_UtilMakeUnique_hpp
+#define CPPUtil_UtilMakeUnique_hpp
+
+/* Not needed in C++14 or later */
+#if __cplusplus <= 201103
+
+template<typename T, typename ...Args>
+std::unique_ptr<T> make_unique( Args&& ...args )
+{
+ return std::unique_ptr<T>( new T( std::forward<Args>(args)... ) );
+}
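+
+// Illustrative usage (editor's sketch; Foo, a, and b are hypothetical). This
+// replaces the exception-unsafe spelling std::unique_ptr<Foo>(new Foo(a, b)):
+//
+//	auto foo = util::make_unique<Foo>(a, b);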
+
+#endif
+
+#endif
--- /dev/null
+//
+// UtilMappedFile.cpp
+// CPPUtil
+//
+// Created by James McIlree on 4/19/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "CPPUtil.h"
+
+#include <sys/mman.h>
+
+BEGIN_UTIL_NAMESPACE
+
+static int open_fd(const char* path, size_t& file_size)
+{
+ int fd = open(path, O_RDONLY, 0);
+ if(fd >= 0) {
+ struct stat data;
+ if (fstat(fd, &data) == 0) {
+ if (S_ISREG(data.st_mode)) {
+ // Is it zero sized?
+ if (data.st_size > 0) {
+ file_size = (size_t)data.st_size;
+ return fd;
+ }
+ }
+ }
+ close(fd);
+ }
+
+ return -1;
+}
+
+MappedFile::MappedFile(const char* path) :
+ _address(NULL),
+ _size(0)
+{
+ ASSERT(path, "Sanity");
+ int fd = open_fd(path, _size);
+ if (fd >= 0) {
+ _address = (unsigned char*)mmap(NULL, _size, PROT_READ, MAP_FILE | MAP_SHARED, fd, 0);
+		if (_address == MAP_FAILED) {
+ _address = NULL;
+ }
+ close(fd);
+ }
+}
+
+MappedFile::~MappedFile()
+{
+ if (_address != NULL) {
+ munmap(_address, _size);
+ }
+}
+
+END_UTIL_NAMESPACE
--- /dev/null
+//
+// UtilMappedFile.h
+// CPPUtil
+//
+// Created by James McIlree on 4/19/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef __CPPUtil__UtilMappedFile__
+#define __CPPUtil__UtilMappedFile__
+
+class MappedFile {
+ protected:
+ unsigned char* _address;
+ size_t _size;
+
+ public:
+ MappedFile(const char* path);
+ ~MappedFile();
+
+ uint8_t* address() { return _address; }
+ size_t size() { return _size; }
+
+ bool mmap_failed() const { return _size > 0 && _address == nullptr; }
+};
+
+#endif /* defined(__CPPUtil__UtilMappedFile__) */
--- /dev/null
+//
+// UtilMemoryBuffer.h
+// CPPUtil
+//
+// Created by James McIlree on 4/20/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef __CPPUtil__UtilMemoryBuffer__
+#define __CPPUtil__UtilMemoryBuffer__
+
+template <typename T>
+class MemoryBuffer {
+ protected:
+ T* _data;
+ size_t _capacity;
+
+	// No copying; this type is move-only.
+ MemoryBuffer(const MemoryBuffer& that) = delete;
+ MemoryBuffer& operator=(const MemoryBuffer& other) = delete;
+
+ public:
+ // Capacity is in units of T!
+ //
+ // MemoryBuffer<char>(1); // 1 byte
+ // MemoryBuffer<uint32_t>(1); // 4 bytes
+ MemoryBuffer() { _data = NULL; _capacity = 0; }
+ MemoryBuffer(size_t capacity);
+ MemoryBuffer(MemoryBuffer&& rhs) noexcept :
+ _data(rhs._data),
+ _capacity(rhs._capacity)
+ {
+ rhs._data = NULL;
+ rhs._capacity = 0;
+ }
+
+ ~MemoryBuffer() { if (_data) { free(_data); } }
+
+ MemoryBuffer& operator=(MemoryBuffer&& rhs) { std::swap(_data, rhs._data); std::swap(_capacity, rhs._capacity); return *this; }
+
+ T* data() { return _data; }
+ size_t capacity() const { return _capacity; }
+ size_t capacity_in_bytes() const { return _capacity * sizeof(T); }
+ // This always results in an allocation and copy.
+ // If the new capacity is smaller, data is truncated.
+ void set_capacity(size_t capacity);
+};
+
+template <typename T>
+MemoryBuffer<T>::MemoryBuffer(size_t capacity) :
+ _capacity(capacity)
+{
+ _data = capacity ? (T*)malloc(capacity * sizeof(T)) : (T*)NULL;
+}
+
+template <typename T>
+void MemoryBuffer<T>::set_capacity(size_t capacity) {
+ MemoryBuffer<T> newbuf(capacity);
+ memcpy(newbuf.data(), _data, std::min(_capacity * sizeof(T), newbuf.capacity() * sizeof(T)));
+ *this = std::move(newbuf);
+}
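+
+// Illustrative usage (editor's sketch, not part of the library). Capacity is
+// in units of T, and set_capacity() always reallocates and copies:
+//
+//	MemoryBuffer<uint32_t> buf(4);	// 16 bytes
+//	buf.set_capacity(2);		// reallocates; only the first 2 entries survive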
+
+#endif /* defined(__CPPUtil__UtilMemoryBuffer__) */
--- /dev/null
+//
+// UtilNanoInterval.h
+// CPPUtil
+//
+// Created by James McIlree on 4/14/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef __CPPUtil__UtilNanoInterval__
+#define __CPPUtil__UtilNanoInterval__
+
+typedef TRange<NanoTime> NanoInterval;
+
+#endif /* defined(__CPPUtil__UtilNanoInterval__) */
--- /dev/null
+//
+// UtilNanoTime.cpp
+// CPPUtil
+//
+// Created by James McIlree on 10/2/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "CPPUtil.h"
+
+BEGIN_UTIL_NAMESPACE
+
+AbsTime NanoTime::abs_time() const {
+ mach_timebase_info_data_t timebase_info;
+ mach_timebase_info(&timebase_info);
+ return AbsTime(_time * timebase_info.denom / timebase_info.numer);
+}
+
+END_UTIL_NAMESPACE
--- /dev/null
+//
+// UtilNanoTime.hpp
+// CPPUtil
+//
+// Created by James McIlree on 4/14/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef __CPPUtil__UtilNanoTime__
+#define __CPPUtil__UtilNanoTime__
+
+class NanoTime {
+ protected:
+ uint64_t _time;
+
+ public:
+ NanoTime() : _time(0ULL) {}
+ NanoTime(uint64_t t) : _time(t) {}
+
+ bool operator==(const NanoTime& rhs) const { return this->_time == rhs._time; }
+ bool operator!=(const NanoTime &rhs) const { return !(*this == rhs); }
+
+ bool operator<(const NanoTime& rhs) const { return this->_time < rhs._time; }
+ bool operator<=(const NanoTime& rhs) const { return this->_time <= rhs._time; }
+ bool operator>(const NanoTime& rhs) const { return this->_time > rhs._time; }
+ bool operator>=(const NanoTime& rhs) const { return this->_time >= rhs._time; }
+
+ // We do not want to be able to mutate NanoTime(s)
+ // without type enforcement, but it is useful to be able
+ // to say "if (time == 0) {}", so we have value based
+ // operators for comparison
+ bool operator==(uint64_t value) const { return this->_time == value; }
+ bool operator!=(uint64_t value) const { return !(*this == value); }
+
+ bool operator<(uint64_t value) const { return this->_time < value; }
+ bool operator<=(uint64_t value) const { return this->_time <= value; }
+ bool operator>(uint64_t value) const { return this->_time > value; }
+ bool operator>=(uint64_t value) const { return this->_time >= value; }
+
+ NanoTime operator+(const NanoTime& rhs) const { return NanoTime(_time + rhs._time); }
+ NanoTime operator-(const NanoTime& rhs) const { return NanoTime(_time - rhs._time); }
+ NanoTime operator*(const NanoTime& rhs) const { return NanoTime(_time * rhs._time); }
+ NanoTime operator/(const NanoTime& rhs) const { return NanoTime(_time / rhs._time); }
+
+ NanoTime& operator+=(const NanoTime& rhs) { _time += rhs._time; return *this; }
+
+ AbsTime abs_time() const; // NOTE! Uses system mach_timebase_info, potentially expensive conversion costs.
+ AbsTime abs_time(mach_timebase_info_data_t timebase_info) const {
+ return AbsTime(_time * timebase_info.denom / timebase_info.numer);
+ }
+
+ uint64_t value() const { return _time; }
+ double double_value() const { return (double)_time; }
+};
+
+
+#endif /* defined(__CPPUtil__UtilNanoTime__) */
--- /dev/null
+//
+// UtilPath.inline.hpp
+// CPPUtil
+//
+// Created by James McIlree on 4/8/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "CPPUtil.h"
+
+#include <sys/stat.h>
+
+BEGIN_UTIL_NAMESPACE
+
+std::string Path::basename(const char* path) {
+ size_t length = strlen(path);
+
+ /*
+ * case: ""
+ * case: "/"
+ * case: [any-single-character-paths]
+ */
+ if (length < 2)
+ return std::string(path);
+
+ char temp[PATH_MAX];
+ char* temp_cursor = &temp[PATH_MAX - 1];
+ char* temp_end = temp_cursor;
+ *temp_end = 0; // NULL terminate
+
+ const char* path_cursor = &path[length-1];
+
+ while (path_cursor >= path) {
+ if (*path_cursor == '/') {
+ // If we have copied one or more chars, we're done
+ if (temp_cursor != temp_end)
+ return std::string(temp_cursor);
+ } else {
+ *(--temp_cursor) = *path_cursor;
+ }
+
+ // Is the temp buffer full?
+ if (temp_cursor == temp)
+ return std::string(temp);
+
+ --path_cursor;
+ }
+
+ if (path[0] == '/' && temp_cursor == temp_end) {
+ *(--temp_cursor) = '/';
+ }
+
+ return std::string(temp_cursor);
+}
+
+std::string Path::basename(const std::string& path) {
+ return basename(path.c_str());
+}
+
+bool Path::exists(const char *path) {
+ struct stat statinfo;
+ return lstat(path, &statinfo) == 0;
+}
+
+bool Path::exists(const std::string& path) {
+ return exists(path.c_str());
+}
+
+bool Path::is_file(const char* path, bool should_resolve_symlinks) {
+ struct stat statinfo;
+ if (should_resolve_symlinks) {
+ if (stat(path, &statinfo) == 0) {
+ if (S_ISREG(statinfo.st_mode)) {
+ return true;
+ }
+ }
+ } else {
+ if (lstat(path, &statinfo) == 0) {
+ if (S_ISREG(statinfo.st_mode)) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+bool Path::is_file(const std::string& path, bool should_resolve_symlinks) {
+ return is_file(path.c_str(), should_resolve_symlinks);
+}
+
+END_UTIL_NAMESPACE
--- /dev/null
+//
+// UtilPath.hpp
+// CPPUtil
+//
+// Created by James McIlree on 4/8/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef CPPUtil_UtilPath_hpp
+#define CPPUtil_UtilPath_hpp
+
+class Path {
+ public:
+ /*
+ * INPUT OUTPUT
+ *
+ * /tmp/scratch.tiff scratch.tiff
+ * /tmp/scratch scratch
+ * /tmp/ tmp
+ * scratch scratch
+ * /mach_kernel mach_kernel
+ * / /
+ */
+ static std::string basename(const char* path);
+	static std::string basename(const std::string& path);
+
+ static bool exists(const char* path);
+	static bool exists(const std::string& path);
+
+ static bool is_file(const char* path, bool should_resolve_symlinks);
+	static bool is_file(const std::string& path, bool should_resolve_symlinks);
+};
+
+#endif
--- /dev/null
+//
+// UtilPrettyPrinting.cpp
+// CPPUtil
+//
+// Created by James McIlree on 9/8/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "CPPUtil.h"
+
+BEGIN_UTIL_NAMESPACE
+
+std::string formated_byte_size(uint64_t bytes) {
+ if (bytes) {
+ char tmp[128];
+ const char *si_prefix[] = { "B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB" };
+ const int base = 1024;
+		int c = std::min((int)(log((double)bytes)/log((double)base)), (int)(sizeof(si_prefix)/sizeof(si_prefix[0])) - 1);
+ snprintf(tmp, sizeof(tmp), "%1.2f %s", bytes / pow((double)base, c), si_prefix[c]);
+ return std::string(tmp);
+ }
+
+ return std::string("0.00 B");
+}
+
+END_UTIL_NAMESPACE
--- /dev/null
+//
+// UtilPrettyPrinting.h
+// CPPUtil
+//
+// Created by James McIlree on 9/8/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef __CPPUtil__UtilPrettyPrinting__
+#define __CPPUtil__UtilPrettyPrinting__
+
+std::string formated_byte_size(uint64_t bytes);
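+
+// Illustrative outputs (editor's note, derived from the implementation in
+// UtilPrettyPrinting.cpp):
+//
+//	formated_byte_size(0)		-> "0.00 B"
+//	formated_byte_size(1536)	-> "1.50 KB"
+//	formated_byte_size(1048576)	-> "1.00 MB"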
+
+#endif /* defined(__CPPUtil__UtilPrettyPrinting__) */
--- /dev/null
+//
+// UtilString.cpp
+// CPPUtil
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "CPPUtil.h"
+
+BEGIN_UTIL_NAMESPACE
+
+bool ends_with(const std::string& str, const std::string& postfix) {
+ size_t pos = str.rfind(postfix);
+ return ((pos != std::string::npos) && (pos == (str.length() - postfix.length())));
+}
+
+END_UTIL_NAMESPACE
--- /dev/null
+//
+// UtilString.hpp
+// CPPUtil
+//
+// Created by James McIlree on 4/16/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef CPPUtil_UtilString_hpp
+#define CPPUtil_UtilString_hpp
+
+struct ConstCharHash {
+ //
+ // Okay, by design std::hash<char*> hashes on the pointer,
+ // not the contents of that pointer.
+ //
+ // The C++11 std::hash<std::string> hash works, but must
+ // construct a copy of the passed in string to hash.
+ //
+ // That's 3x slower than this, minimum.
+ //
+ // This is just the __gnu_cxx hash code inlined.
+ //
+ std::size_t operator()(const char* __s) const {
+ unsigned long __h = 0;
+ for ( ; *__s; ++__s)
+ __h = 5 * __h + *__s;
+ return size_t(__h);
+ };
+
+};
+
+struct ConstCharEqualTo {
+ bool operator() (const char* s1, const char* s2) const {
+ return strcmp(s1, s2) == 0;
+ }
+};
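+
+// Illustrative usage (editor's sketch; requires <unordered_map>): keying an
+// unordered container directly on C strings, without constructing std::string.
+// Note the keys must outlive the container:
+//
+//	std::unordered_map<const char*, int, ConstCharHash, ConstCharEqualTo> counts;
+//	counts["vm_fault"]++;	// hashes/compares the characters, not the pointer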
+
+bool ends_with(const std::string& str, const std::string& postfix);
+
+#endif
--- /dev/null
+//
+// UtilTRange.hpp
+// CPPUtil
+//
+// Created by James McIlree on 4/14/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef CPPUtil_UtilTRange_hpp
+#define CPPUtil_UtilTRange_hpp
+
+template <typename T>
+class TRange {
+ protected:
+ T _location;
+ T _length;
+
+ public:
+ TRange() : _location(0), _length(0) {}
+ TRange(T location, T length) : _location(location), _length(length) {
+ DEBUG_ONLY(validate());
+ };
+
+ bool operator==(const TRange &rhs) const { return this->_location == rhs.location() && this->_length == rhs.length(); }
+
+ bool operator!=(const TRange &rhs) const { return !(*this == rhs); }
+
+ bool operator<(const TRange& rhs) const { return this->_location < rhs.location(); }
+ bool operator<(const TRange* rhs) const { return this->_location < rhs->location(); }
+
+ const T location() const { return _location; }
+ const T length() const { return _length; }
+ const T max() const { return _location + _length; }
+
+ void set_location(T location) { _location = location; DEBUG_ONLY(validate()); }
+ void set_length(T length) { _length = length; DEBUG_ONLY(validate()); }
+ void set_max(T max) { ASSERT(max >= _location, "Sanity"); _length = max - _location; }
+
+ const bool contains(const TRange& other) const { return (other.location() >= location()) && (other.max() <= max()); }
+ const bool contains(const T loc) const { return loc - _location < _length; } // Assumes unsigned!
+
+ const bool intersects(const TRange& o) const { return this->location() < o.max() && o.location() < this->max(); }
+
+ // "union" is a keyword :-(
+ TRange union_range(const TRange& other) const {
+ T maxend = (this->max() > other.max()) ? this->max() : other.max();
+ T minloc = this->location() < other.location() ? this->location() : other.location();
+ return TRange(minloc, maxend - minloc);
+ }
+
+ TRange intersection_range(const TRange& other) const {
+ if (this->intersects(other)) {
+ auto intersection_start = std::max(_location, other.location());
+ auto intersection_end = std::min(max(), other.max());
+ return TRange(intersection_start, intersection_end - intersection_start);
+ }
+
+ return TRange(T(0),T(0));
+ }
+
+ void validate() const { ASSERT((_location + _length >= _location) /*|| (_location + 1 == 0)*/, "range must not wrap"); }
+};
+
+template <typename TRANGE>
+bool is_trange_vector_sorted_and_non_overlapping(const std::vector<TRANGE>& vec) {
+ if (vec.size() > 1) {
+ auto last_it = vec.begin();
+ auto it = last_it + 1;
+
+ while (it < vec.end()) {
+			if (*it < *last_it)
+ return false;
+
+ if (last_it->intersects(*it))
+ return false;
+
+ last_it = it;
+ it++;
+ }
+ }
+ return true;
+}
+
+template <typename TRANGE>
+bool is_trange_vector_sorted(const std::vector<TRANGE>& vec) {
+ if (vec.size() > 1) {
+ auto last_it = vec.begin();
+ auto it = last_it + 1;
+
+ while (it < vec.end()) {
+			if (*it < *last_it)
+ return false;
+
+ last_it = it;
+ it++;
+ }
+ }
+ return true;
+}
+
+// NOTE!
+//
+// This produces an output vector with the
+// intervals "flattened".
+//
+// IOW, this:
+//
+// vec1: XXXXXXXX AAAAAAAAAA
+// YYYYYYYYYYY ZZZZZZZZZ
+//
+// becomes:
+//
+// res: IIIIIIIIIIII IIIIIIIIIIII
+//
+// The input vector should be sorted.
+//
+template <typename TRANGE>
+std::vector<TRANGE> trange_vector_union(std::vector<TRANGE>& input) {
+ std::vector<TRANGE> union_vec;
+
+ ASSERT(is_trange_vector_sorted(input), "Sanity");
+
+ if (!input.empty()) {
+ auto input_it = input.begin();
+ union_vec.push_back(*input_it);
+ while (++input_it < input.end()) {
+ TRANGE union_range = union_vec.back();
+
+ if (union_range.intersects(*input_it)) {
+ union_vec.pop_back();
+ union_vec.push_back(union_range.union_range(*input_it));
+ } else {
+ ASSERT(union_range < *input_it, "Out of order merging");
+ union_vec.push_back(*input_it);
+ }
+ }
+ }
+
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(union_vec), "union'd vector fails invariant");
+
+ return union_vec;
+}
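+
+// Illustrative example (editor's sketch), written as (location, length) pairs:
+//
+//	input:  (0, 10) (5, 10) (20, 5)
+//	output: (0, 15) (20, 5)		// the overlapping ranges are coalesced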
+
+// NOTE!
+//
+// This will coalesce intervals that intersect.
+//
+// IOW, given two input vectors:
+//
+// vec1: XXXX XXXX
+// vec2: XXX
+//
+// res: XXXX XXX XXXX
+//
+// --------------------------------
+//
+// vec1: XXXX XX
+// vec2: XXXXXXXXXXXXXXX
+//
+// res: XXXXXXXXXXXXXXXXX
+
+template <typename TRANGE>
+std::vector<TRANGE> trange_vector_union(std::vector<TRANGE>& vec1, std::vector<TRANGE>& vec2) {
+ std::vector<TRANGE> union_vec;
+
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(vec1), "input vector violates invariants");
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(vec2), "input vector violates invariants");
+
+ // while (not done)
+ // select next interval (lowest location)
+ // if intersects with last union_vec entry, union, pop_back, push_back
+ // else push_back
+
+ auto vec1_it = vec1.begin();
+ auto vec2_it = vec2.begin();
+
+ while (uint32_t chose_vector = (((vec1_it != vec1.end()) ? 1 : 0) + ((vec2_it != vec2.end()) ? 2 : 0))) {
+ //
+		// This is a fancy "choose" algorithm:
+ //
+ // vec1 == bit 1
+ // vec2 == bit 2
+ //
+ decltype(vec1_it) merge_it;
+ switch (chose_vector) {
+ case 1:
+ merge_it = vec1_it++;
+ break;
+
+ case 2:
+ merge_it = vec2_it++;
+ break;
+
+ case 3:
+				merge_it = (*vec1_it < *vec2_it) ? vec1_it++ : vec2_it++;
+ break;
+
+ default:
+ ASSERT(false, "ShouldNotReachHere");
+ return std::vector<TRANGE>();
+ }
+
+ if (union_vec.empty()) {
+ union_vec.push_back(*merge_it);
+ } else {
+ TRANGE last_range = union_vec.back();
+
+ if (last_range.intersects(*merge_it)) {
+ union_vec.pop_back();
+ union_vec.push_back(last_range.union_range(*merge_it));
+ } else {
+ ASSERT(last_range < *merge_it, "Out of order merging");
+ union_vec.push_back(*merge_it);
+ }
+ }
+ }
+
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(union_vec), "union'd vector fails invariant");
+
+ return union_vec;
+}
+
+template <typename TRANGE>
+std::vector<TRANGE> trange_vector_intersect(std::vector<TRANGE>& vec1, std::vector<TRANGE>& vec2) {
+ std::vector<TRANGE> intersect_vec;
+
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(vec1), "input vector violates invariants");
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(vec2), "input vector violates invariants");
+
+ auto vec1_it = vec1.begin();
+ auto vec2_it = vec2.begin();
+
+ // As soon as one vector empties, there can be no more intersections
+ while (vec1_it != vec1.end() && vec2_it != vec2.end()) {
+ TRANGE temp = vec1_it->intersection_range(*vec2_it);
+ if (temp.length() > 0) {
+ intersect_vec.push_back(temp);
+ }
+
+ // We keep the interval that ends last
+
+ if (vec1_it->max() > vec2_it->max()) {
+ vec2_it++;
+ } else {
+ vec1_it++;
+ }
+ }
+
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(intersect_vec), "intersection vector fails invariant");
+
+ return intersect_vec;
+}
+
+#endif
--- /dev/null
+//
+// UtilTRangeValue.hpp
+// CPPUtil
+//
+// Created by James McIlree on 12/10/07.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef CPPUtil_UtilTRangeValue_hpp
+#define CPPUtil_UtilTRangeValue_hpp
+
+template <typename T1, typename T2>
+class TRangeValue : public TRange<T1> {
+ protected:
+ T2 _value;
+
+ public:
+ TRangeValue(T1 location, T1 length, T2 value) : TRange<T1>(location, length), _value(value) { };
+
+ const T2 value() const { return _value; }
+
+ // Sometimes we need to reference the value as a mutable reference (think std::vector types)
+ T2& mutable_value() { return _value; }
+};
+
+#endif
--- /dev/null
+//
+// UtilTerminalColor.cpp
+// CPPUtil
+//
+// Created by James McIlree on 4/26/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "CPPUtil.h"
+
+BEGIN_UTIL_NAMESPACE
+
+#define COLOR(FGBG, CODE, BOLD) "\033[0;" BOLD FGBG CODE "m"
+
+#define ALLCOLORS(FGBG,BOLD) {\
+ COLOR(FGBG, "0", BOLD),\
+ COLOR(FGBG, "1", BOLD),\
+ COLOR(FGBG, "2", BOLD),\
+ COLOR(FGBG, "3", BOLD),\
+ COLOR(FGBG, "4", BOLD),\
+ COLOR(FGBG, "5", BOLD),\
+ COLOR(FGBG, "6", BOLD),\
+ COLOR(FGBG, "7", BOLD)\
+}
+
+static const char colorcodes[2][2][8][10] = {
+ { ALLCOLORS("3",""), ALLCOLORS("3","1;") },
+ { ALLCOLORS("4",""), ALLCOLORS("4","1;") }
+};
+
+const char* TerminalColorStringFor(kTerminalColor code, bool is_bold, bool is_background) {
+ return colorcodes[is_background ? 1 : 0][is_bold ? 1 : 0][(uint32_t)code & 7];
+}
+
+const char* TerminalColorResetString(void) {
+ return "\033[0m";
+}
+
+END_UTIL_NAMESPACE
--- /dev/null
+//
+// UtilTerminalColor.hpp
+// CPPUtil
+//
+// Created by James McIlree on 4/26/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef CPPUtil_UtilTerminalColor_hpp
+#define CPPUtil_UtilTerminalColor_hpp
+
+// This is borrowed from clang
+
+enum class kTerminalColor : std::uint32_t {
+ BLACK=0,
+ RED,
+ GREEN,
+ YELLOW,
+ BLUE,
+ MAGENTA,
+ CYAN,
+ WHITE
+};
+
+const char* TerminalColorStringFor(kTerminalColor code, bool is_bold, bool is_background);
+const char* TerminalColorResetString(void);
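+
+// Illustrative usage (editor's sketch, not part of the library):
+//
+//	printf("%serror:%s out of memory\n",
+//	       TerminalColorStringFor(kTerminalColor::RED, true, false),
+//	       TerminalColorResetString());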
+
+#endif
--- /dev/null
+//
+// UtilTime.hpp
+// CPPUtil
+//
+// Created by James McIlree on 4/14/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef CPPUtil_UtilTime_hpp
+#define CPPUtil_UtilTime_hpp
+
+static const uint64_t NANOSECONDS_PER_MICROSECOND = 1000ULL;
+static const uint64_t NANOSECONDS_PER_MILLISECOND = 1000000ULL;
+static const uint64_t NANOSECONDS_PER_SECOND = 1000000000ULL;
+
+static const uint64_t MICROSECONDS_PER_MILLISECOND = 1000ULL;
+static const uint64_t MICROSECONDS_PER_SECOND = 1000000ULL;
+
+static const uint64_t MILLISECONDS_PER_SECOND = 1000ULL;
+
+#endif
--- /dev/null
+//
+// UtilTimer.cpp
+// CPPUtil
+//
+// Created by James McIlree on 10/9/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "CPPUtil.h"
+
+BEGIN_UTIL_NAMESPACE
+
+static mach_timebase_info_data_t timebase_info;
+
+Timer::Timer(const char* message) : _message(message) {
+ // C++ guarantees that static variable initialization is thread safe.
+ // We don't actually care what the returned value is, we just want to init timebase_info
+ // The pragma prevents spurious warnings.
+ static kern_return_t blah = mach_timebase_info(&timebase_info);
+#pragma unused(blah)
+
+ _start = AbsTime::now(); // Do this after the initialization check.
+}
+
+Timer::~Timer()
+{
+ _end = AbsTime::now();
+ printf("%s: %5.5f seconds\n", _message.c_str(), (double)(_end - _start).nano_time().value() / (double)NANOSECONDS_PER_SECOND);
+}
+
+END_UTIL_NAMESPACE
--- /dev/null
+//
+// UtilTimer.h
+// CPPUtil
+//
+// Created by James McIlree on 10/9/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef __CPPUtil__UtilTimer__
+#define __CPPUtil__UtilTimer__
+
+class Timer {
+ protected:
+ AbsTime _start;
+ AbsTime _end;
+ std::string _message;
+
+ public:
+ Timer(const char* message);
+ ~Timer();
+};
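+
+// Illustrative usage (editor's sketch): prints "parse: N seconds" when the
+// scope exits. parse_file() is a hypothetical function.
+//
+//	{
+//		Timer timer("parse");
+//		parse_file(path);
+//	}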
+
+#endif /* defined(__CPPUtil__UtilTimer__) */
--- /dev/null
+//
+// CPUActivity.hpp
+// KDBG
+//
+// Created by James McIlree on 4/22/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef kdprof_CPUActivity_hpp
+#define kdprof_CPUActivity_hpp
+
+// NOTE! Counted, not bits!
+enum class kCPUActivity : uint32_t {
+ Unknown = 0x00000001,
+ Idle = 0x00000002,
+ INTR = 0x00000003,
+ Run = 0x00000004 // *MUST* be the last definition. See "is_run()"
+};
+
+template <typename SIZE>
+class CPUActivity : public AbsInterval {
+
+	// Declaring this a union to make the behavior clearer.
+	//
+	// If _u.type > kCPUActivity::Run, the _thread portion of
+	// the union is valid, and the type is considered to be Run.
+	//
+	// However, if the _thread is valid, the low order bit of
+	// the thread pointer indicates whether this run began with
+	// a context switch.
+	//
+	// So:
+	//
+	// 0000000X == _type;
+	// XXXXXXX[0/1] == _thread;
+
+ union {
+ MachineThread<SIZE>* thread;
+ uintptr_t type;
+ } _u;
+
+ enum {
+ kCPUActivityRunIsContextSwitch = 0x1
+ };
+
+ public:
+ CPUActivity(kCPUActivity type, AbsInterval interval) :
+ AbsInterval(interval)
+ {
+ ASSERT(type != kCPUActivity::Run, "Cannot be Run without a thread");
+ _u.type = (uintptr_t)type;
+ }
+
+ CPUActivity(MachineThread<SIZE>* thread, AbsInterval interval, bool is_cntx_swtch) :
+ AbsInterval(interval)
+ {
+ _u.thread = thread;
+ if (is_cntx_swtch)
+ _u.type |= kCPUActivityRunIsContextSwitch;
+
+ ASSERT(is_run(), "Sanity");
+ ASSERT(is_context_switch() == is_cntx_swtch, "Sanity");
+ }
+
+ // We can safely assume that the memory system will never allocate
+ // a thread in the first page of memory.
+ bool is_run() const { return _u.type > (uintptr_t)kCPUActivity::Run; }
+ bool is_idle() const { return _u.type == (uintptr_t)kCPUActivity::Idle; }
+ bool is_intr() const { return _u.type == (uintptr_t)kCPUActivity::INTR; }
+ bool is_unknown() const { return _u.type == (uintptr_t)kCPUActivity::Unknown; }
+
+ bool is_context_switch() const {
+ if (is_run() && (_u.type & kCPUActivityRunIsContextSwitch))
+ return true;
+ return false;
+ }
+
+ kCPUActivity type() const {
+ if (_u.type > (uintptr_t)kCPUActivity::Run)
+ return kCPUActivity::Run;
+
+ return (kCPUActivity)_u.type;
+ }
+
+ const MachineThread<SIZE>* thread() const {
+ ASSERT(is_run(), "Sanity");
+ return (MachineThread<SIZE>* )((uintptr_t)_u.thread & ~kCPUActivityRunIsContextSwitch);
+ }
+};
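+
+// Editor's illustration of the encoding described above (addresses are
+// hypothetical). Any _u.type value above kCPUActivity::Run must be a
+// MachineThread pointer, with bit 0 borrowed as the context-switch flag:
+//
+//	0x0000000000000002	-> Idle
+//	0x00007f8a4bc01200	-> Run, thread @ 0x00007f8a4bc01200, no context switch
+//	0x00007f8a4bc01201	-> Run, thread @ 0x00007f8a4bc01200, context switch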
+
+#endif
--- /dev/null
+//
+// CPUSummary.hpp
+// KDBG
+//
+// Created by James McIlree on 4/22/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef kdprof_CPUSummary_hpp
+#define kdprof_CPUSummary_hpp
+
+template <typename SIZE>
+class MachineCPU;
+
+template <typename SIZE>
+class CPUSummary {
+ private:
+	// Disallow the copy constructor to make sure that the compiler
+	// moves these, instead of copying them, when we pass them around.
+ CPUSummary(const CPUSummary& that) = delete;
+ CPUSummary& operator=(const CPUSummary& other) = delete;
+
+ friend class Machine<SIZE>;
+
+ public:
+ typedef std::unordered_set<ProcessSummary<SIZE>, ProcessSummaryHash<SIZE>, ProcessSummaryEqualTo<SIZE>> ProcessSummarySet;
+ typedef std::unordered_set<const MachineCPU<SIZE>*> CPUSummaryMachineCPUSet;
+
+ protected:
+ AbsTime _total_unknown_time;
+ AbsTime _total_run_time;
+ AbsTime _total_idle_time;
+ AbsTime _total_intr_time;
+ AbsTime _total_future_run_time;
+ AbsTime _total_wallclock_run_time;
+ AbsTime _total_all_cpus_idle_time;
+ AbsTime _total_vm_fault_time;
+ AbsTime _total_io_time;
+ AbsTime _total_jetsam_time;
+
+ uint32_t _context_switch_count;
+ uint32_t _count_idle_events;
+ uint32_t _count_intr_events;
+ uint32_t _count_vm_fault_events;
+ uint32_t _count_io_events;
+ uint32_t _count_processes_jetsamed;
+ uint32_t _active_cpus;
+
+ uint64_t _io_bytes_completed;
+
+ CPUSummaryMachineCPUSet _cpus;
+ ProcessSummarySet _process_summaries;
+
+ std::vector<AbsInterval> _wallclock_run_intervals; // This is the actual wallclock run interval data.
+ std::vector<AbsInterval> _per_cpu_wallclock_run_intervals; // We need to accumulate intervals during summary generation, this is a temp buffer.
+
+ bool _should_merge_all_cpus_idle_intervals;
+ std::vector<AbsInterval> _all_cpus_idle_intervals;
+ std::vector<AbsInterval> _per_cpu_all_cpus_idle_intervals;
+
+ void add_unknown_time(AbsTime time) { _total_unknown_time += time; }
+ void add_run_time(AbsTime time) { _total_run_time += time; }
+ void add_idle_time(AbsTime time) { _total_idle_time += time; _count_idle_events++; }
+ void add_intr_time(AbsTime time) { _total_intr_time += time; _count_intr_events++; }
+ void add_future_run_time(AbsTime time) { _total_future_run_time += time; }
+ void add_vm_fault_time(AbsTime time) { _total_vm_fault_time += time; _count_vm_fault_events++; }
+ void add_io_time(AbsTime time) { _total_io_time += time; _count_io_events++; } // We want to bump the event count on all IO activity, not just on completion
+ void add_jetsam_time(AbsTime time) { _total_jetsam_time += time; }
+
+ void add_io_bytes_completed(typename SIZE::ptr_t bytes) { _io_bytes_completed += bytes; }
+
+ void increment_processes_jetsamed() { _count_processes_jetsamed++; }
+
+	//
+	// NOTE! Why are the various interval(s) accumulated one cpu at a time,
+	// instead of storing them all in a single vector, sorting it, and
+	// processing it once at the end?
+	//
+	// The single vector, sort, and postprocess approach would work for the
+	// wallclock time calculation, because wallclock times involve "union"
+	// operations where the number of cpu(s) doesn't matter.
+	//
+	// However, for the all-idle and idle-while-waiting-on-IO calculations, we
+	// need "intersection" operations, i.e. all 16 cores need to be idle to count
+	// as "all-idle". In this mode, the number of cores matters; an intersection
+	// requires all 16 cores to be in the same state simultaneously. This is
+	// difficult to calculate with more than 2 sources. By calculating one cpu
+	// at a time, that is avoided, and the state remains sanity-checkable
+	// throughout.
+	//
+
+
+ //
+ // Wallclock run intervals are added as each cpu timeline is walked.
+ // Between cpu(s), the results are accumulated to a single buffer
+ // After all cpus have been processed, the single buffer is summarized
+ //
+ // wallclock run time is the *union* of cpu run intervals.
+ //
+ void add_wallclock_run_interval(AbsInterval interval);
+ void accumulate_wallclock_run_intervals();
+ void summarize_wallclock_run_intervals();
+
+ //
+ // all cpus idle intervals are added as each cpu timeline is walked.
+ // Between cpu(s), the results are accumulated to a single buffer
+ // After all cpus have been processed, the single buffer is summarized.
+ //
+ // all cpus idle time is the *intersection* of cpu idle intervals
+ //
+ void add_all_cpus_idle_interval(AbsInterval interval);
+ void accumulate_all_cpus_idle_intervals();
+ void summarize_all_cpus_idle_intervals();
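+
+	// Editor's illustration of the two combining operations, for two cpus
+	// (R = run, . = idle):
+	//
+	//   cpu0:                       RRRR....RRRR
+	//   cpu1:                       ..RRRR......
+	//
+	//   wallclock run (union):      RRRRRR..RRRR
+	//   all cpus idle (intersect):  ......II....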
+
+ void incr_context_switches() { _context_switch_count++; }
+ void incr_active_cpus() { _active_cpus++; }
+
+ // These bracket individual cpu timeline walks
+ void begin_cpu_timeline_walk(const MachineCPU<SIZE>* cpu);
+ void end_cpu_timeline_walk(const MachineCPU<SIZE>* cpu);
+
+ // These bracket all cpu timeline walks
+ void begin_cpu_timeline_walks(void);
+ void end_cpu_timeline_walks(void);
+
+ ProcessSummary<SIZE>* mutable_process_summary(const MachineProcess<SIZE>* process) {
+ auto it = _process_summaries.find(process);
+ if (it == _process_summaries.end()) {
+ // We create any process summary that is missing.
+ auto insert_result = _process_summaries.emplace(process);
+ ASSERT(insert_result.second, "Sanity");
+ it = insert_result.first;
+ }
+
+ // NOTE! Because we are using a Set instead of a Map, STL wants
+ // the objects to be immutable. "it" refers to a const Record, to
+ // prevent us from changing the hash or equality of the Set. We
+ // know that the allowed set of mutations will not change these,
+ // and so we evil hack(tm) and cast away the const'ness.
+ return const_cast<ProcessSummary<SIZE>*>(&*it);
+ }
+
+ ProcessSummarySet& mutable_process_summaries() { return _process_summaries; }
+
+ public:
+ CPUSummary() :
+ _context_switch_count(0),
+ _count_idle_events(0),
+ _count_intr_events(0),
+ _count_vm_fault_events(0),
+ _count_io_events(0),
+ _count_processes_jetsamed(0),
+ _active_cpus(0),
+ _io_bytes_completed(0),
+ _should_merge_all_cpus_idle_intervals(false)
+ {
+ }
+
+ CPUSummary (CPUSummary&& rhs) noexcept :
+ _total_unknown_time(rhs._total_unknown_time),
+ _total_run_time(rhs._total_run_time),
+ _total_idle_time(rhs._total_idle_time),
+ _total_intr_time(rhs._total_intr_time),
+ _total_future_run_time(rhs._total_future_run_time),
+ _total_wallclock_run_time(rhs._total_wallclock_run_time),
+ _total_all_cpus_idle_time(rhs._total_all_cpus_idle_time),
+ _total_vm_fault_time(rhs._total_vm_fault_time),
+		_total_io_time(rhs._total_io_time),
+		_total_jetsam_time(rhs._total_jetsam_time),
+ _context_switch_count(rhs._context_switch_count),
+ _count_idle_events(rhs._count_idle_events),
+ _count_intr_events(rhs._count_intr_events),
+ _count_vm_fault_events(rhs._count_vm_fault_events),
+ _count_io_events(rhs._count_io_events),
+ _count_processes_jetsamed(rhs._count_processes_jetsamed),
+ _active_cpus(rhs._active_cpus),
+ _io_bytes_completed(rhs._io_bytes_completed),
+		_cpus(std::move(rhs._cpus)),
+		_process_summaries(std::move(rhs._process_summaries)),
+ // _wallclock_run_intervals
+ // _per_cpu_wallclock_run_intervals
+ _should_merge_all_cpus_idle_intervals(false)
+ // _all_cpus_idle_intervals
+ // _per_cpu_all_cpus_idle_intervals
+ // _wallclock_vm_fault_intervals
+ // _wallclock_pgin_intervals
+ // _wallclock_disk_read_intervals
+ {
+ ASSERT(rhs._all_cpus_idle_intervals.empty(), "Sanity");
+ ASSERT(rhs._per_cpu_all_cpus_idle_intervals.empty(), "Sanity");
+ ASSERT(rhs._wallclock_run_intervals.empty(), "Sanity");
+ ASSERT(rhs._per_cpu_wallclock_run_intervals.empty(), "Sanity");
+ ASSERT(rhs._should_merge_all_cpus_idle_intervals == false, "Sanity");
+ }
+
+ AbsTime total_time() const { return _total_unknown_time + _total_run_time + _total_idle_time + _total_intr_time; }
+
+ AbsTime total_unknown_time() const { return _total_unknown_time; }
+ AbsTime total_run_time() const { return _total_run_time; }
+ AbsTime total_idle_time() const { return _total_idle_time; }
+ AbsTime total_intr_time() const { return _total_intr_time; }
+ AbsTime total_future_run_time() const { return _total_future_run_time; }
+ AbsTime total_wallclock_run_time() const { return _total_wallclock_run_time; }
+ AbsTime total_all_cpus_idle_time() const { return _total_all_cpus_idle_time; }
+ AbsTime total_vm_fault_time() const { return _total_vm_fault_time; }
+ AbsTime total_io_time() const { return _total_io_time; }
+ AbsTime total_jetsam_time() const { return _total_jetsam_time; }
+
+		AbsTime avg_on_cpu_time() const { return _total_run_time / AbsTime(_context_switch_count); }
+
+ uint32_t context_switches() const { return _context_switch_count; }
+ uint32_t num_idle_events() const { return _count_idle_events; }
+ uint32_t num_intr_events() const { return _count_intr_events; }
+ uint32_t num_vm_fault_events() const { return _count_vm_fault_events; }
+ uint32_t num_io_events() const { return _count_io_events; }
+ uint32_t num_processes_jetsammed() const { return _count_processes_jetsamed; }
+
+ uint32_t active_cpus() const { return _active_cpus; }
+
+ uint64_t io_bytes_completed() const { return _io_bytes_completed; }
+
+
+ // A CPUSummary may be a summary of one or more CPUs.
+ // The cpus set are the MachineCPU(s) that were used to
+ // construct this summary.
+ const CPUSummaryMachineCPUSet& cpus() const { return _cpus; }
+
+ const ProcessSummarySet& process_summaries() const { return _process_summaries; }
+ const ProcessSummary<SIZE>* process_summary(const MachineProcess<SIZE>* process) const {
+ auto it = _process_summaries.find(process);
+ return (it == _process_summaries.end()) ? NULL : &*it;
+ }
+
+ DEBUG_ONLY(void validate() const;)
+};
+
+template <typename SIZE>
+void CPUSummary<SIZE>::begin_cpu_timeline_walks() {
+ _should_merge_all_cpus_idle_intervals = true;
+}
+
+template <typename SIZE>
+void CPUSummary<SIZE>::begin_cpu_timeline_walk(const MachineCPU<SIZE>* cpu) {
+ ASSERT(cpu, "Sanity");
+ _cpus.emplace(cpu);
+}
+
+template <typename SIZE>
+void CPUSummary<SIZE>::end_cpu_timeline_walk(const MachineCPU<SIZE>* cpu) {
+ ASSERT(cpu, "Sanity");
+
+ accumulate_wallclock_run_intervals();
+ accumulate_all_cpus_idle_intervals();
+}
+
+template <typename SIZE>
+void CPUSummary<SIZE>::end_cpu_timeline_walks(void) {
+ summarize_wallclock_run_intervals();
+ summarize_all_cpus_idle_intervals();
+}
+
+template <typename SIZE>
+void CPUSummary<SIZE>::add_wallclock_run_interval(AbsInterval interval) {
+ ASSERT(_per_cpu_wallclock_run_intervals.empty() || (_per_cpu_wallclock_run_intervals.back() < interval && !interval.intersects(_per_cpu_wallclock_run_intervals.back())), "Invariant violated");
+ _per_cpu_wallclock_run_intervals.emplace_back(interval);
+}
+
+template <typename SIZE>
+void CPUSummary<SIZE>::accumulate_wallclock_run_intervals() {
+ _wallclock_run_intervals = trange_vector_union(_wallclock_run_intervals, _per_cpu_wallclock_run_intervals);
+ _per_cpu_wallclock_run_intervals.clear();
+ // We don't shrink_to_fit here as its expected another CPU's run intervals will be processed next.
+
+ for (auto& process_summary : _process_summaries) {
+ // NOTE! Because we are using a Set instead of a Map, STL wants
+ // the objects to be immutable. We know that the operations being
+ // invoked will not change the hash, but we still must throw away
+ // the const'ness. Care must be taken to avoid the construction of
+ // temporary objects, thus the use of pointers...
+ const_cast<ProcessSummary<SIZE>*>(&process_summary)->accumulate_wallclock_run_intervals();
+ }
+}
+
+template <typename SIZE>
+void CPUSummary<SIZE>::summarize_wallclock_run_intervals() {
+ ASSERT(_per_cpu_wallclock_run_intervals.empty(), "Sanity");
+ _per_cpu_wallclock_run_intervals.shrink_to_fit();
+
+ ASSERT(_total_wallclock_run_time == 0, "Called more than once");
+
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(_wallclock_run_intervals), "Sanity");
+
+ for (auto& interval : _wallclock_run_intervals) {
+ _total_wallclock_run_time += interval.length();
+ }
+
+ _wallclock_run_intervals.clear();
+ _wallclock_run_intervals.shrink_to_fit();
+
+ for (auto& process_summary : _process_summaries) {
+ // NOTE! Because we are using a Set instead of a Map, STL wants
+ // the objects to be immutable. We know that the operations being
+ // invoked will not change the hash, but we still must throw away
+ // the const'ness. Care must be taken to avoid the construction of
+ // temporary objects, thus the use of pointers...
+ const_cast<ProcessSummary<SIZE>*>(&process_summary)->summarize_wallclock_run_intervals();
+ }
+}
+
+template <typename SIZE>
+void CPUSummary<SIZE>::add_all_cpus_idle_interval(AbsInterval interval) {
+ ASSERT(_per_cpu_all_cpus_idle_intervals.empty() || (_per_cpu_all_cpus_idle_intervals.back() < interval && !interval.intersects(_per_cpu_all_cpus_idle_intervals.back())), "Invariant violated");
+ _per_cpu_all_cpus_idle_intervals.emplace_back(interval);
+}
+
+template <typename SIZE>
+void CPUSummary<SIZE>::accumulate_all_cpus_idle_intervals() {
+ if (_should_merge_all_cpus_idle_intervals) {
+ _should_merge_all_cpus_idle_intervals = false;
+ _all_cpus_idle_intervals = _per_cpu_all_cpus_idle_intervals;
+ } else {
+ _all_cpus_idle_intervals = trange_vector_intersect(_all_cpus_idle_intervals, _per_cpu_all_cpus_idle_intervals);
+ }
+ _per_cpu_all_cpus_idle_intervals.clear();
+}
+
+template <typename SIZE>
+void CPUSummary<SIZE>::summarize_all_cpus_idle_intervals() {
+ ASSERT(!_should_merge_all_cpus_idle_intervals, "Sanity");
+ ASSERT(_per_cpu_all_cpus_idle_intervals.empty(), "Sanity");
+ ASSERT(_total_all_cpus_idle_time == 0, "Called more than once");
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(_all_cpus_idle_intervals), "Sanity");
+
+ _per_cpu_all_cpus_idle_intervals.shrink_to_fit();
+ for (auto& interval : _all_cpus_idle_intervals) {
+ _total_all_cpus_idle_time += interval.length();
+ }
+
+ _all_cpus_idle_intervals.clear();
+ _all_cpus_idle_intervals.shrink_to_fit();
+}
+
+#if !defined(NDEBUG) && !defined(NS_BLOCK_ASSERTIONS)
+template <typename SIZE>
+void CPUSummary<SIZE>::validate() const {
+ ASSERT(_total_wallclock_run_time <= _total_run_time, "Sanity");
+ ASSERT(_total_all_cpus_idle_time <= _total_idle_time, "Sanity");
+
+ for (const auto& process_summary : _process_summaries) {
+ process_summary.validate();
+ }
+}
+#endif
+
+#endif
--- /dev/null
+//
+// IOActivity.hpp
+// KDBG
+//
+// Created by James McIlree on 9/2/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef kdprof_IOActivity_hpp
+#define kdprof_IOActivity_hpp
+
+template <typename SIZE>
+class IOActivity : public TRange<AbsTime> {
+ private:
+ MachineThread<SIZE>* _thread;
+ typename SIZE::ptr_t _size;
+
+ public:
+ IOActivity(AbsTime start, AbsTime length, MachineThread<SIZE>* thread, typename SIZE::ptr_t size) :
+ TRange(start, length),
+ _thread(thread),
+ _size(size)
+ {
+ ASSERT(_thread, "Sanity");
+ ASSERT(_size, "Zero length IO");
+ }
+
+ MachineThread<SIZE>* thread() const { return _thread; }
+ void set_thread(MachineThread<SIZE>* thread) { _thread = thread; }
+
+ typename SIZE::ptr_t size() const { return _size; }
+};
+
+#endif
--- /dev/null
+//
+// Kernel.cpp
+// KDBG
+//
+// Created by James McIlree on 10/24/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#include <CPPUtil/CPPUtil.h>
+
+using namespace util;
+
+#include "KDebug.h"
+
+KDState KDBG::state()
+{
+ static_assert(sizeof(KDState) == sizeof(kbufinfo_t), "Types must be the same size");
+
+ KDState state;
+ int mib[3];
+ size_t len = sizeof(state);
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_KDEBUG;
+ mib[2] = KERN_KDGETBUF;
+
+ if (sysctl(mib, 3, &state, &len, 0, 0) < 0) {
+ DEBUG_ONLY(log_msg(ASL_LEVEL_ERR, "trace facility failure, KERN_KDGETBUF: %s\n", strerror(errno)));
+		THROW("trace facility failure, KERN_KDGETBUF: " << strerror(errno));
+ }
+
+ return state;
+}
+
+bool KDBG::reset()
+{
+ int mib[3];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_KDEBUG;
+ mib[2] = KERN_KDREMOVE;
+ if (sysctl(mib, 3, NULL, NULL, NULL, 0) < 0) {
+ DEBUG_ONLY(log_msg(ASL_LEVEL_WARNING, "trace facility failure, KERN_KDREMOVE: %s\n", strerror(errno)));
+ return false;
+ }
+
+ return true;
+}
+
+bool KDBG::set_buffer_capacity(uint32_t capacity)
+{
+ int mib[4];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_KDEBUG;
+ mib[2] = KERN_KDSETBUF;
+ mib[3] = (int)capacity;
+
+ if (sysctl(mib, 4, NULL, NULL, NULL, 0) < 0) {
+ DEBUG_ONLY(log_msg(ASL_LEVEL_WARNING, "trace facility failure, KERN_KDSETBUF: %s\n", strerror(errno)));
+ return false;
+ }
+
+ return true;
+}
+
+bool KDBG::set_nowrap(bool is_nowrap)
+{
+ int mib[4];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_KDEBUG;
+ mib[2] = is_nowrap ? KERN_KDEFLAGS : KERN_KDDFLAGS;
+ mib[3] = KDBG_NOWRAP;
+
+ if (sysctl(mib, 4, NULL, NULL, NULL, 0) < 0) {
+ DEBUG_ONLY(log_msg(ASL_LEVEL_WARNING, "trace facility failure, KDBG_NOWRAP: %s\n", strerror(errno)));
+ return false;
+ }
+
+ return true;
+}
+
+bool KDBG::initialize_buffers()
+{
+ int mib[3];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_KDEBUG;
+ mib[2] = KERN_KDSETUP;
+
+ if (sysctl(mib, 3, NULL, NULL, NULL, 0) < 0) {
+ DEBUG_ONLY(log_msg(ASL_LEVEL_WARNING, "trace facility failure, KERN_KDSETUP: %s\n", strerror(errno)));
+ return false;
+ }
+ return true;
+}
+
+
+//
+// Legal values are:
+//
+// KDEBUG_ENABLE_TRACE (full set of tracepoints)
+// KDEBUG_ENABLE_PPT (subset of tracepoints to minimize performance impact)
+// 0 (Disable)
+//
+bool KDBG::set_enabled(uint32_t value)
+{
+ int mib[4];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_KDEBUG;
+ mib[2] = KERN_KDENABLE;
+ mib[3] = value;
+
+ if (sysctl(mib, 4, NULL, NULL, NULL, 0) < 0) {
+ DEBUG_ONLY(log_msg(ASL_LEVEL_WARNING, "trace facility failure, KERN_KDENABLE: %s\n", strerror(errno)));
+ return false;
+ }
+ return true;
+}
+
+std::vector<KDCPUMapEntry> KDBG::cpumap()
+{
+ std::vector<KDCPUMapEntry> cpumap;
+
+ /*
+ * To fit in the padding space of a VERSION1 file, the max possible
+ * cpumap size is one page.
+ */
+ if (kd_cpumap_header* cpumap_header = (kd_cpumap_header*)malloc(PAGE_SIZE)) {
+ int mib[3];
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_KDEBUG;
+ mib[2] = KERN_KDCPUMAP;
+
+ size_t temp = PAGE_SIZE;
+ if (sysctl(mib, 3, cpumap_header, &temp, NULL, 0) == 0) {
+ if (PAGE_SIZE >= temp) {
+ if (cpumap_header->version_no == RAW_VERSION1) {
+ cpumap.resize(cpumap_header->cpu_count);
+ memcpy(cpumap.data(), &cpumap_header[1], cpumap_header->cpu_count * sizeof(KDCPUMapEntry));
+ }
+ }
+ }
+ free(cpumap_header);
+ }
+
+ return cpumap;
+}
+
+bool KDBG::write_maps(int fd)
+{
+ int mib[4];
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_KDEBUG;
+ mib[2] = KERN_KDWRITEMAP;
+ mib[3] = fd;
+
+ if (sysctl(mib, 4, NULL, NULL, NULL, 0) < 0)
+ return false;
+
+ return true;
+}
+
+int KDBG::write_events(int fd)
+{
+ int mib[4];
+ size_t events_written = 0;
+
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_KDEBUG;
+ mib[2] = KERN_KDWRITETR;
+ mib[3] = fd;
+
+ if (sysctl(mib, 4, NULL, &events_written, NULL, 0) < 0)
+ return -1;
+
+ return (int)events_written;
+}
--- /dev/null
+//
+// KDBG
+// KDBG
+//
+// Created by James McIlree on 10/24/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+class KDBG {
+ private:
+
+ // KERN_KDGETBUF == fill out kd_bufinfo struct. Tolerates undersize input structs to fetch less.
+ // KERN_KDSETBUF == set nkdbufs (kernel variable)
+ // KERN_KDSETUP == dealloc old buffers, alloc new ones
+ // KERN_KDEFLAGS == "Enable Flags" (masked against KDBG_USERFLAGS)
+ // KERN_KDDFLAGS == "Disable Flags" (masked against KDBG_USERFLAGS)
+ // KERN_KDENABLE == Actually turn on/off tracing
+ // KERN_KDSETREG == Set some kind of filtering.
+
+
+ // KERN_KDREMOVE == Turn off tracing, delete all buffers, set bufsize to zero.
+ // Clears KDBG_CKTYPES, KDBG_NOWRAP, KDBG_RANGECHECK, KDBG_VALCHECK,
+ // KDBG_PIDCHECK, and KDBG_PIDEXCLUDE.
+ // Sets controlling_pid to -1.
+ // Disables and deallocates thread map.
+
+ public:
+
+ static KDState state();
+
+ //
+ // Does not appear that this call can fail.
+ //
+ // Clears/disables everything, resets to base state. (Calls KDREMOVE)
+ //
+ static bool reset();
+
+ //
+ // Does not actually allocate any buffer space; you must
+ // call initialize_buffers to do that.
+ //
+ static bool set_buffer_capacity(uint32_t capacity);
+
+ //
+ // It appears this flag can be set or cleared at any time, even if a
+ // trace is currently active.
+ //
+ // If nowrap is true, the buffer state will not set is_wrapped, even
+ // if the buffer overflows.
+ //
+ static bool set_nowrap(bool is_nowrap);
+
+ //
+ // If tracing is active, disable it.
+ // If buffers are allocated, free them.
+ // If a thread map is allocated, delete it.
+ //
+ // clears KDBG_WRAPPED
+ //
+ // Allocates new buffers of the size set in set_buffer_capacity()
+ // Sets KDBG_BUFINIT
+ //
+ static bool initialize_buffers();
+
+ //
+ // Legal values are:
+ //
+ // KDEBUG_ENABLE_TRACE (full set of tracepoints)
+ // KDEBUG_ENABLE_PPT (subset of tracepoints to minimize performance impact)
+ // 0 (Disable)
+ //
+ static bool set_enabled(uint32_t value);
+
+ //
+ // Reads all available threadmap data
+ //
+ // Fails if KDBG_MAPINIT is not set.
+ //
+ template <typename SIZE>
+ static std::vector<KDThreadMapEntry<SIZE>> threadmap(KDState& state);
+
+ //
+ // Reads the *current* threadmap data
+ //
+ // NOTE that this differs from "threadmap", which reads the threadmap
+ // data that was snapshotted when the trace buffers were initialized.
+ //
+ template <typename SIZE>
+ static std::vector<KDThreadMapEntry<SIZE>> current_threadmap();
+
+ //
+ // Reads the current cpumap.
+ //
+ // Fails if the buffers have not been initialized.
+ //
+ // The returned vector owns its storage; no explicit cleanup is required.
+ //
+ static std::vector<KDCPUMapEntry> cpumap();
+
+ //
+ // Writes the current cpumap to the given fd
+ //
+ // Fails if the buffers have not been initialized, or if the provided fd cannot be written to.
+ //
+ // Writes a VERSION 1+ threadmap (containing an embedded cpumap) to the fd, and then
+ // enough zero bytes to pad to a file block alignment
+ //
+ static bool write_maps(int fd);
+
+ //
+ // Blocks in the kernel until the trace buffers are 50% full.
+ // Then writes all events to the provided fd.
+ //
+ // Fails if the buffers are not initialized, tracing is not enabled, or the provided fd cannot be written to.
+ //
+ // Returns -1 on failure, otherwise the number of trace events written.
+ static int write_events(int fd);
+
+ //
+ // Reads all available trace data.
+ //
+ // Returns -1 on failure, otherwise the number of elements read.
+ //
+ template <typename SIZE>
+ static int read(KDEvent<SIZE>* buf, size_t buf_size_in_bytes);
+};
+
+template <typename SIZE>
+int KDBG::read(KDEvent<SIZE>* buf, size_t buf_size_in_bytes)
+{
+ ASSERT(buf, "Sanity");
+
+ int mib[3];
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_KDEBUG;
+ mib[2] = KERN_KDREADTR;
+
+ if (sysctl(mib, 3, buf, &buf_size_in_bytes, NULL, 0) < 0) {
+ DEBUG_ONLY(log_msg(ASL_LEVEL_WARNING, "trace facility failure, KERN_KDREADTR: %s\n", strerror(errno)));
+ return -1;
+ }
+
+ return (int)buf_size_in_bytes;
+}
+
+template <typename SIZE>
+std::vector<KDThreadMapEntry<SIZE>> KDBG::threadmap(KDState& state)
+{
+ std::vector<KDThreadMapEntry<SIZE>> maps(state.thread_map_capacity());
+ size_t size = state.thread_map_capacity() * sizeof(KDThreadMapEntry<SIZE>);
+
+ int mib[3];
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_KDEBUG;
+ mib[2] = KERN_KDTHRMAP;
+
+ if (sysctl(mib, 3, maps.data(), &size, NULL, 0) < 0) {
+ maps.clear();
+ }
+
+ return maps;
+}
+
+template <typename SIZE>
+std::vector<KDThreadMapEntry<SIZE>> KDBG::current_threadmap()
+{
+ std::vector<KDThreadMapEntry<SIZE>> maps(2048);
+
+resize:
+ size_t size_in = maps.size() * sizeof(KDThreadMapEntry<SIZE>);
+ size_t size_out = size_in;
+
+ int mib[3];
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_KDEBUG;
+ mib[2] = KERN_KDREADCURTHRMAP;
+
+ if (sysctl(mib, 3, maps.data(), &size_out, NULL, 0) < 0) {
+ // Grr, seems like this doesn't report a target size, we have to guess!
+ if (errno == EINVAL && size_out == size_in) {
+ maps.resize(maps.size() * 2);
+ goto resize;
+ }
+ maps.clear();
+ } else {
+ maps.resize(size_out / sizeof(KDThreadMapEntry<SIZE>));
+ }
+
+ return maps;
+}
--- /dev/null
+//
+// KDCPUMapEntry.hpp
+// KDBG
+//
+// Created by James McIlree on 4/18/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef kdprof_KDCPUMapEntry_hpp
+#define kdprof_KDCPUMapEntry_hpp
+
+class KDCPUMapEntry {
+ protected:
+ uint32_t _cpu_id;
+ uint32_t _flags;
+ char _name[8];
+
+ public:
+ KDCPUMapEntry() {} // Default constructor must do nothing, so vector resizes do no work!
+ KDCPUMapEntry(uint32_t cpu_id, uint32_t flags, const char* cpu_name) :
+ _cpu_id(cpu_id),
+ _flags(flags)
+ {
+ ASSERT(cpu_name, "Sanity");
+ ASSERT(strlen(cpu_name) < sizeof(_name), "Name too long");
+ strlcpy(_name, cpu_name, sizeof(_name));
+ }
+
+ uint32_t cpu_id() const { return _cpu_id; }
+ uint32_t flags() const { return _flags; }
+ const char* name() const { return _name; }
+
+ bool is_iop() const { return _flags & KDBG_CPUMAP_IS_IOP; }
+};
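+
+//
+// Example (sketch): counting AP cores in a cpumap snapshot, mirroring
+// the is_iop() checks done elsewhere in this project.
+//
+// uint32_t ap_count = 0;
+// for (const KDCPUMapEntry& entry : KDBG::cpumap()) {
+// if (!entry.is_iop())
+// ap_count++;
+// }
+//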
+
+#endif
--- /dev/null
+//
+// KDEvent.hpp
+// KDBG
+//
+// Created by James McIlree on 10/25/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+template <typename SIZE> class KDEventFields {};
+
+template <>
+class KDEventFields<Kernel32> {
+ private:
+ static const uint64_t K32_TIMESTAMP_MASK = 0x00ffffffffffffffULL;
+ static const uint64_t K32_CPU_MASK = 0xff00000000000000ULL;
+ static const uint32_t K32_CPU_SHIFT = 56;
+
+ public:
+ uint64_t _timestamp;
+ uint32_t _arg1;
+ uint32_t _arg2;
+ uint32_t _arg3;
+ uint32_t _arg4;
+ uint32_t _thread;
+ uint32_t _debugid;
+
+ int cpu() const { return (int) ((_timestamp & K32_CPU_MASK) >> K32_CPU_SHIFT); }
+ uint64_t timestamp() const { return _timestamp & K32_TIMESTAMP_MASK; }
+
+ uint64_t unused() const { THROW("Calling method for field that does not exist"); }
+};
+
+template <>
+class KDEventFields<Kernel64> {
+ public:
+ uint64_t _timestamp;
+ uint64_t _arg1;
+ uint64_t _arg2;
+ uint64_t _arg3;
+ uint64_t _arg4;
+ uint64_t _thread;
+ uint32_t _debugid;
+ uint32_t _cpuid;
+ uint64_t _unused; // Defined as uintptr_t in the original header
+
+ int cpu() const { return (int)_cpuid; }
+ uint64_t timestamp() const { return _timestamp; }
+
+ uint64_t unused() const { return _unused; }
+};
+
+/* The debug code consists of the following fields:
+ *
+ * ----------------------------------------------------------------------
+ * |            |               |                               | Func  |
+ * | Class (8)  | SubClass (8)  |           Code (14)           |Qual(2)|
+ * ----------------------------------------------------------------------
+ *
+ * The class specifies the highest-level grouping; the subclass narrows it,
+ * the code identifies the individual tracepoint, and the function qualifier
+ * marks the event as DBG_FUNC_START, DBG_FUNC_END, or DBG_FUNC_NONE.
+ */
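+
+/* A worked example with an illustrative value: debugid 0x010c007a
+ * decomposes as
+ *
+ * class    = (0x010c007a & 0xFF000000) >> 24 = 0x01
+ * subclass = (0x010c007a & 0x00FF0000) >> 16 = 0x0c
+ * code     = (0x010c007a & 0x0000FFFC) >> 2  = 0x1e
+ * qual     =  0x010c007a & 0x00000003        = 0x2 (DBG_FUNC_END)
+ */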
+
+template <typename SIZE>
+class KDEvent {
+ private:
+ KDEventFields<SIZE> _fields;
+
+ static const uint32_t DBG_CLASS_MASK = 0xFF000000;
+ static const uint32_t DBG_CLASS_MASK_SHIFT = 24;
+ static const uint32_t DBG_SUBCLASS_MASK = 0x00FF0000;
+ static const uint32_t DBG_SUBCLASS_MASK_SHIFT = 16;
+ static const uint32_t DBG_CODE_MASK = 0x0000FFFC;
+ static const uint32_t DBG_CODE_MASK_SHIFT = 2;
+ static const uint32_t DBG_FUNC_MASK = DBG_FUNC_START | DBG_FUNC_END;
+
+ public:
+ //
+ // Provided only for lower_bounds/upper_bounds binary searches of event buffers
+ //
+ KDEvent() {}
+ KDEvent(AbsTime timestamp) { _fields._timestamp = timestamp.value(); }
+
+ // Sort by time operator for lower_bounds/upper_bounds
+ bool operator<(const KDEvent& rhs) const { return this->timestamp() < rhs.timestamp(); }
+
+ AbsTime timestamp() const { return AbsTime(_fields.timestamp()); }
+ typename SIZE::ptr_t tid() const { return _fields._thread; }
+ int cpu() const { return _fields.cpu(); }
+
+ uint32_t dbg_class() const { return (_fields._debugid & DBG_CLASS_MASK) >> DBG_CLASS_MASK_SHIFT; }
+ uint32_t dbg_subclass() const { return (_fields._debugid & DBG_SUBCLASS_MASK) >> DBG_SUBCLASS_MASK_SHIFT; }
+ uint32_t dbg_code() const { return (_fields._debugid & DBG_CODE_MASK) >> DBG_CODE_MASK_SHIFT; }
+ uint32_t dbg_cooked() const { return _fields._debugid & ~DBG_FUNC_MASK; }
+ uint32_t dbg_raw() const { return _fields._debugid; }
+
+ typename SIZE::ptr_t arg1() const { return _fields._arg1; }
+ typename SIZE::ptr_t arg2() const { return _fields._arg2; }
+ typename SIZE::ptr_t arg3() const { return _fields._arg3; }
+ typename SIZE::ptr_t arg4() const { return _fields._arg4; }
+
+ uint8_t* arg1_as_pointer() const { return (uint8_t*)&_fields._arg1; }
+ std::string arg1_as_string() const;
+ std::string all_args_as_string() const;
+
+ bool is_func_start() const { return (_fields._debugid & DBG_FUNC_MASK) == DBG_FUNC_START; }
+ bool is_func_end() const { return (_fields._debugid & DBG_FUNC_MASK) == DBG_FUNC_END; }
+ bool is_func_none() const { return (_fields._debugid & DBG_FUNC_MASK) == DBG_FUNC_NONE; }
+
+ uint64_t unused() const { return _fields.unused(); }
+
+ bool is_valid() {
+ // Must have a code set to be valid; no code is 0x00.
+ if (dbg_code() == 0)
+ return false;
+
+ // Legal values are NONE, START, and END.
+ if ((_fields._debugid & DBG_FUNC_MASK) == DBG_FUNC_MASK)
+ return false;
+
+ return true;
+ }
+
+ std::string to_string() const;
+};
+
+template <typename SIZE>
+std::string KDEvent<SIZE>::arg1_as_string() const {
+ // We can't count on the arg being NULL terminated, we have to copy.
+ // Using a uint32_t/uint64_t instead of a char[] guarantees alignment.
+ decltype(_fields._arg1) buf[2];
+
+ buf[0] = _fields._arg1;
+ buf[1] = 0;
+
+ return std::string(reinterpret_cast<char*>(buf));
+}
+
+template <typename SIZE>
+std::string KDEvent<SIZE>::all_args_as_string() const {
+ // We can't count on the arg being NULL terminated, we have to copy.
+ // Using a uint32_t/uint64_t instead of a char[] guarantees alignment.
+ decltype(_fields._arg1) buf[5];
+
+ buf[0] = _fields._arg1;
+ buf[1] = _fields._arg2;
+ buf[2] = _fields._arg3;
+ buf[3] = _fields._arg4;
+ buf[4] = 0;
+
+ return std::string(reinterpret_cast<char*>(buf));
+}
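+
+//
+// Example (sketch): filtering decoded events, assuming a buffer and
+// count previously filled by KDBG::read. DBG_MACH is the kdebug class
+// constant from <sys/kdebug.h>; the choice of filter is illustrative.
+//
+// for (int i = 0; i < count; ++i) {
+// const KDEvent<Kernel64>& e = events[i];
+// if (e.is_valid() && e.is_func_start() && e.dbg_class() == DBG_MACH) {
+// printf("%llx %s\n", e.timestamp().value(), e.to_string().c_str());
+// }
+// }
+//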
+
--- /dev/null
+//
+// KDState.hpp
+// KDBG
+//
+// Created by James McIlree on 8/10/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+class KDState {
+ protected:
+ kbufinfo_t _state;
+
+ public:
+ uint32_t flags() { return _state.flags; }
+ int capacity() { return _state.nkdbufs; }
+ int thread_map_capacity() { return _state.nkdthreads; }
+
+ bool is_enabled() { return !_state.nolog; }
+ bool is_initialized() { return flags() & KDBG_BUFINIT; }
+ bool is_thread_map_initialized() { return flags() & KDBG_MAPINIT; }
+ bool is_nowrap() { return flags() & KDBG_NOWRAP; }
+ bool is_freerun() { return flags() & KDBG_FREERUN; }
+ bool is_wrapped() { return flags() & KDBG_WRAPPED; }
+
+ bool is_lp64() { return (flags() & KDBG_LP64) > 0; }
+
+ bool is_range_collection_enabled() { return (flags() & KDBG_RANGECHECK) > 0; }
+ bool is_specific_value_collection_enabled() { return (flags() & KDBG_VALCHECK) > 0; }
+ bool is_filter_collection_enabled() { return (flags() & KDBG_TYPEFILTER_CHECK) > 0; }
+ bool is_inclusive_pid_collection_enabled() { return (flags() & KDBG_PIDCHECK) > 0; }
+ bool is_exclusive_pid_collection_enabled() { return (flags() & KDBG_PIDEXCLUDE) > 0; }
+
+ pid_t controlling_pid() { return _state.bufid; }
+
+ void print() {
+ printf("KDebug State\n");
+ printf("\tBuffer is %s\n", this->is_initialized() ? "initialized" : "not initialized");
+ printf("\tCapacity is %d\n", this->capacity());
+ printf("\tRaw flags 0x%08x\n", this->flags());
+ printf("\tLogging is %s\n", this->is_enabled() ? "enabled" : "disabled");
+ printf("\tWrapping is %s\n", this->is_nowrap() ? "disabled" : "enabled");
+ printf("\tBuffer %s wrapped\n", this->is_wrapped() ? "has" : "has not");
+
+ // Two bits, 4 possible states:
+ //
+ // INC EXC
+ // 1 0 ALL_MARKED_PIDS
+ // 0 1 ALL_UNMARKED_PIDS
+ // 0 0 ALL_PIDS
+ // 1 1 ERROR
+
+ const char* style;
+ switch (flags() & (KDBG_PIDEXCLUDE | KDBG_PIDCHECK)) {
+ case 0:
+ style = "all-pids";
+ break;
+ case KDBG_PIDCHECK:
+ style = "includes-marked-pids";
+ break;
+ case KDBG_PIDEXCLUDE:
+ style = "excludes-marked-pids";
+ break;
+ default:
+ style = "ERROR";
+ break;
+ }
+ printf("\tCollection style is %s\n", style);
+ printf("\tCollection by range is %s\n", this->is_range_collection_enabled() ? "enabled" : "disabled");
+ printf("\tCollection by value is %s\n", this->is_specific_value_collection_enabled() ? "enabled" : "disabled");
+ printf("\tCollection by filter is %s\n", this->is_filter_collection_enabled() ? "enabled" : "disabled");
+ printf("\tThread map is %s ", this->is_thread_map_initialized() ? "initialized\n" : "not initialized\n");
+ printf("\tThread map entries %d\n", this->thread_map_capacity());
+ if (this->controlling_pid() == -1)
+ printf("\tNo controlling pid\n");
+ else
+ printf("\tControlled by pid %d\n", this->controlling_pid());
+ }
+};
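+
+//
+// Example (sketch): a typical pre-read sanity check using the
+// accessors above.
+//
+// KDState state = KDBG::state();
+// if (!state.is_initialized() || !state.is_enabled())
+// return;
+// if (state.is_wrapped())
+// log_msg(ASL_LEVEL_WARNING, "trace buffer wrapped, oldest events were dropped\n");
+//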
+
--- /dev/null
+//
+// KDThreadMapEntry.hpp
+// KDBG
+//
+// Created by James McIlree on 10/25/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef kdprof_KDThreadMapEntry_hpp
+#define kdprof_KDThreadMapEntry_hpp
+
+//
+// This is the kd_threadmap from the kernel
+//
+// There is one interesting conflict I have noticed so far.
+//
+// The _pid field is set to 1 for kernel threads that have no user space
+// representation. However, 1 is a valid pid, and in fact, used by launchd.
+//
+// A full disambiguation of entries *must* include the tid, pid, AND name:
+//
+// 000000000000011f 00000001 launchd
+// 000000000000014f 00000001 launchd
+// 0000000000000150 00000001 launchd
+//
+// 0000000000000110 00000001 kernel_task
+// 0000000000000120 00000001 kernel_task
+// 0000000000000133 00000001 kernel_task
+//
+template <typename SIZE>
+class KDThreadMapEntry {
+ protected:
+ typename SIZE::ptr_t _tid;
+ int32_t _pid;
+ char _name[20]; // This is process name, not thread name!
+
+ public:
+ typename SIZE::ptr_t tid() const { return _tid; }
+ int32_t pid() const { return _pid; }
+ const char* name() const { return _name; }
+};
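+
+//
+// A sketch of the disambiguation described above (the helper name is
+// hypothetical): a pid 1 entry is only launchd if its name says so,
+// otherwise it is a kernel thread reported with pid 1.
+//
+// template <typename SIZE>
+// bool is_disguised_kernel_thread(const KDThreadMapEntry<SIZE>& entry) {
+// return entry.pid() == 1 && strcmp(entry.name(), "kernel_task") == 0;
+// }
+//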
+
+#endif
--- /dev/null
+//
+// KDebug.h
+// KDBG
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef KDebug_KDebug_h
+#define KDebug_KDebug_h
+
+#include <sys/sysctl.h>
+#include <sys/buf.h>
+#include <mach/task_policy.h>
+
+#ifndef KERNEL_PRIVATE
+ #define KERNEL_PRIVATE
+ #include <sys/kdebug.h>
+ #undef KERNEL_PRIVATE
+#else
+ #error Something is really strange...
+#endif /*KERNEL_PRIVATE*/
+
+#include <vector>
+#include <unordered_map>
+#include <unordered_set>
+#include <algorithm>
+
+#include <libkern/OSAtomic.h>
+
+#include <CPPUtil/CPPUtil.h>
+
+using namespace util;
+
+#include "MetaTypes.hpp"
+#include "TaskRequestedPolicy.hpp"
+#include "TaskEffectivePolicy.hpp"
+#include "KDState.hpp"
+#include "KDThreadMapEntry.hpp"
+#include "KDCPUMapEntry.hpp"
+#include "KDEvent.hpp"
+#include "KDBG.hpp"
+#include "Kernel.hpp"
+#include "TraceCodes.hpp"
+#include "MachineVoucher.hpp"
+#include "VoucherInterval.hpp"
+#include "MachineThread.hpp"
+#include "IOActivity.hpp"
+#include "CPUActivity.hpp"
+#include "ThreadSummary.hpp"
+#include "ProcessSummary.hpp"
+#include "MachineMachMsg.hpp"
+#include "NurseryMachMsg.hpp"
+#include "CPUSummary.hpp"
+#include "MachineCPU.hpp"
+#include "MachineProcess.hpp"
+#include "TraceDataHeader.hpp"
+#include "TraceFile.hpp"
+#include "Machine.hpp"
+#include "Machine.impl.hpp"
+#include "Machine.mutable-impl.hpp"
+#include "MachineProcess.impl.hpp"
+#include "MachineProcess.mutable-impl.hpp"
+#include "MachineThread.impl.hpp"
+#include "MachineThread.mutable-impl.hpp"
+#include "MachineCPU.impl.hpp"
+#include "MachineCPU.mutable-impl.hpp"
+
+#endif
--- /dev/null
+//
+// Kernel.cpp
+// KDBG
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#include "KDebug.h"
+
+using namespace util;
+
+bool Kernel::is_64_bit()
+{
+ int mib[4];
+ size_t len;
+ struct kinfo_proc kp;
+
+ /* Now determine if the kernel is running in 64-bit mode */
+ mib[0] = CTL_KERN;
+ mib[1] = KERN_PROC;
+ mib[2] = KERN_PROC_PID;
+ mib[3] = 0; /* kernproc, pid 0 */
+ len = sizeof(kp);
+ if (sysctl(mib, sizeof(mib)/sizeof(mib[0]), &kp, &len, NULL, 0) == -1) {
+ THROW("sysctl to get kernel size failed");
+ }
+
+ if (kp.kp_proc.p_flag & P_LP64)
+ return true;
+
+ return false;
+}
+
+uint32_t Kernel::active_cpu_count()
+{
+ int mib[2];
+ size_t len;
+ int num_cpus;
+
+ /* Grab the number of cpus */
+ mib[0] = CTL_HW;
+ mib[1] = HW_NCPU;
+ len = sizeof(num_cpus);
+
+ if (sysctl(mib, 2, &num_cpus, &len, NULL, 0) == -1) {
+ THROW("sysctl to get cpu count failed");
+ }
+
+ return num_cpus;
+}
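+
+//
+// Example (sketch): using the kernel size probe to pick a template
+// instantiation; capture<SIZE>() is an assumed caller-provided routine.
+//
+// if (Kernel::is_64_bit())
+// capture<Kernel64>();
+// else
+// capture<Kernel32>();
+//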
--- /dev/null
+//
+// Kernel.hpp
+// KDBG
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef __kdprof__Kernel__
+#define __kdprof__Kernel__
+
+enum class KernelSize { k32, k64 };
+
+class Kernel {
+ public:
+ static bool is_64_bit();
+ static uint32_t active_cpu_count();
+};
+
+#endif /* defined(__kdprof__Kernel__) */
--- /dev/null
+//
+// Machine.hpp
+// KDBG
+//
+// Created by James McIlree on 10/25/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+enum class kMachineFlag : std::uint32_t {
+ LostEvents = 0x00000001
+};
+
+template <typename SIZE>
+class Machine {
+ protected:
+ std::vector<MachineCPU<SIZE>> _cpus;
+
+ std::unordered_multimap<pid_t, MachineProcess<SIZE>> _processes_by_pid;
+ std::unordered_multimap<const char*, MachineProcess<SIZE>*, ConstCharHash, ConstCharEqualTo> _processes_by_name;
+ std::vector<MachineProcess<SIZE>*> _processes_by_time;
+
+ std::unordered_multimap<typename SIZE::ptr_t, MachineThread<SIZE> > _threads_by_tid;
+ std::vector<MachineThread<SIZE>*> _threads_by_time;
+
+ std::vector<MachineMachMsg<SIZE>> _mach_msgs;
+ std::unordered_map<uintptr_t, uintptr_t> _mach_msgs_by_event_index;
+ std::unordered_map<typename SIZE::ptr_t, NurseryMachMsg<SIZE>> _mach_msg_nursery;
+
+ //
+ // Vouchers are a bit special. We install pointers to vouchers in
+ // MachineThreads and MachineMachMsg. This means that vouchers cannot
+ // be moved once allocated. We could do two passes to size the
+ // data structures exactly; this should be investigated in the future.
+ //
+ // On create or first observed use, a voucher goes into the nursery.
+ // It stays there until a destroy event, or the end of Machine events.
+ // Once flushed from the nursery, we have a map of addresses, which
+ // points to a vector sorted by time. This allows addr @ time lookups
+ // later.
+ //
+ std::unordered_map<typename SIZE::ptr_t, std::unique_ptr<MachineVoucher<SIZE>>> _voucher_nursery;
+ std::unordered_map<typename SIZE::ptr_t, std::vector<std::unique_ptr<MachineVoucher<SIZE>>>> _vouchers_by_addr;
+
+ std::unordered_map<typename SIZE::ptr_t, IOActivity<SIZE>> _io_by_uid; // uid == unique id, not user id
+ std::vector<IOActivity<SIZE>> _all_io;
+ std::vector<AbsInterval> _all_io_active_intervals;
+
+ MachineProcess<SIZE>* _kernel_task;
+ const KDEvent<SIZE>* _events;
+ uintptr_t _event_count;
+ uint32_t _flags;
+ int32_t _unknown_process_pid; // We need unique negative pid's for previously unknown TID's
+
+ //
+ // Protected initialization code
+ //
+ void raw_initialize(const KDCPUMapEntry* cpumaps,
+ uint32_t cpumap_count,
+ const KDThreadMapEntry<SIZE>* threadmaps,
+ uint32_t threadmap_count,
+ const KDEvent<SIZE>* events,
+ uintptr_t event_count);
+
+ void post_initialize();
+
+ //
+ // Mutable API, for use during construction
+ //
+
+ pid_t next_unknown_pid() { return --_unknown_process_pid; }
+
+ MachineProcess<SIZE>* create_process(pid_t pid, const char* name, AbsTime create_timestamp, kMachineProcessFlag flags);
+ MachineThread<SIZE>* create_thread(MachineProcess<SIZE>* process, typename SIZE::ptr_t tid, MachineVoucher<SIZE>* voucher, AbsTime create_timestamp, kMachineThreadFlag flags);
+ MachineVoucher<SIZE>* create_voucher(typename SIZE::ptr_t address, AbsTime create_timestamp, kMachineVoucherFlag flags, uint32_t content_bytes_capacity);
+
+ void destroy_voucher(typename SIZE::ptr_t address, AbsTime timestamp);
+
+ void set_flags(kMachineFlag flag) { _flags |= (uint32_t)flag; }
+
+ void set_process_name(MachineProcess<SIZE>* process, const char* name);
+
+ MachineProcess<SIZE>* mutable_process(pid_t pid, AbsTime time) { return const_cast<MachineProcess<SIZE>*>(process(pid, time)); }
+ MachineThread<SIZE>* mutable_thread(typename SIZE::ptr_t tid, AbsTime time) { return const_cast<MachineThread<SIZE>*>(thread(tid, time)); }
+
+ MachineProcess<SIZE>* youngest_mutable_process(pid_t pid);
+ MachineThread<SIZE>* youngest_mutable_thread(typename SIZE::ptr_t tid);
+
+ MachineVoucher<SIZE>* process_event_voucher_lookup(typename SIZE::ptr_t address, uint32_t msgh_bits);
+ MachineThread<SIZE>* process_event_tid_lookup(typename SIZE::ptr_t tid, AbsTime now);
+
+ MachineVoucher<SIZE>* thread_forwarding_voucher_lookup(const MachineVoucher<SIZE>* original_thread_voucher);
+
+ void begin_io(MachineThread<SIZE>* thread, AbsTime time, typename SIZE::ptr_t uid, typename SIZE::ptr_t size);
+ void end_io(AbsTime time, typename SIZE::ptr_t uid);
+
+ bool process_event(const KDEvent<SIZE>& event);
+ void process_trequested_task(pid_t pid, typename SIZE::ptr_t trequested_0, typename SIZE::ptr_t trequested_1);
+ void process_trequested_thread(typename SIZE::ptr_t tid, typename SIZE::ptr_t trequested_0, typename SIZE::ptr_t trequested_1);
+
+ void initialize_cpu_idle_intr_states();
+
+ public:
+ static MachineVoucher<SIZE> UnsetVoucher;
+ static MachineVoucher<SIZE> NullVoucher;
+
+ Machine(KDCPUMapEntry* cpumaps, uint32_t cpumap_count, KDThreadMapEntry<SIZE>* threadmaps, uint32_t threadmap_count, KDEvent<SIZE>* events, uintptr_t event_count);
+ // Destructive, mutates parent!
+ Machine(Machine<SIZE>& parent, KDEvent<SIZE>* events, uintptr_t event_count);
+ Machine(const TraceFile& file);
+
+ bool lost_events() const { return (_flags & (uint32_t)kMachineFlag::LostEvents) > 0; }
+
+ const MachineProcess<SIZE>* process(pid_t pid, AbsTime time) const;
+ const MachineThread<SIZE>* thread(typename SIZE::ptr_t tid, AbsTime time) const;
+ const MachineVoucher<SIZE>* voucher(typename SIZE::ptr_t address, AbsTime time) const;
+ const MachineMachMsg<SIZE>* mach_msg(uintptr_t event_index) const;
+
+ const std::vector<const MachineProcess<SIZE>*>& processes() const;
+ const std::vector<const MachineThread<SIZE>*>& threads() const;
+ const std::vector<const MachineCPU<SIZE>>& cpus() const;
+
+ const KDEvent<SIZE>* events() const { return _events; }
+ uintptr_t event_count() const { return _event_count; }
+
+ AbsInterval timespan() const;
+
+ // Returns the number of cpus that have timeline data.
+ // (IOW, typically the number of AP(s) on a machine, but might be less if you've disabled some so they generate no trace data)
+ uint32_t active_cpus() const;
+
+ // If summary_cpu == NULL , all cpus are matched.
+ CPUSummary<SIZE> summary_for_timespan(AbsInterval timespan, const MachineCPU<SIZE>* summary_cpu) const;
+
+ // This attempts to analyze various pieces of data and guess
+ // if the Machine represents an ios device or not.
+ bool is_ios() const;
+
+ DEBUG_ONLY(void validate() const;)
+};
+
+template <typename SIZE> MachineVoucher<SIZE> Machine<SIZE>::UnsetVoucher(SIZE::PTRMAX, AbsInterval(AbsTime(0),AbsTime(UINT64_MAX)), kMachineVoucherFlag::IsUnsetVoucher, 0);
+template <typename SIZE> MachineVoucher<SIZE> Machine<SIZE>::NullVoucher(0, AbsInterval(AbsTime(0),AbsTime(UINT64_MAX)), kMachineVoucherFlag::IsNullVoucher, 0);
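+
+//
+// Example (sketch): constructing a Machine from live trace data. The
+// event buffer size is illustrative, a 64-bit kernel is assumed, and
+// current_threadmap() is used for brevity where a capture tool would
+// normally use the threadmap snapshotted at buffer initialization.
+//
+// auto cpus = KDBG::cpumap();
+// auto threads = KDBG::current_threadmap<Kernel64>();
+// std::vector<KDEvent<Kernel64>> events(500000);
+// int count = KDBG::read(events.data(), events.size() * sizeof(KDEvent<Kernel64>));
+// if (count > 0) {
+// Machine<Kernel64> machine(cpus.data(), (uint32_t)cpus.size(),
+// threads.data(), (uint32_t)threads.size(),
+// events.data(), (uintptr_t)count);
+// for (auto process : machine.processes())
+// printf("%s\n", process->name());
+// }
+//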
+
--- /dev/null
+//
+// Machine.impl.hpp
+// KDBG
+//
+// Created by James McIlree on 10/30/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#include "KDebug.h"
+
+template <typename SIZE>
+bool process_by_time_sort(const MachineProcess<SIZE>* left, const MachineProcess<SIZE>* right) {
+ return left->timespan().location() < right->timespan().location();
+}
+
+template <typename SIZE>
+bool thread_by_time_sort(const MachineThread<SIZE>* left, const MachineThread<SIZE>* right) {
+ return left->timespan().location() < right->timespan().location();
+}
+
+template <typename SIZE>
+void Machine<SIZE>::post_initialize() {
+ //
+ // Post initialization. Sort various by time vectors, etc.
+ //
+
+ std::sort(_processes_by_time.begin(), _processes_by_time.end(), process_by_time_sort<SIZE>);
+ std::sort(_threads_by_time.begin(), _threads_by_time.end(), thread_by_time_sort<SIZE>);
+
+ AbsTime last_machine_timestamp = _events[_event_count-1].timestamp();
+ // naked auto okay here, process is a ptr.
+ for (auto process : _processes_by_time) {
+ process->post_initialize(last_machine_timestamp);
+ }
+
+ //
+ // Collapse the idle/intr/run queues into a single timeline
+ //
+ for (auto& cpu : _cpus) {
+ cpu.post_initialize(timespan());
+ }
+
+ //
+ // Flush any outstanding blocked events
+ //
+ for (auto& thread : _threads_by_tid) {
+ thread.second.post_initialize(last_machine_timestamp);
+ }
+
+ //
+ // Sort the IOActivity events, and build a flattened vector that can be used to find IO ranges for intersecting during interval searches
+ //
+ _io_by_uid.clear();
+ std::sort(_all_io.begin(), _all_io.end());
+
+ // We cannot use trange_vector_union to flatten _all_io, as _all_io isn't a plain TRange<AbsTime> type, and we want to yield that type.
+ // cut & paste to the rescue! :-)
+ if (!_all_io.empty()) {
+ auto input_it = _all_io.begin();
+ _all_io_active_intervals.push_back(*input_it);
+ while (++input_it < _all_io.end()) {
+ TRange<AbsTime> union_range = _all_io_active_intervals.back();
+
+ if (union_range.intersects(*input_it)) {
+ _all_io_active_intervals.pop_back();
+ _all_io_active_intervals.push_back(union_range.union_range(*input_it));
+ } else {
+ ASSERT(union_range < *input_it, "Out of order merging");
+ _all_io_active_intervals.push_back(*input_it);
+ }
+ }
+ }
+
+ //
+ // Flush any outstanding MachMsg(s) in the nursery (state SEND)
+ //
+ // NOTE! We do not clear _mach_msg_nursery because its state is
+ // forwarded to future Machine(s).
+ //
+ for (auto& nursery_it : _mach_msg_nursery) {
+ auto& nursery_msg = nursery_it.second;
+ if (nursery_msg.state() == kNurseryMachMsgState::Send) {
+ auto mach_msg_it = _mach_msgs.emplace(_mach_msgs.end(),
+ nursery_msg.id(),
+ nursery_msg.kmsg_addr(),
+ kMachineMachMsgFlag::HasSender,
+ nursery_msg.send_time(),
+ nursery_msg.send_tid(),
+ nursery_msg.send_msgh_bits(),
+ nursery_msg.send_voucher(),
+ AbsTime(0),
+ 0,
+ 0,
+ &Machine<SIZE>::UnsetVoucher);
+ _mach_msgs_by_event_index[nursery_msg.send_event_index()] = std::distance(_mach_msgs.begin(), mach_msg_it);
+ }
+ }
+
+ //
+ // Flush any outstanding Voucher(s) in the nursery
+ //
+ for (auto& nursery_it : _voucher_nursery) {
+
+ //
+ // First we need to "close" the open end of the live voucher's
+ // timespan.
+ //
+ auto voucher = nursery_it.second.get();
+ voucher->set_timespan_to_end_of_time();
+
+ auto address = nursery_it.first;
+
+ // First find the "row" for this address.
+ auto by_addr_it = _vouchers_by_addr.find(address);
+ if (by_addr_it == _vouchers_by_addr.end()) {
+ // No address entry case
+ std::vector<std::unique_ptr<MachineVoucher<SIZE>>> row;
+ row.emplace_back(std::move(nursery_it.second));
+ _vouchers_by_addr.emplace(address, std::move(row));
+ } else {
+ auto& row = by_addr_it->second;
+
+ // Make sure these are sorted and non-overlapping
+ ASSERT(row.back()->timespan() < voucher->timespan(), "Sanity");
+ ASSERT(!row.back()->timespan().intersects(voucher->timespan()), "Sanity");
+
+ row.emplace_back(std::move(nursery_it.second));
+ }
+ }
+
+ _voucher_nursery.clear();
+
+ DEBUG_ONLY(validate());
+}
+
+template <typename SIZE>
+void Machine<SIZE>::raw_initialize(const KDCPUMapEntry* cpumaps,
+ uint32_t cpumap_count,
+ const KDThreadMapEntry<SIZE>* threadmaps,
+ uint32_t threadmap_count,
+ const KDEvent<SIZE>* events,
+ uintptr_t event_count)
+{
+ ASSERT(cpumaps || cpumap_count == 0, "Sanity");
+ ASSERT(threadmaps || threadmap_count == 0, "Sanity");
+ ASSERT(events || event_count == 0, "Sanity");
+
+ for (uint32_t i = 0; i < cpumap_count; ++i) {
+ _cpus.emplace_back(i, cpumaps[i].is_iop(), cpumaps[i].name());
+ }
+
+ // We cannot create processes / threads unless we have at least one event to give us a timestamp.
+ if (event_count) {
+ AbsTime now = events[0].timestamp();
+
+ _kernel_task = create_process(0, "kernel_task", now, kMachineProcessFlag::IsKernelProcess);
+
+ // Initial thread state, nothing in the nursery yet
+ for (uint32_t index = 0; index < threadmap_count; ++index) {
+ auto& threadmap = threadmaps[index];
+
+ pid_t pid = threadmap.pid();
+
+ // The kernel threadmap often has empty entries at the tail; stop at the first one.
+ if (pid == 0)
+ break;
+
+ if (pid == 1 && strncmp(threadmap.name(), "kernel_task", 12) == 0) {
+ pid = 0;
+ }
+
+ MachineProcess<SIZE>* process = youngest_mutable_process(pid);
+ if (!process) {
+ process = create_process(pid, threadmap.name(), now, kMachineProcessFlag::CreatedByThreadMap);
+ ASSERT(process, "Sanity");
+ }
+ process->add_thread(create_thread(process, threadmap.tid(), &UnsetVoucher, now, kMachineThreadFlag::CreatedByThreadMap));
+ }
+ }
+
+ // We need to know what the idle/INTR states of the CPUs are.
+ initialize_cpu_idle_intr_states();
+
+ for (uintptr_t index = 0; index < event_count; ++index) {
+ if (!process_event(events[index]))
+ break;
+ }
+
+ post_initialize();
+}
+
+template <typename SIZE>
+Machine<SIZE>::Machine(KDCPUMapEntry* cpumaps, uint32_t cpumap_count, KDThreadMapEntry<SIZE>* threadmaps, uint32_t threadmap_count, KDEvent<SIZE>* events, uintptr_t event_count) :
+ _kernel_task(nullptr),
+ _events(events),
+ _event_count(event_count),
+ _flags(0),
+ _unknown_process_pid(-1)
+{
+ raw_initialize(cpumaps,
+ cpumap_count,
+ threadmaps,
+ threadmap_count,
+ events,
+ event_count);
+}
+
+template <typename SIZE>
+Machine<SIZE>::Machine(const TraceFile& file) :
+ _kernel_task(nullptr),
+ _events(file.events<SIZE>()),
+ _event_count(file.event_count()),
+ _flags(0),
+ _unknown_process_pid(-1)
+{
+ raw_initialize(file.cpumap(),
+ file.cpumap_count(),
+ file.threadmap<SIZE>(),
+ file.threadmap_count(),
+ file.events<SIZE>(),
+ file.event_count());
+}
+
+template <typename SIZE>
+Machine<SIZE>::Machine(Machine<SIZE>& parent, KDEvent<SIZE>* events, uintptr_t event_count) :
+ _kernel_task(nullptr),
+ _events(events),
+ _event_count(event_count),
+ _flags(0),
+ _unknown_process_pid(-1)
+{
+ ASSERT(events || event_count == 0, "Sanity");
+
+ const std::vector<const MachineThread<SIZE>*>& parent_threads = parent.threads();
+ const std::vector<const MachineCPU<SIZE>>& parent_cpus = parent.cpus();
+
+ for (const MachineCPU<SIZE>& parent_cpu : parent_cpus) {
+ _cpus.emplace_back(parent_cpu.id(), parent_cpu.is_iop(), parent_cpu.name());
+ }
+
+ // We cannot create processes / threads unless we have at least one event to give us a timestamp.
+ if (event_count) {
+ AbsTime now = events[0].timestamp();
+
+ //
+ // Forward any live vouchers. This must be done before forwarding threads
+ // or MachMsgs from their nurseries, as they hold references to the
+ // vouchers.
+ //
+ for (auto& parent_vouchers_by_addr_it : parent._vouchers_by_addr) {
+ std::unique_ptr<MachineVoucher<SIZE>>& voucher = parent_vouchers_by_addr_it.second.back();
+ if (voucher->is_live()) {
+ // When we flushed these vouchers in the previous machine state,
+ // we set their timespans to infinite. We need to reset them in
+ // case a close event arrives.
+ voucher->set_timespan_to_zero_length();
+ _voucher_nursery.emplace(voucher->address(), std::move(voucher));
+ }
+ }
+
+ _kernel_task = create_process(0, "kernel_task", now, kMachineProcessFlag::IsKernelProcess);
+
+ for (const MachineThread<SIZE>* parent_thread : parent_threads) {
+ if (!parent_thread->is_trace_terminated()) {
+ const MachineProcess<SIZE>& parent_process = parent_thread->process();
+ MachineProcess<SIZE>* new_process = youngest_mutable_process(parent_process.pid());
+ if (!new_process) {
+ kMachineProcessFlag new_process_flags = (kMachineProcessFlag)(parent_process.flags() | (uint32_t)kMachineProcessFlag::CreatedByPreviousMachineState);
+ new_process = create_process(parent_process.pid(), parent_process.name(), now, new_process_flags);
+ ASSERT(new_process, "Sanity");
+ }
+ new_process->add_thread(create_thread(new_process,
+ parent_thread->tid(),
+ thread_forwarding_voucher_lookup(parent_thread->last_voucher()),
+ now,
+ (kMachineThreadFlag)(parent_thread->flags() | (uint32_t)kMachineThreadFlag::CreatedByPreviousMachineState)));
+ }
+ }
+
+ // We need to know what the idle/INTR states of the CPUs are.
+ //
+ // Start by looking at the existing states.
+ uint32_t init_count = 0;
+ uint32_t ap_count = 0;
+ for (const MachineCPU<SIZE>& parent_cpu : parent_cpus) {
+ if (!parent_cpu.is_iop()) {
+ ap_count++;
+ const std::vector<CPUActivity<SIZE>>& parent_cpu_timeline = parent_cpu.timeline();
+
+ bool intr_initialized = false;
+ bool idle_initialized = false;
+ bool runq_initialized = false;
+
+ MachineCPU<SIZE>& cpu = _cpus[parent_cpu.id()];
+
+ for (auto reverse_it = parent_cpu_timeline.rbegin(); reverse_it < parent_cpu_timeline.rend(); ++reverse_it) {
+
+ // We can sometimes split two simultaneous events across two buffer snaps.
+ // IOW, buffer snap 1:
+ //
+ // event[N].timestamp = 1234;
+ //
+ // buffer snap 2:
+ //
+ // event[0].timestamp = 1234;
+ ASSERT(!reverse_it->contains(now) || reverse_it->max()-AbsTime(1) == now, "Sanity");
+ ASSERT(reverse_it->location() <= now, "Sanity");
+
+ switch (reverse_it->type()) {
+ //
+ // The states are separate, and hierarchical.
+ // The order (low -> high) is:
+ // Run, Idle, INTR
+ // Unknown is a special state; we give up on seeing it.
+ // Seeing a lower state also determines the higher states
+ // (a Run record fixes Idle and INTR as well), but not
+ // vice-versa: an INTR record tells us nothing about Run or
+ // Idle, so we keep scanning backwards.
+ //
+
+ case kCPUActivity::Unknown:
+ // Don't actually initialize anything, just force a bailout
+ runq_initialized = idle_initialized = intr_initialized = true;
+ break;
+
+ // NOTE NOTE NOTE!
+ //
+ // Overly clever here, note the lack of "break" in the Run
+ // and Idle clause, we fall through to initialize higher
+ // states.
+ case kCPUActivity::Run:
+ ASSERT(!runq_initialized, "This should always be the last level to initialize");
+ if (MachineThread<SIZE>* on_cpu_thread = youngest_mutable_thread(reverse_it->thread()->tid())) {
+ cpu.initialize_thread_state(on_cpu_thread, now);
+ init_count++;
+ } else {
+ ASSERT(reverse_it->thread()->is_trace_terminated(), "We should find this thread unless it's been removed");
+ }
+ runq_initialized = true;
+
+ case kCPUActivity::Idle:
+ if (!idle_initialized) {
+ cpu.initialize_idle_state(reverse_it->is_idle(), now);
+ init_count++;
+ idle_initialized = true;
+ }
+
+ case kCPUActivity::INTR:
+ if (!intr_initialized) {
+ cpu.initialize_intr_state(reverse_it->is_intr(), now);
+ init_count++;
+ intr_initialized = true;
+ }
+ break;
+ }
+
+ if (runq_initialized) {
+ ASSERT(idle_initialized && intr_initialized, "Sanity");
+ break;
+ }
+ }
+ }
+ }
+
+ if (init_count < (ap_count * 3)) {
+ initialize_cpu_idle_intr_states();
+ }
+
+ //
+ // Forward any messages from the nursery
+ //
+
+ for (auto& parent_nursery_it : parent._mach_msg_nursery) {
+ auto& parent_nursery_msg = parent_nursery_it.second;
+
+ switch (parent_nursery_msg.state()) {
+ // We forward send(s) because they can become receives.
+ // We forward the free's because they stop us from showing bogus kernel message receipts
+ case kNurseryMachMsgState::Send:
+ case kNurseryMachMsgState::Free: {
+ ASSERT(_mach_msg_nursery.find(parent_nursery_msg.kmsg_addr()) == _mach_msg_nursery.end(), "Duplicate kmsg address when forwarding mach_msg nursery from parent");
+
+ auto it = _mach_msg_nursery.emplace(parent_nursery_msg.kmsg_addr(), parent_nursery_msg);
+
+ // Grr, emplace returns a std::pair<it, bool>, and the it is std::pair<key, value>...
+
+ // We have to clear this to prevent bogus data being shown during a receive,
+ // the send event index is no longer available.
+ it.first->second.set_send_event_index(-1);
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+ }
+
+ for (uintptr_t index = 0; index < event_count; ++index) {
+ if (!process_event(_events[index]))
+ break;
+ }
+
+ post_initialize();
+}
+
+template <typename SIZE>
+const MachineProcess<SIZE>* Machine<SIZE>::process(pid_t pid, AbsTime time) const {
+ auto by_pid_range = _processes_by_pid.equal_range(pid);
+ for (auto it = by_pid_range.first; it != by_pid_range.second; ++it) {
+ const MachineProcess<SIZE>& process = it->second;
+ if (process.timespan().contains(time)) {
+ return &process;
+ }
+ }
+
+ return nullptr;
+}
+
+template <typename SIZE>
+const MachineThread<SIZE>* Machine<SIZE>::thread(typename SIZE::ptr_t tid, AbsTime time) const {
+ auto by_tid_range = _threads_by_tid.equal_range(tid);
+ for (auto it = by_tid_range.first; it != by_tid_range.second; ++it) {
+ const MachineThread<SIZE>& thread = it->second;
+ if (thread.timespan().contains(time)) {
+ return &thread;
+ }
+ }
+
+ return nullptr;
+}
+
+template <typename SIZE>
+struct VoucherVsAbsTimeComparator {
+ bool operator()(const std::unique_ptr<MachineVoucher<SIZE>>& voucher, const AbsTime time) const {
+ return voucher->timespan().max() < time;
+ }
+
+ bool operator()(const AbsTime time, const std::unique_ptr<MachineVoucher<SIZE>>& voucher) const {
+ return time < voucher->timespan().max();
+ }
+};
+
+template <typename SIZE>
+const MachineVoucher<SIZE>* Machine<SIZE>::voucher(typename SIZE::ptr_t address, AbsTime timestamp) const {
+ // First find the "row" for this address.
+ auto by_addr_it = _vouchers_by_addr.find(address);
+ if (by_addr_it != _vouchers_by_addr.end()) {
+ auto& row = by_addr_it->second;
+
+ auto by_time_it = std::upper_bound(row.begin(), row.end(), timestamp, VoucherVsAbsTimeComparator<SIZE>());
+ // The upper bound will report that 0 is lower than [ 10, 20 ), need to check contains!
+ if (by_time_it != row.end()) {
+ // The compiler has trouble seeing through an iterator that reflects unique_ptr methods which reflect MachineVoucher methods.
+ auto v = by_time_it->get();
+ if (v->timespan().contains(timestamp)) {
+ return v;
+ }
+ }
+ }
+
+ return nullptr;
+}
+
+template <typename SIZE>
+const MachineMachMsg<SIZE>* Machine<SIZE>::mach_msg(uintptr_t event_index) const {
+ auto it = _mach_msgs_by_event_index.find(event_index);
+ if (it != _mach_msgs_by_event_index.end()) {
+ return &_mach_msgs.at(it->second);
+ }
+
+ return nullptr;
+}
+
+#if !defined(NDEBUG) && !defined(NS_BLOCK_ASSERTIONS)
+template <typename SIZE>
+void Machine<SIZE>::validate() const {
+ ASSERT(_events, "Sanity");
+ ASSERT(_event_count, "Sanity");
+
+ //
+ // Event timestamp ordering is already pre-checked, no point in retesting it here.
+ //
+
+ ASSERT(_threads_by_tid.size() == _threads_by_time.size(), "Container views state not in sync");
+ ASSERT(_processes_by_pid.size() == _processes_by_name.size(), "Container views state not in sync");
+ ASSERT(_processes_by_pid.size() == _processes_by_time.size(), "Container views state not in sync");
+
+ for (auto& pair : _processes_by_pid) {
+ auto& process = pair.second;
+ process.validate();
+ AbsInterval process_timespan = process.timespan();
+ for (auto thread : process.threads()) {
+ ASSERT(process_timespan.contains(thread->timespan()), "thread outside process timespan");
+ }
+ }
+
+ for (auto thread_ptr : _threads_by_time) {
+ thread_ptr->validate();
+ }
+
+ //
+ // Make sure no process with the same pid overlaps in time
+ //
+ const MachineProcess<SIZE>* last_process = nullptr;
+ for (auto& pair : _processes_by_pid) {
+ auto& process = pair.second;
+ if (last_process && last_process->pid() == process.pid()) {
+ // The < operator only checks ordering, it is not strict
+ // about overlap. We must check both
+ ASSERT(last_process->timespan() < process.timespan(), "Sanity");
+ ASSERT(!last_process->timespan().intersects(process.timespan()), "Sanity");
+ }
+ last_process = &process;
+ }
+
+ //
+ // Make sure no thread with the same tid overlaps in time
+ //
+ const MachineThread<SIZE>* last_thread = nullptr;
+ for (auto& pair : _threads_by_tid) {
+ auto& thread = pair.second;
+ if (last_thread && last_thread->tid() == thread.tid()) {
+ // The < operator only checks ordering, it is not strict
+ // about overlap. We must check both
+ ASSERT(last_thread->timespan() < thread.timespan(), "Sanity");
+ ASSERT(!last_thread->timespan().intersects(thread.timespan()), "Sanity");
+ }
+ last_thread = &thread;
+ }
+
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(_all_io_active_intervals), "all io search/mask vector fails invariant");
+}
+#endif
+
+template <typename SIZE>
+const std::vector<const MachineProcess<SIZE>*>& Machine<SIZE>::processes() const {
+ return *reinterpret_cast< const std::vector<const MachineProcess<SIZE>*>* >(&_processes_by_time);
+}
+
+template <typename SIZE>
+const std::vector<const MachineThread<SIZE>*>& Machine<SIZE>::threads() const {
+ return *reinterpret_cast< const std::vector<const MachineThread<SIZE>*>* >(&_threads_by_time);
+}
+
+template <typename SIZE>
+const std::vector<const MachineCPU<SIZE>>& Machine<SIZE>::cpus() const {
+ return *reinterpret_cast< const std::vector<const MachineCPU<SIZE>>* >(&_cpus);
+}
+
+template <typename SIZE>
+AbsInterval Machine<SIZE>::timespan() const {
+ if (_event_count) {
+ AbsTime begin(_events[0].timestamp());
+ AbsTime end(_events[_event_count-1].timestamp());
+ return AbsInterval(begin, end - begin + AbsTime(1));
+ }
+
+ return AbsInterval(AbsTime(0),AbsTime(0));
+}
+
+template <typename SIZE>
+CPUSummary<SIZE> Machine<SIZE>::summary_for_timespan(AbsInterval summary_timespan, const MachineCPU<SIZE>* summary_cpu) const {
+ ASSERT(summary_timespan.intersects(timespan()), "Sanity");
+ CPUSummary<SIZE> summary;
+
+ uint32_t ap_cpu_count = 0;
+ for (auto& cpu: _cpus) {
+ // We don't know enough about iops to do anything with them.
+ // Also skip cpus with no activity
+ if (!cpu.is_iop() && cpu.is_active()) {
+ ap_cpu_count++;
+ }
+ }
+
+ bool should_calculate_wallclock_run_time = (summary_cpu == NULL && ap_cpu_count > 1);
+
+ summary.begin_cpu_timeline_walks();
+
+ //
+ // Lots of optimization possibilities here...
+ //
+ // We spend a LOT of time doing the set lookups to map from a thread/process to a ThreadSummary / ProcessSummary.
+ // If we could somehow map directly from thread/process to the summary, this would speed up considerably.
+ //
+
+ for (auto& cpu: _cpus) {
+ // We don't know enough about iops to do anything with them.
+ // Also skip cpus with no activity
+ if (!cpu.is_iop() && cpu.is_active()) {
+ if (summary_cpu == NULL || summary_cpu == &cpu) {
+
+ summary.begin_cpu_timeline_walk(&cpu);
+
+ auto& timeline = cpu.timeline();
+ if (!timeline.empty()) {
+ AbsInterval timeline_timespan = AbsInterval(timeline.front().location(), timeline.back().max() - timeline.front().location());
+ AbsInterval trimmed_timespan = summary_timespan.intersection_range(timeline_timespan);
+
+ summary.incr_active_cpus();
+
+ auto start = cpu.activity_for_timestamp(trimmed_timespan.location());
+ auto end = cpu.activity_for_timestamp(trimmed_timespan.max()-AbsTime(1));
+
+ ASSERT(start && start->contains(trimmed_timespan.location()), "Sanity");
+ ASSERT(end && end->contains(trimmed_timespan.max()-AbsTime(1)), "Sanity");
+
+ ProcessSummary<SIZE>* process_summary = NULL;
+ ThreadSummary<SIZE>* thread_summary = NULL;
+
+ if (start->is_run() && !start->is_context_switch()) {
+ const MachineThread<SIZE>* thread_on_cpu = start->thread();
+ const MachineProcess<SIZE>* process_on_cpu = &thread_on_cpu->process();
+
+ process_summary = summary.mutable_process_summary(process_on_cpu);
+ thread_summary = process_summary->mutable_thread_summary(thread_on_cpu);
+ }
+
+ // NOTE! <=, not <, because end is inclusive of data we want to count!
+ while (start <= end) {
+ // NOTE! summary_timespan, NOT trimmed_timespan!
+ AbsInterval t = start->intersection_range(summary_timespan);
+
+ switch (start->type()) {
+ case kCPUActivity::Unknown:
+ // Only cpu summaries track unknown time
+ summary.add_unknown_time(t.length());
+ break;
+
+ case kCPUActivity::Idle:
+ summary.add_idle_time(t.length());
+ summary.add_all_cpus_idle_interval(t);
+ if (process_summary) process_summary->add_idle_time(t.length());
+ if (thread_summary) thread_summary->add_idle_time(t.length());
+ break;
+
+ case kCPUActivity::INTR:
+ summary.add_intr_time(t.length());
+ // It might seem like we should add INTR time to the wallclock run time
+ // for the top level summary, but the concurrency level is calculated as
+ // Actual / Wallclock, where Actual only counts RUN time. If we add INTR
+ // the results are skewed.
+ if (process_summary) process_summary->add_intr_time(t.length());
+ if (thread_summary) thread_summary->add_intr_time(t.length());
+ break;
+
+ case kCPUActivity::Run: {
+ // We must reset these each time. Consider the case where we have the following:
+ //
+ // RRRRRRRRIIIIIIIIIIIIIIIIRRRRRRRRRR
+ // ^ ^
+ // CSW Summary starts here
+ //
+ // The first run seen in the summary will not be a CSW, and yet process/thread summary
+ // are NULL...
+
+ const MachineThread<SIZE>* thread_on_cpu = start->thread();
+ const MachineProcess<SIZE>* process_on_cpu = &thread_on_cpu->process();
+
+ process_summary = summary.mutable_process_summary(process_on_cpu);
+ thread_summary = process_summary->mutable_thread_summary(thread_on_cpu);
+
+ if (start->is_context_switch()) {
+ summary.incr_context_switches();
+ process_summary->incr_context_switches();
+ thread_summary->incr_context_switches();
+ }
+
+ summary.add_run_time(t.length());
+ process_summary->add_run_time(t.length());
+ thread_summary->add_run_time(t.length());
+
+ // We only calculate wallclock run time if there is a chance
+ // it might differ from run time.
+ if (should_calculate_wallclock_run_time) {
+ summary.add_wallclock_run_interval(t);
+ process_summary->add_wallclock_run_interval(t);
+ }
+
+ break;
+ }
+ }
+
+ start++;
+ }
+ }
+
+ summary.end_cpu_timeline_walk(&cpu);
+ }
+ }
+ }
+
+ summary.end_cpu_timeline_walks();
+
+ // We only attempt to calculate future summary data in limited circumstances:
+ // there must be enough future data to consistently decide whether threads would run,
+ // and if summary_cpu is not "all" we do not attempt the calculation.
+
+ if (summary_cpu == NULL) {
+ AbsInterval future_timespan(summary_timespan.max(), summary_timespan.length() * AbsTime(5));
+ if (future_timespan.intersection_range(timespan()).length() == future_timespan.length()) {
+ for (auto& cpu: _cpus) {
+ // We don't know enough about iops to do anything with them
+ if (!cpu.is_iop()) {
+ auto& timeline = cpu.timeline();
+
+ if (!timeline.empty()) {
+ AbsInterval timeline_timespan = AbsInterval(timeline.front().location(), timeline.back().max() - timeline.front().location());
+ AbsInterval trimmed_timespan = future_timespan.intersection_range(timeline_timespan);
+
+ auto start = cpu.activity_for_timestamp(trimmed_timespan.location());
+ auto end = cpu.activity_for_timestamp(trimmed_timespan.max()-AbsTime(1));
+
+ ASSERT(start && start->contains(trimmed_timespan.location()), "Sanity");
+ ASSERT(end && end->contains(trimmed_timespan.max()-AbsTime(1)), "Sanity");
+
+ ProcessSummary<SIZE>* process_summary = NULL;
+ ThreadSummary<SIZE>* thread_summary = NULL;
+
+ // NOTE! <=, not <, because end is inclusive of data we want to count!
+ while (start <= end) {
+ // NOTE! future_timespan, NOT trimmed_timespan!
+ AbsInterval t = start->intersection_range(future_timespan);
+
+ switch (start->type()) {
+ case kCPUActivity::Unknown:
+ break;
+
+ case kCPUActivity::Idle:
+ // On idle, we mark the current thread as blocked.
+ if (thread_summary)
+ thread_summary->set_is_blocked_in_future();
+ break;
+
+ case kCPUActivity::INTR:
+ break;
+
+ case kCPUActivity::Run: {
+ // We must reset these each time. Consider the case where we have the following:
+ //
+ // RRRRRRRRIIIIIIIIIIIIIIIIRRRRRRRRRR
+ // ^ ^
+ // CSW Summary starts here
+ //
+ // The first run seen in the summary will not be a CSW, and yet process/thread summary
+ // are NULL...
+
+ const MachineThread<SIZE>* thread_on_cpu = start->thread();
+ const MachineProcess<SIZE>* process_on_cpu = &thread_on_cpu->process();
+
+ process_summary = summary.mutable_process_summary(process_on_cpu);
+ thread_summary = process_summary->mutable_thread_summary(thread_on_cpu);
+
+ if (!thread_summary->is_future_initialized()) {
+ thread_summary->set_future_initialized();
+ thread_summary->set_total_blocked_in_summary(thread_on_cpu->blocked_in_timespan(summary_timespan));
+ thread_summary->set_first_block_after_summary(thread_on_cpu->next_blocked_after(summary_timespan.max()));
+ ASSERT(thread_summary->total_blocked_in_summary() + thread_summary->total_run_time() <= summary_timespan.length(), "More time blocked + running than is possible in summary timespan");
+ thread_summary->set_max_possible_future_run_time(summary_timespan.length() - (thread_summary->total_blocked_in_summary() + thread_summary->total_run_time()));
+ }
+
+ if (!thread_summary->is_blocked_in_future()) {
+ // We ONLY block at context_switch locations. But, we can context
+ // switch on any cpu. So, need a strong check!
+ if (t.max() >= thread_summary->first_block_after_summary()) {
+ thread_summary->set_is_blocked_in_future();
+ } else {
+ ASSERT(t.location() <= thread_summary->first_block_after_summary(), "Sanity");
+ // Each thread controls how much time it can accumulate in a given window.
+ // It may be that only a fraction (or none) of the time can be added.
+ // Make sure to only add the thread approved amount to the process and total summary
+ AbsTime future_time = thread_summary->add_future_run_time(t.length());
+ summary.add_future_run_time(future_time);
+ process_summary->add_future_run_time(future_time);
+ }
+ }
+ break;
+ }
+ }
+ start++;
+ }
+ }
+ }
+ }
+
+ //
+ // When we're doing future run predictions, we can create summaries for
+ // threads that have no run time, and no future run time.
+ //
+ // The way this happens is you have 2 or more cpus.
+ // On cpu 1, there is a blocking event for Thread T at time x.
+ //
+ // While walking through cpu 2's activity, you see T
+ // scheduled at x + N. You cannot add to T's future run
+ // time, and T never ran in the original time window.
+ // Thus, T is added and does nothing.
+ //
+
+ // Remove inactive threads/processes.
+ auto& process_summaries = summary.mutable_process_summaries();
+ auto process_it = process_summaries.begin();
+ while (process_it != process_summaries.end()) {
+ auto next_process_it = process_it;
+ ++next_process_it;
+ if (process_it->total_run_time() == 0 && process_it->total_future_run_time() == 0) {
+ DEBUG_ONLY({
+ for (auto& thread_summary : process_it->thread_summaries()) {
+ ASSERT(thread_summary.total_run_time() == 0 && thread_summary.total_future_run_time() == 0, "Process with 0 run time && 0 future run time has thread with non zero values");
+ }
+ });
+ process_summaries.erase(process_it);
+ } else {
+ // Our evil friend unordered_set returns const iterators...
+ auto& thread_summaries = const_cast<ProcessSummary<SIZE>*>(&*process_it)->mutable_thread_summaries();
+ auto thread_it = thread_summaries.begin();
+ while (thread_it != thread_summaries.end()) {
+ auto next_thread_it = thread_it;
+ ++next_thread_it;
+ if (thread_it->total_run_time() == 0 && thread_it->total_future_run_time() == 0) {
+ thread_summaries.erase(thread_it);
+ }
+ thread_it = next_thread_it;
+ }
+ }
+ process_it = next_process_it;
+ }
+ }
+ }
+
+ //
+ // Calculate vmfault data.
+ //
+ // We want to calculate this after the future CPU time, because it is possible a time slice might have vmfaults
+ // that span the entire timespan. This could result in a process/thread with no run time, and no future time, which
+ // would be removed as "inactive" during future CPU time calculation.
+ //
+
+ if (summary_cpu == NULL) {
+ // vmfault intervals are stored in the MachineThread
+ for (MachineThread<SIZE>* machine_thread : _threads_by_time) {
+ const MachineProcess<SIZE>* process = &machine_thread->process();
+
+ ProcessSummary<SIZE>* process_summary = NULL;
+ ThreadSummary<SIZE>* thread_summary = NULL;
+
+ const auto& vm_faults = machine_thread->vm_faults();
+ if (!vm_faults.empty()) {
+ AbsInterval vm_faults_timespan = AbsInterval(vm_faults.front().location(), vm_faults.back().max() - vm_faults.front().location());
+ AbsInterval trimmed_timespan = summary_timespan.intersection_range(vm_faults_timespan);
+
+ if (trimmed_timespan.length() > 0) {
+ auto start = interval_beginning_timespan(vm_faults, trimmed_timespan);
+ auto end = interval_ending_timespan(vm_faults, trimmed_timespan);
+
+ ASSERT(!start || start->intersects(trimmed_timespan), "Sanity");
+ ASSERT(!end || end->intersects(trimmed_timespan), "Sanity");
+ ASSERT((!start && !end) || ((start && end) && (start <= end)), "Sanity");
+
+ if (start && end) {
+ // NOTE! <=, not <, because end is inclusive of data we want to count!
+ while (start <= end) {
+ //
+ // NOTE! summary_timespan, NOT trimmed_timespan!
+ //
+ // 8/25/13 ... Okay, why do we care summary vs trimmed?
+ // It shouldn't be possible for start to lie outside trimmed...
+ // Leaving this for now rather than introducing some bizarre
+ // corner case, but wth...
+ //
+ AbsInterval t = start->intersection_range(summary_timespan);
+
+ ASSERT(t.length() > 0, "Might be too strong, but expecting this to be non-zero");
+
+ summary.add_vm_fault_time(t.length());
+
+ // We must initialize these lazily. If we don't, every process and thread gets
+ // a summary entry. But we don't want to keep looking them up over and over...
+ if (!process_summary) {
+ process_summary = summary.mutable_process_summary(process);
+ }
+ process_summary->add_vm_fault_time(t.length());
+
+ if (!thread_summary) {
+ thread_summary = process_summary->mutable_thread_summary(machine_thread);
+ }
+ thread_summary->add_vm_fault_time(t.length());
+
+ start++;
+ }
+ }
+ }
+ }
+ }
+ }
+
+
+ //
+ // Calculate IO activity data.
+ //
+ if (summary_cpu == NULL) {
+ //
+ // IO activity may overlap on even individual threads.
+ //
+ // All IO activity is stored in a single sorted vector, but
+ // it may overlap even at the thread level. There isn't an
+ // easy way to locate a starting and stopping point that intersect
+ // a given range.
+ //
+ // The solution being used is to flatten the overlapping IO
+ // and keep a sorted non overlapping list of IO activity. For any
+ // given timespan, we find the overlapping intervals of flattened
+ // IO activity and then look up the actual matching IOActivity
+ // objects.
+ //
+ if (!_all_io_active_intervals.empty()) {
+ AbsInterval io_timespan = AbsInterval(_all_io_active_intervals.front().location(), _all_io_active_intervals.back().max() - _all_io_active_intervals.front().location());
+ AbsInterval trimmed_timespan = summary_timespan.intersection_range(io_timespan);
+ if (trimmed_timespan.length() > 0) {
+ //
+ // First find the flattened start point
+ //
+ if (auto flattened_start = interval_beginning_timespan(_all_io_active_intervals, trimmed_timespan)) {
+ //
+ // Now find the actual start IOActivity
+ //
+ auto it = std::lower_bound(_all_io.begin(), _all_io.end(), flattened_start->location(), AbsIntervalLocationVsAbsTimeComparator());
+ ASSERT(it != _all_io.end(), "If we reach here, we should ALWAYS find a match!");
+
+ // Walk every IOActivity that begins before the end of the summary window; overlap means we cannot stop at the first non-intersecting entry.
+ while (it != _all_io.end() && it->location() < summary_timespan.max()) {
+ AbsInterval t = it->intersection_range(summary_timespan);
+
+ // Some of the ranges will not intersect at all, for example
+ //
+ // IOActivity
+ //
+ // XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+ // XXXXXX
+ // XXXXXXXX
+ //
+ // Summary Range
+ //
+ // SSSSSSSSSSSSSSSSSS
+ //
+ // The flattened_start will point at the oldest IOActivity
+ // which overlaps the summary range, but many of the later
+ // IOActivities will not overlap.
+
+ if (t.length() > 0) {
+ //
+ // Wait time.
+ //
+ summary.add_io_time(t.length());
+
+ ProcessSummary<SIZE>* process_summary = summary.mutable_process_summary(&it->thread()->process());
+ process_summary->add_io_time(t.length());
+
+ ThreadSummary<SIZE>* thread_summary = process_summary->mutable_thread_summary(it->thread());
+ thread_summary->add_io_time(t.length());
+
+ //
+ // Bytes completed
+ //
+ if (summary_timespan.contains(it->max() - AbsTime(1))) {
+ summary.add_io_bytes_completed(it->size());
+ process_summary->add_io_bytes_completed(it->size());
+ thread_summary->add_io_bytes_completed(it->size());
+ }
+ }
+ it++;
+ }
+ }
+ }
+ }
+ }
+
+ //
+ // Calculate Jetsam activity data.
+ //
+ if (summary_cpu == NULL) {
+ // Jetsam activity is stored in the MachineThread.
+ for (MachineThread<SIZE>* machine_thread : _threads_by_time) {
+ const MachineProcess<SIZE>* process = &machine_thread->process();
+
+ ProcessSummary<SIZE>* process_summary = NULL;
+ ThreadSummary<SIZE>* thread_summary = NULL;
+
+ const auto& jetsam_activity = machine_thread->jetsam_activity();
+ if (!jetsam_activity.empty()) {
+ AbsInterval jetsam_timespan = AbsInterval(jetsam_activity.front().location(), jetsam_activity.back().max() - jetsam_activity.front().location());
+ AbsInterval trimmed_timespan = summary_timespan.intersection_range(jetsam_timespan);
+
+ if (trimmed_timespan.length() > 0) {
+ auto start = interval_beginning_timespan(jetsam_activity, trimmed_timespan);
+ auto end = interval_ending_timespan(jetsam_activity, trimmed_timespan);
+
+ ASSERT(!start || start->intersects(trimmed_timespan), "Sanity");
+ ASSERT(!end || end->intersects(trimmed_timespan), "Sanity");
+ ASSERT((!start && !end) || ((start && end) && (start <= end)), "Sanity");
+
+ if (start && end) {
+ // NOTE! <=, not <, because end is inclusive of data we want to count!
+ while (start <= end) {
+ //
+ // NOTE! summary_timespan, NOT trimmed_timespan!
+ //
+ // 8/25/13 ... Okay, why do we care summary vs trimmed?
+ // It shouldn't be possible for start to lie outside trimmed...
+ // Leaving this for now rather than introducing some bizarre
+ // corner case, but wth...
+ //
+ AbsInterval t = start->intersection_range(summary_timespan);
+
+ ASSERT(t.length() > 0, "Might be too strong, but expecting this to be non-zero");
+
+ summary.add_jetsam_time(t.length());
+
+ // We must initialize these lazily. If we don't, every process and thread gets
+ // a summary entry. But we don't want to keep looking them up over and over...
+ if (!process_summary) {
+ process_summary = summary.mutable_process_summary(process);
+ }
+ process_summary->add_jetsam_time(t.length());
+
+ if (!thread_summary) {
+ thread_summary = process_summary->mutable_thread_summary(machine_thread);
+ }
+ thread_summary->add_jetsam_time(t.length());
+
+ start++;
+ }
+ }
+ }
+ }
+ }
+
+ // Jetsam kill times are stored in the process.
+ for (MachineProcess<SIZE>* machine_process : _processes_by_time) {
+ if (machine_process->is_exit_by_jetsam()) {
+ if (summary_timespan.contains(machine_process->exit_timestamp())) {
+ summary.increment_processes_jetsamed();
+ summary.mutable_process_summary(machine_process)->set_jetsam_killed();
+ }
+ }
+ }
+ }
+
+ DEBUG_ONLY(summary.validate());
+
+ return summary;
+}
+
+template <typename SIZE>
+uint32_t Machine<SIZE>::active_cpus() const {
+ uint32_t cpus = 0;
+
+ for (auto& cpu : _cpus) {
+ if (!cpu.timeline().empty()) {
+ cpus++;
+ }
+ }
+
+ return cpus;
+}
+
+// This attempts to analyze various pieces of data and guess
+// whether the Machine represents an iOS device or not.
+
+template <typename SIZE>
+bool Machine<SIZE>::is_ios() const {
+ // I looked at avg intr time, and min intr time; they were too close for
+ // reliable detection of desktop vs device (desktop has intr(s) as short
+ // as 60ns).
+
+ // For now, we're just going to do a really gross detection: in any trace
+ // from a device we'd expect to see SpringBoard or backboardd.
+
+ for (auto process : _processes_by_time) {
+ if (strcmp(process->name(), "SpringBoard") == 0)
+ return true;
+ if (strcmp(process->name(), "backboardd") == 0)
+ return true;
+ }
+
+ return false;
+}
--- /dev/null
+//
+// Machine.mutable-impl.hpp
+// KDBG
+//
+// Created by James McIlree on 10/30/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#include "KDebug.h"
+
+template <typename SIZE>
+MachineProcess<SIZE>* Machine<SIZE>::create_process(pid_t pid, const char* name, AbsTime create_timestamp, kMachineProcessFlag flags) {
+ ASSERT(name, "Sanity");
+
+ // Validate that we are not creating a process that already exists
+ DEBUG_ONLY({
+ ASSERT(_processes_by_pid.size() == _processes_by_name.size(), "Sanity");
+ ASSERT(_processes_by_pid.size() == _processes_by_time.size(), "Sanity");
+
+ auto by_pid_range = _processes_by_pid.equal_range(pid);
+ for (auto it = by_pid_range.first; it != by_pid_range.second; ++it) {
+ MachineProcess<SIZE>& process = it->second;
+ ASSERT(!process.timespan().contains(create_timestamp), "Creating a process that overlaps an existing process");
+ }
+
+ auto by_name_range = _processes_by_name.equal_range(name);
+ for (auto it = by_name_range.first; it != by_name_range.second; ++it) {
+ MachineProcess<SIZE>& process = *it->second;
+ ASSERT((process.pid() != pid) || (!process.timespan().contains(create_timestamp)), "Creating a process that overlaps an existing process");
+ }
+
+ // The "by time" vector is unsorted during construction, we have to look at everything.
+ for (MachineProcess<SIZE>* process : _processes_by_time) {
+ if (process->pid() == pid) {
+ ASSERT(!process->timespan().contains(create_timestamp), "Creating a process that overlaps an existing process");
+ }
+ }
+ })
+
+ if (MachineProcess<SIZE>* about_to_be_reused_process = youngest_mutable_process(pid)) {
+ // If this process is still alive, we're going to be replacing it.
+ // The only legal way of doing that is an exec. Validate.
+ if (!about_to_be_reused_process->is_trace_terminated()) {
+ DEBUG_ONLY({
+ ASSERT((uint32_t)flags & ((uint32_t)kMachineProcessFlag::CreatedByForkExecEvent | (uint32_t)kMachineProcessFlag::CreatedByExecEvent),
+ "Replacing existing process without exec or fork-exec");
+ })
+ //
+ // Exit by exec is unique in that we will have two processes/threads
+ // back to back in the timeline. We do not want them to overlap, and
+ // the new process timeline is half open and will have this time as
+ // its creation. Pass a time 1 mabs earlier to the exit to prevent overlap.
+ about_to_be_reused_process->set_exit_by_exec(create_timestamp - AbsTime(1));
+ }
+ ASSERT(about_to_be_reused_process->is_trace_terminated(), "Sanity");
+ }
+
+ MachineProcess<SIZE>* process = &_processes_by_pid.emplace(pid, MachineProcess<SIZE>(pid, name, create_timestamp, flags))->second;
+ _processes_by_name.emplace(process->name(), process);
+ _processes_by_time.push_back(process);
+
+ return process;
+}
+
+template <typename SIZE>
+MachineThread<SIZE>* Machine<SIZE>::create_thread(MachineProcess<SIZE>* process, typename SIZE::ptr_t tid, MachineVoucher<SIZE>* voucher, AbsTime create_timestamp, kMachineThreadFlag flags) {
+ ASSERT(process, "Sanity");
+
+ // Validate that we are not creating a thread that already exists
+ DEBUG_ONLY({
+ ASSERT(_threads_by_tid.size() == _threads_by_time.size(), "Sanity");
+
+ auto by_tid_range = _threads_by_tid.equal_range(tid);
+ for (auto it = by_tid_range.first; it != by_tid_range.second; ++it) {
+ MachineThread<SIZE>& thread = it->second;
+ ASSERT(!thread.timespan().contains(create_timestamp), "Creating a thread that overlaps an existing thread");
+ }
+
+ // The "by time" vector is unsorted during construction, we have to look at everything
+ for (MachineThread<SIZE>* thread : _threads_by_time) {
+ if (thread->tid() == tid) {
+ ASSERT(!thread->timespan().contains(create_timestamp), "Creating a thread that overlaps an existing thread");
+ }
+ }
+ })
+
+ // Currently the only way we intentionally re-use live threads is via exec/fork-exec.
+ // The exec/fork-exec code calls create_process first, which should mark all existing
+ // threads as trace-terminated. So we should NEVER see a live thread at this point.
+ // Validate.
+ DEBUG_ONLY({
+ if (MachineThread<SIZE>* about_to_be_reused_thread = youngest_mutable_thread(tid)) {
+ ASSERT(about_to_be_reused_thread->is_trace_terminated(), "Expected this thread to be terminated");
+ }
+ });
+
+ MachineThread<SIZE>* thread = &_threads_by_tid.emplace(tid, MachineThread<SIZE>(process, tid, voucher, create_timestamp, flags))->second;
+ _threads_by_time.push_back(thread);
+
+ return thread;
+}
+
+template <typename SIZE>
+MachineVoucher<SIZE>* Machine<SIZE>::create_voucher(typename SIZE::ptr_t address, AbsTime create_timestamp, kMachineVoucherFlag flags, uint32_t content_bytes_capacity) {
+ ASSERT(address, "Should not be NULL");
+ ASSERT(content_bytes_capacity < 4096, "Probably an error"); // This is a guesstimate, may need re-evaluation
+
+ MachineVoucher<SIZE>* voucher;
+
+ ASSERT(_voucher_nursery.find(address) == _voucher_nursery.end(), "Attempt to create an already live voucher (<rdar://problem/16898190>)");
+ //
+ // There is no real workaround for this. Other tracepoints will use the address, and bad things happen. You can't fix ordering bugs with cleverness outside the lock :-).
+ //
+ // <rdar://problem/16898190> voucher create / destroy tracepoints are outside the hashtable lock
+
+ auto workaround_it = _voucher_nursery.find(address);
+ if (workaround_it != _voucher_nursery.end()) {
+ // We've hit a race condition; this voucher was used before the create event was posted.
+ // We want to update the content_bytes_capacity, but not the create_timestamp.
+ voucher = workaround_it->second.get();
+ voucher->workaround_16898190(flags, content_bytes_capacity);
+ } else {
+ auto it = _voucher_nursery.emplace(address, std::make_unique<MachineVoucher<SIZE>>(address, AbsInterval(create_timestamp, AbsTime(0)), flags, content_bytes_capacity));
+ ASSERT(it.second, "Voucher emplace in nursery failed");
+ voucher = it.first->second.get();
+ }
+
+ ASSERT(voucher->is_live(), "Sanity");
+ ASSERT(!voucher->is_null(), "Sanity");
+ ASSERT(!voucher->is_unset(), "Sanity");
+
+ return voucher;
+}
+
+template <typename SIZE>
+void Machine<SIZE>::destroy_voucher(typename SIZE::ptr_t address, AbsTime timestamp) {
+ ASSERT(address, "Should not be NULL");
+
+ auto nursery_it = _voucher_nursery.find(address);
+
+ // We need a voucher for every reference, so we are in the odd position
+ // of creating a voucher so we can destroy it.
+ if (nursery_it == _voucher_nursery.end()) {
+ create_voucher(address, AbsTime(0), kMachineVoucherFlag::CreatedByFirstUse, 0);
+ nursery_it = _voucher_nursery.find(address);
+ }
+
+ MachineVoucher<SIZE>* voucher = nursery_it->second.get();
+
+ voucher->set_destroyed(timestamp);
+
+ // First find the "row" for this address.
+ auto by_addr_it = _vouchers_by_addr.find(address);
+ if (by_addr_it == _vouchers_by_addr.end()) {
+ // No address entry case
+ //std::vector<std::unique_ptr<MachineVoucher<SIZE>>> row(std::move(nursery_it->second));
+ std::vector<std::unique_ptr<MachineVoucher<SIZE>>> row;
+ row.emplace_back(std::move(nursery_it->second));
+ _vouchers_by_addr.emplace(address, std::move(row));
+ } else {
+ auto& row = by_addr_it->second;
+
+ // Make sure these are sorted and non-overlapping
+ ASSERT(row.back()->timespan() < voucher->timespan(), "Sanity");
+ ASSERT(!row.back()->timespan().intersects(voucher->timespan()), "Sanity");
+
+ row.emplace_back(std::move(nursery_it->second));
+ }
+
+ _voucher_nursery.erase(nursery_it);
+}
+
+//
+// This function handles looking up a voucher by address. If necessary, it will create a new voucher.
+// NOTE! Does not update voucher timestamps, that is only done at voucher destroy.
+//
+
+template <typename SIZE>
+MachineVoucher<SIZE>* Machine<SIZE>::process_event_voucher_lookup(typename SIZE::ptr_t address, uint32_t msgh_bits) {
+ // NOTE! There is a subtle race here; we *MUST* test msgh_bits before
+ // checking for a 0 address. An unset voucher may have address 0...
+ if (MACH_MSGH_BITS_VOUCHER(msgh_bits) == MACH_MSGH_BITS_ZERO)
+ return &UnsetVoucher;
+
+ if (address == 0)
+ return &NullVoucher;
+
+ auto nursery_it = _voucher_nursery.find(address);
+ if (nursery_it == _voucher_nursery.end()) {
+ // If no voucher exists, create a default (no-contents!) voucher.
+ return create_voucher(address, AbsTime(0), kMachineVoucherFlag::CreatedByFirstUse, 0);
+ }
+
+ return nursery_it->second.get();
+}
+
+template <typename SIZE>
+MachineVoucher<SIZE>* Machine<SIZE>::thread_forwarding_voucher_lookup(const MachineVoucher<SIZE>* previous_machine_state_voucher) {
+ ASSERT(previous_machine_state_voucher, "Sanity");
+
+ if (previous_machine_state_voucher == &UnsetVoucher)
+ return &UnsetVoucher;
+
+ if (previous_machine_state_voucher == &NullVoucher)
+ return &NullVoucher;
+
+ auto nursery_it = _voucher_nursery.find(previous_machine_state_voucher->address());
+ if (nursery_it == _voucher_nursery.end()) {
+ ASSERT(false, "Should not ever have a thread forwarding a voucher not in the nursery");
+ return &UnsetVoucher;
+ }
+
+ return nursery_it->second.get();
+}
+
+//
+// This is used by processes that are being fork-exec'd / exec'd. They must be
+// created with some name, but it isn't their final name. For now, we are
+// heavily ASSERTING state to only allow processes which are fork-exec'd /
+// exec'd to set their name.
+//
+template <typename SIZE>
+void Machine<SIZE>::set_process_name(MachineProcess<SIZE>* process, const char* name) {
+ ASSERT(process, "Sanity");
+ ASSERT(process->is_created_by_fork_exec() || process->is_created_by_exec(), "Sanity");
+ ASSERT(process->threads().size() == 1, "Sanity");
+ ASSERT(process->is_fork_exec_in_progress() || process->is_exec_in_progress(), "Sanity");
+ ASSERT(name, "Sanity");
+
+ auto by_name_range = _processes_by_name.equal_range(process->name());
+ for (auto it = by_name_range.first; it != by_name_range.second; ++it) {
+ if (process == it->second) {
+ _processes_by_name.erase(it);
+ process->set_name(name);
+ _processes_by_name.emplace(process->name(), process);
+ return;
+ }
+ }
+
+ ASSERT(false, "Attempt to rename did not find a matching process");
+}
+
+//
+// The "youngest" process/thread lookups are used during event processing,
+// where we often must look up a process/thread that hasn't been updated
+// to reflect current timespans. A time based lookup would fail.
+//
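+// For example, after an exec the same pid can appear more than once in
+// _processes_by_pid; the entry whose timespan begins latest is the
+// "youngest", and is the one event processing should mutate.
+//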
+template <typename SIZE>
+MachineProcess<SIZE>* Machine<SIZE>::youngest_mutable_process(pid_t pid) {
+ MachineProcess<SIZE>* youngest_process = nullptr;
+ auto by_pid_range = _processes_by_pid.equal_range(pid);
+ for (auto it = by_pid_range.first; it != by_pid_range.second; ++it) {
+ MachineProcess<SIZE>& process = it->second;
+ // Larger times are newer (younger)
+ if (!youngest_process || process.timespan().location() > youngest_process->timespan().location()) {
+ youngest_process = &process;
+ }
+ }
+
+ return youngest_process;
+}
+
+template <typename SIZE>
+MachineThread<SIZE>* Machine<SIZE>::youngest_mutable_thread(typename SIZE::ptr_t tid) {
+ MachineThread<SIZE>* youngest_thread = nullptr;
+ auto by_tid_range = _threads_by_tid.equal_range(tid);
+ for (auto it = by_tid_range.first; it != by_tid_range.second; ++it) {
+ MachineThread<SIZE>& thread = it->second;
+ // Larger times are newer (younger)
+ if (!youngest_thread || thread.timespan().location() > youngest_thread->timespan().location()) {
+ youngest_thread = &thread;
+ }
+ }
+
+ return youngest_thread;
+}
+
+//
+// This function handles looking up a thread by tid. If necessary, it will create a new thread
+// and process. Any thread / process that are looked up or created will have their timestamps updated.
+//
+template <typename SIZE>
+MachineThread<SIZE>* Machine<SIZE>::process_event_tid_lookup(typename SIZE::ptr_t tid, AbsTime now) {
+ MachineThread<SIZE>* thread = youngest_mutable_thread(tid);
+
+ if (!thread) {
+ // This is an "unknown" thread. We have no information about its name or parent process.
+ char unknown_process_name[20];
+ snprintf(unknown_process_name, sizeof(unknown_process_name), "unknown-%llX", (uint64_t)tid);
+
+ //
+ // Strongly considering just requiring this to be valid, and never allowing an unknown thread.
+ //
+ printf("UNKNOWN TID FAIL! unknonwn tid %llx\n", (int64_t)tid);
+ ASSERT(false, "unknown TID fail");
+
+ MachineProcess<SIZE>* unknown_process = create_process(next_unknown_pid(), unknown_process_name, now, kMachineProcessFlag::IsUnknownProcess);
+ thread = create_thread(unknown_process, tid, &UnsetVoucher, now, kMachineThreadFlag::CreatedByUnknownTidInTrace);
+ unknown_process->add_thread(thread);
+ }
+
+ ASSERT(thread, "Sanity");
+ ASSERT(!thread->is_trace_terminated(), "Event tid seen after trace termination");
+ ASSERT(!thread->process().is_trace_terminated(), "Event pid seen after trace termination");
+
+ return thread;
+}
+
+//
+// See comments in task_policy.c for full explanation of trequested_0 & trequested_1.
+//
+// process_trequested_task means that the tracepoint either had a NULL thread, or specified that the tracepoint was targeted at task level.
+// This only matters in 32 bit traces, where it takes both trequested_0 and trequested_1 to carry task or thread requested data.
+//
+// For now, there is nothing we want to see in thread_requested data.
+//
+template <typename SIZE>
+void Machine<SIZE>::process_trequested_task(pid_t pid, typename SIZE::ptr_t trequested_0, typename SIZE::ptr_t trequested_1) {
+ TaskRequestedPolicy task_requested = (SIZE::is_64_bit) ? TaskRequestedPolicy(trequested_0) : TaskRequestedPolicy((Kernel32::ptr_t)trequested_0, (Kernel32::ptr_t)trequested_1);
+
+ if (uint32_t apptype = (uint32_t)task_requested.as_struct().t_apptype) {
+ if (pid) {
+ if (MachineProcess<SIZE>* target = youngest_mutable_process(pid)) {
+ target->set_apptype_from_trequested(apptype);
+ }
+ }
+ }
+}
+
+template <typename SIZE>
+void Machine<SIZE>::process_trequested_thread(typename SIZE::ptr_t tid, typename SIZE::ptr_t trequested_0, typename SIZE::ptr_t trequested_1) {
+ TaskRequestedPolicy task_requested = (SIZE::is_64_bit) ? TaskRequestedPolicy(trequested_0) : TaskRequestedPolicy((Kernel32::ptr_t)trequested_0, (Kernel32::ptr_t)trequested_1);
+
+ if (uint32_t apptype = (uint32_t)task_requested.as_struct().t_apptype) {
+ if (MachineThread<SIZE>* target_thread = youngest_mutable_thread(tid)) {
+ target_thread->mutable_process().set_apptype_from_trequested(apptype);
+ }
+ }
+}
+
+#define AST_PREEMPT 0x01
+#define AST_QUANTUM 0x02
+#define AST_URGENT 0x04
+#define AST_HANDOFF 0x08
+#define AST_YIELD 0x10
+
+#define TRACE_DATA_NEWTHREAD 0x07000004
+#define TRACE_STRING_NEWTHREAD 0x07010004
+#define TRACE_DATA_EXEC 0x07000008
+#define TRACE_STRING_EXEC 0x07010008
+#define TRACE_LOST_EVENTS 0x07020008
+
+// From ./osfmk/i386/mp.c
+#define TRACE_MP_CPU_DEACTIVATE MACHDBG_CODE(DBG_MACH_MP, 7)
+
+// From osfmk/kern/task.h
+#define TASK_POLICY_TASK 0x4
+#define TASK_POLICY_THREAD 0x8
+
+template <typename SIZE>
+bool Machine<SIZE>::process_event(const KDEvent<SIZE>& event)
+{
+ ASSERT(!lost_events(), "Should not be processing events after TRACE_LOST_EVENTS");
+
+ AbsTime now = event.timestamp();
+ ASSERT(event.cpu() > -1 && event.cpu() < _cpus.size(), "cpu_id out of range");
+ MachineCPU<SIZE>& cpu = _cpus[event.cpu()];
+
+ if (!cpu.is_iop()) {
+ //
+ // If we have lost events, immediately bail.
+ //
+ // Pre-process events known to have bogus TID's:
+ //
+ // DBG_TRACE_INFO events may not have a valid TID.
+ // MACH_IPC_VOUCHER_CREATE_ATTR_DATA do not have a valid TID,
+ //
+
+ switch (event.dbg_cooked()) {
+ case TRACEDBG_CODE(DBG_TRACE_INFO, 1): // kernel_debug_early_end()
+ case TRACEDBG_CODE(DBG_TRACE_INFO, 4): // kernel_debug_string()
+ return true;
+
+ case TRACEDBG_CODE(DBG_TRACE_INFO, 2): // TRACE_LOST_EVENTS
+ set_flags(kMachineFlag::LostEvents);
+ return false;
+
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_VOUCHER_CREATE_ATTR_DATA):
+ // trace event data is
+ // data, data, data, data
+ //
+ // event tid is voucher address!
+ if (auto voucher = process_event_voucher_lookup(event.tid(), UINT32_MAX)) {
+ voucher->add_content_bytes(event.arg1_as_pointer());
+ }
+ return true;
+
+ default:
+ break;
+ }
+
+ MachineThread<SIZE>* event_thread = process_event_tid_lookup(event.tid(), now);
+
+ switch (event.dbg_cooked()) {
+ case BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXIT): // the exit syscall never returns, use this instead
+ // arg1 == pid exiting
+ // arg2 == exit code
+ if (event.is_func_end()) {
+ // Can BSD_PROC_EXIT be called from another context?
+ ASSERT((pid_t)event.arg1() == event_thread->process().pid(), "proc_exit pid does not match context pid");
+ event_thread->mutable_process().set_exit_by_syscall(now, (int32_t)event.arg2());
+ }
+ break;
+
+ case TRACE_DATA_NEWTHREAD: {
+ ASSERT(event.is_func_none(), "TRACE_DATA_NEWTHREAD event has start/end bit set. Should be func_none.");
+ //
+ // This is called by the thread creating the new thread.
+ //
+ // The thread id of the new thread is in arg1.
+ // The pid of the process creating the new thread is in arg2.
+ //
+ // NOTE! This event carries enough information to create a thread, which we do.
+ // However, the immediately following TRACE_STRING_NEWTHREAD does not have the
+ // newly_created thread's tid. We cannot assume that we will always be able to
+ // read these events as a pair, they may be split by a particularly unlucky
+ // buffer snapshot.
+ //
+ // We have a "thread nursery" which we use to associate the tid of the new
+ // thread with the creating thread.
+ //
+ // (During fork operations, the "parent" may appear different from the child;
+ // this is why we cannot reuse the parent's name and ignore the STRING event.)
+ //
+ auto new_thread_id = (typename SIZE::ptr_t)event.arg1();
+ auto new_thread_pid = (int32_t)event.arg2();
+
+ MachineProcess<SIZE>* new_process = youngest_mutable_process(new_thread_pid);
+ kMachineThreadFlag new_thread_flags;
+
+ //
+ // Okay, it looks like we cannot pay much attention to the source of thread
+ // creates, the system will create a thread for anyone at any time, in any
+ // place. The new model is to lookup the pid of the new thread, and if it
+ // exists and is live, use that. Otherwise, fork-exec a new process.
+ //
+
+ if (new_process) {
+ new_thread_flags = kMachineThreadFlag::CreatedByTraceDataNewThread;
+ } else {
+ new_thread_flags = (kMachineThreadFlag)((uint32_t)kMachineThreadFlag::CreatedByForkExecEvent |
+ (uint32_t)kMachineThreadFlag::IsMain);
+
+ auto new_process_flags = (kMachineProcessFlag)((uint32_t)kMachineProcessFlag::CreatedByForkExecEvent |
+ (uint32_t)kMachineProcessFlag::IsForkExecInProgress);
+
+ // Create the new process
+ new_process = create_process(new_thread_pid, "###Fork#Exec###", now, new_process_flags);
+ }
+ ASSERT(new_process, "Sanity");
+ ASSERT(!new_process->is_trace_terminated(), "Sanity");
+ ASSERT(new_thread_pid != 0 || new_process == _kernel_task, "Sanity");
+ new_process->add_thread(create_thread(new_process, new_thread_id, &UnsetVoucher, now, new_thread_flags));
+ break;
+ }
+
+ case TRACEDBG_CODE(DBG_TRACE_DATA, TRACE_DATA_THREAD_TERMINATE): {
+ ASSERT(event.is_func_none(), "Sanity");
+ typename SIZE::ptr_t terminated_tid = event.arg1();
+ // If tid == terminated_tid, we need to handle the lookup below differently
+ ASSERT(event.tid() != terminated_tid, "Should not be possible");
+ MachineThread<SIZE>* terminated_thread = process_event_tid_lookup(terminated_tid, now);
+ terminated_thread->set_trace_terminated(now);
+
+ // Was this the last thread for a given process?
+ bool all_threads_trace_terminated = true;
+ MachineProcess<SIZE>& process = terminated_thread->mutable_process();
+ for (auto thread : process.threads()) {
+ if (!thread->is_trace_terminated()) {
+ all_threads_trace_terminated = false;
+ break;
+ }
+ }
+
+ if (all_threads_trace_terminated) {
+ process.set_trace_terminated(now);
+ }
+ break;
+ }
+
+ case TRACE_DATA_EXEC: {
+ ASSERT(event.is_func_none(), "TRACE_DATA_EXEC event has start/end bit set. Should be func_none.");
+
+ ASSERT(!event_thread->is_trace_terminated(), "Thread that is trace terminated is exec'ing");
+ ASSERT(!event_thread->process().is_kernel(), "Kernel process is exec'ing");
+ ASSERT(!event_thread->is_idle_thread(), "IDLE thread is exec'ing");
+
+ // arg1 == pid
+ int32_t exec_pid = (int32_t)event.arg1();
+ ASSERT(exec_pid != -1, "Kernel thread is exec'ing");
+ ASSERT(exec_pid == event_thread->process().pid() || event_thread->process().is_unknown(), "Pids should match. If not, maybe vfork?");
+
+ if (event_thread->process().is_fork_exec_in_progress()) {
+ ASSERT(event_thread->process().threads().size() == 1, "Fork invariant violated");
+ // event_thread->mutable_process().clear_fork_exec_in_progress();
+
+ // Hmmm.. Do we need to propagate an apptype here?
+ } else {
+ //
+ // Creating a new process will automagically clean up the
+ // existing one, setting the last known timestamp, and "PidReused"
+ //
+ auto exec_thread_flags = (kMachineThreadFlag)((uint32_t)kMachineThreadFlag::CreatedByExecEvent |
+ (uint32_t)kMachineThreadFlag::IsMain);
+
+ auto exec_process_flags = (kMachineProcessFlag)((uint32_t)kMachineProcessFlag::CreatedByExecEvent |
+ (uint32_t)kMachineProcessFlag::IsExecInProgress);
+
+ auto exec_process = create_process(exec_pid, "###Exec###", now, exec_process_flags);
+ MachineThread<SIZE>* exec_thread = create_thread(exec_process, event_thread->tid(), &UnsetVoucher, now, exec_thread_flags);
+ exec_process->add_thread(exec_thread);
+
+ int32_t apptype = event_thread->process().apptype();
+ if (apptype != -1) {
+ exec_process->set_apptype(apptype);
+ }
+ }
+ break;
+ }
+
+ case TRACE_STRING_EXEC: {
+ ASSERT(event.is_func_none(), "TRACE_STRING_EXEC event has start/end bit set. Should be func_none.");
+ ASSERT(event_thread->mutable_process().is_exec_in_progress() ||
+ event_thread->mutable_process().is_fork_exec_in_progress(), "Must be exec or fork-exec in progress to be here");
+
+ set_process_name(&event_thread->mutable_process(), event.all_args_as_string().c_str());
+
+ if (event_thread->process().is_exec_in_progress())
+ event_thread->mutable_process().clear_exec_in_progress();
+ else
+ event_thread->mutable_process().clear_fork_exec_in_progress();
+ break;
+ }
+
+ case MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0):
+ if (event.is_func_start()) {
+ cpu.set_intr(now);
+ } else {
+ cpu.clear_intr(now);
+ }
+ break;
+
+ case MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED):
+ case MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF): {
+ // The deactivate switch to the idle thread should have happened before we see an actual
+ // context switch for this cpu.
+ ASSERT(!cpu.is_deactivate_switch_to_idle_thread(), "State machine fail");
+
+ typename SIZE::ptr_t handoff_tid = event.arg2();
+ // If the handoff tid and the event_tid are the same, the lookup will fail an assert due to timestamps going backwards.
+ MachineThread<SIZE>* handoff_thread = (handoff_tid == event.tid()) ? event_thread : process_event_tid_lookup(handoff_tid, now);
+ ASSERT(handoff_thread, "Sanity");
+
+ // If we marked a given thread as unrunnable in idle, or the MKRUNNABLE wasn't emitted, make sure we
+ // mark the thread as runnable now.
+ handoff_thread->make_runnable(now);
+ cpu.context_switch(handoff_thread, event_thread, now);
+
+ if (!event_thread->is_idle_thread()) {
+ if (event_thread->tid() != event.arg2()) {
+ if ((event.arg1() & (AST_PREEMPT | AST_QUANTUM | AST_URGENT | AST_HANDOFF | AST_YIELD)) == 0) {
+ event_thread->make_unrunnable(now);
+ }
+ }
+ }
+ break;
+ }
+
+ //
+ // There is a rare case of:
+ //
+ // event[795176] { timestamp=4b8074fa6bb5, arg1=0, arg2=0, arg3=0, arg4=0, tid=8ab77, end MP_CPU_DEACTIVATE, cpu=1 }
+ // event[795177] { timestamp=4b8074fa70bd, arg1=8ab77, arg2=ffffffffffffffff, arg3=0, arg4=4, tid=2d, --- MACH_SCHED_CHOOSE_PROCESSOR, cpu=1 }
+ //
+ // When a cpu shuts down via MP_CPU_DEACTIVATE, on reactivation, the cpu does a forced switch to its idle thread,
+ // without dropping a MACH_SCHED or MACH_STACK_HANDOFF. We want to catch this and update the cpu correctly, as
+ // well as marking the idle thread.
+ //
+ // This is a desktop only codepath, TRACE_MP_CPU_DEACTIVATE is defined in ./osfmk/i386/mp.c
+ //
+ case TRACE_MP_CPU_DEACTIVATE:
+ ASSERT(event_thread == cpu.thread() || !cpu.is_thread_state_initialized(), "Sanity");
+ if (event.is_func_end()) {
+ cpu.set_deactivate_switch_to_idle_thread();
+ }
+ break;
+
+ case MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_CHOOSE_PROCESSOR):
+ //
+ // I have seen a sequence of events like this, where it appears that multiple threads get re-dispatched:
+ //
+ // event[254871] { timestamp=332dd22319b, arg1=0, arg2=0, arg3=0, arg4=0, tid=1b8ab, end MP_CPU_DEACTIVATE, cpu=7 }
+ // event[254876] { timestamp=332dd22387a, arg1=1b7d9, arg2=ffffffffffffffff, arg3=e, arg4=4, tid=1b8ab, --- MACH_SCHED_CHOOSE_PROCESSOR, cpu=7 }
+ // event[254877] { timestamp=332dd223c44, arg1=e, arg2=0, arg3=0, arg4=0, tid=1b8ab, --- MACH_SCHED_REMOTE_AST, cpu=7 }
+ // event[254887] { timestamp=332dd22441c, arg1=1b8ab, arg2=ffffffffffffffff, arg3=4, arg4=4, tid=53, --- MACH_SCHED_CHOOSE_PROCESSOR, cpu=7 }
+ //
+ // We will wait until we see a tid mismatch before clearing the deactivate_switch state
+ //
+ if (cpu.is_deactivate_switch_to_idle_thread()) {
+ if (cpu.thread() == NULL || event_thread->tid() != cpu.thread()->tid()) {
+ // The choose tracepoint has the tid of the thread on cpu when it deactivated.
+ ASSERT(cpu.thread() == NULL || cpu.thread()->tid() == event.arg1(), "Sanity");
+
+ cpu.clear_deactivate_switch_to_idle_thread();
+ event_thread->set_is_idle_thread();
+ event_thread->make_runnable(now);
+ cpu.context_switch(event_thread, cpu.thread(), now);
+ }
+ }
+ break;
+
+ case MACHDBG_CODE(DBG_MACH_SCHED, MACH_MAKE_RUNNABLE):
+ event_thread->make_runnable(now);
+ break;
+
+ case MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE):
+ if (event.is_func_start()) {
+ cpu.set_idle(now);
+ } else {
+ cpu.clear_idle(now);
+ }
+ break;
+
+ case MACHDBG_CODE(DBG_MACH_VM, 2 /* MACH_vmfault is hardcoded as 2 */):
+ if (event.is_func_start())
+ event_thread->begin_vm_fault(now);
+ else
+ event_thread->end_vm_fault(now);
+ break;
+
+ case BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM):
+ case BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_JETSAM_HIWAT):
+ if (event.is_func_end()) {
+ if (pid_t pid = (pid_t)event.arg2()) {
+ //
+ // The time for this kill is already covered by the MEMSTAT_scan.
+ // We still want to mark the victim process as jetsam killed, though.
+ // We need to look up the victim, which is the pid in arg2.
+ //
+ if (MachineProcess<SIZE>* victim = youngest_mutable_process(pid)) {
+ ASSERT(!victim->is_exiting(), "Jetsam killing already dead process");
+ // This isn't technically impossible, but as a practical matter it is more likely
+ // signalling a bug than that the pid counter wrapped and this pid was reused.
+ ASSERT(!victim->is_kernel(), "Cannot jetsam kernel");
+ victim->set_exit_by_jetsam(now);
+ } else {
+ ASSERT(false, "Unable to find jetsam victim pid");
+ }
+ }
+ }
+ break;
+
+ case BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_SCAN):
+ case BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_UPDATE):
+ case BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_FREEZE):
+ if (event.is_func_start())
+ event_thread->begin_jetsam_activity(event.dbg_cooked(), now);
+ else
+ event_thread->end_jetsam_activity(event.dbg_cooked(), now);
+ break;
+
+ //
+ // IMP_TASK_APPTYPE trace args are:
+ //
+ // start:
+ // target_pid, trequested_0, trequested_1, apptype
+ // end:
+ // target_pid, trequested_0, trequested_1, is_importance_receiver
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_NONE):
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_DAEMON_INTERACTIVE):
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_DAEMON_STANDARD):
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_DAEMON_ADAPTIVE):
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_DAEMON_BACKGROUND):
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_APP_DEFAULT):
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_APP_TAL):
+ //
+ // We want to set the explicit apptype now, and trequested will not have the
+ // apptype data until the end event.
+ //
+ if (event.is_func_start()) {
+ if (pid_t pid = (pid_t)event.arg1()) {
+ if (MachineProcess<SIZE>* target = youngest_mutable_process(pid)) {
+ target->set_apptype((uint32_t)event.arg4());
+ }
+ }
+ }
+ process_trequested_task((pid_t)event.arg1(), event.arg2(), event.arg3());
+ break;
+
+ // Trace data is
+ // self_pid, audit_token_pid_from_task(task), trequested_0(task, NULL), trequested_1(task, NULL)
+ case IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, 0):
+ case IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, 1):
+ case IMPORTANCE_CODE(IMP_BOOST, IMP_BOOSTED):
+ case IMPORTANCE_CODE(IMP_BOOST, IMP_UNBOOSTED):
+ process_trequested_task((pid_t)event.arg2(), event.arg3(), event.arg4());
+ break;
+
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER): {
+ //
+ // This can be invoked against another thread; you must use arg1 as the tid.
+ //
+ // thread-tid, name, voucher, callsite-id
+ //
+ auto set_thread_tid = event.arg1();
+ MachineThread<SIZE>* set_thread = (set_thread_tid == event.tid()) ? event_thread : process_event_tid_lookup(set_thread_tid, now);
+ set_thread->set_voucher(process_event_voucher_lookup(event.arg3(), UINT32_MAX), now);
+ break;
+ }
+
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_VOUCHER_CREATE):
+ // trace event data is
+ // voucher address, voucher table size, system voucher count, voucher content bytes
+ create_voucher(event.arg1(), now, kMachineVoucherFlag::CreatedByVoucherCreate, (uint32_t)event.arg4());
+ break;
+
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_VOUCHER_DESTROY):
+ destroy_voucher(event.arg1(), now);
+ break;
+
+ // The MachMsg state chart...
+ //
+ // The "key" to the mach msg is the kmsg_addr.
+ //
+ // We can encounter a given kmsg_addr in any of
+ // four possible states:
+ //
+ // UNINITIALIZED
+ // SEND
+ // RECV
+ // FREE
+ //
+ // These are the legal state transitions:
+ // (transition to UNINITIALIZED is not possible)
+ //
+ // UNIN -> SEND ; Accept as FREE -> SEND
+ // UNIN -> RECV ; Accept as SEND -> RECV
+ // UNIN -> FREE ; Accept as FREE -> FREE
+ //
+ // SEND -> SEND ; ERROR!
+ // SEND -> RECV ; User to User IPC, send message to machine
+ // SEND -> FREE ; User to Kernel IPC, recycle.
+ //
+ // RECV -> SEND ; ERROR!
+ // RECV -> RECV ; ERROR!
+ // RECV -> FREE ; End User IPC
+ //
+ // FREE -> SEND ; Begin User IPC
+ // FREE -> RECV ; Kernel to User IPC
+ // FREE -> FREE ; Kernel to Kernel IPC
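+ //
+ // Illustrative walk-through (assumed ordering) of a single user to
+ // user message at one kmsg_addr:
+ //
+ // MACH_IPC_MSG_SEND ; FREE -> SEND, sender tid/voucher recorded
+ // MACH_IPC_MSG_RECV ; SEND -> RECV, a MachineMachMsg is emitted
+ // MACH_IPC_KMSG_FREE ; RECV -> FREE, kmsg buffer is recycled
+ //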
+
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_SEND): {
+ // trace event data is:
+ // kmsg_addr, msgh_bits, msgh_id, voucher_addr,
+ auto kmsg_addr = event.arg1();
+ auto msgh_bits = (uint32_t)event.arg2();
+ auto msgh_id = (uint32_t)event.arg3();
+ auto voucher_addr = event.arg4();
+
+ auto nursery_it = _mach_msg_nursery.find(kmsg_addr);
+ if (nursery_it == _mach_msg_nursery.end()) {
+ nursery_it = _mach_msg_nursery.emplace(kmsg_addr, kmsg_addr).first;
+ }
+
+ auto& nursery_msg = nursery_it->second;
+
+ switch (nursery_msg.state()) {
+ // SEND -> SEND ; ERROR!
+ // RECV -> SEND ; ERROR!
+ case kNurseryMachMsgState::Send:
+ ASSERT(false, "illegal state transition (SEND -> SEND) in nursery mach msg");
+ break;
+ case kNurseryMachMsgState::Recv:
+ ASSERT(false, "illegal state transition (RECV -> SEND) in nursery mach msg");
+ break;
+
+ // UNIN -> SEND ; Accept as FREE -> SEND
+ // FREE -> SEND ; Begin User IPC
+ case kNurseryMachMsgState::Uninitialized:
+ case kNurseryMachMsgState::Free: {
+ uintptr_t event_index = &event - _events;
+ nursery_msg.send(event_index, event.timestamp(), event.tid(), kmsg_addr, msgh_bits, msgh_id, process_event_voucher_lookup(voucher_addr, msgh_bits));
+ break;
+ }
+ }
+ // We do the state set here so that release builds
+ // sync to current state when errors are encountered
+ nursery_msg.set_state(kNurseryMachMsgState::Send);
+ break;
+ }
+
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV):
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV_VOUCHER_REFUSED):
+ {
+ // trace event data is
+ // kmsg_addr, msgh_bits, msgh_id, voucher_addr
+ auto kmsg_addr = event.arg1();
+ auto msgh_bits = (uint32_t)event.arg2();
+ auto voucher_addr = event.arg4();
+
+ auto nursery_it = _mach_msg_nursery.find(kmsg_addr);
+ if (nursery_it == _mach_msg_nursery.end()) {
+ nursery_it = _mach_msg_nursery.emplace(kmsg_addr, kmsg_addr).first;
+ }
+
+ auto& nursery_msg = nursery_it->second;
+
+ uint32_t flags = (event.dbg_code() == MACH_IPC_MSG_RECV_VOUCHER_REFUSED) ? kMachineMachMsgFlag::IsVoucherRefused : 0;
+ uintptr_t event_index = &event - _events;
+
+ switch (nursery_msg.state()) {
+
+ // UNIN -> RECV ; Accept as SEND -> RECV
+ case kNurseryMachMsgState::Uninitialized: {
+ flags |= kMachineMachMsgFlag::HasReceiver;
+
+ auto mach_msg_it = _mach_msgs.emplace(_mach_msgs.end(),
+ NurseryMachMsg<SIZE>::message_id(),
+ kmsg_addr,
+ flags,
+ AbsTime(0),
+ 0,
+ 0,
+ &Machine<SIZE>::UnsetVoucher,
+ now,
+ event.tid(),
+ msgh_bits,
+ process_event_voucher_lookup(voucher_addr, msgh_bits));
+
+ ASSERT(_mach_msgs_by_event_index.find(event_index) == _mach_msgs_by_event_index.end(), "Stomping mach msg");
+ _mach_msgs_by_event_index[event_index] = std::distance(_mach_msgs.begin(), mach_msg_it);
+ break;
+ }
+
+ // SEND -> RECV ; User to User IPC, send message to machine
+ case kNurseryMachMsgState::Send: {
+ ASSERT(kmsg_addr == nursery_msg.kmsg_addr(), "Sanity");
+ ASSERT((uint32_t)event.arg3() == nursery_msg.send_msgh_id(), "Sanity");
+
+ flags |= (kMachineMachMsgFlag::HasSender | kMachineMachMsgFlag::HasReceiver);
+
+ auto mach_msg_it = _mach_msgs.emplace(_mach_msgs.end(),
+ nursery_msg.id(),
+ kmsg_addr,
+ flags,
+ nursery_msg.send_time(),
+ nursery_msg.send_tid(),
+ nursery_msg.send_msgh_bits(),
+ nursery_msg.send_voucher(),
+ now,
+ event.tid(),
+ msgh_bits,
+ process_event_voucher_lookup(voucher_addr, msgh_bits));
+
+ intptr_t send_event_index = nursery_msg.send_event_index();
+ if (send_event_index != -1) {
+ ASSERT(send_event_index < _event_count, "Sanity");
+ ASSERT(_mach_msgs_by_event_index.find(event_index) == _mach_msgs_by_event_index.end(), "Stomping mach msg");
+ _mach_msgs_by_event_index[send_event_index] = std::distance(_mach_msgs.begin(), mach_msg_it);
+ }
+ ASSERT(_mach_msgs_by_event_index.find(event_index) == _mach_msgs_by_event_index.end(), "Stomping mach msg");
+ _mach_msgs_by_event_index[event_index] = std::distance(_mach_msgs.begin(), mach_msg_it);
+ break;
+ }
+
+ // RECV -> RECV ; ERROR!
+ case kNurseryMachMsgState::Recv:
+ ASSERT(false, "illegal state transition (RECV -> RECV) in nursery mach msg");
+ break;
+
+ // FREE -> RECV ; Kernel to User IPC
+ case kNurseryMachMsgState::Free:
+ break;
+ }
+
+ // We do the state set here so that release builds
+ // sync to current state when errors are encountered
+ nursery_msg.set_state(kNurseryMachMsgState::Recv);
+ break;
+ }
+
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_FREE): {
+ // trace event data is:
+ // kmsg_addr
+
+ auto kmsg_addr = event.arg1();
+
+ auto nursery_it = _mach_msg_nursery.find(kmsg_addr);
+ if (nursery_it == _mach_msg_nursery.end()) {
+ nursery_it = _mach_msg_nursery.emplace(kmsg_addr, kmsg_addr).first;
+ }
+
+ auto& nursery_msg = nursery_it->second;
+
+
+ // All transitions to FREE are legal.
+ nursery_msg.set_state(kNurseryMachMsgState::Free);
+ break;
+ }
+
+ default:
+ // IO Activity
+ //
+ // There isn't an easy way to handle these inside the switch; the
+ // code is used as a bitfield.
+
+
+ //
+ // Okay temp note on how to approach this.
+ //
+ // Even a single thread may have overlapping IO activity.
+ // None of the current schemes handle overlapped activity well.
+ //
+ // We'd like to be able to show for any given interval, "X pages IO outstanding, Y pages completed, Z ms waiting"
+ //
+ // To do that, we've got to be able to intersect an arbitrary interval with a pile of overlapping intervals.
+ //
+ // The approach is to accumulate the IO activity into a single vector.
+ // Sort by interval.location().
+ // Now flatten this vector (union flatten).
+ // This will produce a second vector of non-overlapping intervals.
+ // When we want to intersect the arbitrary interval, we do the standard search on the non-overlapping interval vector.
+ // This will give us starting and ending locations that are guaranteed to cover every IO that might intersect.
+ //
+ // The assumption is that while IO's overlap, they don't stay active forever. Sooner or later there will be a break.
+ //
+ // The arch-nemesis of this scheme is the light overlap, like so:
+ //
+ // XXXXX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXX
+ // XXXXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXX
+
+
+ //
+ // It turns out that IO can overlap inside a single thread, for example:
+ //
+ // 437719 C73AD5945 --- P_RdDataAsync 209b9f07 1000002 6b647 5000 2A72 1 gamed 293
+ // 437724 C73AD5DCA --- P_RdDataAsync 209b7e37 1000002 6b64c 6000 2A72 1 gamed 293
+ // 437822 C73AD98B0 --- P_RdDataAsyncDone 209b7e37 4dfe3eef 0 0 191 1 kernel_task 0
+ // 437829 C73AD9E55 --- P_RdDataAsyncDone 209b9f07 4dfe3eef 0 0 191 1 kernel_task 0
+ //
+
+ if (event.dbg_class() == DBG_FSYSTEM && event.dbg_subclass() == DBG_DKRW) {
+ uint32_t code = event.dbg_code();
+ //
+ // Disk IO doesn't use func_start/func_end
+ //
+ // arg1 == uid
+ // arg4 == size
+ if (code & DKIO_DONE) {
+ this->end_io(now, event.arg1());
+ } else {
+
+ // IO is initiated by a given process/thread, but it always finishes on a kernel_thread.
+ // We need to stash enough data to credit the correct thread when the completion event arrives.
+ begin_io(event_thread, now, event.arg1(), event.arg4());
+ }
+ } else if (event.dbg_class() == DBG_IMPORTANCE) {
+ //
+ // Every task policy set trace code carries "trequested" data, we would like to grab them all.
+ //
+ // This subclass spans the range of 0x20 through 0x3F
+ //
+
+ uint32_t subclass = event.dbg_subclass();
+ if (subclass >= 0x20 && subclass <= 0x3F) {
+ // Trace event data is
+ // targetid(task, thread), trequested_0(task, thread), trequested_1(task, thread), value
+
+ bool is_task_event = (event.dbg_code() & TASK_POLICY_TASK) > 0;
+
+ // Should not be both a task and thread event.
+ ASSERT(is_task_event != (event.dbg_code() & TASK_POLICY_THREAD), "BEWM!");
+
+ if (is_task_event) {
+ process_trequested_task((pid_t)event.arg1(), event.arg2(), event.arg3());
+ } else {
+ process_trequested_thread(event.arg1(), event.arg2(), event.arg3());
+ }
+ }
+ }
+ break;
+ }
+ }
+
+ return true;
+}
+
+template <typename SIZE>
+void Machine<SIZE>::initialize_cpu_idle_intr_states() {
+ ASSERT(_event_count, "Sanity");
+ ASSERT(_events, "Sanity");
+ ASSERT(!_cpus.empty(), "Sanity");
+
+ // How much work do we need to do?
+ uint32_t inits_needed = 0;
+ uint32_t inits_done = 0;
+ for (auto& cpu : _cpus) {
+ if (!cpu.is_iop()) {
+ inits_needed += 3;
+
+ if (cpu.is_intr_state_initialized()) {
+ inits_done++;
+ }
+ if (cpu.is_idle_state_initialized()) {
+ inits_done++;
+ }
+ if (cpu.is_thread_state_initialized()) {
+ inits_done++;
+ }
+ }
+ }
+
+ uintptr_t index;
+ for (index = 0; index < _event_count; ++index) {
+ const KDEvent<SIZE>& event = _events[index];
+ ASSERT(event.cpu() > -1 && event.cpu() < _cpus.size(), "cpu_id out of range");
+ MachineCPU<SIZE>& cpu = _cpus[event.cpu()];
+
+ if (!cpu.is_iop()) {
+ switch (event.dbg_cooked()) {
+ case TRACE_LOST_EVENTS:
+ // We're done, give up.
+ return;
+
+ case MACHDBG_CODE(DBG_MACH_EXCP_INTR, 0):
+ if (!cpu.is_intr_state_initialized()) {
+ inits_done++;
+
+ if (event.is_func_start()) {
+ // If we are starting an INTR now, the cpu was not in INTR prior to now.
+ cpu.initialize_intr_state(false, _events[0].timestamp());
+ } else {
+ // If we are ending an INTR now, the cpu was in INTR prior to now.
+ cpu.initialize_intr_state(true, _events[0].timestamp());
+ }
+ }
+ break;
+
+ case MACHDBG_CODE(DBG_MACH_SCHED, MACH_IDLE):
+ if (!cpu.is_idle_state_initialized()) {
+ inits_done++;
+
+ if (event.is_func_start()) {
+ // If we are starting Idle now, the cpu was not Idle prior to now.
+ cpu.initialize_idle_state(false, _events[0].timestamp());
+ } else {
+ // If we are ending Idle now, the cpu was Idle prior to now.
+ cpu.initialize_idle_state(true, _events[0].timestamp());
+ }
+ }
+ break;
+
+ // I spent a day tracking this down....
+ //
+ // When you are actively sampling (say, every 100ms) on a machine
+ // that is mostly idle, there will be long periods of VERY idle
+ // cpus. So you might get a sample with no begin/end idle at all,
+ // but the cpu is actually idle the entire time. Now suppose in
+ // the next sample, you get a simple idle timeout in the middle,
+ // and immediately go back to idle. If we treat any TID found on
+ // cpu as "running", we blow up because this segment appears to
+ // have the idle thread "running".
+ //
+ // So, to do a proper thread init, we require actual scheduler
+ // activity to tell us who the thread was.
+ case MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED):
+ case MACHDBG_CODE(DBG_MACH_SCHED, MACH_STACK_HANDOFF):
+ if (!cpu.is_thread_state_initialized()) {
+ inits_done++;
+
+ // We want to use the thread that *was* on cpu, not the thread being
+ // handed off to.
+ MachineThread<SIZE>* init_thread = youngest_mutable_thread(event.tid());
+ // Legal for this to be NULL!
+ cpu.initialize_thread_state(init_thread, _events[0].timestamp());
+
+ }
+ break;
+ }
+ }
+
+ if (inits_done == inits_needed) {
+ break;
+ }
+ }
+}
+
+template <typename SIZE>
+void Machine<SIZE>::begin_io(MachineThread<SIZE>* thread, AbsTime begin_time, typename SIZE::ptr_t uid, typename SIZE::ptr_t size) {
+ auto it = _io_by_uid.find(uid);
+ if (it == _io_by_uid.end()) {
+ _io_by_uid.emplace(uid, IOActivity<SIZE>(begin_time, AbsTime(0), thread, size));
+ } else {
+ // We shouldn't find a valid IO entry at the uid we're installing.
+ ASSERT(it->second.thread() == NULL, "Overwriting existing io entry");
+ ASSERT(it->second.location() == 0, "Overwriting existing io entry");
+
+ it->second = IOActivity<SIZE>(begin_time, AbsTime(0), thread, size);
+ }
+}
+
+template <typename SIZE>
+void Machine<SIZE>::end_io(AbsTime end_time, typename SIZE::ptr_t uid) {
+ auto it = _io_by_uid.find(uid);
+
+ // It's okay not to find a match; a trace can begin with a Done event, for example.
+ if (it != _io_by_uid.end()) {
+ MachineThread<SIZE>* io_thread = it->second.thread();
+ AbsTime begin_time = it->second.location();
+ ASSERT(end_time > it->second.location(), "Sanity");
+
+ _all_io.emplace_back(begin_time, end_time - begin_time, io_thread, it->second.size());
+
+ DEBUG_ONLY({
+ it->second.set_thread(NULL);
+ it->second.set_location(AbsTime(0));
+ it->second.set_length(AbsTime(0));
+ })
+ }
+}
--- /dev/null
+//
+// MachineCPU.hpp
+// KDBG
+//
+// Created by James McIlree on 10/26/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+//
+// Much simplified cpu/thread state model.
+//
+// 1) A thread is *always* on cpu. Always. The only exception is during
+// initialization, when the thread on cpu is unknown.
+//
+// 2) There are three states possible: Running, IDLE, INTR.
+// A thread may move from any state to any other state.
+// A thread may not take an INTR while in INTR.
+// It is illegal to context_switch in IDLE or INTR.
+//
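+//
+// A sketch of the legal state transitions implied by the rules above
+// (Running meaning "neither IDLE nor INTR"):
+//
+// Running <-> IDLE
+// Running <-> INTR
+// IDLE <-> INTR (an interrupt may arrive while idle)
+//
+// INTR -> INTR is illegal, and context_switch is only legal in Running.
+//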
+
+enum class kMachineCPUFlag : std::uint32_t {
+ IsStateIdleInitialized = 0x00000001, // Set when the idle state at event[0] has been identified.
+ IsStateINTRInitialized = 0x00000002, // Set when the INTR state at event[0] has been identified.
+ IsStateThreadInitialized = 0x00000004, // Set when the on-cpu thread at event[0] has been identified (may be NULL, for threads not known at the time of event[0])
+ IsStateIdle = 0x00000008, // Set if the cpu is Idle
+ IsStateINTR = 0x00000010, // Set if the cpu is servicing an interrupt
+ IsStateDeactivatedForcedSwitchToIdleThread = 0x00000020, // OSX only; set when the cpu is deactivated and on wake forces a switch to its idle thread without a context switch tracepoint
+ IsIOP = 0x10000000 // Set if the cpu is an IOP
+};
+
+template <typename SIZE>
+class MachineCPU {
+ protected:
+
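+ // ThreadOnCPU stashes the "is event zero init thread" flag in the low
+ // bit of the MachineThread pointer; the pointer is at least 2-byte
+ // aligned, so bit 0 is otherwise always zero (see thread() below).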
+ class ThreadOnCPU {
+ protected:
+ MachineThread<SIZE>* _thread;
+ AbsTime _timestamp;
+ public:
+ ThreadOnCPU(MachineThread<SIZE>* thread, bool is_event_zero_init_thread, AbsTime timestamp) :
+ _thread(thread),
+ _timestamp(timestamp)
+ {
+ if (is_event_zero_init_thread)
+ _thread = (MachineThread<SIZE>*)((uintptr_t)_thread | 1);
+ }
+
+ MachineThread<SIZE>* thread() { return (MachineThread<SIZE>*)((uintptr_t)_thread & ~0x1); }
+ AbsTime timestamp() { return _timestamp; }
+ bool is_event_zero_init_thread() { return (uintptr_t)_thread & 0x1; }
+ };
+
+ int32_t _id;
+ uint32_t _flags;
+ std::string _name; // IOP's have names, AP's will be "AP"
+ std::vector<CPUActivity<SIZE>> _timeline;
+
+ // State used only during initialization
+ MachineThread<SIZE>* _thread;
+ AbsTime _begin_idle;
+ AbsTime _begin_intr;
+ std::vector<ThreadOnCPU> _cpu_runq;
+ std::vector<AbsInterval> _cpu_intr;
+ std::vector<AbsInterval> _cpu_idle;
+
+ friend class Machine<SIZE>;
+
+ bool is_running() const { return (_flags & ((uint32_t)kMachineCPUFlag::IsStateIdle | (uint32_t)kMachineCPUFlag::IsStateINTR)) == 0; }
+
+ bool is_idle() const { return (_flags & (uint32_t)kMachineCPUFlag::IsStateIdle) > 0; }
+ void set_idle(AbsTime timestamp);
+ void clear_idle(AbsTime timestamp);
+
+ bool is_deactivate_switch_to_idle_thread() const { return (_flags & (uint32_t)kMachineCPUFlag::IsStateDeactivatedForcedSwitchToIdleThread) > 0; }
+ void set_deactivate_switch_to_idle_thread();
+ void clear_deactivate_switch_to_idle_thread();
+
+ bool is_idle_state_initialized() const { return (_flags & (uint32_t)kMachineCPUFlag::IsStateIdleInitialized) > 0; }
+ void initialize_idle_state(bool value, AbsTime timestamp);
+
+ bool is_intr() const { return (_flags & (uint32_t)kMachineCPUFlag::IsStateINTR) > 0; }
+ void set_intr(AbsTime timestamp);
+ void clear_intr(AbsTime timestamp);
+
+ bool is_intr_state_initialized() const { return (_flags & (uint32_t)kMachineCPUFlag::IsStateINTRInitialized) > 0; }
+ void initialize_intr_state(bool state, AbsTime timestamp);
+
+ void context_switch(MachineThread<SIZE>* to_thread, MachineThread<SIZE>* from_thread, AbsTime timestamp);
+
+ bool is_thread_state_initialized() const { return (_flags & (uint32_t)kMachineCPUFlag::IsStateThreadInitialized) > 0; }
+ void initialize_thread_state(MachineThread<SIZE>* thread, AbsTime timestamp);
+
+ MachineThread<SIZE>* thread() { return _thread; }
+
+ // This is called after all events have been processed, to allow the
+ // cpu timelines to be collapsed and post processed.
+ void post_initialize(AbsInterval events_interval);
+
+ public:
+ MachineCPU(int32_t id, bool is_iop, std::string name) :
+ _id(id),
+ _flags(is_iop ? (uint32_t)kMachineCPUFlag::IsIOP : 0),
+ _name(name),
+ _thread(nullptr),
+ _begin_idle(AbsTime(0)),
+ _begin_intr(AbsTime(0))
+ {
+ }
+
+ int32_t id() const { return _id; }
+ void set_id(int32_t id) { ASSERT(_id == -1, "Attempt to set id twice"); _id = id; }
+
+ bool is_iop() const { return (_flags & (uint32_t)kMachineCPUFlag::IsIOP) > 0; }
+
+ bool is_active() const { return !_timeline.empty(); }
+
+ const char* name() const { return _name.c_str(); }
+
+ const std::vector<CPUActivity<SIZE>>& timeline() const { return _timeline; }
+ const CPUActivity<SIZE>* activity_for_timestamp(AbsTime timestamp) const;
+};
--- /dev/null
+//
+// MachineCPU.impl.hpp
+// KDBG
+//
+// Created by James McIlree on 11/7/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+//
+// NOTE! activity match behavior explanation...
+//
+// CPUActivity entries are contiguous; there are no holes in the timeline.
+//
+// Note the operator< definitions above; std::lower_bound is not using the
+// default AbsInterval <. The comparisons are against the interval(s) max() - 1.
+//
+// std::lower_bound returns a match doing <=, std::upper_bound returns a match doing <
+//
+// 8/26/13...
+//
+// Okay, based on a better understanding of the behavior of xxx_bound, this
+// should be switchable to std::upper_bound using a comparator without the
+// subtraction, and so slightly more efficient.
+//
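+// For example (made-up values): with a timeline of [0, 10) [10, 25) [25, 30),
+// a lookup at t=12 lands on [10, 25), while a lookup at t=35 walks off the
+// end and returns NULL after the contains() check below.
+//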
+
+template <typename SIZE>
+const CPUActivity<SIZE>* MachineCPU<SIZE>::activity_for_timestamp(AbsTime timestamp) const {
+ auto it = std::upper_bound(_timeline.begin(), _timeline.end(), timestamp, AbsIntervalMaxVsAbsTimeComparator());
+
+ // The upper_bound will report that 0 is lower than [ 10, 20 ); we still need to check contains()!
+ if (it != _timeline.end() && it->contains(timestamp)) {
+ return &*it;
+ }
+
+ return NULL;
+}
--- /dev/null
+//
+// MachineCPU.mutable-impl.hpp
+// KDBG
+//
+// Created by James McIlree on 11/7/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+template <typename SIZE>
+void MachineCPU<SIZE>::set_idle(AbsTime timestamp) {
+ ASSERT(is_idle_state_initialized(), "Setting idle before state was initialized");
+ ASSERT(!is_intr(), "Setting idle while in interrupt");
+ ASSERT(!is_idle(), "Setting idle while already idle");
+ ASSERT(_begin_idle == 0, "Sanity");
+
+ _begin_idle = timestamp;
+ _flags |= (uint32_t)kMachineCPUFlag::IsStateIdle;
+}
+
+template <typename SIZE>
+void MachineCPU<SIZE>::clear_idle(AbsTime timestamp) {
+ ASSERT(is_idle_state_initialized(), "Clearing idle before state was initialized");
+ ASSERT(!is_intr(), "Clearing idle while in interrupt");
+ ASSERT(is_idle(), "Clearing idle while not idle");
+
+ _cpu_idle.emplace_back(_begin_idle, timestamp - _begin_idle);
+ DEBUG_ONLY(_begin_idle = AbsTime(0);)
+ _flags &= ~(uint32_t)kMachineCPUFlag::IsStateIdle;
+}
+
+template <typename SIZE>
+void MachineCPU<SIZE>::set_deactivate_switch_to_idle_thread() {
+ ASSERT(!is_deactivate_switch_to_idle_thread(), "State already set");
+ ASSERT(!is_intr(), "This state should not occur during INTR");
+
+ _flags |= (uint32_t)kMachineCPUFlag::IsStateDeactivatedForcedSwitchToIdleThread;
+}
+
+template <typename SIZE>
+void MachineCPU<SIZE>::clear_deactivate_switch_to_idle_thread() {
+ ASSERT(is_deactivate_switch_to_idle_thread(), "Clearing state when not set");
+ ASSERT(!is_intr(), "This state transition should not occur during INTR");
+
+ _flags &= ~(uint32_t)kMachineCPUFlag::IsStateDeactivatedForcedSwitchToIdleThread;
+}
+
+template <typename SIZE>
+void MachineCPU<SIZE>::initialize_idle_state(bool is_idle, AbsTime timestamp) {
+ ASSERT(!is_idle_state_initialized(), "Attempt to initialize Idle state more than once");
+ ASSERT(!this->is_idle(), "Attempt to initialize Idle state while already idle");
+
+ if (is_idle) {
+ _begin_idle = timestamp;
+ _flags |= (uint32_t)kMachineCPUFlag::IsStateIdle;
+ }
+
+ _flags |= (uint32_t)kMachineCPUFlag::IsStateIdleInitialized;
+}
+
+template <typename SIZE>
+void MachineCPU<SIZE>::set_intr(AbsTime timestamp) {
+ // We can take an INTR in state Unknown, IDLE, and RUNNING.
+ ASSERT(is_intr_state_initialized(), "Setting INTR before state was initialized");
+ ASSERT(!is_intr(), "Setting INTR when already in state INTR");
+ ASSERT(_begin_intr == 0, "Sanity");
+
+ _begin_intr = timestamp;
+ _flags |= (uint32_t)kMachineCPUFlag::IsStateINTR;
+}
+
+template <typename SIZE>
+void MachineCPU<SIZE>::clear_intr(AbsTime timestamp) {
+ ASSERT(is_intr_state_initialized(), "Clearing INTR before state was initialized");
+ ASSERT(is_intr(), "Clearing INTR when not in INTR");
+
+ _cpu_intr.emplace_back(_begin_intr, timestamp - _begin_intr);
+ DEBUG_ONLY(_begin_intr = AbsTime(0);)
+ _flags &= ~(uint32_t)kMachineCPUFlag::IsStateINTR;
+}
+
+template <typename SIZE>
+void MachineCPU<SIZE>::initialize_intr_state(bool is_intr, AbsTime timestamp) {
+ ASSERT(!is_intr_state_initialized(), "Attempt to initialize INTR state more than once");
+ ASSERT(!this->is_intr(), "Attempt to initialize INTR state while already INTR");
+
+ if (is_intr) {
+ _begin_intr = timestamp;
+ _flags |= (uint32_t)kMachineCPUFlag::IsStateINTR;
+ }
+
+ _flags |= (uint32_t)kMachineCPUFlag::IsStateINTRInitialized;
+}
+
+template <typename SIZE>
+void MachineCPU<SIZE>::initialize_thread_state(MachineThread<SIZE>* init_thread, AbsTime timestamp) {
+ ASSERT(!is_thread_state_initialized(), "Attempt to initialize thread state more than once");
+ ASSERT(!_thread, "Sanity");
+
+ // When initializing the thread state, the TID lookup may fail. This
+ // can happen if there wasn't a threadmap, or if the thread was created
+ // later in the trace. We explicitly allow NULL as a valid value here.
+ // NULL means "Go ahead and set the init flag, but we will not emit a
+ // runq event later when a real context switch happens
+
+ _flags |= (uint32_t)kMachineCPUFlag::IsStateThreadInitialized;
+ if (init_thread) {
+ _cpu_runq.emplace_back(init_thread, true, timestamp);
+ _thread = init_thread;
+ }
+}
+
+template <typename SIZE>
+void MachineCPU<SIZE>::context_switch(MachineThread<SIZE>* to_thread, MachineThread<SIZE>* from_thread, AbsTime timestamp) {
+ //
+ // We cannot context switch in INTR or Idle
+ //
+ // The one exception is if we were thread_initialized with NULL,
+ // then the first context switch will happen at idle.
+ ASSERT(!is_intr(), "May not context switch while in interrupt");
+ ASSERT(!is_idle() || (_thread == NULL && is_thread_state_initialized()), "May not context switch while idle");
+ ASSERT(to_thread, "May not context switch to NULL");
+
+ // The threads should match, unless...
+ // 1) We're uninitialized; we don't know who was on cpu
+ // 2) VERY RARE: A process EXEC'd, and we made a new thread for the new process. The tids will still match, and the old thread should be marked as trace terminated.
+ ASSERT(from_thread == _thread || _thread == NULL || (_thread->is_trace_terminated() && _thread->tid() == from_thread->tid()), "From thread does not match thread on cpu");
+
+ // Very rarely, we init a cpu to a thread, and then event[0] is a mach_sched
+ // or other context switch event. If that has happened, just discard the init
+ // thread entry.
+ if (_cpu_runq.size() == 1) {
+ if (_cpu_runq.back().is_event_zero_init_thread()) {
+ if (timestamp == _cpu_runq.back().timestamp()) {
+ _cpu_runq.pop_back();
+ }
+ }
+ }
+
+ ASSERT(_cpu_runq.empty() || timestamp > _cpu_runq.back().timestamp(), "Out of order timestamps");
+ ASSERT(_cpu_runq.size() < 2 || !_cpu_runq.back().is_event_zero_init_thread(), "Sanity");
+
+ _cpu_runq.emplace_back(to_thread, false, timestamp);
+ _thread = to_thread;
+}
+
+template <typename SIZE>
+void MachineCPU<SIZE>::post_initialize(AbsInterval events_timespan) {
+#if !defined(NDEBUG) && !defined(NS_BLOCK_ASSERTIONS)
+ // Make sure everything is sorted
+ if (_cpu_runq.size() > 1) {
+ for (uint32_t i=1; i<_cpu_runq.size(); ++i) {
+ ASSERT(_cpu_runq[i-1].timestamp() < _cpu_runq[i].timestamp(), "Out of order run events");
+ }
+ }
+ if (_cpu_idle.size() > 1) {
+ for (uint32_t i=1; i<_cpu_idle.size(); ++i) {
+ ASSERT(_cpu_idle[i-1].max() < _cpu_idle[i].location(), "Out of order idle events");
+ }
+ }
+ if (_cpu_intr.size() > 1) {
+ for (uint32_t i=1; i<_cpu_intr.size(); ++i) {
+ ASSERT(_cpu_intr[i-1].max() < _cpu_intr[i].location(), "Out of order intr events");
+ }
+ }
+#endif
+
+ // We do not need to flush the current thread on cpu, as the cpu
+ // runq only records "on" events, and assumes a duration of "until
+ // the next thread arrives or end of time".
+
+ // If we have a pending INTR state, flush it.
+ // We flush the INTR first so an idle flush
+ // doesn't assert.
+ if (is_intr())
+ clear_intr(events_timespan.max());
+
+ // If we have a pending idle state, flush it.
+ if (is_idle())
+ clear_idle(events_timespan.max());
+
+ if (!_cpu_runq.empty() || !_cpu_idle.empty() || !_cpu_intr.empty()) {
+ //
+ // Collapse all the events into a single timeline
+ //
+
+ // Check this math once we're done building the timeline.
+ size_t guessed_capacity = _cpu_runq.size() + _cpu_idle.size() * 2 + _cpu_intr.size() * 2;
+ _timeline.reserve(guessed_capacity);
+
+ auto runq_it = _cpu_runq.begin();
+ auto idle_it = _cpu_idle.begin();
+ auto intr_it = _cpu_intr.begin();
+
+ // Starting these at 0 will force an update to valid values in
+ // the first pass of the workloop.
+
+ AbsInterval current_runq(AbsTime(0), AbsTime(0));
+ AbsInterval current_idle(AbsTime(0), AbsTime(0));
+ AbsInterval current_intr(AbsTime(0), AbsTime(0));
+
+ MachineThread<SIZE>* current_thread = NULL;
+
+ AbsTime cursor(events_timespan.location());
+ while (events_timespan.contains(cursor)) {
+ //
+ // First we see if anyone needs updating with the next component.
+ //
+ if (cursor >= current_runq.max()) {
+ if (runq_it != _cpu_runq.end()) {
+ AbsTime end, begin = runq_it->timestamp();
+ if (runq_it+1 != _cpu_runq.end())
+ end = (runq_it+1)->timestamp();
+ else
+ end = events_timespan.max();
+
+ current_runq = AbsInterval(begin, end - begin);
+ current_thread = runq_it->thread();
+ ++runq_it;
+ } else {
+ // This will force future update checks to always fail.
+ current_runq = AbsInterval(events_timespan.max() + AbsTime(1), AbsTime(1));
+ current_thread = NULL;
+ }
+ }
+
+ if (cursor >= current_idle.max()) {
+ if (idle_it != _cpu_idle.end()) {
+ current_idle = *idle_it;
+ ++idle_it;
+ } else {
+ // This will force future update checks to always fail.
+ current_idle = AbsInterval(events_timespan.max() + AbsTime(1), AbsTime(1));
+ }
+ }
+
+ if (cursor >= current_intr.max()) {
+ if (intr_it != _cpu_intr.end()) {
+ current_intr = *intr_it;
+ ++intr_it;
+ } else {
+ // This will force future update checks to always fail.
+ current_intr = AbsInterval(events_timespan.max() + AbsTime(1), AbsTime(1));
+ }
+ }
+
+ //
+ // Now we see what type of activity we will be recording.
+ //
+ // This is hierarchical, intr > idle > run > unknown.
+ //
+
+ kCPUActivity type = kCPUActivity::Unknown;
+
+ if (current_runq.contains(cursor))
+ type = kCPUActivity::Run;
+
+ if (current_idle.contains(cursor))
+ type = kCPUActivity::Idle;
+
+ if (current_intr.contains(cursor))
+ type = kCPUActivity::INTR;
+
+ //
+ // Now we know the type, and the starting location.
+ // We must find the end.
+ //
+ // Since this is hierarchical, each type may end on
+ // its own "end", or the "begin" of a type higher than
+ // itself. An idle can end at its end, or at an intr begin.
+ //
+
+ AbsTime end;
+ switch (type) {
+ case kCPUActivity::Unknown:
+ end = std::min({ events_timespan.max(), current_runq.location(), current_idle.location(), current_intr.location() });
+ break;
+
+ case kCPUActivity::Run:
+ end = std::min({ current_runq.max(), current_idle.location(), current_intr.location() });
+ break;
+
+ case kCPUActivity::Idle:
+ end = std::min(current_idle.max(), current_intr.location());
+ break;
+
+ case kCPUActivity::INTR:
+ end = current_intr.max();
+ break;
+ }
+
+ //
+ // Now we drop in the new activity
+ //
+ if (type == kCPUActivity::Run) {
+ ASSERT(current_thread, "Current thread is NULL");
+ // It's a context switch if we are at the beginning of the runq interval
+ _timeline.emplace_back(current_thread, AbsInterval(cursor, end - cursor), current_runq.location() == cursor);
+ } else
+ _timeline.emplace_back(type, AbsInterval(cursor, end - cursor));
+
+ //
+ // And bump the cursor to the end...
+ //
+ cursor = end;
+ }
+
+#if !defined(NDEBUG) && !defined(NS_BLOCK_ASSERTIONS)
+ for (auto it = _timeline.begin(); it != _timeline.end(); ++it) {
+ auto next_it = it + 1;
+ ASSERT(events_timespan.contains(*it), "activity not contained in events_timespan");
+ if (next_it != _timeline.end()) {
+ ASSERT(it->max() == next_it->location(), "activity not end to end");
+ bool initial_idle_state = ((it == _timeline.begin()) && it->is_idle());
+ ASSERT(!next_it->is_context_switch() || (it->is_run() || it->is_unknown() || initial_idle_state), "Context switch activity preceded by !run activity");
+ }
+ }
+#endif
+ }
+
+ _cpu_runq.clear();
+ _cpu_runq.shrink_to_fit();
+
+ _cpu_idle.clear();
+ _cpu_idle.shrink_to_fit();
+
+ _cpu_intr.clear();
+ _cpu_intr.shrink_to_fit();
+}
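+
+// A worked sketch of the collapse above (illustrative): suppose a run
+// interval [10, 50), an idle interval [20, 30), and an intr interval
+// [25, 27). Because the hierarchy is intr > idle > run > unknown, the
+// workloop emits the end-to-end activities
+//
+//     Run [10, 20), Idle [20, 25), INTR [25, 27), Idle [27, 30), Run [30, 50)
+//
+// Each activity ends at its own max() or at the location() of the next
+// higher-priority interval, whichever comes first.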
--- /dev/null
+//
+// MachineMachMsg.hpp
+// KDBG
+//
+// Created by James McIlree on 2/20/14.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef kernel_perf_cmds_MachineMachMsg_hpp
+#define kernel_perf_cmds_MachineMachMsg_hpp
+
+enum kMachineMachMsgFlag {
+ HasSender = 0x00000001,
+ HasReceiver = 0x00000002,
+ IsVoucherRefused = 0x00000004
+};
+
+template <typename SIZE>
+class MachineMachMsg {
+ protected:
+
+ // POTENTIAL ISSUE:
+ //
+ // We could have a case where a sender queue's a message, then dies before the receiver picks it up.
+ // With a dead thread, the next MachineState will not include the tid, and then we'll be unable to look it up.
+
+ // NOTE NOTE NOTE!
+ //
+ // The instance vars are sorted by size to avoid wasting space.
+ // Don't change without good reason, and add new vars to the
+ // correct location!
+
+ /*
+ * ALWAYS 64b
+ */
+
+ AbsTime _send_time;
+ AbsTime _recv_time;
+
+ /*
+ * LP64 - HOST
+ *
+ * I'm going to assume the most common asymmetric pattern is a 64 bit desktop
+ * looking at a 32 bit device...
+ */
+
+ MachineVoucher<SIZE>* _send_voucher;
+ MachineVoucher<SIZE>* _recv_voucher;
+
+ /*
+ * LP64 - SIZE
+ */
+
+ typename SIZE::ptr_t _send_tid;
+ typename SIZE::ptr_t _recv_tid;
+ typename SIZE::ptr_t _kmsg_addr;
+
+
+ /*
+ * ALWAYS 32b
+ */
+
+ uint32_t _id; // This is globally unique for EACH message.
+ uint32_t _send_msgh_bits; // msgh_bits is modified between send/recv
+ uint32_t _recv_msgh_bits;
+ uint32_t _flags;
+
+ public:
+ MachineMachMsg(uint32_t id,
+ typename SIZE::ptr_t kmsg_addr,
+ uint32_t flags,
+ AbsTime send_time,
+ typename SIZE::ptr_t send_tid,
+ uint32_t send_msgh_bits,
+ MachineVoucher<SIZE>* send_voucher,
+ AbsTime recv_time,
+ typename SIZE::ptr_t recv_tid,
+ uint32_t recv_msgh_bits,
+ MachineVoucher<SIZE>* recv_voucher) :
+ _send_time(send_time),
+ _recv_time(recv_time),
+ _send_voucher(send_voucher),
+ _recv_voucher(recv_voucher),
+ _send_tid(send_tid),
+ _recv_tid(recv_tid),
+ _kmsg_addr(kmsg_addr),
+ _id(id),
+ _send_msgh_bits(send_msgh_bits),
+ _recv_msgh_bits(recv_msgh_bits),
+ _flags(flags)
+ {
+ // Should always have a valid pointer, but may be Machine<SIZE>::NullVoucher
+ ASSERT(send_voucher, "Sanity");
+ ASSERT(recv_voucher, "Sanity");
+
+ ASSERT(send_voucher->is_unset() == (MACH_MSGH_BITS_VOUCHER(_send_msgh_bits) == MACH_MSGH_BITS_ZERO), "voucher state disagrees with msgh_bits");
+ ASSERT(recv_voucher->is_unset() == (MACH_MSGH_BITS_VOUCHER(_recv_msgh_bits) == MACH_MSGH_BITS_ZERO), "voucher state disagrees with msgh_bits");
+ }
+
+ bool has_sender() const { return (_flags & kMachineMachMsgFlag::HasSender) > 0; }
+ bool has_receiver() const { return (_flags & kMachineMachMsgFlag::HasReceiver) > 0; }
+
+ uint32_t id() const { return _id; }
+
+ typename SIZE::ptr_t send_tid() const { ASSERT(has_sender(), "No Sender"); return _send_tid; }
+ typename SIZE::ptr_t recv_tid() const { ASSERT(has_receiver(), "No Receiver"); return _recv_tid; }
+
+ AbsTime send_time() const { ASSERT(has_sender(), "No Sender"); return _send_time; }
+ AbsTime recv_time() const { ASSERT(has_receiver(), "No Receiver"); return _recv_time; }
+
+ MachineVoucher<SIZE>* send_voucher() const { ASSERT(has_sender(), "No Sender"); return _send_voucher; }
+ MachineVoucher<SIZE>* recv_voucher() const { ASSERT(has_receiver(), "No Receiver"); return _recv_voucher; }
+
+ uint32_t send_msgh_bits() const { ASSERT(has_sender(), "No Sender"); return _send_msgh_bits; }
+ uint32_t recv_msgh_bits() const { ASSERT(has_receiver(), "No Receiver"); return _recv_msgh_bits; }
+
+ bool is_voucher_refused() const { ASSERT(has_receiver(), "No Receiver"); return (_flags & kMachineMachMsgFlag::IsVoucherRefused) > 0; }
+
+ bool has_send_voucher() const { return has_sender() && MACH_MSGH_BITS_VOUCHER(_send_msgh_bits) != MACH_MSGH_BITS_ZERO; }
+ bool has_recv_voucher() const { return has_receiver() && MACH_MSGH_BITS_VOUCHER(_recv_msgh_bits) != MACH_MSGH_BITS_ZERO; }
+
+ bool has_non_null_send_voucher() const { return has_sender() && MACH_MSGH_BITS_VOUCHER(_send_msgh_bits) != MACH_MSGH_BITS_ZERO && !_send_voucher->is_null(); }
+ bool has_non_null_recv_voucher() const { return has_receiver() && MACH_MSGH_BITS_VOUCHER(_recv_msgh_bits) != MACH_MSGH_BITS_ZERO && !_recv_voucher->is_null(); }
+};
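+
+// A minimal sketch of the padding that the size-sorted layout above
+// avoids (assuming an LP64 host, where uint64_t is 8-byte aligned):
+//
+//     struct Padded { uint32_t a; uint64_t b; uint32_t c; }; // 24 bytes
+//     struct Packed { uint64_t b; uint32_t a; uint32_t c; }; // 16 bytes
+//
+// Interleaving 32 bit and 64 bit members forces the compiler to insert
+// alignment padding; grouping by descending size does not.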
+
+#endif
--- /dev/null
+//
+// MachineProcess.hpp
+// KDBG
+//
+// Created by James McIlree on 10/26/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+//
+// Process life cycle
+//
+// There are four ways a process can be created:
+//
+// 1) CreatedByPreviousMachineState
+//
+// It is a carryover from a previous Machine state. This happens when a
+// live trace creates a machine state that is a union of a previous state
+// and new event data.
+//
+// 2) CreatedByThreadMap
+//
+// It is a process that was running at the time the trace events were
+// taken. The kernel provides this data.
+//
+// 3) CreatedByExecEvent
+//
+// It is a process that "reused" an existing pid, and exec'd a new process
+// in place. The Machine State will completely close down the old process
+// and create a new one to track data for the newly exec'd process.
+//
+// 4) CreatedByForkExecEvent
+//
+// An existing process "forked", creating a new pid, and then "exec'd".
+// This is seen in trace events as a TRACE_DATA_NEWTHREAD with a pid that
+// does not match the caller's pid.
+//
+// There are also two catch-all processes, "Unknown", and "Kernel". The kernel
+// process contains all kernel only threads, and unknown contains threads that
+// are encountered without any previous identifying information.
+//
+
+enum class kMachineProcessFlag : std::uint32_t {
+ CreatedByPreviousMachineState = 0x00000001,
+ CreatedByThreadMap = 0x00000002,
+ CreatedByForkExecEvent = 0x00000004,
+ CreatedByExecEvent = 0x00000008,
+ IsForkExecInProgress = 0x00000010,
+ IsExecInProgress = 0x00000020,
+ IsUnknownProcess = 0x00000040,
+ IsKernelProcess = 0x00000080,
+ IsExitBySyscall = 0x00000100,
+ IsExitByJetsam = 0x00000400,
+ IsExitByExec = 0x00000800,
+ IsTraceTerminated = 0x00001000
+};
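+
+// Usage sketch: because kMachineProcessFlag is an enum class, flag tests
+// require an explicit cast to the underlying type, which is what the
+// MachineProcess accessors do internally, e.g.
+//
+//     bool forked = (process.flags() & (uint32_t)kMachineProcessFlag::CreatedByForkExecEvent) != 0;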
+
+template <typename SIZE> class Machine;
+
+template <typename SIZE>
+class MachineProcess {
+ protected:
+ pid_t _pid;
+ char _name[20]; // Actual limit is 16; we round up to leave room for the NULL terminator
+ AbsInterval _timespan; // This is set at trace termination, or in post_initialize if still live.
+ AbsTime _exit_initiated_timestamp;
+ std::vector<MachineThread<SIZE>*> _threads_by_time;
+ uint32_t _flags;
+ int32_t _exit_status;
+ int32_t _apptype; // Unset == -1
+
+ //
+ // Mutable API
+ //
+
+ friend class Machine<SIZE>;
+
+ void set_flags(kMachineProcessFlag flags) { _flags |= (uint32_t)flags; }
+ void clear_flags(kMachineProcessFlag flags) { _flags &= ~(uint32_t)flags; }
+ bool is_flag_set(kMachineProcessFlag flag) const { return (_flags & (uint32_t)flag) > 0; }
+
+ //
+ // Process exit lifecycle
+ //
+ // Processes start to exit / terminate when one of the following happens:
+ //
+ // syscall to proc exit
+ // jetsam causes a SIGKILL
+ // syscall to exec
+ //
+ // It may be that more than one of these events happen. For example, jetsam
+ // may cause a process to die via a SIGKILL.
+ //
+ // For the purposes of this API, only the first method of initiating exit
+ // is recorded. This includes the timestamp; if you ask for the exit timestamp
+ // you will get the timestamp for the first invocation of any of the exit
+ // paths.
+ //
+ // Once a process starts terminating, it will eventually reach the point
+ // where no further events will ever be seen for that process. When the
+ // last thread in the process is marked as trace terminated, the process
+ // is marked as trace terminated.
+ //
+ // The definitive test for a process being entirely done is trace termination.
+ //
+
+ //
+ // The exit code and conditions are a bit of a mess.
+ // All processes exit. This is triggered by the BSD_PROC_EXIT
+ // tracepoint. Some processes chose to exit, some are forced to
+ // exit by signals (SIGKILL, for example). Some processes are
+ // forced to exit by a mechanism that appears to be a signal but
+ // we want to track them separately (jetsam).
+ //
+ // The upshot of this is the exit code is stored in waitpid
+ // style. See waitpid(2) for the macros used to decode this.
+ //
+ void set_exit_by_syscall(AbsTime timestamp, int exit_status);
+ void set_exit_by_jetsam(AbsTime timestamp);
+ void set_exit_by_exec(AbsTime timestamp);
+ void set_trace_terminated(AbsTime timestamp); // Also sets last timestamp
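+
+ // For example, decoding the stored exit status with the <sys/wait.h>
+ // macros (illustrative sketch):
+ //
+ // int32_t status = process.exit_status();
+ // if (WIFEXITED(status)) printf("exit(%d)\n", WEXITSTATUS(status));
+ // else if (WIFSIGNALED(status)) printf("killed by signal %d\n", WTERMSIG(status));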
+
+ void set_apptype(uint32_t apptype);
+ void set_apptype_from_trequested(uint32_t apptype);
+ void set_name(const char* name);
+
+ void add_thread(MachineThread<SIZE>* thread);
+
+ bool is_exec_in_progress() const { return (_flags & (uint32_t)kMachineProcessFlag::IsExecInProgress) > 0; }
+ bool is_fork_exec_in_progress() const { return (_flags & (uint32_t)kMachineProcessFlag::IsForkExecInProgress) > 0; }
+
+ void clear_fork_exec_in_progress();
+ void clear_exec_in_progress();
+
+ // This is called after all events have been processed, to allow the
+ // threads to be sorted.
+ void post_initialize(AbsTime last_machine_timestamp);
+
+ public:
+ MachineProcess(pid_t pid,
+ const char* name,
+ AbsTime create_timestamp,
+ kMachineProcessFlag flags);
+
+ pid_t pid() const { return _pid; }
+ const char* name() const { return _name; }
+ AbsInterval timespan() const { return _timespan; }
+ AbsTime exit_timestamp() const { return _exit_initiated_timestamp; }
+ int32_t exit_status() const { return _exit_status; }
+ int32_t apptype() const { return _apptype; }
+
+ uint32_t flags() const { return _flags; }
+
+ const std::vector<const MachineThread<SIZE>*>& threads() const { return *reinterpret_cast<const std::vector<const MachineThread<SIZE>*>*>(&_threads_by_time); }
+
+ bool is_exit_by_syscall() const { return is_flag_set(kMachineProcessFlag::IsExitBySyscall); }
+ bool is_exit_by_jetsam() const { return is_flag_set(kMachineProcessFlag::IsExitByJetsam); }
+ bool is_exit_by_exec() const { return is_flag_set(kMachineProcessFlag::IsExitByExec); }
+
+ // The invariant is that trace_terminated may not be set without is_exiting() set
+ bool is_exiting() const { return is_exit_by_syscall() || is_exit_by_jetsam() || is_exit_by_exec(); }
+ bool is_trace_terminated() const { return is_flag_set(kMachineProcessFlag::IsTraceTerminated); }
+
+ bool is_unknown() const { return is_flag_set(kMachineProcessFlag::IsUnknownProcess); }
+ bool is_kernel() const { return is_flag_set(kMachineProcessFlag::IsKernelProcess); }
+
+ bool is_created_by_previous_machine_state() const { return is_flag_set(kMachineProcessFlag::CreatedByPreviousMachineState); }
+ bool is_created_by_thread_map() const { return is_flag_set(kMachineProcessFlag::CreatedByThreadMap); }
+ bool is_created_by_fork_exec() const { return is_flag_set(kMachineProcessFlag::CreatedByForkExecEvent); }
+ bool is_created_by_exec() const { return is_flag_set(kMachineProcessFlag::CreatedByExecEvent); }
+
+ DEBUG_ONLY(void validate() const;)
+};
+
--- /dev/null
+//
+// MachineProcess.impl.hpp
+// KDBG
+//
+// Created by James McIlree on 10/30/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+template <typename SIZE>
+MachineProcess<SIZE>::MachineProcess(pid_t pid, const char* name, AbsTime create_timestamp, kMachineProcessFlag flags) :
+ _pid(pid),
+ _timespan(create_timestamp, AbsTime(0)),
+ _flags((uint32_t)flags),
+ _exit_status(0),
+ _apptype(-1)
+{
+ ASSERT(name, "Sanity");
+ ASSERT(strlen(name) < sizeof(_name) - 1, "Sanity");
+
+ // strlcpy guarantees NULL termination
+ strlcpy(_name, name, sizeof(_name));
+}
+
+#if !defined(NDEBUG) && !defined(NS_BLOCK_ASSERTIONS)
+template <typename SIZE>
+void MachineProcess<SIZE>::validate() const {
+ ASSERT(strlen(_name), "Must have a non zero length name");
+
+ if (is_trace_terminated()) {
+ ASSERT(is_exiting(), "Process is trace terminated without precursor exit event");
+
+ for (auto thread : _threads_by_time) {
+ ASSERT(thread->is_trace_terminated(), "process is trace terminated, but has live thread");
+ }
+ }
+
+ for (auto thread : _threads_by_time) {
+ ASSERT(_timespan.contains(thread->timespan()), "thread outside process timespan");
+ thread->validate();
+ }
+
+ // Every process should have one and only one primordial (main) thread.
+ // However, we cannot tell what the main thread is for threadmap processes,
+ // and processes forwarded from an earlier machine state may have already
+ // exited their main thread. We can only check exec/fork-exec.
+
+ if ((is_created_by_exec() || is_created_by_fork_exec()) && !is_created_by_previous_machine_state()) {
+ auto main_threads = 0;
+ for (auto thread : _threads_by_time) {
+ if (thread->is_main_thread()) main_threads++;
+ ASSERT(main_threads <= 1, "More than one main thread in a process");
+ }
+ ASSERT(main_threads == 1, "Incorrect number of main threads in process");
+ }
+
+}
+#endif
\ No newline at end of file
--- /dev/null
+//
+// MachineProcess.mutable-impl.hpp
+// KDBG
+//
+// Created by James McIlree on 10/30/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#include "KDebug.h"
+
+template <typename SIZE>
+void MachineProcess<SIZE>::set_exit_by_syscall(AbsTime timestamp, int32_t exit_status) {
+ ASSERT(!is_exiting(), "Attempt to exit after process is already exiting");
+ ASSERT(_exit_initiated_timestamp == 0, "Sanity");
+ ASSERT(!is_kernel(), "Kernel process is attempting to exit");
+
+ _exit_status = exit_status;
+ _exit_initiated_timestamp = timestamp;
+
+ set_flags(kMachineProcessFlag::IsExitBySyscall);
+}
+
+template <typename SIZE>
+void MachineProcess<SIZE>::set_exit_by_jetsam(AbsTime timestamp) {
+ ASSERT(!is_exiting(), "Attempt to exit after process is already exiting");
+ ASSERT(_exit_initiated_timestamp == 0, "Sanity");
+ ASSERT(!is_kernel(), "Kernel process is attempting to exit");
+
+ _exit_initiated_timestamp = timestamp;
+
+ set_flags(kMachineProcessFlag::IsExitByJetsam);
+}
+
+template <typename SIZE>
+void MachineProcess<SIZE>::set_exit_by_exec(AbsTime timestamp) {
+ ASSERT(!is_exiting(), "Attempt to exit after process is already exiting");
+ ASSERT(_exit_initiated_timestamp == 0, "Sanity");
+ ASSERT(!is_kernel(), "Kernel process is attempting to exit");
+
+ _exit_initiated_timestamp = timestamp;
+ set_flags(kMachineProcessFlag::IsExitByExec);
+
+ for (MachineThread<SIZE>* thread : _threads_by_time) {
+ if (!thread->is_trace_terminated()) {
+ thread->set_trace_terminated(timestamp);
+ }
+ }
+
+ set_trace_terminated(timestamp);
+}
+
+template <typename SIZE>
+void MachineProcess<SIZE>::set_trace_terminated(AbsTime timestamp) {
+ ASSERT(is_exiting(), "Attempting to set trace terminated without precursor exit event");
+ ASSERT(!is_kernel(), "Kernel process is attempting to set trace terminated");
+
+ DEBUG_ONLY({
+ for (MachineThread<SIZE>* thread : _threads_by_time) {
+ ASSERT(thread->is_trace_terminated(), "Setting process as trace terminated when it still has live threads");
+ }
+ })
+
+ _timespan.set_max(timestamp + AbsTime(1));
+ set_flags(kMachineProcessFlag::IsTraceTerminated);
+}
+
+template <typename SIZE>
+void MachineProcess<SIZE>::set_apptype(uint32_t type) {
+ ASSERT(type >= TASK_APPTYPE_NONE && type <= TASK_APPTYPE_APP_TAL, "Out of range");
+ ASSERT(_apptype == -1 || _apptype == type, "Attempt to set apptype more than once, or to change an inherited apptype");
+ ASSERT(!is_kernel(), "Kernel is attempting to set apptype");
+ ASSERT(!is_exiting(), "Setting apptype after exit");
+
+ _apptype = type;
+}
+
+template <typename SIZE>
+void MachineProcess<SIZE>::set_apptype_from_trequested(uint32_t type) {
+ ASSERT(type >= TASK_APPTYPE_NONE && type <= TASK_APPTYPE_APP_TAL, "Out of range");
+ ASSERT(_apptype == -1 || _apptype == type, "trequested apptype does not match set apptype");
+
+ _apptype = type;
+}
+
+template <typename SIZE>
+void MachineProcess<SIZE>::set_name(const char* name) {
+ ASSERT(name, "Sanity");
+ ASSERT(strlen(name) < sizeof(_name) - 1, "Sanity");
+
+ strlcpy(_name, name, sizeof(_name));
+}
+
+template <typename SIZE>
+void MachineProcess<SIZE>::clear_fork_exec_in_progress() {
+ ASSERT(!is_unknown(), "Sanity");
+ ASSERT(!is_kernel(), "Sanity");
+ ASSERT(!is_exiting(), "Sanity");
+ ASSERT(!is_exec_in_progress(), "Sanity");
+ ASSERT(is_fork_exec_in_progress(), "Sanity");
+
+ clear_flags(kMachineProcessFlag::IsForkExecInProgress);
+}
+
+template <typename SIZE>
+void MachineProcess<SIZE>::clear_exec_in_progress() {
+ ASSERT(!is_unknown(), "Sanity");
+ ASSERT(!is_kernel(), "Sanity");
+ ASSERT(!is_exiting(), "Sanity");
+ ASSERT(!is_fork_exec_in_progress(), "Sanity");
+ ASSERT(is_exec_in_progress(), "Sanity");
+
+ clear_flags(kMachineProcessFlag::IsExecInProgress);
+}
+
+template <typename SIZE>
+void MachineProcess<SIZE>::add_thread(MachineThread<SIZE>* thread) {
+ ASSERT(thread, "Sanity");
+ ASSERT(&thread->process() == this, "Sanity");
+ ASSERT(!thread->is_trace_terminated(), "Attempt to add thread that is already terminated");
+ ASSERT(thread->timespan().location() >= _timespan.location(), "Attempt to add thread that started before this process");
+ ASSERT(!is_exiting(), "Adding thread to process that has exited");
+
+ // 6/20/2014, reworking time handling, is this still true?
+ //
+ // Process/thread created by a previous machine state will violate these
+ // rules, during initialization. However, only threads created in that
+ // form will be so tagged, and so we can exclude them from this assert.
+ //
+ // ASSERT(!is_exited() || thread->is_created_by_previous_machine_state(), "Adding thread to process that has marked itself as exited");
+
+ DEBUG_ONLY({
+ // At this point, the threads vector is not sorted.
+ // We have to look at everything :-(.
+ for (MachineThread<SIZE>* process_thread : _threads_by_time) {
+ if (process_thread->tid() == thread->tid()) {
+ ASSERT(!process_thread->timespan().intersects(thread->timespan()), "Overlapping duplicate threads");
+ }
+ }
+ })
+
+ _threads_by_time.push_back(thread);
+}
+
+template <typename SIZE>
+void MachineProcess<SIZE>::post_initialize(AbsTime last_machine_timestamp) {
+ //
+ // For processes that are still alive at the post_initialize phase,
+ // we want to extend their timespan(s) to the end of the machine state,
+ // so they can be looked up by pid/name.
+ //
+ if (!is_trace_terminated()) {
+ ASSERT(_timespan.length() == 0, "Should not have timespan set");
+
+ // Time in a range is always half open. [ 10, 11 ) means 10 is included,
+ // but 11 is not. In order to include a given timestamp, we must use
+ // a value one greater.
+ AbsTime half_open_timestamp = last_machine_timestamp + AbsTime(1);
+
+ _timespan.set_max(half_open_timestamp);
+ }
+
+ std::sort(_threads_by_time.begin(), _threads_by_time.end(), thread_by_time_sort<SIZE>);
+}
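+
+// Half-open interval sketch: AbsInterval(location, length) spans
+// [location, location + length), so [10, 11) contains 10 but not 11:
+//
+//     AbsInterval span(AbsTime(10), AbsTime(1));
+//     span.contains(AbsTime(10)); // true
+//     span.contains(AbsTime(11)); // false
+//
+// This is why a live process is extended to last_machine_timestamp + 1.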
--- /dev/null
+//
+// MachineThread.hpp
+// KDBG
+//
+// Created by James McIlree on 10/26/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+template <typename SIZE> class MachineProcess;
+
+enum class kMachineThreadFlag : std::uint32_t {
+ CreatedByPreviousMachineState = 0x00000001,
+ CreatedByThreadMap = 0x00000002,
+ CreatedByTraceDataNewThread = 0x00000004,
+ CreatedByUnknownTidInTrace = 0x00000008,
+ CreatedByForkExecEvent = 0x00000010,
+ CreatedByExecEvent = 0x00000020,
+ IsMain = 0x00000040,
+ IsIdle = 0x00000080,
+ TraceTerminated = 0x00000200, // Set when a MACH_THREAD_TERMINATED is seen. This is definitive, no further trace events should reference this thread.
+};
+
+template <typename SIZE> class Machine;
+template <typename SIZE> class JetsamActivity;
+template <typename SIZE> class MachineVoucher;
+
+template <typename SIZE>
+class MachineThread {
+ protected:
+ uint32_t _flags;
+ typename SIZE::ptr_t _tid; // We're unlikely to ever look at a K64 trace on a 32 bit machine; placing this here gives the best chance of not growing the struct with useless padding bytes.
+ MachineProcess<SIZE>* _process;
+ AbsInterval _timespan;
+ AbsTime _begin_blocked;
+ AbsTime _begin_vm_fault;
+ AbsTime _begin_jetsam_activity;
+ uint32_t _begin_jetsam_activity_type;
+ std::vector<AbsInterval> _blocked;
+ std::vector<AbsInterval> _vm_faults;
+ std::vector<AbsInterval> _jetsam_activity;
+ std::vector<VoucherInterval<SIZE>> _vouchers_by_time;
+
+ //
+ // Mutable API
+ //
+ friend class Machine<SIZE>;
+ friend class MachineProcess<SIZE>;
+
+ MachineProcess<SIZE>& mutable_process() { return *_process; }
+
+ void set_flags(kMachineThreadFlag flags) { _flags |= (uint32_t)flags; }
+ void clear_flags(kMachineThreadFlag flags) { _flags &= ~(uint32_t)flags; }
+ bool is_flag_set(kMachineThreadFlag flag) const { return (_flags & (uint32_t)flag) > 0; }
+
+ // This can be discovered after the thread is created.
+ void set_is_idle_thread();
+ void set_trace_terminated(AbsTime timestamp);
+
+ void set_voucher(MachineVoucher<SIZE>* voucher, AbsTime timestamp);
+
+ //
+ // NOTE! Unrunnable/blocked isn't quite exact; it doesn't match
+ // the scheduler view of unrunnable/blocked.
+ //
+ // 1) If you're not blocked, you're runnable
+ // 2) A thread is considered "blocked" if the cpu it is on goes idle.
+ //
+ void make_runnable(AbsTime timestamp);
+ void make_unrunnable(AbsTime timestamp);
+
+ void begin_vm_fault(AbsTime timestamp);
+ void end_vm_fault(AbsTime timestamp);
+
+ void begin_jetsam_activity(uint32_t type, AbsTime timestamp);
+ void end_jetsam_activity(uint32_t type, AbsTime timestamp);
+
+ void add_io_activity(AbsInterval interval, uint32_t code, uint32_t page_count);
+
+ AbsTime blocked_in_timespan(AbsInterval timespan) const;
+ AbsTime next_blocked_after(AbsTime timestamp) const;
+
+ // This is called after all events have been processed, to flush any pending state
+ void post_initialize(AbsTime last_machine_timestamp);
+
+ public:
+ MachineThread(MachineProcess<SIZE>* process, typename SIZE::ptr_t tid, MachineVoucher<SIZE>* initial_voucher, AbsTime create_timestamp, kMachineThreadFlag flags) :
+ _flags((uint32_t)flags),
+ _tid(tid),
+ _process(process),
+ _timespan(create_timestamp, AbsTime(0)),
+ _begin_jetsam_activity_type(0)
+ {
+ ASSERT(_tid != 0, "Sanity");
+ ASSERT(_process, "Sanity");
+ ASSERT(initial_voucher, "Sanity");
+
+ _vouchers_by_time.emplace_back(initial_voucher, AbsInterval(create_timestamp, AbsTime::END_OF_TIME - create_timestamp));
+ }
+
+ typename SIZE::ptr_t tid() const { return _tid; }
+ AbsInterval timespan() const { return _timespan; }
+ const MachineProcess<SIZE>& process() const { return *_process; }
+ uint32_t flags() const { return _flags; }
+
+ const MachineVoucher<SIZE>* voucher(AbsTime timestamp) const;
+ const MachineVoucher<SIZE>* last_voucher() const;
+
+ const std::vector<AbsInterval>& vm_faults() const { return _vm_faults; }
+ const std::vector<AbsInterval>& jetsam_activity() const { return _jetsam_activity; }
+
+ bool is_created_by_previous_machine_state() const { return is_flag_set(kMachineThreadFlag::CreatedByPreviousMachineState); }
+ bool is_created_by_thread_map() const { return is_flag_set(kMachineThreadFlag::CreatedByThreadMap); }
+ bool is_created_by_trace_data_new_thread() const { return is_flag_set(kMachineThreadFlag::CreatedByTraceDataNewThread); }
+ bool is_created_by_unknown_tid_in_trace() const { return is_flag_set(kMachineThreadFlag::CreatedByUnknownTidInTrace); }
+ bool is_created_by_fork_exec() const { return is_flag_set(kMachineThreadFlag::CreatedByForkExecEvent); }
+ bool is_created_by_exec() const { return is_flag_set(kMachineThreadFlag::CreatedByExecEvent); }
+
+ bool is_idle_thread() const { return is_flag_set(kMachineThreadFlag::IsIdle); }
+ bool is_main_thread() const { return is_flag_set(kMachineThreadFlag::IsMain); }
+
+ bool is_trace_terminated() const { return is_flag_set(kMachineThreadFlag::TraceTerminated); }
+
+ DEBUG_ONLY(void validate() const;)
+};
--- /dev/null
+//
+// MachineThread.impl.hpp
+// KDBG
+//
+// Created by James McIlree on 10/30/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+template <typename SIZE>
+AbsTime MachineThread<SIZE>::blocked_in_timespan(AbsInterval timespan) const {
+ auto it = std::lower_bound(_blocked.begin(), _blocked.end(), AbsInterval(timespan.location(), AbsTime(1)));
+ // The lower bound will report that 0 is lower than [ 10, 20 ), need to check contains!
+ AbsTime blocked_time;
+ while (it != _blocked.end() && timespan.intersects(*it)) {
+ blocked_time += timespan.intersection_range(*it).length();
+ ++it;
+ }
+
+ return blocked_time;
+}
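+
+// Worked example: with _blocked = { [10, 20), [30, 40) } and a timespan
+// of [5, 35), the intersections are [10, 20) and [30, 35), so
+// blocked_in_timespan returns 10 + 5 = 15 ticks.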
+
+template <typename SIZE>
+AbsTime MachineThread<SIZE>::next_blocked_after(AbsTime timestamp) const {
+ auto it = std::lower_bound(_blocked.begin(), _blocked.end(), AbsInterval(timestamp, AbsTime(1)));
+ // The lower bound will report that 0 is lower than [ 10, 20 ), need to check contains!
+ if (it != _blocked.end()) {
+ if (it->contains(timestamp))
+ return timestamp;
+
+ ASSERT(it->location() > timestamp, "Sanity");
+ return it->location();
+ }
+
+ return _timespan.max();
+}
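+
+// Worked example: with _blocked = { [30, 40) }, next_blocked_after(25)
+// returns 30, the location of the next blocked interval; a query past
+// the last blocked interval, say next_blocked_after(45), returns
+// _timespan.max().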
+
+template <typename SIZE>
+const MachineVoucher<SIZE>* MachineThread<SIZE>::voucher(AbsTime timestamp) const {
+ ASSERT(_timespan.contains(timestamp), "Sanity");
+
+ auto it = std::upper_bound(_vouchers_by_time.begin(), _vouchers_by_time.end(), timestamp, AbsIntervalMaxVsAbsTimeComparator());
+
+ // The upper bound will report that 0 is lower than [ 10, 20 ), need to check contains!
+ if (it != _vouchers_by_time.end() && it->contains(timestamp)) {
+ return it->voucher();
+ }
+
+ return &Machine<SIZE>::UnsetVoucher;
+}
+
+template <typename SIZE>
+const MachineVoucher<SIZE>* MachineThread<SIZE>::last_voucher() const {
+ ASSERT(!_vouchers_by_time.empty(), "Sanity");
+ return _vouchers_by_time.back().voucher();
+}
+
+#if !defined(NDEBUG) && !defined(NS_BLOCK_ASSERTIONS)
+template <typename SIZE>
+void MachineThread<SIZE>::validate() const {
+ ASSERT(_process, "Sanity");
+ ASSERT(_process->timespan().contains(timespan()), "Sanity");
+
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(_blocked), "Sanity");
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(_vm_faults), "Sanity");
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(_jetsam_activity), "Sanity");
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(_vouchers_by_time), "Sanity");
+
+ if (!_blocked.empty()) {
+ ASSERT(_timespan.contains(_blocked.front()), "Blocked interval not contained by thread timespan");
+ ASSERT(_timespan.contains(_blocked.back()), "Blocked interval not contained by thread timespan");
+ }
+
+ if (!_vm_faults.empty()) {
+ ASSERT(_timespan.contains(_vm_faults.front()), "vm fault interval not contained by thread timespan");
+ ASSERT(_timespan.contains(_vm_faults.back()), "vm_fault interval not contained by thread timespan");
+ }
+
+ if (!_jetsam_activity.empty()) {
+ ASSERT(_timespan.contains(_jetsam_activity.front()), "jetsam_activity interval not contained by thread timespan");
+ ASSERT(_timespan.contains(_jetsam_activity.back()), "jetsam_activity interval not contained by thread timespan");
+ }
+
+ if (!_vouchers_by_time.empty()) {
+ ASSERT(_timespan.contains(_vouchers_by_time.front()), "vouchers_by_time interval not contained by thread timespan");
+ ASSERT(_timespan.contains(_vouchers_by_time.back()), "vouchers_by_time interval not contained by thread timespan");
+ }
+
+ ASSERT(!_process->is_trace_terminated() || is_trace_terminated(), "Process is trace terminated but thread is live");
+
+ // Each thread should have at least one creation flag.
+ // Note that created by previous machine state is in addition to the
+ // actual create flag, so does not count
+ ASSERT(is_created_by_thread_map() ||
+ is_created_by_trace_data_new_thread() ||
+ is_created_by_unknown_tid_in_trace() ||
+ is_created_by_fork_exec() ||
+ is_created_by_exec(), "Should have at least one create flag");
+}
+#endif
--- /dev/null
+//
+// MachineThread.mutable-impl.hpp
+// KDBG
+//
+// Created by James McIlree on 10/30/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#include "KDebug.h"
+
+template <typename SIZE>
+void MachineThread<SIZE>::set_is_idle_thread() {
+ ASSERT(!is_trace_terminated(), "Attempt to mark terminated thread as IDLE");
+ ASSERT(_process->is_kernel(), "Attempt to set non-kernel thread as IDLE");
+
+ set_flags(kMachineThreadFlag::IsIdle);
+}
+
+template <typename SIZE>
+void MachineThread<SIZE>::set_trace_terminated(AbsTime timestamp) {
+ ASSERT(!is_trace_terminated(), "Attempt to trace terminate thread more than once");
+ ASSERT(!is_idle_thread(), "Attempt to terminate IDLE thread");
+
+ AbsTime terminated_timestamp = timestamp + AbsTime(1);
+
+ // If we were killed with a block event pending, we need to flush it
+ // to the queue. The make_runnable call will do sanity checks to
+ // handle the corner cases when called from here.
+ make_runnable(terminated_timestamp);
+
+ // We need to set the final timestamp for this thread's last voucher.
+ // Note that the null voucher and unset voucher are actual objects,
+ // they are not represented by NULL or nullptr.
+ _vouchers_by_time.back().set_max(terminated_timestamp);
+
+ //
+ // Finally, set this thread's timespan
+ //
+ _timespan.set_max(terminated_timestamp);
+
+ set_flags(kMachineThreadFlag::TraceTerminated);
+}
+
+template <typename SIZE>
+void MachineThread<SIZE>::make_runnable(AbsTime timestamp) {
+ ASSERT(!is_trace_terminated(), "Attempting to make terminated thread runnable");
+ ASSERT(timestamp >= _timespan.location(), "Attempt to make thread runnable before it exists");
+
+ if (_begin_blocked > 0) {
+ ASSERT(timestamp > _begin_blocked, "Sanity");
+ ASSERT(_blocked.empty() || _begin_blocked > _blocked.back().max(), "Out of order blocked regions");
+ _blocked.emplace_back(_begin_blocked, timestamp - _begin_blocked);
+ _begin_blocked = AbsTime(0);
+ }
+}
+
+template <typename SIZE>
+void MachineThread<SIZE>::make_unrunnable(AbsTime timestamp) {
+ ASSERT(!is_trace_terminated(), "Attempting to make terminated thread unrunnable");
+ ASSERT(timestamp >= _timespan.location(), "Attempt to make thread unrunnable before it exists");
+
+ _begin_blocked = timestamp;
+}
+
+template <typename SIZE>
+void MachineThread<SIZE>::begin_vm_fault(AbsTime timestamp) {
+ ASSERT(timestamp >= _timespan.location(), "Attempt to begin vm fault before thread exists");
+ ASSERT(!is_trace_terminated(), "Attempt to begin vm fault on thread that has terminated");
+ ASSERT(!is_idle_thread(), "Attempt to begin vm fault on IDLE thread");
+
+ ASSERT(_begin_vm_fault == 0, "Attempt to begin vm_fault without end");
+ _begin_vm_fault = timestamp;
+}
+
+template <typename SIZE>
+void MachineThread<SIZE>::end_vm_fault(AbsTime timestamp) {
+ ASSERT(timestamp >= _timespan.location(), "Attempt to end vm fault before thread exists");
+ ASSERT(!is_trace_terminated(), "Attempt to end vm fault on thread that has terminated");
+ ASSERT(!is_idle_thread(), "Attempt to end vm fault on IDLE thread");
+
+ if (_begin_vm_fault > 0) {
+ ASSERT(timestamp > _begin_vm_fault, "Sanity");
+ ASSERT(_vm_faults.empty() || _begin_vm_fault > _vm_faults.back().max(), "Out of order vm_fault regions");
+ _vm_faults.emplace_back(_begin_vm_fault, timestamp - _begin_vm_fault);
+ _begin_vm_fault = AbsTime(0);
+ }
+}
+
+template <typename SIZE>
+void MachineThread<SIZE>::begin_jetsam_activity(uint32_t type, AbsTime timestamp) {
+ ASSERT(timestamp >= _timespan.location(), "Attempt to begin jetsam activity before thread exists");
+ ASSERT(!is_trace_terminated(), "Attempt to begin jetsam activity on thread that has terminated");
+ ASSERT(!is_idle_thread(), "Attempt to begin jetsam activity on IDLE thread");
+
+ ASSERT(_begin_jetsam_activity == 0, "Attempt to begin jetsam activity without end");
+ ASSERT(_begin_jetsam_activity_type == 0, "Sanity");
+
+ _begin_jetsam_activity = timestamp;
+ DEBUG_ONLY(_begin_jetsam_activity_type = type;)
+}
+
+template <typename SIZE>
+void MachineThread<SIZE>::end_jetsam_activity(uint32_t type, AbsTime timestamp) {
+ ASSERT(timestamp >= _timespan.location(), "Attempt to end jetsam activity before thread exists");
+ ASSERT(!is_trace_terminated(), "Attempt to end jetsam activity on thread that has terminated");
+ ASSERT(!is_idle_thread(), "Attempt to end jetsam activity on IDLE thread");
+
+ if (_begin_jetsam_activity > 0) {
+ ASSERT(type == _begin_jetsam_activity_type, "End event type does not match start event");
+ ASSERT(timestamp > _begin_jetsam_activity, "Sanity");
+ ASSERT(_jetsam_activity.empty() || _begin_jetsam_activity > _jetsam_activity.back().max(), "Out of order jetsam activities");
+ _jetsam_activity.emplace_back(_begin_jetsam_activity, timestamp - _begin_jetsam_activity);
+ _begin_jetsam_activity = AbsTime(0);
+ DEBUG_ONLY(_begin_jetsam_activity_type = 0;)
+ }
+}
+
+template <typename SIZE>
+void MachineThread<SIZE>::set_voucher(MachineVoucher<SIZE>* voucher, AbsTime timestamp) {
+ ASSERT(timestamp >= _timespan.location(), "Attempt to set voucher on thread before thread exists");
+ ASSERT(!is_trace_terminated(), "Attempt to set voucher on terminated thread");
+ ASSERT(!is_idle_thread(), "Attempt to set voucher on IDLE thread");
+ ASSERT(!_vouchers_by_time.empty(), "Sanity"); // The constructor guarantees at least one entry
+ ASSERT(_vouchers_by_time.back().location() < timestamp, "Sanity");
+
+ VoucherInterval<SIZE>& last_voucher = _vouchers_by_time.back();
+
+ if (voucher != last_voucher.voucher()) {
+ ASSERT(last_voucher.max() == AbsTime::END_OF_TIME, "Sanity");
+
+ //
+ // By default, the voucher interval has the last voucher continuing "forever".
+ // We need to trim the time used by that voucher, while handling the case of
+ // the very first event setting a new voucher as well.
+ //
+ // There are three cases possible.
+ //
+ // 1) timestamp > last_voucher.location // This is the expected case
+ // 2) timestamp == last_voucher.location // This should only be possible on the very first event for a thread
+ // 3) timestamp < last_voucher.location // This is an error at all times.
+ //
+
+ if (timestamp > last_voucher.location()) {
+ // Expected case (#1)
+ last_voucher.set_max(timestamp);
+ _vouchers_by_time.emplace_back(voucher, AbsInterval(timestamp, AbsTime::END_OF_TIME - timestamp));
+ } else if (timestamp == last_voucher.location()) {
+ // Corner case (#2)
+ //
+ // Note that we cannot assert that the voucher being replaced is the unset voucher,
+ // as vouchers are forwarded during "live" event handling. This means that the thread
+ // may have a valid voucher that is replaced on the first event.
+ //
+ // The timestamp == _timespan.location assert may also be too strong, if we start forwarding threads' true lifetimes.
+
+ ASSERT(timestamp == _timespan.location(), "Should only be overriding a voucher on the first event for a given thread.");
+ ASSERT(_vouchers_by_time.size() == 1, "Attempt to replace the current voucher when it isn't the first voucher");
+ _vouchers_by_time.pop_back();
+ _vouchers_by_time.emplace_back(voucher, AbsInterval(timestamp, AbsTime::END_OF_TIME - timestamp));
+ } else {
+ ASSERT(false, "Attempting to set a voucher on thread earlier in time than the thread's current voucher");
+ }
+ }
+}
+
+template <typename SIZE>
+void MachineThread<SIZE>::post_initialize(AbsTime last_machine_timestamp) {
+ if (!is_trace_terminated()) {
+ //
+ // For threads that are still alive at the post_initialize phase,
+ // we want to extend their timespan(s) to the end of the machine state,
+ // so they can be looked up by tid/timestamp
+ //
+ ASSERT(_timespan.length() == 0, "Sanity");
+
+ // Time in a range is always half open. [ 10, 11 ) means 10 is included,
+ // but 11 is not. In order to include a given timestamp, we must use
+ // a value one greater.
+ AbsTime half_open_timestamp = last_machine_timestamp + AbsTime(1);
+
+ _timespan.set_max(half_open_timestamp);
+
+ // 6/22/2014 Not sure about this. Just working on the massive time
+ // cleanup, along with the "we really know when threads and processes
+ // are done" cleanup. We used to always check and flush any outstanding
+ // blocked events in post_initialize. This is done explicitly in the trace
+ // terminated code now. However, it is possible to have a blocked event
+ // outstanding in a live thread at this point. If we actually forward state
+ // to future threads, we would want to pick that up, right?
+ //
+ // So what do we do here?
+ //
+ // We could make sure the intermediate states were properly forwarded
+ // as the threads are forwarded. That leaves the problem of queries against
+ // this machine state not showing an existing blocked state, which could
+ // have begun long ago.
+ //
+ // If we flush, how do we tag that last block so the forwarding happens
+ // correctly?
+ //
+ // For now, no one is doing the live update thing and using the cpu
+ // states, so I'm going to flush.
+ //
+ // Note that if the very last event is a make_unrunnable for a thread,
+ // this is going to yield a zero length blocking event, which might assert.
+ //
+ // NEEDS REVIEW, FIX ME.
+ make_runnable(half_open_timestamp);
+
+ _vouchers_by_time.back().set_max(half_open_timestamp);
+ }
+}
--- /dev/null
+//
+// MachineVoucher.hpp
+// KDBG
+//
+// Created by James McIlree on 2/18/14.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef kernel_perf_cmds_MachineVoucher_hpp
+#define kernel_perf_cmds_MachineVoucher_hpp
+
+template <typename SIZE> class Machine;
+
+enum class kMachineVoucherFlag : std::uint32_t {
+ CreatedByVoucherCreate = 0x00000001,
+ CreatedByFirstUse = 0x00000002,
+ CreatedByPreviousMachineState = 0x00000004,
+ IsNullVoucher = 0x00000008,
+ IsUnsetVoucher = 0x00000010,
+ IsDestroyed = 0x00000020
+};
+
+template <typename SIZE>
+class MachineVoucher {
+ protected:
+ AbsInterval _timespan;
+ uint8_t* _content_bytes;
+ uint32_t _content_bytes_size;
+ uint32_t _content_bytes_capacity;
+ uint32_t _id;
+ uint32_t _flags;
+ typename SIZE::ptr_t _address;
+
+ static uint32_t voucher_id();
+
+ // Voucher pointers are used as unique identifiers for the lifespan
+ // of the voucher, which may exceed the lifespan of the Machine.
+ // We may not copy or "move" a voucher.
+
+ // Disable copy operators
+ MachineVoucher(const MachineVoucher& ignored) = delete;
+ MachineVoucher& operator=(const MachineVoucher& ignored) = delete;
+
+ // Disable move operators
+ MachineVoucher(MachineVoucher&& ignored) = delete;
+ MachineVoucher& operator=(MachineVoucher&& ignored) = delete;
+
+ friend class Machine<SIZE>;
+
+ void workaround_16898190(kMachineVoucherFlag flags, uint32_t content_bytes_capacity);
+
+ void add_content_bytes(uint8_t* bytes);
+
+ void set_destroyed(AbsTime timestamp);
+
+ // These are needed to make vouchers that are still alive at the
+ // end of an event trace appear correctly in searches/queries.
+ // However, when forwarding live vouchers to a future machine state,
+ // that work must be undone.
+ void set_timespan_to_end_of_time() {
+ ASSERT(is_live(), "Modifying timespan of destroyed voucher");
+ ASSERT(_timespan.length() == 0, "Modifying timespan after it has already been set");
+ _timespan.set_length(AbsTime(UINT64_MAX) - _timespan.location());
+ }
+
+ void set_timespan_to_zero_length() {
+ ASSERT(is_live(), "Modifying timespan of destroyed voucher");
+ ASSERT(_timespan.max() == UINT64_MAX, "Modifying timespan after it has already been set");
+ _timespan.set_length(AbsTime(0));
+ }
+
+ public:
+ MachineVoucher(typename SIZE::ptr_t address, AbsInterval create_timespan, kMachineVoucherFlag flags, uint32_t content_bytes_capacity);
+
+ ~MachineVoucher() {
+ if (_content_bytes) {
+ free(_content_bytes);
+ _content_bytes = nullptr;
+ }
+ }
+
+ bool operator==(const MachineVoucher& rhs) const { return this->_id == rhs._id; }
+ bool operator!=(const MachineVoucher& rhs) const { return !(*this == rhs); }
+
+ bool is_live() const { return (_flags & (uint32_t)kMachineVoucherFlag::IsDestroyed) == 0; }
+ bool is_destroyed() const { return (_flags & (uint32_t)kMachineVoucherFlag::IsDestroyed) > 0; }
+ bool is_null() const { return (_flags & (uint32_t)kMachineVoucherFlag::IsNullVoucher) > 0; }
+ bool is_unset() const { return (_flags & (uint32_t)kMachineVoucherFlag::IsUnsetVoucher) > 0; }
+ bool is_created_by_voucher_create() const { return (_flags & (uint32_t)kMachineVoucherFlag::CreatedByVoucherCreate) > 0; }
+ bool is_created_by_first_use() const { return (_flags & (uint32_t)kMachineVoucherFlag::CreatedByFirstUse) > 0; }
+ bool is_created_by_previous_machine_state() const { return (_flags & (uint32_t)kMachineVoucherFlag::CreatedByPreviousMachineState) > 0; }
+ bool has_valid_contents() const { return _content_bytes_size > 0 && _content_bytes_size == _content_bytes_capacity; }
+
+ typename SIZE::ptr_t address() const { return _address; }
+ AbsInterval timespan() const { return _timespan; }
+ const uint8_t* content_bytes() const { return _content_bytes; }
+ uint32_t content_size() const { return _content_bytes_capacity; }
+ uint32_t id() const { return _id; }
+};
+
+template <typename SIZE>
+uint32_t MachineVoucher<SIZE>::voucher_id() {
+ static uint32_t voucher_id = 1;
+ return OSAtomicIncrement32Barrier((volatile int32_t*)&voucher_id);
+}
+
+template <typename SIZE>
+MachineVoucher<SIZE>::MachineVoucher(typename SIZE::ptr_t address, AbsInterval timespan, kMachineVoucherFlag flags, uint32_t content_bytes_capacity) :
+ _timespan(timespan),
+ _content_bytes((content_bytes_capacity > 0) ? (uint8_t*)malloc((size_t)content_bytes_capacity) : nullptr),
+ _content_bytes_size(0),
+ _content_bytes_capacity(content_bytes_capacity),
+ _id(voucher_id()),
+ _flags((uint32_t)flags),
+ _address(address)
+{
+ DEBUG_ONLY({
+ if (!is_null() && !is_unset()) {
+ ASSERT(timespan.location() != 0 || is_created_by_first_use(), "Only implicitly created vouchers should have an unknown (0) create time");
+ ASSERT(is_created_by_voucher_create() || is_created_by_first_use(), "Should have a create flag");
+ ASSERT(content_bytes_capacity == 0 || is_created_by_voucher_create(), "Implicitly created vouchers should not have content");
+ }
+ })
+}
+
+template <typename SIZE>
+void MachineVoucher<SIZE>::workaround_16898190(kMachineVoucherFlag flags, uint32_t content_bytes_capacity) {
+ ASSERT(_content_bytes_capacity == 0, "Attempting to reset non-zero content_bytes_capacity");
+ ASSERT(!is_null(), "Sanity");
+ ASSERT(!is_unset(), "Sanity");
+ ASSERT(is_live(), "Should be live"); // This may be too strong, some races could have destroy before create
+
+ _flags |= (uint32_t)flags;
+ _content_bytes_capacity = content_bytes_capacity;
+}
+
+template <typename SIZE>
+void MachineVoucher<SIZE>::add_content_bytes(uint8_t* src) {
+ ASSERT(src, "Sanity");
+
+ // If the first reference we see to a voucher is an MACH_IPC_VOUCHER_CREATE_ATTR_DATA,
+ // we will not have contents.
+ if (!is_created_by_first_use()) {
+ size_t bytes_remaining = _content_bytes_capacity - _content_bytes_size;
+ ASSERT(bytes_remaining > 0, "Sanity");
+
+ // We either write an entire tracepoint worth of data,
+ // or the # of bytes remaining.
+ size_t bytes_to_write = std::min(bytes_remaining, sizeof(typename SIZE::ptr_t) * 4);
+ auto dest = &_content_bytes[_content_bytes_size];
+ memcpy(dest, src, bytes_to_write);
+ _content_bytes_size += bytes_to_write;
+ }
+}
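+
+// For example, with SIZE == Kernel64 each tracepoint payload carries
+// 4 * sizeof(uint64_t) = 32 content bytes, so a voucher created with a
+// _content_bytes_capacity of 80 fills in three calls writing 32, 32,
+// and then 16 bytes.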
+
+template <typename SIZE>
+void MachineVoucher<SIZE>::set_destroyed(AbsTime timestamp) {
+ ASSERT(!is_destroyed(), "Sanity");
+ ASSERT(timestamp > _timespan.location(), "Sanity");
+ ASSERT(_timespan.length() == 0, "Sanity");
+
+ // It turns out this is too strong. The kernel has a limited amount of buffer space available
+ // to hold the voucher contents. If the voucher exceeds that, no contents are emitted, and we
+ // fail this assert.
+ // ASSERT(_content_bytes_capacity == _content_bytes_size, "Destroying voucher with incomplete contents");
+
+ // +1 to make sure searches for this voucher at the destroy timestamp
+ // can find it.
+ _timespan.set_length((timestamp - _timespan.location()) + AbsTime(1));
+ _flags |= (uint32_t)kMachineVoucherFlag::IsDestroyed;
+}
+
+
+#endif
--- /dev/null
+//
+// MetaTypes.hpp
+// KDBG
+//
+// Created by James McIlree on 10/24/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+class Kernel32
+{
+ public:
+ typedef uint32_t ptr_t;
+
+ enum { PTRMAX = UINT32_MAX };
+ enum { is_64_bit = 0 };
+};
+
+class Kernel64
+{
+ public:
+ typedef uint64_t ptr_t;
+
+ enum { PTRMAX = UINT64_MAX };
+ enum { is_64_bit = 1 };
+};
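+
+// Usage sketch: these classes are never instantiated; one of them is
+// passed as the SIZE template parameter to select pointer width at
+// compile time. For example (hypothetical instantiation):
+//
+//     static_assert(sizeof(Kernel64::ptr_t) == 8, "LP64 trace");
+//     static_assert(!Kernel32::is_64_bit, "K32 trace");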
--- /dev/null
+//
+// NurseryMachMsg.hpp
+// KDBG
+//
+// Created by James McIlree on 2/20/14.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef kernel_perf_cmds_NurseryMachMsg_hpp
+#define kernel_perf_cmds_NurseryMachMsg_hpp
+
+enum class kNurseryMachMsgState : std::uint32_t {
+ Uninitialized = 1,
+ Send,
+ Recv,
+ Free
+};
+
+template <typename SIZE>
+class NurseryMachMsg {
+ protected:
+ AbsTime _send_time;
+ MachineVoucher<SIZE>* _send_voucher;
+
+ typename SIZE::ptr_t _send_tid;
+ typename SIZE::ptr_t _kmsg_addr;
+
+ uint32_t _id; // This is globally unique for EACH message.
+ uint32_t _send_msgh_id;
+ uint32_t _send_msgh_bits; // msgh_bits is modified between send/recv
+ kNurseryMachMsgState _state;
+
+ // These are intptr_t's so they can be set to -1, indicating "no index"
+ intptr_t _send_event_index;
+
+ public:
+ static uint32_t message_id();
+
+ NurseryMachMsg(typename SIZE::ptr_t kmsg_addr) :
+ _kmsg_addr(kmsg_addr),
+ _state(kNurseryMachMsgState::Uninitialized)
+ {
+ }
+
+ void send(uintptr_t index, AbsTime time, typename SIZE::ptr_t tid, typename SIZE::ptr_t kmsg_addr, uint32_t msgh_bits, uint32_t msgh_id, MachineVoucher<SIZE>* voucher);
+
+ kNurseryMachMsgState state() const { return _state; }
+ void set_state(kNurseryMachMsgState state) { _state = state; }
+
+ AbsTime send_time() const { return _send_time; }
+ typename SIZE::ptr_t send_tid() const { return _send_tid; }
+
+ typename SIZE::ptr_t kmsg_addr() const { return _kmsg_addr; }
+ MachineVoucher<SIZE>* send_voucher() const { return _send_voucher; }
+
+ uint32_t id() const { return _id; }
+ uint32_t send_msgh_id() const { return _send_msgh_id; }
+ uint32_t send_msgh_bits() const { return _send_msgh_bits; }
+
+ void set_send_event_index(intptr_t value) { _send_event_index = value; }
+ intptr_t send_event_index() const { return _send_event_index; }
+};
+
+template <typename SIZE>
+uint32_t NurseryMachMsg<SIZE>::message_id() {
+ static uint32_t message_id = 1;
+ return OSAtomicIncrement32Barrier((volatile int32_t*)&message_id);
+}
+
+template <typename SIZE>
+void NurseryMachMsg<SIZE>::send(uintptr_t index, AbsTime time, typename SIZE::ptr_t tid, typename SIZE::ptr_t kmsg_addr, uint32_t msgh_bits, uint32_t msgh_id, MachineVoucher<SIZE>* voucher) {
+ ASSERT(_state == kNurseryMachMsgState::Uninitialized || _state == kNurseryMachMsgState::Free, "Calling send when msg is not in Uninitialized/Free state");
+ ASSERT(kmsg_addr == _kmsg_addr, "Sanity");
+
+ ASSERT(tid, "Sanity");
+ ASSERT(msgh_bits, "Sanity");
+
+ _id = NurseryMachMsg::message_id();
+
+ _send_event_index = index;
+ _send_time = time;
+ _send_tid = tid;
+ // _kmsg_addr = kmsg_addr;
+ _send_msgh_bits = msgh_bits;
+ _send_msgh_id = msgh_id;
+ _send_voucher = voucher;
+}
+
+#endif
--- /dev/null
+//
+// ProcessSummary.hpp
+// KDBG
+//
+// Created by James McIlree on 4/23/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef kdprof_ProcessSummary_hpp
+#define kdprof_ProcessSummary_hpp
+
+template <typename SIZE>
+class MachineProcess;
+
+template <typename SIZE>
+class MachineThread;
+
+template <typename SIZE>
+class CPUSummary;
+
+template <typename SIZE>
+class ProcessSummary {
+ public:
+ typedef std::unordered_set<ThreadSummary<SIZE>, ThreadSummaryHash<SIZE>, ThreadSummaryEqualTo<SIZE>> ThreadSummarySet;
+
+ protected:
+ const MachineProcess<SIZE>* _process;
+
+ AbsTime _total_run_time;
+ AbsTime _total_idle_time;
+ AbsTime _total_intr_time;
+ AbsTime _total_future_run_time;
+ AbsTime _total_wallclock_run_time;
+ AbsTime _total_vm_fault_time;
+ AbsTime _total_io_time;
+ AbsTime _total_jetsam_time;
+
+ uint32_t _context_switch_count;
+ uint32_t _count_idle_events;
+ uint32_t _count_intr_events;
+ uint32_t _count_vm_fault_events;
+ uint32_t _count_io_events;
+ bool _is_jetsam_killed;
+
+ uint64_t _io_bytes_completed;
+
+ ThreadSummarySet _thread_summaries;
+
+ std::vector<AbsInterval> _wallclock_run_intervals; // This is the actual wallclock run interval data.
+ std::vector<AbsInterval> _per_cpu_wallclock_run_intervals; // We need to accumulate intervals during summary generation, this is a temp buffer.
+
+ friend class Machine<SIZE>;
+ friend class CPUSummary<SIZE>;
+
+ void add_run_time(AbsTime time) { _total_run_time += time; }
+ void add_idle_time(AbsTime time) { _total_idle_time += time; _count_idle_events++; }
+ void add_intr_time(AbsTime time) { _total_intr_time += time; _count_intr_events++; }
+ void add_future_run_time(AbsTime time) { _total_future_run_time += time; }
+ void add_vm_fault_time(AbsTime time) { _total_vm_fault_time += time; _count_vm_fault_events++; }
+ void add_io_time(AbsTime time) { _total_io_time += time; _count_io_events++; }
+ void add_jetsam_time(AbsTime time) { _total_jetsam_time += time; }
+
+ void add_io_bytes_completed(typename SIZE::ptr_t bytes) { _io_bytes_completed += bytes; }
+
+ //
+ // Wallclock run intervals are added as each cpu timeline is walked.
+ // Between cpu(s), the results are accumulated to a single buffer
+ // After all cpus have been processed, the single buffer is summarized
+ //
+ void add_wallclock_run_interval(AbsInterval interval);
+ void accumulate_wallclock_run_intervals();
+ void summarize_wallclock_run_intervals();
+
+ void incr_context_switches() { _context_switch_count++; }
+
+ void set_jetsam_killed() { ASSERT(!_is_jetsam_killed, "Attempt to jetsam process twice"); _is_jetsam_killed = true; }
+
+ ThreadSummary<SIZE>* mutable_thread_summary(const MachineThread<SIZE>* thread) {
+ auto it = _thread_summaries.find(thread);
+ if (it == _thread_summaries.end()) {
+ // We create any thread summary that is missing.
+ auto insert_result = _thread_summaries.emplace(thread);
+ ASSERT(insert_result.second, "Sanity");
+ it = insert_result.first;
+ }
+
+ // NOTE! Because we are using a Set instead of a Map, STL wants
+ // the objects to be immutable. "it" refers to a const Record, to
+ // prevent us from changing the hash or equality of the Set. We
+ // know that the allowed set of mutations will not change these,
+ // and so we evil hack(tm) and cast away the const'ness.
+ return const_cast<ThreadSummary<SIZE>*>(&*it);
+ }
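+
+ // A sketch of the set-as-map pattern above (illustrative, not part of the
+ // original source): only the key field feeds ThreadSummaryHash and
+ // ThreadSummaryEqualTo, so mutating non-key fields through the const_cast
+ // cannot corrupt the set's buckets.
+ //
+ // ThreadSummary<SIZE>* summary = mutable_thread_summary(thread);
+ // summary->add_run_time(time); // does not touch thread(), the hash key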
+
+ ThreadSummarySet& mutable_thread_summaries() { return _thread_summaries; }
+
+ public:
+ ProcessSummary(const MachineProcess<SIZE>* process) :
+ _process(process),
+ _context_switch_count(0),
+ _count_idle_events(0),
+ _count_intr_events(0),
+ _count_vm_fault_events(0),
+ _count_io_events(0),
+ _is_jetsam_killed(false),
+ _io_bytes_completed(0)
+ {
+ }
+
+ const MachineProcess<SIZE>* process() const { return _process; }
+
+ AbsTime total_time() const { return _total_run_time + _total_idle_time + _total_intr_time; }
+ AbsTime total_run_time() const { return _total_run_time; }
+ AbsTime total_idle_time() const { return _total_idle_time; }
+ AbsTime total_intr_time() const { return _total_intr_time; }
+ AbsTime total_future_run_time() const { return _total_future_run_time; }
+ AbsTime total_wallclock_run_time() const { return _total_wallclock_run_time; }
+ AbsTime total_vm_fault_time() const { return _total_vm_fault_time; }
+ AbsTime total_io_time() const { return _total_io_time; }
+ AbsTime total_jetsam_time() const { return _total_jetsam_time; }
+
+ AbsTime avg_on_cpu_time() const { return _total_run_time / _context_switch_count; }
+
+ uint32_t context_switches() const { return _context_switch_count; }
+ uint32_t num_idle_events() const { return _count_idle_events; }
+ uint32_t num_intr_events() const { return _count_intr_events; }
+ uint32_t num_vm_fault_events() const { return _count_vm_fault_events; }
+ uint32_t num_io_events() const { return _count_io_events; }
+ uint32_t num_processes_jetsammed() const { return _is_jetsam_killed ? 1 : 0; }
+
+ uint64_t io_bytes_completed() const { return _io_bytes_completed; }
+
+ const ThreadSummarySet& thread_summaries() const { return _thread_summaries; }
+
+ const ThreadSummary<SIZE>* thread_summary(const MachineThread<SIZE>* thread) const {
+ auto it = _thread_summaries.find(thread);
+ return (it == _thread_summaries.end()) ? NULL : &*it;
+ }
+
+ DEBUG_ONLY(void validate() const;)
+};
+
+template <typename SIZE>
+void ProcessSummary<SIZE>::add_wallclock_run_interval(AbsInterval interval) {
+ ASSERT(_per_cpu_wallclock_run_intervals.empty() || (_per_cpu_wallclock_run_intervals.back() < interval && !interval.intersects(_per_cpu_wallclock_run_intervals.back())), "Invariant violated");
+ _per_cpu_wallclock_run_intervals.emplace_back(interval);
+}
+
+template <typename SIZE>
+void ProcessSummary<SIZE>::accumulate_wallclock_run_intervals() {
+ _wallclock_run_intervals = trange_vector_union(_wallclock_run_intervals, _per_cpu_wallclock_run_intervals);
+ _per_cpu_wallclock_run_intervals.clear();
+ // We don't shrink_to_fit here as it's expected another CPU's run intervals will be processed next.
+}
+
+template <typename SIZE>
+void ProcessSummary<SIZE>::summarize_wallclock_run_intervals() {
+ ASSERT(_per_cpu_wallclock_run_intervals.empty(), "Sanity");
+ _per_cpu_wallclock_run_intervals.shrink_to_fit();
+
+ ASSERT(_total_wallclock_run_time == 0, "Called more than once");
+
+ ASSERT(is_trange_vector_sorted_and_non_overlapping(_wallclock_run_intervals), "Sanity");
+
+ for (auto& interval : _wallclock_run_intervals) {
+ _total_wallclock_run_time += interval.length();
+ }
+
+ _wallclock_run_intervals.clear();
+ _wallclock_run_intervals.shrink_to_fit();
+}
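+
+// Worked example of the two-phase flow above (illustrative numbers;
+// trange_vector_union is assumed to return the sorted, non-overlapping union
+// of two sorted interval vectors):
+//
+// cpu0 intervals: [0,10) [20,30) -- accumulate => union is [0,10) [20,30)
+// cpu1 intervals: [5,15) [40,50) -- accumulate => union is [0,15) [20,30) [40,50)
+//
+// summarize_wallclock_run_intervals() then sums the lengths:
+// 15 + 10 + 10 = 35 units of wallclock run time.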
+
+#if !defined(NDEBUG) && !defined(NS_BLOCK_ASSERTIONS)
+template <typename SIZE>
+void ProcessSummary<SIZE>::validate() const {
+ ASSERT(_total_wallclock_run_time <= _total_run_time, "Sanity");
+
+ for (const auto& thread_summary : _thread_summaries) {
+ thread_summary.validate();
+ }
+}
+#endif
+
+template <typename SIZE>
+struct ProcessSummaryHash {
+ size_t operator()(const ProcessSummary<SIZE>& summary) const {
+ return std::hash<const MachineProcess<SIZE>*>()(summary.process());
+ }
+};
+
+template <typename SIZE>
+struct ProcessSummaryEqualTo {
+ bool operator()(const ProcessSummary<SIZE>& s1, const ProcessSummary<SIZE>& s2) const {
+ return s1.process() == s2.process();
+ }
+};
+
+#endif
--- /dev/null
+//
+// TaskEffectivePolicy.hpp
+// system_cmds
+//
+// Created by James McIlree on 6/19/14.
+//
+//
+
+// Using the raw struct was causing alignment issues on arm32; it seems that
+// structs have very relaxed alignment requirements on the v7 architectures.
+// The class wrapper forces a higher alignment and allows for some convenience
+// operators (compare, xor, etc)
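+//
+// For instance (an illustrative check, assuming Kernel64::ptr_t is a
+// uint64_t typedef, as elsewhere in these tools):
+//
+// static_assert(alignof(TaskEffectivePolicy) >= alignof(uint64_t), "Sanity");
+//
+// holds because the union below contains a Kernel64::ptr_t member, while the
+// raw struct (all bitfields) may align to as little as 4 bytes on armv7.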
+
+class TaskEffectivePolicy {
+ protected:
+ union {
+ Kernel32::ptr_t _kernel_32[2];
+ Kernel64::ptr_t _kernel_64;
+ struct task_effective_policy _policy;
+ } _content;
+
+ public:
+ TaskEffectivePolicy() {}
+
+ TaskEffectivePolicy(struct task_effective_policy policy) {
+ static_assert(sizeof(_content) == sizeof(struct task_effective_policy), "Sanity");
+ _content._policy = policy;
+ }
+
+ TaskEffectivePolicy(Kernel64::ptr_t teffective_0) {
+ static_assert(sizeof(_content) == sizeof(teffective_0), "Sanity");
+ _content._kernel_64 = teffective_0;
+ }
+
+ TaskEffectivePolicy(Kernel32::ptr_t teffective_0, Kernel32::ptr_t teffective_1) {
+ static_assert(sizeof(_content) == (sizeof(teffective_0) + sizeof(teffective_1)), "Sanity");
+ _content._kernel_32[0] = teffective_0;
+ _content._kernel_32[1] = teffective_1;
+ }
+
+ bool operator==(const TaskEffectivePolicy& other) const { return this->_content._kernel_64 == other._content._kernel_64; }
+ bool operator!=(const TaskEffectivePolicy& other) const { return !(*this == other); }
+
+ TaskEffectivePolicy operator~() const { return TaskEffectivePolicy(~this->_content._kernel_64); }
+
+ struct task_effective_policy as_struct() { return _content._policy; }
+};
\ No newline at end of file
--- /dev/null
+//
+// TaskRequestedPolicy.hpp
+// system_cmds
+//
+// Created by James McIlree on 6/23/14.
+//
+//
+
+
+// Using the raw struct was causing alignment issues on arm32; it seems that
+// structs have very relaxed alignment requirements on the v7 architectures.
+// The class wrapper forces a higher alignment and allows for some convenience
+// operators (compare, xor, etc)
+
+class TaskRequestedPolicy {
+ protected:
+ union {
+ Kernel32::ptr_t _kernel_32[2];
+ Kernel64::ptr_t _kernel_64;
+ struct task_requested_policy _policy;
+ } _content;
+
+ public:
+ TaskRequestedPolicy() {}
+
+ TaskRequestedPolicy(struct task_requested_policy policy) {
+ static_assert(sizeof(_content) == sizeof(struct task_requested_policy), "Sanity");
+ _content._policy = policy;
+ }
+
+ TaskRequestedPolicy(Kernel64::ptr_t trequested_0) {
+ static_assert(sizeof(_content) == sizeof(trequested_0), "Sanity");
+ _content._kernel_64 = trequested_0;
+ }
+
+ TaskRequestedPolicy(Kernel32::ptr_t trequested_0, Kernel32::ptr_t trequested_1) {
+ static_assert(sizeof(_content) == (sizeof(trequested_0) + sizeof(trequested_1)), "Sanity");
+ _content._kernel_32[0] = trequested_0;
+ _content._kernel_32[1] = trequested_1;
+ }
+
+ bool operator==(const TaskRequestedPolicy& other) const { return this->_content._kernel_64 == other._content._kernel_64; }
+ bool operator!=(const TaskRequestedPolicy& other) const { return !(*this == other); }
+
+ TaskRequestedPolicy operator~() const { return TaskRequestedPolicy(~this->_content._kernel_64); }
+
+ struct task_requested_policy as_struct() { return _content._policy; }
+};
\ No newline at end of file
--- /dev/null
+//
+// ThreadSummary.hpp
+// KDBG
+//
+// Created by James McIlree on 4/23/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef kdprof_ThreadSummary_hpp
+#define kdprof_ThreadSummary_hpp
+
+template <typename SIZE>
+class MachineThread;
+
+template <typename SIZE>
+class ThreadSummary {
+ protected:
+ const MachineThread<SIZE>* _thread;
+
+ AbsTime _total_run_time;
+ AbsTime _total_idle_time;
+ AbsTime _total_intr_time;
+ AbsTime _total_vm_fault_time;
+ AbsTime _total_io_time;
+ AbsTime _total_jetsam_time;
+
+ uint32_t _context_switch_count;
+ uint32_t _count_idle_events;
+ uint32_t _count_intr_events;
+ uint32_t _count_vm_fault_events;
+ uint32_t _count_io_events;
+
+ uint64_t _io_bytes_completed;
+
+ AbsTime _total_future_run_time;
+
+ // Future run helper vars
+ AbsTime _total_blocked_in_summary;
+ AbsTime _max_possible_future_run_time;
+ AbsTime _first_block_after_summary;
+ bool _is_blocked_in_future;
+ bool _is_future_initialized;
+
+ friend class Machine<SIZE>;
+
+ void add_run_time(AbsTime time) { _total_run_time += time; }
+ void add_idle_time(AbsTime time) { _total_idle_time += time; _count_idle_events++; }
+ void add_intr_time(AbsTime time) { _total_intr_time += time; _count_intr_events++; }
+ void add_vm_fault_time(AbsTime time) { _total_vm_fault_time += time; _count_vm_fault_events++; }
+ void add_io_time(AbsTime time) { _total_io_time += time; _count_io_events++; }
+ void add_jetsam_time(AbsTime time) { _total_jetsam_time += time; }
+
+ void add_io_bytes_completed(typename SIZE::ptr_t bytes) { _io_bytes_completed += bytes; }
+
+ void incr_context_switches() { _context_switch_count++; }
+
+ bool is_blocked_in_future() { return _is_blocked_in_future; }
+ void set_is_blocked_in_future() { _is_blocked_in_future = true; }
+
+ AbsTime total_blocked_in_summary() { return _total_blocked_in_summary; }
+ void set_total_blocked_in_summary(AbsTime time) { _total_blocked_in_summary = time; }
+
+ AbsTime max_possible_future_run_time() { return _max_possible_future_run_time; }
+ void set_max_possible_future_run_time(AbsTime time) { _max_possible_future_run_time = time; }
+
+ AbsTime first_block_after_summary() { return _first_block_after_summary; }
+ void set_first_block_after_summary(AbsTime time) { _first_block_after_summary = time; }
+
+ bool is_future_initialized() { return _is_future_initialized; }
+ void set_future_initialized() { _is_future_initialized = true; }
+
+ AbsTime add_future_run_time(AbsTime time) {
+ ASSERT(_is_future_initialized, "Sanity");
+ ASSERT(!_is_blocked_in_future, "Sanity");
+
+ AbsTime capped_time = _max_possible_future_run_time - _total_future_run_time;
+ if (capped_time < time) {
+ _total_future_run_time += capped_time;
+ _is_blocked_in_future = true;
+ return capped_time;
+ } else {
+ _total_future_run_time += time;
+ ASSERT(_total_future_run_time <= _max_possible_future_run_time, "Sanity");
+ return time;
+ }
+ }
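+
+ // Worked example of the capping above (illustrative values): with
+ // _max_possible_future_run_time = 100 and _total_future_run_time = 90,
+ // add_future_run_time(25) can only credit the remaining 10 ticks; it adds
+ // 10, marks the thread blocked-in-future, and returns 10 so the caller can
+ // account for the shortfall.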
+
+ public:
+ ThreadSummary(const MachineThread<SIZE>* thread) :
+ _thread(thread),
+ _context_switch_count(0),
+ _count_idle_events(0),
+ _count_intr_events(0),
+ _count_vm_fault_events(0),
+ _count_io_events(0),
+ _io_bytes_completed(0),
+ _is_blocked_in_future(false),
+ _is_future_initialized(false)
+ {
+ }
+
+ const MachineThread<SIZE>* thread() const { return _thread; }
+
+ AbsTime total_time() const { return _total_run_time + _total_idle_time + _total_intr_time; }
+
+ AbsTime total_run_time() const { return _total_run_time; }
+ AbsTime total_idle_time() const { return _total_idle_time; }
+ AbsTime total_intr_time() const { return _total_intr_time; }
+ AbsTime total_future_run_time() const { return _total_future_run_time; }
+ AbsTime total_vm_fault_time() const { return _total_vm_fault_time; }
+ AbsTime total_wallclock_vm_fault_time() const { return _total_vm_fault_time; }
+ AbsTime total_io_time() const { return _total_io_time; }
+ AbsTime total_jetsam_time() const { return _total_jetsam_time; }
+
+ AbsTime avg_on_cpu_time() const { return _total_run_time / _context_switch_count; }
+
+ uint32_t context_switches() const { return _context_switch_count; }
+ uint32_t num_idle_events() const { return _count_idle_events; }
+ uint32_t num_intr_events() const { return _count_intr_events; }
+ uint32_t num_vm_fault_events() const { return _count_vm_fault_events; }
+ uint32_t num_io_events() const { return _count_io_events; }
+
+ uint64_t io_bytes_completed() const { return _io_bytes_completed; }
+
+ DEBUG_ONLY(void validate() const);
+};
+
+#if !defined(NDEBUG) && !defined(NS_BLOCK_ASSERTIONS)
+template <typename SIZE>
+void ThreadSummary<SIZE>::validate() const {
+}
+#endif
+
+template <typename SIZE>
+struct ThreadSummaryHash {
+ size_t operator()(const ThreadSummary<SIZE>& summary) const {
+ return std::hash<const MachineThread<SIZE>*>()(summary.thread());
+ }
+};
+
+template <typename SIZE>
+struct ThreadSummaryEqualTo {
+ bool operator()(const ThreadSummary<SIZE>& s1, const ThreadSummary<SIZE>& s2) const {
+ return s1.thread() == s2.thread();
+ }
+};
+
+#endif
--- /dev/null
+//
+// TraceCodes.cpp
+// KDBG
+//
+// Created by James McIlree on 4/16/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#include "KDebug.h"
+
+std::vector<std::string> default_trace_code_paths() {
+ // As of 4/17/2013, this is a single file.
+ return { "/usr/share/misc/trace.codes" };
+}
+
+std::unordered_map<uint32_t, std::string> trace_codes_at_path(const char* path)
+{
+ std::unordered_map<uint32_t, std::string> codes;
+
+ if (FILE* fp = fopen(path, "r")) {
+ char line[PATH_MAX];
+
+ while (fgets(line, sizeof(line), fp)) {
+ unsigned int code;
+ char name[128];
+ if (sscanf(line, "%x%127s\n", &code, name) == 2) {
+ ASSERT(code != 0, "Should never have a code equal to zero");
+ ASSERT(strlen(name), "Invalid name");
+ codes[code] = name;
+ }
+ }
+
+ fclose(fp);
+ }
+
+ return codes;
+}
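+
+// A trace.codes file is a sequence of "<hex code> <name>" lines. For example,
+// a (hypothetical) line:
+//
+// 0x1090000 HYPOTHETICAL_example_code
+//
+// would parse as codes[0x1090000] == "HYPOTHETICAL_example_code"; lines that
+// do not match the two-field pattern are silently skipped.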
+
+std::unordered_map<uint32_t, std::string> resolve_trace_codes(bool should_read_default_codes, int output_fd, std::vector<std::string>& additional_paths) {
+ std::unordered_map<uint32_t, std::string> codes;
+
+ std::vector<std::string> paths;
+
+ if (should_read_default_codes) {
+ std::vector<std::string> default_paths = default_trace_code_paths();
+ paths.insert(paths.end(), default_paths.begin(), default_paths.end());
+ }
+
+ paths.insert(paths.end(), additional_paths.begin(), additional_paths.end());
+
+ for (auto& path : paths) {
+ std::unordered_map<uint32_t, std::string> partial = trace_codes_at_path(path.c_str());
+
+ if (output_fd > -1) {
+ dprintf(output_fd, "Read %zd codes from %s\n", partial.size(), path.c_str());
+ }
+
+ if (codes.empty()) {
+ codes = std::move(partial);
+ } else {
+ for (auto& map_pair : partial) {
+ auto insert_it = codes.insert(map_pair);
+ if (!insert_it.second) {
+ if (output_fd > -1 && map_pair.second != codes[map_pair.first]) {
+ dprintf(output_fd, "WARNING: code entry for 0x%x has multiple entries (%s, %s)\n", map_pair.first, map_pair.second.c_str(), codes[map_pair.first].c_str());
+ }
+ }
+ }
+ }
+ }
+
+ return codes;
+}
--- /dev/null
+//
+// TraceCodes.h
+// KDBG
+//
+// Created by James McIlree on 4/16/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef __kdprof__TraceCodes__
+#define __kdprof__TraceCodes__
+
+std::vector<std::string> default_trace_code_paths();
+std::unordered_map<uint32_t, std::string> trace_codes_at_path(const char* path);
+
+//
+// Set output_fd to an open fd to print verbose output/warnings.
+//
+std::unordered_map<uint32_t, std::string> resolve_trace_codes(bool should_read_default_codes, int output_fd, std::vector<std::string>& additional_paths);
+
+#endif /* defined(__kdprof__TraceCodes__) */
--- /dev/null
+//
+// TraceDataHeader.hpp
+// KDBG
+//
+// Created by James McIlree on 10/25/12.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+//
+// We have to specialize this: the K64 layout is 4 bytes longer so that the
+// uint64_t _TOD_secs can keep its 8 byte alignment.
+//
+
+template <typename KERNEL_SIZE> class TraceDataHeaderFields {};
+
+template <>
+class TraceDataHeaderFields<Kernel32> {
+ public:
+ uint32_t version;
+ uint32_t thread_count;
+ uint32_t TOD_secs_top_half;
+ uint32_t TOD_secs_bottom_half;
+ uint32_t TOD_usecs;
+
+ // NOTE! The compiler has shown a tendency to place this on non-8-byte-
+ // aligned addresses when stack allocating. We must reconstruct the
+ // uint64_t value by shifting and or-ing the two halves; treating the
+ // pair as a uint64_t pointer will fail!
+
+ TraceDataHeaderFields(uint32_t v, uint32_t tc, uint64_t s, uint32_t us) :
+ version(v),
+ thread_count(tc),
+ TOD_usecs(us)
+ {
+ TOD_secs_top_half = (uint32_t)(s >> 32);
+ TOD_secs_bottom_half = (uint32_t)(s & 0xFFFFFFFF);
+ }
+
+ uint64_t TOD_secs() {
+ return ((uint64_t)TOD_secs_top_half << 32) | (uint64_t)TOD_secs_bottom_half;
+ }
+};
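+
+// Worked example of the split above: for s = 0x159ABCDEF,
+// TOD_secs_top_half = 0x00000001 and TOD_secs_bottom_half = 0x59ABCDEF;
+// TOD_secs() reassembles ((uint64_t)0x1 << 32) | 0x59ABCDEF == 0x159ABCDEF.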
+
+template <>
+class TraceDataHeaderFields<Kernel64> {
+ public:
+ uint32_t version;
+ uint32_t thread_count;
+ uint64_t _TOD_secs;
+ uint32_t TOD_usecs;
+ uint32_t _force_alignment; // Need to force 8 byte alignment in 32 bit code
+
+ TraceDataHeaderFields(uint32_t v, uint32_t tc, uint64_t s, uint32_t us) :
+ version(v),
+ thread_count(tc),
+ _TOD_secs(s),
+ TOD_usecs(us),
+ _force_alignment(0)
+ {
+ }
+
+ uint64_t TOD_secs() {
+ return _TOD_secs;
+ }
+};
+
+template <typename KERNEL_SIZE>
+class TraceDataHeader {
+ private:
+ TraceDataHeaderFields<KERNEL_SIZE> _fields;
+
+ public:
+ TraceDataHeader() : _fields(0, 0, 0, 0) {}
+ TraceDataHeader(uint32_t v, uint32_t tc, uint64_t s, uint32_t us) : _fields(v, tc, s, us) {}
+
+ uint32_t version() const { return _fields.version; }
+ uint32_t thread_count() const { return _fields.thread_count; }
+ uint64_t TOD_secs() const { return _fields.TOD_secs(); }
+ uint32_t TOD_usecs() const { return _fields.TOD_usecs; }
+};
+
--- /dev/null
+//
+// TraceFile.cpp
+// system_cmds
+//
+// Created by James McIlree on 4/1/14.
+//
+//
+
+#include "KDebug.h"
+
+TraceFile::TraceFile(const char* path, bool sort_events, uint32_t default_ap_count, uint32_t default_iop_count) :
+ _file(path),
+ _version(kTraceFileVersion::Unknown),
+ _is_64_bit(false),
+ _is_valid(false),
+ _threadmap(nullptr),
+ _threadmap_count(0),
+ _cpumap(nullptr),
+ _cpumap_count(0),
+ _events(nullptr),
+ _event_count(0)
+{
+ try {
+ parse<Kernel64>(sort_events, default_ap_count, default_iop_count);
+ } catch (...) {
+ parse<Kernel32>(sort_events, default_ap_count, default_iop_count);
+ }
+}
--- /dev/null
+//
+// TraceFile.hpp
+// system_cmds
+//
+// Created by James McIlree on 4/1/14.
+//
+//
+
+#ifndef __system_cmds__TraceFile__
+#define __system_cmds__TraceFile__
+
+// These are not (yet) defined in kdebug.h.
+// Remove these and use kdebug.h's definitions ASAP.
+#define RAW_VERSION2 0x55aa0200 // RAW_VERSION2 is from Instruments/kperf
+#define RAW_VERSION3 0x55aa0300 // RAW_VERSION3 is the new hotness from kperf
+
+enum class kTraceFileVersion : uint32_t {
+ V0 = 0,
+ V1 = 1,
+ V1Plus = 2, // A 1+ is a 1 with a cpumap
+ V2 = 3, // Can type 2 contain a cpumap? Looks like no.
+ V3 = 4,
+ Unknown = UINT32_MAX
+};
+
+class TraceFile {
+ protected:
+ MappedFile _file;
+ kTraceFileVersion _version;
+ bool _is_64_bit;
+ bool _is_valid;
+ void* _threadmap;
+ uint32_t _threadmap_count;
+ KDCPUMapEntry* _cpumap;
+ uint32_t _cpumap_count;
+ void* _events;
+ uintptr_t _event_count;
+ std::vector<uint8_t> _time_sorted_events; // This is empty unless event sorting is requested.
+ std::vector<KDCPUMapEntry> _default_cpumap; // If the file does not contain a cpumap, this will be used instead
+
+ template <typename SIZE>
+ void sanity_check_event_data();
+
+ template <typename SIZE>
+ void parse(bool, uint32_t, uint32_t);
+
+ public:
+ TraceFile(const char* path, bool sort_events = false, uint32_t default_ap_count = 24, uint32_t default_iop_count = 0);
+
+ // Returns true if a Machine state can be created.
+ bool is_valid() const { return _is_valid; }
+ bool is_64_bit() const { return _is_64_bit; }
+ kTraceFileVersion version() const { return _version; }
+
+ // Exposed so iOS devices can report oversized trace files
+ bool mmap_failed() const { return _file.mmap_failed(); }
+
+ const KDCPUMapEntry* cpumap() const { return _cpumap; }
+ uint32_t cpumap_count() const { return _cpumap_count; }
+
+ template <typename SIZE>
+ const KDThreadMapEntry<SIZE>* threadmap() const { return reinterpret_cast<KDThreadMapEntry<SIZE>*>(_threadmap); }
+ uint32_t threadmap_count() const { return _threadmap_count; }
+
+ template <typename SIZE>
+ const KDEvent<SIZE>* events() const { return reinterpret_cast<KDEvent<SIZE>*>(_events); }
+ uintptr_t event_count() const { return _event_count; }
+};
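+
+// Typical usage (an illustrative sketch; the path is hypothetical):
+//
+// TraceFile file("/tmp/sample.trace", true /* sort_events */);
+// if (file.is_valid() && file.is_64_bit()) {
+// const KDEvent<Kernel64>* events = file.events<Kernel64>();
+// for (uintptr_t i = 0; i < file.event_count(); i++) {
+// /* inspect events[i] */
+// }
+// }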
+
+//
+// This is a very simple attempt to sanity check the event data and prevent
+// crashes when reading 32b vs 64b trace data.
+//
+template <typename SIZE>
+void TraceFile::sanity_check_event_data() {
+ uintptr_t event_check_count = std::min((uintptr_t)10, _event_count);
+
+ AbsTime last_timestamp;
+
+ for (uintptr_t i=0; i<event_check_count; i++) {
+ KDEvent<SIZE>& event = reinterpret_cast<KDEvent<SIZE>*>(_events)[i];
+
+ if (event.cpu() < 0) {
+ THROW("Event cpu id is less than 0");
+ }
+
+ if (event.cpu() >= _cpumap_count) {
+ THROW("Event cpu id is greater than the number of configured cpus");
+ }
+
+ if (event.timestamp() < last_timestamp) {
+ THROW("Event Data sanity check found out of order timestamps");
+ }
+
+ if (SIZE::is_64_bit) {
+ if (event.unused() != 0) {
+ THROW("Event has value set in unknown field");
+ }
+ }
+
+ last_timestamp = event.timestamp();
+ }
+}
+
+template <typename SIZE>
+void TraceFile::parse(bool should_presort_events, uint32_t default_ap_count, uint32_t default_iop_count) {
+ if (TraceDataHeader<SIZE>* header = reinterpret_cast<TraceDataHeader<SIZE>*>(_file.address())) {
+ KDThreadMapEntry<SIZE>* threadmap = NULL;
+ uint32_t threadmap_count = 0;
+ KDCPUMapEntry* cpumap = NULL;
+ uint32_t cpumap_count = 0;
+ KDEvent<SIZE>* events = NULL;
+ kTraceFileVersion version;
+
+ switch (header->version()) {
+ case RAW_VERSION0:
+ // Should never happen!
+ ASSERT(false, "File is RAW_VERSION0");
+ THROW("RAW_VERSION0 is ILLEGAL");
+ break;
+
+ case RAW_VERSION1:
+ // Could be either v1 or v1+
+ break;
+
+ case RAW_VERSION2:
+ _version = kTraceFileVersion::V2;
+ // We do not know how to parse a V2 file
+ THROW("RAW_VERSION2 is unhandled");
+ break;
+
+ case RAW_VERSION3:
+ _version = kTraceFileVersion::V3;
+ // We do not know how to parse a V3 file
+ THROW("RAW_VERSION3 is unhandled");
+ break;
+
+ default:
+ // Could be a v0
+ break;
+ }
+
+ if (header->version() != RAW_VERSION1) {
+ // If the header is not a RAW_VERSION1, we must assume it is a
+ // RAW_VERSION0. The difficulty here is that RAW_VERSION0 consists
+ // of 4 bytes, which are the thread_count. We can't do much
+ // sanity checking. The first four bytes are already read into
+ // the existing header, reuse them. We must also reset the file
+ // offset.
+
+ threadmap_count = header->version();
+ threadmap = reinterpret_cast<KDThreadMapEntry<SIZE>*>(_file.address() + 4);
+
+ // Event data starts immediately following the threadmap
+ size_t offset = 4 + threadmap_count * sizeof(KDThreadMapEntry<SIZE>);
+ events = reinterpret_cast<KDEvent<SIZE>*>(_file.address() + offset);
+
+ version = kTraceFileVersion::V0;
+ } else {
+ //
+ // RAW_VERSION1
+ //
+ threadmap_count = header->thread_count();
+ threadmap = reinterpret_cast<KDThreadMapEntry<SIZE>*>(_file.address() + sizeof(TraceDataHeader<SIZE>));
+
+ size_t threadmap_size_in_bytes = threadmap_count * sizeof(KDThreadMapEntry<SIZE>);
+ size_t offset_to_event_data = (sizeof(TraceDataHeader<SIZE>) + threadmap_size_in_bytes + 4095) & ~4095;
+ size_t offset_to_cpumap_data = sizeof(TraceDataHeader<SIZE>) + threadmap_size_in_bytes;
+ size_t cpumap_bytes = offset_to_event_data - offset_to_cpumap_data;
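+
+ // Worked example (illustrative numbers): if the header plus threadmap
+ // occupies 3224 bytes, offset_to_event_data rounds up to 4096 and
+ // cpumap_bytes = 4096 - 3224 = 872 bytes are available for an optional
+ // embedded cpumap.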
+
+ //
+ // In a RAW_VERSION1, there *may* be a cpumap.
+ // If it exists, it will be between the header and the page aligned offset
+ // that event data begins at.
+ //
+ if (cpumap_bytes > sizeof(kd_cpumap_header) + sizeof(kd_cpumap)) {
+ kd_cpumap_header* cpumap_header = reinterpret_cast<kd_cpumap_header*>(_file.address() + offset_to_cpumap_data);
+ if (cpumap_header->version_no == RAW_VERSION1) {
+ cpumap = (KDCPUMapEntry*)&cpumap_header[1];
+ cpumap_count = cpumap_header->cpu_count;
+ }
+ }
+
+ // Event data starts at the next PAGE alignment boundary.
+ //
+ // Hmm, this could be pretty awful in iOS...
+ //
+ // Kernel page size is 4k. Userspace page size is 16k on 64 bit devices.
+ // The kernel writes the data; unless the kernel call fails, in which case userspace writes the data. Blech.
+ events = reinterpret_cast<KDEvent<SIZE>*>(_file.address() + offset_to_event_data);
+ }
+
+ uintptr_t event_count = (uintptr_t)_file.size() - (reinterpret_cast<uintptr_t>(events) - reinterpret_cast<uintptr_t>(_file.address()));
+ if (event_count % sizeof(KDEvent<SIZE>) != 0) {
+ // We're probably looking at the wrong k32/k64. Throw and try the other size.
+ THROW("Bytes in file does not match an even multiple of Event struct");
+ }
+ event_count /= sizeof(KDEvent<SIZE>);
+
+ if (cpumap == NULL || cpumap_count == 0) {
+ // No cpumap found, we need to fake one up using the default values.
+ //
+ // It would be nice if we could just read the events and derive the
+ // AP/IOP count, but the IOP events do not have valid tid(s), and
+ // must be ignored.
+
+ for (uint32_t i=0; i<default_ap_count; ++i) {
+ _default_cpumap.emplace_back(i, 0, "AP-???");
+ }
+ uint32_t iop_limit = default_ap_count + default_iop_count;
+ for (uint32_t i=default_ap_count; i<iop_limit; ++i) {
+ _default_cpumap.emplace_back(i, KDBG_CPUMAP_IS_IOP, "IOP-???");
+ }
+
+ cpumap = _default_cpumap.data();
+ cpumap_count = (uint32_t)_default_cpumap.size();
+
+ version = kTraceFileVersion::V1;
+ } else {
+ version = kTraceFileVersion::V1Plus;
+ }
+
+
+ // IOPs have been producing .trace files with out of order events.
+ // This is a hack fix to work around that. It costs a full copy of the data!
+ if (should_presort_events && event_count) {
+ // resize (not reserve) so the vector's size covers the bytes we copy in
+ _time_sorted_events.resize(event_count * sizeof(KDEvent<SIZE>));
+ memcpy(_time_sorted_events.data(), events, event_count * sizeof(KDEvent<SIZE>));
+ events = reinterpret_cast<KDEvent<SIZE>*>(_time_sorted_events.data());
+ std::sort(events, events + event_count, [](KDEvent<SIZE> const& p0, KDEvent<SIZE> const& p1) -> bool {
+ return p0.timestamp() < p1.timestamp();
+ });
+ }
+
+ _threadmap = threadmap;
+ _threadmap_count = threadmap_count;
+
+ _cpumap = cpumap;
+ _cpumap_count = cpumap_count;
+
+ _events = events;
+ _event_count = event_count;
+
+ _version = version;
+ _is_64_bit = SIZE::is_64_bit;
+
+ sanity_check_event_data<SIZE>();
+
+ //
+ // Okay, success if we made it this far.
+ //
+ _is_valid = true;
+ }
+}
+
+#endif /* defined(__system_cmds__TraceFile__) */
--- /dev/null
+//
+// VoucherInterval.hpp
+// KDBG
+//
+// Created by James McIlree on 2/18/14.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef kdprof_VoucherInterval_hpp
+#define kdprof_VoucherInterval_hpp
+
+template <typename SIZE>
+class VoucherInterval : public AbsInterval {
+ MachineVoucher<SIZE>* _voucher;
+
+ public:
+ VoucherInterval(MachineVoucher<SIZE>* voucher, AbsInterval interval) :
+ AbsInterval(interval),
+ _voucher(voucher)
+ {
+ }
+
+ const MachineVoucher<SIZE>* voucher() const { return _voucher; }
+};
+
+#endif
PRIV_START \
effective_uid = (a); \
effective_gid = (b); \
- if (setregid((gid_t)-1, effective_gid)<0) perr("cannot setregid"); \
- if (setreuid((uid_t)-1, effective_uid)<0) perr("cannot setreuid"); \
+ if (setegid(effective_gid)<0) perr("cannot setegid"); \
+ if (seteuid(effective_uid)<0) perr("cannot seteuid"); \
PRIV_END \
}
#endif
--- /dev/null
+//
+// Action.hpp
+// kdprof
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef kdprof_Action_hpp
+#define kdprof_Action_hpp
+
+class Action {
+ public:
+ virtual void execute(Globals& globals) = 0;
+};
+
+#endif
--- /dev/null
+//
+// CollectAction.cpp
+// kdprof
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+template <typename SIZE>
+static void execute_arch_specific(Globals& globals, KDState& state) {
+ // Collect all data first, printing takes time...
+ auto threadmap = KDBG::threadmap<SIZE>(state);
+ auto cpumap = KDBG::cpumap();
+
+ MemoryBuffer<KDEvent<SIZE>> events(state.capacity());
+ int count = KDBG::read(events.data(), events.capacity() * sizeof(KDEvent<SIZE>));
+
+ // Now handle any verbose printing.
+ /*if (globals.is_verbose()) {
+ printf("\n%lu threadmap entries:\n", threadmap.size());
+ for (auto& entry : threadmap) {
+ printf("\t0x%08llX %8u %20s\n", (uint64_t)entry.tid(), entry.pid(), entry.name());
+ }
+
+ printf("\n%lu cpumap entries:\n", cpumap.size());
+ for (auto& entry : cpumap) {
+ printf("\t%3u %8s\n", entry.cpu_id(), entry.name());
+ }
+
+ printf("\n%d events:\n", count);
+ }*/
+
+ if (globals.should_presort_events()) {
+ std::sort(events.data(), events.data() + count, [](KDEvent<SIZE> const& p0, KDEvent<SIZE> const& p1) -> bool {
+ return p0.timestamp() < p1.timestamp();
+ });
+ }
+ Machine<SIZE> machine((KDCPUMapEntry*)cpumap.data(), (uint32_t)cpumap.size(), (KDThreadMapEntry<SIZE>*)threadmap.data(), (uint32_t)threadmap.size(), (KDEvent<SIZE>*)events.data(), (uintptr_t)count);
+
+ if (!machine.lost_events()) {
+ if (globals.should_zero_base_timestamps() && count) {
+ globals.set_beginning_of_time((events.data())->timestamp());
+ } else {
+ globals.set_beginning_of_time(AbsTime(0));
+ }
+
+ if (!globals.is_timebase_set()) {
+ mach_timebase_info_data_t timebase;
+ mach_timebase_info(&timebase);
+ globals.set_timebase(timebase, false);
+ }
+
+ if (globals.is_verbose()) {
+ dprintf(globals.output_fd(), "\nLIVE DATA\n");
+ print_verbose_machine_info(globals, machine, (uint32_t)threadmap.size(), (uint32_t)cpumap.size());
+ }
+
+ if (globals.should_print_events()) {
+ // print_machine(globals, machine);
+ // print_machine_parallel(globals, machine);
+ print_machine_events(globals, machine);
+ }
+
+ if (globals.should_print_summary()) {
+ print_machine_summary(globals, machine);
+ }
+
+ if (globals.should_print_csv_summary()) {
+ print_machine_csv_summary(globals, machine);
+ }
+
+ if (globals.should_print_process_start_stop_timestamps()) {
+ print_process_start_stop_timestamps(globals, machine);
+ }
+ } else {
+ log_msg(ASL_LEVEL_WARNING, "The trace data indicates that events were lost; the file cannot be processed\n");
+ }
+}
+
+void CollectAction::execute(Globals& globals) {
+ KDState state = KDBG::state();
+ if (state.is_lp64()) {
+ execute_arch_specific<Kernel64>(globals, state);
+ } else {
+ execute_arch_specific<Kernel32>(globals, state);
+ }
+}
--- /dev/null
+//
+// CollectAction.hpp
+// kdprof
+//
+// Created by James McIlree on 4/16/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef kdprof_CollectAction_hpp
+#define kdprof_CollectAction_hpp
+
+class CollectAction : public Action {
+ public:
+ CollectAction() {}
+
+ virtual void execute(Globals& globals);
+};
+
+#endif
--- /dev/null
+//
+// DisableAction.cpp
+// kdprof
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+void DisableAction::execute(Globals& globals) {
+ if (!KDBG::set_enabled(false)) {
+ usage("Unable to disable tracing");
+ }
+}
--- /dev/null
+//
+// DisableAction.hpp
+// kdprof
+//
+// Created by James McIlree on 4/16/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef kdprof_DisableAction_hpp
+#define kdprof_DisableAction_hpp
+
+class DisableAction : public Action {
+ public:
+ DisableAction() {}
+
+ virtual void execute(Globals& globals);
+};
+
+#endif
--- /dev/null
+//
+// EnableAction.cpp
+// kdprof
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+void EnableAction::execute(Globals& globals) {
+ if (!KDBG::set_enabled(true)) {
+ usage("Unable to enable tracing");
+ }
+}
--- /dev/null
+//
+// EnableAction.hpp
+// kdprof
+//
+// Created by James McIlree on 4/16/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef kdprof_EnableAction_hpp
+#define kdprof_EnableAction_hpp
+
+class EnableAction : public Action {
+ public:
+ EnableAction() {}
+
+ virtual void execute(Globals& globals);
+};
+
+#endif
--- /dev/null
+//
+// EventPrinting.cpp
+// kdprof
+//
+// Created by James McIlree on 6/6/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+void print_event_header(const Globals& globals, bool is_64_bit) {
+
+ // Header is...
+ //
+ // [Index] Time Type Code arg1 arg2 arg3 arg4 thread cpu# command/IOP-name pid
+ // 8 16 4 34 8/16 8/16 8/16 8/16 10 4 16 6
+
+
+ if (globals.should_print_event_index())
+ dprintf(globals.output_fd(), "%8s ", "Event#");
+
+ // The character counting for "Time(µS)" is OBO, it treats the µ as two characters.
+ // This means the %16s misaligns. We force it by making the input string 16 printable chars long,
+ // which overflows the %16s to the correct actual output length.
+ const char* time = globals.should_print_mach_absolute_timestamps() ? "Time(mach-abs)" : " Time(µS)";
+
+ if (is_64_bit)
+ dprintf(globals.output_fd(), "%16s %4s %-34s %-16s %-16s %-16s %-16s %10s %4s %-16s %-6s\n", time, "Type", "Code", "arg1", "arg2", "arg3", "arg4", "thread", "cpu#", "command", "pid");
+ else
+ dprintf(globals.output_fd(), "%16s %4s %-34s %-8s %-8s %-8s %-8s %10s %4s %-16s %-6s\n", time, "Type", "Code", "arg1", "arg2", "arg3", "arg4", "thread", "cpu#", "command", "pid");
+}
--- /dev/null
+//
+// EventPrinting.hpp
+// kdprof
+//
+// Created by James McIlree on 4/20/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef kdprof_EventPrinting_hpp
+#define kdprof_EventPrinting_hpp
+
+void print_event_header(const Globals& globals, bool is_64_bit);
+
+template <typename SIZE>
+char* print_event(char* buf, char* buf_end, const Globals& globals, const Machine<SIZE>& machine, const KDEvent<SIZE>& event, uintptr_t event_index)
+{
+ // Header is...
+ //
+ // [Index] Time Type Code arg1 arg2 arg3 arg4 thread cpu# command/IOP-name pid
+ // 8 16 4 34 8/16 8/16 8/16 8/16 10 4 16 6
+ //
+ // For now, each column is folding up the "after" spacing in a single printf, IOW
+ //
+ // buf += snprintf(buf, buf_end - buf, "%8s ", "COL"); /* + 2 spaces */
+ //
+ // Not:
+ //
+ // buf += snprintf(buf, buf_end - buf, "%8s", "COL");
+ // buf += snprintf(buf, buf_end - buf, " "); /* 2 spaces */
+
+ ASSERT(event.cpu() > -1 && event.cpu() < machine.cpus().size(), "cpu_id out of range");
+ const MachineCPU<SIZE>& cpu = machine.cpus()[event.cpu()];
+
+ //
+ // Okay, here is how snprintf works.
+ //
+ // char buf[2];
+ //
+ // snprintf(buf, 0, "a"); // Returns 1, buf is unchanged.
+ // snprintf(buf, 1, "a"); // Returns 1, buf = \0
+ // snprintf(buf, 2, "a"); // Returns 1, buf = 'a', \0
+
+ //
+ // If we cannot print successfully, we return the original pointer.
+ //
+ char* orig_buf = buf;
+
+ //
+ // [Index]
+ //
+ if (globals.should_print_event_index()) {
+ buf += snprintf(buf, buf_end - buf, "%8llu ", (uint64_t)event_index);
+ }
+
+ if (buf >= buf_end)
+ return orig_buf;
+
+ //
+ // Time
+ //
+ if (globals.should_print_mach_absolute_timestamps()) {
+ if (globals.beginning_of_time().value() == 0)
+ buf += snprintf(buf, buf_end - buf, "%16llX ", (event.timestamp() - globals.beginning_of_time()).value());
+ else
+ buf += snprintf(buf, buf_end - buf, "%16llu ", (event.timestamp() - globals.beginning_of_time()).value());
+ } else {
+ NanoTime ntime = (event.timestamp() - globals.beginning_of_time()).nano_time(globals.timebase());
+ buf += snprintf(buf, buf_end - buf, "%16.2f ", (double)ntime.value() / 1000.0);
+ }
+
+ if (buf >= buf_end)
+ return orig_buf;
+
+ //
+ // Type Code
+ //
+ const char* type = event.is_func_start() ? "beg" : (event.is_func_end() ? "end" : "---");
+ auto trace_code_it = globals.trace_codes().find(event.dbg_cooked());
+ if (cpu.is_iop() || !globals.should_print_symbolic_event_codes() || trace_code_it == globals.trace_codes().end()) {
+ buf += snprintf(buf, buf_end - buf, "%4s %-34x ", type, event.dbg_cooked());
+ } else {
+ buf += snprintf(buf, buf_end - buf, "%4s %-34s ", type, trace_code_it->second.c_str());
+ }
+
+ if (buf >= buf_end)
+ return orig_buf;
+
+ //
+ // arg1
+ //
+ if (event.dbg_class() == DBG_IOKIT && event.dbg_subclass() == DBG_IOPOWER) {
+ std::string kext_name = event.arg1_as_string();
+ std::reverse(kext_name.begin(), kext_name.end());
+
+ if (SIZE::is_64_bit)
+ buf += snprintf(buf, buf_end - buf, "%-16s ", kext_name.c_str());
+ else
+ buf += snprintf(buf, buf_end - buf, "%-8s ", kext_name.c_str());
+ } else {
+ if (SIZE::is_64_bit)
+ buf += snprintf(buf, buf_end - buf, "%-16llX ", (uint64_t)event.arg1());
+ else
+ buf += snprintf(buf, buf_end - buf, "%-8x ", (uint32_t)event.arg1());
+ }
+
+ if (buf >= buf_end)
+ return orig_buf;
+
+ //
+ // Profiling showed that the repeated snprintf calls were hot; rolling them up saves ~2.5% on a HUGE file.
+ //
+ // arg2 arg3 arg4 thread cpu
+ //
+ if (SIZE::is_64_bit)
+ buf += snprintf(buf, buf_end - buf, "%-16llX %-16llX %-16llX %10llX %4u ", (uint64_t)event.arg2(), (uint64_t)event.arg3(), (uint64_t)event.arg4(), (uint64_t)event.tid(), event.cpu());
+ else
+ buf += snprintf(buf, buf_end - buf, "%-8x %-8x %-8x %10llX %4u ", (uint32_t)event.arg2(), (uint32_t)event.arg3(), (uint32_t)event.arg4(), (uint64_t)event.tid(), event.cpu());
+
+ if (buf >= buf_end)
+ return orig_buf;
+
+ //
+ // command & pid (handled together due to IOP not printing a pid
+ //
+ if (cpu.is_iop()) {
+ // We print the IOP name instead of a command
+ buf += snprintf(buf, buf_end - buf, "%-16s\n", cpu.name());
+ } else {
+ if (const MachineThread<SIZE>* thread = machine.thread(event.tid(), event.timestamp())) {
+ buf += snprintf(buf, buf_end - buf, "%-16s %-6d\n", thread->process().name(), thread->process().pid());
+ } else {
+ buf += snprintf(buf, buf_end - buf, "%-16s %-6s\n", "?????", "???");
+ }
+ }
+
+ // Still need to check this, its an error if we overflow on the last print!
+ if (buf >= buf_end)
+ return orig_buf;
+
+ return buf;
+}
+
+template <typename SIZE>
+char* print_event_range_to_buffer(const Globals& globals, const Machine<SIZE>& machine, TRange<uintptr_t> range, MemoryBuffer<char>& buffer ) {
+ char* cursor = buffer.data();
+ char* cursor_end = cursor + buffer.capacity();
+
+ if (const KDEvent<SIZE>* events = machine.events()) {
+ ASSERT(TRange<uintptr_t>(0, machine.event_count()).contains(range), "Sanity");
+ for (uintptr_t index = range.location(); index < range.max(); ++index) {
+ char* temp = print_event(cursor, cursor_end, globals, machine, events[index], index);
+ if (temp != cursor)
+ cursor = temp;
+ else {
+ // Changing the capacity will invalidate the cursor
+ ptrdiff_t offset = cursor - buffer.data();
+ buffer.set_capacity(buffer.capacity()*2);
+ cursor = buffer.data() + offset;
+ cursor_end = buffer.data() + buffer.capacity();
+ }
+ }
+ }
+
+ return cursor;
+}
+
+class PrintWorkUnit {
+ protected:
+ MemoryBuffer<char> _buffer;
+ TRange<uintptr_t> _event_range;
+ char* _buffer_end;
+
+ // We do not want work units copied.
+ PrintWorkUnit(const PrintWorkUnit& that) = delete;
+ PrintWorkUnit& operator=(const PrintWorkUnit& other) = delete;
+
+ public:
+ PrintWorkUnit(MemoryBuffer<char>&& buffer, TRange<uintptr_t> event_range, char* buffer_end) :
+ _buffer(std::move(buffer)),
+ _event_range(event_range),
+ _buffer_end(buffer_end)
+ {
+ ASSERT(_buffer.capacity(), "Sanity");
+ ASSERT(_buffer.data(), "Sanity");
+ ASSERT(!_buffer_end || _buffer_end > _buffer.data(), "Sanity");
+ ASSERT(!_buffer_end || (_buffer_end < _buffer.data() + _buffer.capacity()), "Sanity");
+ }
+
+ MemoryBuffer<char>& buffer() { return _buffer; }
+
+ TRange<uintptr_t> event_range() { return _event_range; }
+ void set_event_range(TRange<uintptr_t> range) { _event_range = range; }
+
+ char* buffer_end() const { return _buffer_end; }
+ void set_buffer_end(char* buffer_end) { _buffer_end = buffer_end; }
+};
+
+template <typename SIZE>
+class PrintProducer {
+ protected:
+ const Globals& _globals;
+ const Machine<SIZE>& _machine;
+ uintptr_t _start_index;
+ uintptr_t _end_index;
+ uintptr_t _chunk_size;
+
+ public:
+ PrintProducer(const Globals& globals, const Machine<SIZE>& machine, uintptr_t chunk_size) :
+ _globals(globals),
+ _machine(machine),
+ _chunk_size(chunk_size)
+ {
+ _start_index = 0;
+ _end_index = machine.event_count();
+
+ if (globals.is_summary_start_set() || globals.is_summary_stop_set()) {
+ AbsInterval machine_timespan = machine.timespan();
+
+ KDEvent<SIZE> start_event(globals.summary_start(machine_timespan));
+ auto it = std::lower_bound(machine.events(), machine.events() + _end_index, start_event);
+ ASSERT(&*it >= machine.events(), "Returned start index lower than start");
+ _start_index = std::distance(machine.events(), it);
+
+ KDEvent<SIZE> end_event(globals.summary_stop(machine_timespan));
+ it = std::lower_bound(machine.events(), machine.events() + _end_index, end_event);
+ ASSERT(&*it <= machine.events() + _end_index, "Returned end index greater than end");
+ _end_index = std::distance(machine.events(), it);
+
+ ASSERT(_start_index <= _end_index, "start index is > end index");
+ }
+ }
+
+ bool produce(PrintWorkUnit& work_unit) {
+ // Claim a chunk of work to do
+ uintptr_t orig_start_index, new_start_index;
+ do {
+ orig_start_index = _start_index;
+ new_start_index = orig_start_index + std::min(_chunk_size, _end_index - orig_start_index);
+ } while (orig_start_index < _end_index && !OSAtomicCompareAndSwapPtrBarrier((void*)orig_start_index, (void *)new_start_index, (void * volatile *)&_start_index));
+
+ // Did we claim work?
+ if (orig_start_index < _end_index) {
+ TRange<uintptr_t> event_range(orig_start_index, new_start_index - orig_start_index);
+ char* end = print_event_range_to_buffer(_globals, _machine, event_range, work_unit.buffer());
+
+ work_unit.set_event_range(event_range);
+ work_unit.set_buffer_end(end);
+ return true;
+ }
+
+ return false;
+ }
+
+ uintptr_t start_index() const { return _start_index; }
+};
+
+template <typename SIZE>
+class PrintConsumer {
+ protected:
+ const Globals& _globals;
+ uintptr_t _write_index;
+ std::mutex _write_mutex;
+ std::condition_variable _write_condition;
+
+ public:
+ PrintConsumer(const Globals& globals, const Machine<SIZE>& machine, uintptr_t start_index) :
+ _globals(globals),
+ _write_index(start_index)
+ {
+ }
+
+ void consume(PrintWorkUnit& work_unit) {
+ std::unique_lock<std::mutex> guard(_write_mutex);
+ _write_condition.wait(guard, [&](){ return work_unit.event_range().location() == this->_write_index; });
+
+ ASSERT(work_unit.event_range().location() == _write_index, "Sanity");
+
+ char* data = work_unit.buffer().data();
+ size_t bytes = work_unit.buffer_end() - data;
+ write(_globals.output_fd(), work_unit.buffer().data(), bytes);
+ _write_index = work_unit.event_range().max();
+
+ _write_condition.notify_all();
+ }
+};
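+
+// The producer/consumer pair above parallelizes formatting while keeping the
+// output in order. Workers claim disjoint event ranges via compare-and-swap
+// and format them into private buffers concurrently; consume() serializes the
+// writes by blocking each worker until its range starts exactly at
+// _write_index. For example (illustrative), a worker holding [4000,6000) waits
+// while _write_index is 2000 until the [2000,4000) worker writes and advances
+// _write_index to 4000.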
+
+template <typename SIZE>
+uintptr_t print_machine_events(const Globals& globals, const Machine<SIZE>& machine) {
+ print_event_header(globals, SIZE::is_64_bit);
+
+ if (const KDEvent<SIZE>* events = machine.events()) {
+ if (uintptr_t event_count = machine.event_count()) {
+
+ //
+ // We want to chunk this up into reasonably sized pieces of work.
+ // Because each piece of work can potentially accumulate a large
+ // amount of memory, we need to limit the amount of work "in-flight".
+ //
+ uint32_t active_cpus = Kernel::active_cpu_count();
+
+ uintptr_t chunk_size = 2000;
+
+ PrintProducer<SIZE> producer(globals, machine, chunk_size);
+ PrintConsumer<SIZE> consumer(globals, machine, producer.start_index());
+
+ std::vector<std::thread> threads;
+ for (uint32_t i=0; i<active_cpus; ++i) {
+ threads.push_back(std::thread([&]() {
+ PrintWorkUnit work_unit(MemoryBuffer<char>(160 * chunk_size), TRange<uintptr_t>(0, 0), (char*)NULL);
+ while (producer.produce(work_unit)) {
+ consumer.consume(work_unit);
+ }
+ }));
+ }
+
+ for(auto& thread : threads){
+ thread.join();
+ }
+
+ uint32_t total_processes = 0;
+ uint32_t total_threads = 0;
+
+ for (auto process : machine.processes()) {
+ if (!process->is_created_by_previous_machine_state()) {
+ total_processes++;
+ }
+ }
+
+ for (auto thread : machine.threads()) {
+ if (!thread->is_created_by_previous_machine_state()) {
+ total_threads++;
+ }
+ }
+
+ dprintf(globals.output_fd(), "Total Events: %llu\n", (uint64_t)event_count);
+ dprintf(globals.output_fd(), "Total Processes: %u\n", total_processes);
+ dprintf(globals.output_fd(), "Total Threads: %u\n", total_threads);
+
+ return event_count;
+ }
+ }
+
+ return 0;
+}
+
+#endif
--- /dev/null
+//
+// MachineGlobals.cpp
+// kdprof
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+Globals::Globals() :
+ _cpu_count(0),
+ _iop_count(0),
+ _kernel_size(Kernel::is_64_bit() ? KernelSize::k64 : KernelSize::k32),
+ _is_cpu_count_set(false),
+ _is_iop_count_set(false),
+ _is_kernel_size_set(false),
+ _is_summary_start_set(false),
+ _is_summary_stop_set(false),
+ _is_summary_step_set(false),
+ _is_should_print_summary_set(false),
+ _is_timebase_set(false),
+ _should_read_default_trace_codes(true),
+ _should_print_mach_absolute_timestamps(false),
+ _should_print_event_index(false),
+ _should_print_symbolic_event_codes(true),
+ _is_verbose(false),
+ _should_presort_events(false),
+ _should_print_cpu_summaries(false),
+ _should_print_process_summaries(true),
+ _should_print_thread_summaries(false),
+ _should_print_events(false),
+ _should_print_summary(false),
+ _should_zero_base_timestamps(true),
+ _should_print_process_start_stop_timestamps(false),
+ _should_print_csv_summary(false),
+ _sort_key(kSortKey::CPU)
+{
+ // Default to the current machine's values
+ mach_timebase_info(&_timebase_info);
+
+ for (auto& entry : KDBG::cpumap()) {
+ if (entry.is_iop())
+ _iop_count++;
+ else
+ _cpu_count++;
+ }
+
+ // If we are unable to get a cpumap,
+ // fall back to the current # of cpus.
+ if (_cpu_count == 0) {
+ _cpu_count = Kernel::active_cpu_count();
+ _iop_count = 0;
+ }
+}
+
+AbsTime Globals::parse_time(const char* arg) const {
+
+ char* units;
+ uint64_t value = strtoull(arg, &units, 0);
+
+ // Unspecified units are treated as seconds
+ if (*units == 0 || strcmp(units, "s") == 0) {
+ return NanoTime(value * NANOSECONDS_PER_SECOND).abs_time(_timebase_info);
+ }
+
+ if (strcmp(units, "ms") == 0)
+ return NanoTime(value * NANOSECONDS_PER_MILLISECOND).abs_time(_timebase_info);
+
+ if (strcmp(units, "us") == 0)
+ return NanoTime(value * NANOSECONDS_PER_MICROSECOND).abs_time(_timebase_info);
+
+ if (strcmp(units, "ns") == 0)
+ return NanoTime(value).abs_time(_timebase_info);
+
+ if (strcmp(units, "mabs") == 0) {
+ return AbsTime(value);
+ }
+
+ usage("Unable to parse units on time value");
+}
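+
+// Examples (illustrative): "2" and "2s" parse as 2 seconds, "500ms" as 500
+// milliseconds, "250us" as 250 microseconds, and "100ns" as 100 nanoseconds,
+// each converted to AbsTime via the timebase; "1234mabs" parses as
+// AbsTime(1234), i.e. raw mach absolute time ticks with no conversion.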
+
+AbsTime Globals::summary_start(AbsInterval timespan) const {
+ AbsTime start(timespan.location());
+
+ if (is_summary_start_set()) {
+ AbsTime summary_start = parse_time(_summary_start.c_str());
+
+ bool absolute_start_stop = (_beginning_of_time == 0);
+ if (absolute_start_stop)
+ start = summary_start;
+ else
+ start += summary_start;
+ }
+
+ return start;
+}
+
+AbsTime Globals::summary_stop(AbsInterval timespan) const {
+
+ if (is_summary_stop_set()) {
+ AbsTime summary_stop = parse_time(_summary_stop.c_str());
+
+ bool absolute_start_stop = (_beginning_of_time == 0);
+ if (absolute_start_stop)
+ return summary_stop;
+ else
+ return timespan.location() + summary_stop;
+ }
+
+ return timespan.max();
+}
+
+AbsTime Globals::summary_step(AbsInterval timespan) const {
+ if (is_summary_step_set()) {
+ return parse_time(_summary_step.c_str());
+ }
+
+ return timespan.length();
+}
--- /dev/null
+//
+// Globals.hpp
+// kdprof
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef kdprof_Globals_hpp
+#define kdprof_Globals_hpp
+
+//
+// These are "global" values that control parsing and printing behavior.
+//
+
+enum class kSortKey : std::uint32_t {
+ CPU=0,
+ VMFault,
+ IO_Ops,
+ IO_Size,
+ IO_Wait,
+ ID
+};
+
+class Globals {
+ protected:
+ // Default/unknown parsing values
+ uint32_t _cpu_count;
+ uint32_t _iop_count;
+ KernelSize _kernel_size;
+ std::string _summary_start;
+ std::string _summary_stop;
+ std::string _summary_step;
+
+ bool _is_cpu_count_set;
+ bool _is_iop_count_set;
+ bool _is_kernel_size_set;
+ bool _is_summary_start_set;
+ bool _is_summary_stop_set;
+ bool _is_summary_step_set;
+ bool _is_should_print_summary_set;
+ bool _is_timebase_set;
+
+ // Output, printing related.
+ AbsTime _beginning_of_time;
+ mach_timebase_info_data_t _timebase_info;
+ FileDescriptor _output_fd;
+ bool _should_read_default_trace_codes;
+ std::vector<std::string> _additional_trace_code_paths;
+ std::unordered_map<uint32_t, std::string> _trace_codes;
+ bool _should_print_mach_absolute_timestamps;
+ bool _should_print_event_index;
+ bool _should_print_symbolic_event_codes;
+ bool _is_verbose;
+ bool _should_presort_events;
+ bool _should_print_cpu_summaries;
+ bool _should_print_process_summaries;
+ bool _should_print_thread_summaries;
+ bool _should_print_events;
+ bool _should_print_summary;
+ bool _should_zero_base_timestamps;
+ bool _should_print_process_start_stop_timestamps;
+ bool _should_print_csv_summary;
+ kSortKey _sort_key;
+
+ AbsTime parse_time(const char* arg) const;
+
+ public:
+ Globals();
+
+ uint32_t cpu_count() const { return _cpu_count; }
+ void set_cpu_count(uint32_t num) { _cpu_count = num; _is_cpu_count_set = true; }
+ bool is_cpu_count_set() const { return _is_cpu_count_set; }
+
+ uint32_t iop_count() const { return _iop_count; }
+ void set_iop_count(uint32_t num) { _iop_count = num; _is_iop_count_set = true; }
+ bool is_iop_count_set() const { return _is_iop_count_set; }
+
+ KernelSize kernel_size() const { return _kernel_size; }
+ void set_kernel_size(KernelSize size) { _kernel_size = size; _is_kernel_size_set = true; }
+ bool is_kernel_size_set() const { return _is_kernel_size_set; }
+
+ AbsTime beginning_of_time() const { return _beginning_of_time; }
+ void set_beginning_of_time(AbsTime t) { _beginning_of_time = t; }
+
+ mach_timebase_info_data_t timebase() const { return _timebase_info; }
+ void set_timebase(mach_timebase_info_data_t timebase, bool is_user_set) { _timebase_info = timebase; if (is_user_set) _is_timebase_set = true; }
+ bool is_timebase_set() const { return _is_timebase_set; }
+
+ int output_fd() const { return _output_fd.is_open() ? (int)_output_fd : STDOUT_FILENO; }
+
+ // Okay, this method caused enough pain to make the final resolution worth a comment.
+ //
+ // http://thbecker.net/articles/rvalue_references/section_05.html
+ //
+ // Things that are declared as rvalue reference can be lvalues or rvalues.
+ // The distinguishing criterion is: if it has a name, then it is an lvalue. Otherwise, it is an rvalue.
+ //
+ // In this case, you cannot call set_output_fd with an lvalue, but fd is STILL an lvalue.
+ // We must still explicitly use std::move on fd!
+ void set_output_fd(FileDescriptor&& fd) { _output_fd = std::move(fd); }
+
+ void set_should_read_default_trace_codes(bool value) { _should_read_default_trace_codes = value; }
+ void append_trace_codes_at_path(std::string path) { _additional_trace_code_paths.push_back(path); }
+ void resolve_trace_codes(void) { _trace_codes = ::resolve_trace_codes(_should_read_default_trace_codes, _is_verbose ? 1 : -1, _additional_trace_code_paths); }
+
+ const std::unordered_map<uint32_t, std::string>& trace_codes() const { return _trace_codes; }
+ void set_trace_codes(std::unordered_map<uint32_t, std::string>&& codes) { _trace_codes = std::move(codes); }
+
+ bool should_print_mach_absolute_timestamps() const { return _should_print_mach_absolute_timestamps; }
+ void set_should_print_mach_absolute_timestamps(bool value) { _should_print_mach_absolute_timestamps = value; }
+
+ bool should_print_event_index() const { return _should_print_event_index; }
+ void set_should_print_event_index(bool value) { _should_print_event_index = value; }
+
+ bool should_print_symbolic_event_codes() const { return _should_print_symbolic_event_codes; }
+ void set_should_print_symbolic_event_codes(bool value) { _should_print_symbolic_event_codes = value; }
+
+ bool is_verbose() const { return _is_verbose; }
+ void set_is_verbose(bool value) { _is_verbose = value; }
+
+ bool should_presort_events() const { return _should_presort_events; }
+ void set_should_presort_events(bool value) { _should_presort_events = value; }
+
+ bool should_print_cpu_summaries() const { return _should_print_cpu_summaries; }
+ void set_should_print_cpu_summaries(bool value) { _should_print_cpu_summaries = value; }
+
+ bool should_print_process_summaries() const { return _should_print_process_summaries; }
+ void set_should_print_process_summaries(bool value) { _should_print_process_summaries = value; }
+
+ bool should_print_thread_summaries() const { return _should_print_thread_summaries; }
+ void set_should_print_thread_summaries(bool value) { _should_print_thread_summaries = value; }
+
+ bool should_print_events() const { return _should_print_events; }
+ void set_should_print_events(bool value) { _should_print_events = value; }
+
+ bool should_print_summary() const { return _should_print_summary; }
+ void set_should_print_summary(bool value) { _should_print_summary = value; _is_should_print_summary_set = true; }
+ bool is_should_print_summary_set() const { return _is_should_print_summary_set; }
+
+ bool should_zero_base_timestamps() const { return _should_zero_base_timestamps; }
+ void set_should_zero_base_timestamps(bool value) { _should_zero_base_timestamps = value; }
+
+ bool should_print_process_start_stop_timestamps() const { return _should_print_process_start_stop_timestamps; }
+ void set_should_print_process_start_stop_timestamps(bool value) { _should_print_process_start_stop_timestamps = value; }
+
+ bool should_print_csv_summary() const { return _should_print_csv_summary; }
+ void set_should_print_csv_summary(bool value) { _should_print_csv_summary = value; }
+
+ kSortKey sort_key() const { return _sort_key; }
+ void set_sort_key(kSortKey key) { _sort_key = key; }
+
+ //
+ // The summary {start/stop/step} functions translate the string on the fly,
+ // using the currently set timebase. They need to be fed a timespan that
+ // corresponds to the Machine<SIZE>'s timespan, because the default values
+ // and offsets depend on that.
+ //
+ // This solves the issue of the user saying --start 1234mabs at the command line
+ // and getting an offset of 1234 nanoseconds on a desktop when they are looking
+ // at a device file.
+ //
+ AbsTime summary_start(AbsInterval timespan) const;
+ void set_summary_start(const char* value) { _summary_start = value; _is_summary_start_set = true; }
+ bool is_summary_start_set() const { return _is_summary_start_set; }
+
+ AbsTime summary_stop(AbsInterval timespan) const;
+ void set_summary_stop(const char* value) { _summary_stop = value; _is_summary_stop_set = true; }
+ bool is_summary_stop_set() const { return _is_summary_stop_set; }
+
+ AbsTime summary_step(AbsInterval timespan) const;
+ void set_summary_step(const char* value) { _summary_step = value; _is_summary_step_set = true; }
+ bool is_summary_step_set() const { return _is_summary_step_set; }
+};
+
+#endif
--- /dev/null
+//
+// InitializeAction.cpp
+// kdprof
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+void InitializeAction::execute(Globals& globals) {
+ if (_buffers) {
+ if (!KDBG::set_buffer_capacity(_buffers)) {
+ usage("Attempt to set buffer count failed");
+ }
+ }
+
+ if (!KDBG::initialize_buffers()) {
+ usage("Attempt to initialize buffers failed\n");
+ }
+}
--- /dev/null
+//
+// InitializeAction.hpp
+// kdprof
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef kdprof_InitializeAction_hpp
+#define kdprof_InitializeAction_hpp
+
+class InitializeAction : public Action {
+ protected:
+ uint32_t _buffers;
+
+ public:
+ InitializeAction(uint32_t buffers) : _buffers(buffers) { }
+
+ virtual void execute(Globals& globals);
+};
+
+#endif
--- /dev/null
+//
+// NoWrapAction.cpp
+// kdprof
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+void NoWrapAction::execute(Globals& globals) {
+ KDBG::set_nowrap(true);
+}
--- /dev/null
+//
+// NoWrapAction.hpp
+// kdprof
+//
+// Created by James McIlree on 4/16/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef kdprof_NoWrapAction_hpp
+#define kdprof_NoWrapAction_hpp
+
+class NoWrapAction : public Action {
+ public:
+ NoWrapAction() {}
+
+ virtual void execute(Globals& globals);
+};
+
+#endif
--- /dev/null
+//
+// PrintStateAction.cpp
+// kdprof
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+void PrintStateAction::execute(Globals& globals) {
+ printf("\n");
+ KDBG::state().print();
+ printf("\n");
+}
--- /dev/null
+//
+// PrintStateAction.hpp
+// kdprof
+//
+// Created by James McIlree on 4/16/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef kdprof_PrintStateAction_hpp
+#define kdprof_PrintStateAction_hpp
+
+class PrintStateAction : public Action {
+ public:
+ PrintStateAction() {}
+
+ virtual void execute(Globals& globals);
+};
+
+#endif
--- /dev/null
+//
+// RemoveAction.cpp
+// kdprof
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+void RemoveAction::execute(Globals& globals) {
+ KDBG::reset();
+}
--- /dev/null
+//
+// RemoveAction.hpp
+// kdprof
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef kdprof_RemoveAction_hpp
+#define kdprof_RemoveAction_hpp
+
+class RemoveAction : public Action {
+ public:
+ RemoveAction() {}
+
+ virtual void execute(Globals& globals);
+};
+
+#endif
--- /dev/null
+//
+// SaveTraceAction.cpp
+// kdprof
+//
+// Created by James McIlree on 5/2/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+template <typename SIZE>
+static void execute_arch_specific(Globals& globals, KDState& state, FileDescriptor& save_fd) {
+ // Collect all data first, printing takes time...
+ auto threadmap = KDBG::threadmap<SIZE>(state);
+ auto cpumap = KDBG::cpumap();
+
+ // These constants are future-proofing; trace doesn't actually need page
+ // alignment here, just file block size alignment. When page sizes go to
+ // 16k, we don't want 16k of padding.
+
+#define FILE_BLOCK_SIZE 4096
+#define FILE_BLOCK_SIZE_MASK 4095
+
+ /*
+ * To write a RAW_VERSION1+ file, we must embed a cpumap in the "padding"
+ * used to file block align the events following the threadmap. If the
+ * threadmap happens not to leave enough padding, we artificially
+ * increase its footprint until it does.
+ */
+
+ uint32_t pad_size = FILE_BLOCK_SIZE - ((sizeof(TraceDataHeader<SIZE>) + (threadmap.size() * sizeof(KDThreadMapEntry<SIZE>))) & FILE_BLOCK_SIZE_MASK);
+ uint32_t cpumap_size = sizeof(kd_cpumap_header) + (uint32_t)cpumap.size() * sizeof(KDCPUMapEntry);
+ uint32_t extra_thread_count = 0;
+
+ if (cpumap_size > pad_size) {
+ /* Force an overflow onto the next page, so we get a full page of padding */
+ extra_thread_count = (pad_size / sizeof(KDThreadMapEntry<SIZE>)) + 1;
+ }
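+
+ /*
+ * Worked example with hypothetical struct sizes: if the header is 32
+ * bytes and each of 300 threadmap entries is 32 bytes, the map ends at
+ * byte 9632, which is offset 1440 into its file block, leaving pad_size =
+ * 4096 - 1440 = 2656. A cpumap needing more than 2656 bytes forces
+ * extra_thread_count = 2656/32 + 1 = 84 padding entries (2688 bytes),
+ * pushing the events 32 bytes into the next block and yielding a fresh
+ * 4064-byte pad for the cpumap.
+ */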
+
+ // Write the header. The thread count must include any padding entries so
+ // that readers compute the correct file-block-aligned offset to the events.
+ TraceDataHeader<SIZE> header(RAW_VERSION1, (uint32_t)(threadmap.size() + extra_thread_count), time(NULL), 0);
+ write(save_fd, &header, sizeof(TraceDataHeader<SIZE>));
+
+ // Write the threadmaps
+ write(save_fd, threadmap.data(), threadmap.size() * sizeof(KDThreadMapEntry<SIZE>));
+
+ if (extra_thread_count) {
+ pad_size = extra_thread_count * sizeof(KDThreadMapEntry<SIZE>);
+ auto pad_buf = (uint8_t *)calloc(pad_size, 1);
+ write(save_fd, pad_buf, pad_size);
+ free(pad_buf);
+ }
+
+ // Write the cpumaps & any remaining padding
+ size_t bytes_written = sizeof(TraceDataHeader<SIZE>) + (threadmap.size() + extra_thread_count) * sizeof(KDThreadMapEntry<SIZE>);
+ pad_size = FILE_BLOCK_SIZE - (bytes_written & FILE_BLOCK_SIZE_MASK);
+
+ ASSERT(pad_size >= cpumap_size, "Not enough padding bytes!");
+ if (pad_size) {
+ auto cpumap_header = (kd_cpumap_header*)calloc(pad_size, 1);
+ cpumap_header->version_no = RAW_VERSION1;
+ cpumap_header->cpu_count = (uint32_t)cpumap.size();
+ auto cpus = (kd_cpumap*)&cpumap_header[1];
+ memcpy(cpus, cpumap.data(), cpumap.size() * sizeof(KDCPUMapEntry));
+ write(save_fd, cpumap_header, pad_size);
+ }
+
+ // Write the events
+ //
+ // Because this may be used to capture boot traces which consume very
+ // large amounts of memory, we will likely not be able to collect
+ // the entire buffer space in a single shot. Read it in small chunks.
+ //
+ auto twenty_mb = 20 * 1024 * 1024;
+ auto num_events_in_twenty_mb = twenty_mb / sizeof(KDEvent<SIZE>);
+ MemoryBuffer<KDEvent<SIZE>> events(num_events_in_twenty_mb);
+
+ // We read until we don't get back a full buffer, hoping that's enough.
+ while (1) {
+ int count = KDBG::read(events.data(), events.capacity_in_bytes());
+
+ // A read error would otherwise loop forever; count is signed, so it
+ // must be checked (and cast) before comparing against the unsigned
+ // buffer capacity.
+ if (count == -1)
+ break;
+
+ write(save_fd, events.data(), count * sizeof(KDEvent<SIZE>));
+
+ if ((size_t)count < num_events_in_twenty_mb) {
+ break;
+ }
+ }
+
+ // close up
+ save_fd.close();
+}
+
+void SaveTraceAction::execute(Globals& globals) {
+ KDState state = KDBG::state();
+ if (state.is_lp64()) {
+ execute_arch_specific<Kernel64>(globals, state, _save_fd);
+ } else {
+ execute_arch_specific<Kernel32>(globals, state, _save_fd);
+ }
+}
--- /dev/null
+//
+// SaveTraceAction.hpp
+// kdprof
+//
+// Created by James McIlree on 5/2/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef __kdprof__SaveTraceAction__
+#define __kdprof__SaveTraceAction__
+
+class SaveTraceAction : public Action {
+ FileDescriptor _save_fd;
+
+ public:
+ SaveTraceAction(FileDescriptor&& fd) :
+ _save_fd(std::move(fd))
+ {
+ ASSERT(_save_fd.is_open(), "Sanity");
+ }
+
+ virtual void execute(Globals& globals);
+};
+
+#endif
--- /dev/null
+//
+// SleepAction.cpp
+// kdprof
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+void SleepAction::execute(Globals& globals) {
+ uint64_t nanos = _time.value();
+ struct timespec ts;
+ ts.tv_sec = decltype(ts.tv_sec)(nanos / NANOSECONDS_PER_SECOND);
+ ts.tv_nsec = decltype(ts.tv_nsec)(nanos - ts.tv_sec * NANOSECONDS_PER_SECOND);
+ nanosleep(&ts, NULL);
+}
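+
+// Worked example (hypothetical value): for _time = 2,500,000,000 nanoseconds,
+// tv_sec = 2,500,000,000 / 1,000,000,000 = 2 and
+// tv_nsec = 2,500,000,000 - (2 * 1,000,000,000) = 500,000,000,
+// i.e. a 2.5 second nanosleep.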
--- /dev/null
+//
+// SleepAction.hpp
+// kdprof
+//
+// Created by James McIlree on 4/16/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef kdprof_SleepAction_hpp
+#define kdprof_SleepAction_hpp
+
+class SleepAction : public Action {
+ NanoTime _time;
+
+ public:
+ SleepAction(NanoTime t) : _time(t) {}
+
+ virtual void execute(Globals& globals);
+};
+
+#endif
--- /dev/null
+//
+// SummaryPrinting.cpp
+// kdprof
+//
+// Created by James McIlree on 4/19/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+constexpr const char* const SummaryLineData::indent_string[];
+
+void print_summary_header(const Globals& globals) {
+ // Header is...
+ // Avg Actual Wanted Actual Wanted Jetsam
+ // All CPU Thr Avg Actual Wanted Concurrency Processes To Run Threads To Run VMFault VMFault IO Wait # IO IO Bytes Jetsam Proc
+ // [Time(mS)] Name Run% Idle% Intr% Idle% #Intr #CSW On CPU/µS CPU/mS CPU/mS (# CPU) Ran Processes Ran Threads Count Time (mS) Time (mS) Ops Completed Time (mS) Count
+ // 123456789abcdef0 123456789012345678901234567890 1234567 1234567 1234567 1234567 1234567 12345678 123456789 123456789abc 123456789abc 123456789ab 123456789 123456789 1234567 1234567 1234567 123456789abc 123456789abc 1234567 1234567890 123456789 123456
+ // 1119100000.00 76.58 16.53 6.89 0.00 230 112 10000.00 100000.00 100000.00 1.55 2 3 12 13 2280 230.48 1998.22 3318 123.40 MB 0.00
+
+ const char* time1 = "";
+ const char* time2 = "";
+ const char* time3 = "";
+ char time_buffer1[32];
+ char time_buffer2[32];
+ char time_buffer3[32];
+
+ // Timestamps are only printed when a --start/--stop/--step value was
+ // explicitly set; when summarizing the entire data set in one pass, the
+ // timestamp column is omitted.
+ if (globals.is_summary_start_set() || globals.is_summary_stop_set() || globals.is_summary_step_set()) {
+ sprintf(time_buffer1, "%16s ", "");
+ sprintf(time_buffer2, "%16s ", "");
+ sprintf(time_buffer3, "%-16s ", globals.should_print_mach_absolute_timestamps() ? "Time(mach-abs)" : "Time(mS)");
+
+ time1 = time_buffer1;
+ time2 = time_buffer2;
+ time3 = time_buffer3;
+ }
+
+ dprintf(globals.output_fd(), "%s%-30s %7s %7s %7s %7s %7s %8s %9s %12s %12s %11s %9s %9s %7s %7s %7s %12s %12s %7s %10s %9s %6s\n", time1, "", "", "", "", "", "", "", "", "", "", "Avg", "Actual", "Wanted", "Actual", "Wanted", "", "", "", "", "", "", "Jetsam");
+ dprintf(globals.output_fd(), "%s%-30s %7s %7s %7s %7s %7s %8s %9s %12s %12s %11s %9s %9s %7s %7s %7s %12s %12s %7s %10s %9s %6s\n", time2, "", "", "", "", "All-CPU", "", "", "Thr Avg", "Actual", "Wanted", "Concurrency", "Processes", "To Run", "Threads", "To Run", "VMFault", "VMFault", "IO Wait", "# IO", "IO Bytes", "Jetsam", "Proc");
+ dprintf(globals.output_fd(), "%s%-30s %7s %7s %7s %7s %7s %8s %9s %12s %12s %11s %9s %9s %7s %7s %7s %12s %12s %7s %10s %9s %6s\n", time3, "Name", "Run%", "Idle%", "Intr%", "Idle%", "#Intr", "#CSW", "On CPU/µS", "CPU/mS", "CPU/mS", "(# CPU)", "Ran", "Processes", "Ran", "Threads", "Count", "Time (mS)", "Time (mS)", "Ops", "Completed", "Time (mS)", "Count");
+}
--- /dev/null
+//
+// SummaryPrinting.hpp
+// kdprof
+//
+// Created by James McIlree on 4/19/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef kdprof_Printing_hpp
+#define kdprof_Printing_hpp
+
+void print_summary_header(const Globals& globals);
+
+struct SummaryLineData {
+ protected:
+ static constexpr const char* const indent_string[] = { "", " ", " ", " " };
+ static const uint32_t MAX_INDENT_LEVEL = 3; // Need to know this for time indenting to work correctly
+
+ uint32_t _indent_level;
+ const char* _name;
+
+ public:
+
+ enum class SummaryType {
+ Unknown,
+ CPU,
+ Process,
+ Thread
+ };
+
+ SummaryLineData(const char* name, uint32_t indent_level) :
+ _indent_level(indent_level),
+ _name(name),
+ should_print_timestamp(true),
+ num_intr_events(0),
+ context_switches(0),
+ actual_process_count(0),
+ wanted_process_count(0),
+ actual_thread_count(0),
+ wanted_thread_count(0),
+ num_vm_fault_events(0),
+ num_io_events(0),
+ io_bytes_completed(0),
+ num_jetsam_pids(0),
+ percent_multiplier(100.0),
+ type(SummaryType::Unknown),
+ is_colored(false),
+ begin_color(NULL),
+ end_color(NULL)
+ {
+ ASSERT(_indent_level <= MAX_INDENT_LEVEL, "Sanity");
+ ASSERT(_name && strlen(_name) > 0, "Sanity");
+ }
+
+ bool should_print_timestamp;
+ AbsTime total_time;
+ AbsTime total_run_time;
+ AbsTime total_idle_time;
+ AbsTime total_intr_time;
+ AbsTime total_wanted_run_time;
+ AbsTime total_wallclock_run_time;
+ AbsTime total_all_cpus_idle_time;
+ AbsTime total_vm_fault_time;
+ AbsTime total_io_time;
+ AbsTime total_jetsam_time;
+ uint32_t num_intr_events;
+ uint32_t context_switches;
+ uint32_t actual_process_count;
+ uint32_t wanted_process_count;
+ uint32_t actual_thread_count;
+ uint32_t wanted_thread_count;
+ uint32_t num_vm_fault_events;
+ uint32_t num_io_events;
+ uint64_t io_bytes_completed;
+ uint32_t num_jetsam_pids;
+ double percent_multiplier;
+ SummaryType type;
+ bool is_colored;
+ const char* begin_color;
+ const char* end_color;
+
+ const char* name() { return _name; }
+ const char* outdent() { return indent_string[MAX_INDENT_LEVEL - _indent_level]; }
+ const char* indent() { return indent_string[_indent_level]; }
+
+ bool is_unknown() { return type == SummaryType::Unknown; }
+ bool is_cpu() { return type == SummaryType::CPU; }
+ bool is_process() { return type == SummaryType::Process; }
+ bool is_thread() { return type == SummaryType::Thread; }
+};
+
+template <typename SIZE>
+void print_summary_line(const Globals& globals, const Machine<SIZE>& machine, AbsInterval summary_interval, struct SummaryLineData& line_data)
+{
+ // Header is...
+ // Avg Actual Wanted Actual Wanted Jetsam
+ // All CPU Thr Avg Actual Wanted Concurrency Processes To Run Threads To Run VMFault VMFault IO Wait # IO IO Bytes Jetsam Proc
+ // [Time(mS)] Name Run% Idle% Intr% Idle% #Intr #CSW On CPU/µS CPU/mS CPU/mS (# CPU) Ran Processes Ran Threads Count Time (mS) Time (mS) Ops Completed Time (mS) Count
+ // 123456789abcdef0 123456789012345678901234567890 1234567 1234567 1234567 1234567 1234567 12345678 123456789 123456789abc 123456789abc 123456789ab 123456789 123456789 1234567 1234567 1234567 123456789abc 123456789abc 1234567 1234567890 123456789 123456
+ // 1119100000.00 76.58 16.53 6.89 0.00 230 112 10000.00 100000.00 100000.00 1.55 2 3 12 13 2280 230.48 1998.22 3318 123.40 MB 0.00
+
+ ASSERT(!line_data.is_unknown(), "Sanity");
+
+ //
+ // It turns out that calling dprintf is very expensive; we're going to
+ // accumulate to a string buffer and then flush once at the end.
+ //
+ char line[1024];
+ char* cursor = line;
+ char* line_end = line + sizeof(line);
+
+ //
+ // Begin line coloring (if any)
+ //
+ if (line_data.is_colored) {
+ ASSERT(line_data.begin_color && line_data.end_color, "Sanity");
+ cursor += snprintf(cursor, line_end - cursor, "%s", line_data.begin_color);
+
+ if (cursor > line_end)
+ cursor = line_end;
+ }
+
+ if (line_data.should_print_timestamp) {
+
+ //
+ // Time and Name get a special indent treatment, so they come out
+ // as hierarchically aligned, while not disturbing the rest of the
+ // columns. The time value is actually outdented, the name value
+ // is indented.
+ //
+ // The result is that you get something like this:
+ //
+ // [Time(mS)] Name Run%
+ // 123456789abcdef0 123456789012345678901234567890 1234567
+ //
+ // 1000.00 INDENT-LEVEL-0 ##.##
+ // 1000.00 INDENT-LEVEL-1 ##.##
+ // 1000.00 INDENT-LEVEL-2 ##.##
+ // 1000.00 INDENT-LEVEL-3 ##.##
+ //
+
+ char time_buffer[64];
+
+ //
+ // Time
+ //
+ if (globals.should_print_mach_absolute_timestamps()) {
+ if (globals.beginning_of_time().value() == 0)
+ snprintf(time_buffer, sizeof(time_buffer), "%llX%s", (summary_interval.location() - globals.beginning_of_time()).value(), line_data.outdent());
+ else
+ snprintf(time_buffer, sizeof(time_buffer), "%llu%s", (summary_interval.location() - globals.beginning_of_time()).value(), line_data.outdent());
+ } else {
+ NanoTime ntime = (summary_interval.location() - globals.beginning_of_time()).nano_time(globals.timebase());
+ snprintf(time_buffer, sizeof(time_buffer), "%3.2f%s", (double)ntime.value() / 1000000.0, line_data.outdent());
+ }
+
+ cursor += snprintf(cursor, line_end - cursor, "%16s ", time_buffer);
+
+ if (cursor > line_end)
+ cursor = line_end;
+ }
+
+ //
+ // Name
+ //
+
+ {
+ char name_buffer[64];
+ snprintf(name_buffer, sizeof(name_buffer), "%s%s", line_data.indent(), line_data.name());
+
+ cursor += snprintf(cursor, line_end - cursor, "%-30s ", name_buffer);
+ if (cursor > line_end)
+ cursor = line_end;
+ }
+
+ //
+ // Run% Idle% Intr% All-CPUs-Idle% #Intr
+ //
+
+ // Special case for process/thread summary lines: print idle/intr as "-".
+ if (line_data.is_process() || line_data.is_thread()) {
+ double run_percent = 0.0;
+
+ if (line_data.total_time.value() > 0)
+ run_percent = line_data.total_run_time.double_value() / line_data.total_time.double_value() * line_data.percent_multiplier;
+
+ cursor += snprintf(cursor, line_end - cursor, "%7.2f %7s %7s %7s %7u ",
+ run_percent,
+ "-",
+ "-",
+ "-",
+ line_data.num_intr_events);
+ } else {
+ ASSERT(line_data.total_time.value() > 0, "Sanity");
+
+ cursor += snprintf(cursor, line_end - cursor, "%7.2f %7.2f %7.2f %7.2f %7u ",
+ line_data.total_run_time.double_value() / line_data.total_time.double_value() * line_data.percent_multiplier,
+ line_data.total_idle_time.double_value() / line_data.total_time.double_value() * line_data.percent_multiplier,
+ line_data.total_intr_time.double_value() / line_data.total_time.double_value() * line_data.percent_multiplier,
+ line_data.total_all_cpus_idle_time.double_value() / line_data.total_time.double_value() * line_data.percent_multiplier,
+ line_data.num_intr_events);
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // #context-switches avg-on-cpu/µS
+ //
+ if (line_data.context_switches > 0) {
+ double avg_on_cpu_uS = (line_data.total_run_time / AbsTime(line_data.context_switches)).nano_time(globals.timebase()).value() / 1000.0;
+ cursor += snprintf(cursor, line_end - cursor, "%8u %9.2f ", line_data.context_switches, avg_on_cpu_uS);
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, "%8u %9s ", line_data.context_switches, "-");
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // Actual CPU/mS, Wanted CPU/mS (wanted = actual run time + future run time)
+ //
+ if (line_data.total_wanted_run_time > 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%12.2f %12.2f ",
+ (double)line_data.total_run_time.nano_time(globals.timebase()).value() / 1000000.0,
+ (double)(line_data.total_run_time + line_data.total_wanted_run_time).nano_time(globals.timebase()).value() / 1000000.0);
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, "%12.2f %12s ",
+ (double)line_data.total_run_time.nano_time(globals.timebase()).value() / 1000000.0,
+ "-");
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // Proc Avg Concurrency
+ //
+
+ if (line_data.total_wallclock_run_time > 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%11.2f ", (double)line_data.total_run_time.value() / (double)line_data.total_wallclock_run_time.value());
+ // cursor += snprintf(cursor, line_end - cursor, "%11.2f ", (double)line_data.total_wallclock_run_time.nano_time(globals.timebase()).value() / 1000000.0);
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, "%11s ", "-");
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // Actual Processes, Wanted Processes
+ //
+ if (line_data.is_thread()) {
+ cursor += snprintf(cursor, line_end - cursor, "%9s %9s ", "-", "-");
+ } else {
+ if (line_data.total_run_time > 0 && line_data.total_wanted_run_time > 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%9u %9u ", (uint32_t)line_data.actual_process_count, (uint32_t)line_data.wanted_process_count);
+ } else if (line_data.total_run_time > 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%9u %9s ", (uint32_t)line_data.actual_process_count, "-");
+ } else if (line_data.total_wanted_run_time > 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%9s %9u ", "-", (uint32_t)line_data.wanted_process_count);
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, "%9s %9s ", "-", "-");
+ }
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // Actual Threads, Wanted Threads
+ //
+ if (line_data.total_run_time > 0 && line_data.total_wanted_run_time > 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%7u %7u ", (uint32_t)line_data.actual_thread_count, (uint32_t)line_data.wanted_thread_count);
+ } else if (line_data.total_run_time > 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%7u %7s ", (uint32_t)line_data.actual_thread_count, "-");
+ } else if (line_data.total_wanted_run_time > 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%7s %7u ", "-", (uint32_t)line_data.wanted_thread_count);
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, "%7s %7s ", "-", "-");
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+
+ //
+ // #vmfaults, mS blocked in vmfault
+ //
+ if (line_data.num_vm_fault_events == 0 && line_data.total_vm_fault_time.value() == 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%7s %12s ", "-", "-");
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, "%7u %12.2f ",
+ line_data.num_vm_fault_events,
+ (double)line_data.total_vm_fault_time.nano_time(globals.timebase()).value() / 1000000.0);
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // mS blocked on IO activity
+ //
+ if (line_data.total_io_time.value() == 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%12s ", "-");
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, "%12.2f ",
+ (double)line_data.total_io_time.nano_time(globals.timebase()).value() / 1000000.0);
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // # IO operations
+ //
+ if (line_data.num_io_events == 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%7s ", "-");
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, "%7u ", line_data.num_io_events);
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // IO bytes completed
+ //
+ if (line_data.io_bytes_completed == 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%10s ", "-");
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, "%10s ", formated_byte_size(line_data.io_bytes_completed).c_str());
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // Jetsam time
+ //
+ if (line_data.total_jetsam_time == 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%9s ", "-");
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, "%9.2f ",
+ (double)line_data.total_jetsam_time.nano_time(globals.timebase()).value() / 1000000.0);
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // Jetsam count
+ //
+ if (line_data.is_cpu()) {
+ if (line_data.num_jetsam_pids == 0) {
+ cursor += snprintf(cursor, line_end - cursor, "%6s", "-");
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, "%6u", line_data.num_jetsam_pids);
+ }
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, "%6s", "");
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // End line coloring (if any)
+ //
+ if (line_data.is_colored) {
+ cursor += snprintf(cursor, line_end - cursor, "%s", line_data.end_color);
+
+ if (cursor > line_end)
+ cursor = line_end;
+ }
+
+ dprintf(globals.output_fd(), "%s\n", line);
+}
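+
+//
+// snprintf returns the number of characters that would have been written,
+// not the number actually written, so on truncation the cursor moves past
+// line_end and must be clamped before the next append, as done above. A
+// minimal sketch of a helper folding the clamp into the append (hypothetical;
+// the code in this file does not use it):
+//
+static inline char* buffer_append(char* cursor, char* line_end, const char* fmt, ...)
+{
+ va_list args;
+ va_start(args, fmt);
+ // vsnprintf has the same would-be-length return convention; clamp after.
+ cursor += vsnprintf(cursor, line_end - cursor, fmt, args);
+ va_end(args);
+ return (cursor > line_end) ? line_end : cursor;
+}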
+
+template <typename SIZE>
+void print_cpu_summary_with_name_and_indent(const Globals& globals, const Machine<SIZE>& machine, AbsInterval summary_interval, const CPUSummary<SIZE>& master_summary, const CPUSummary<SIZE>& cpu_summary, const char* name, uint32_t indent)
+{
+ struct SummaryLineData data(name, indent);
+
+ data.should_print_timestamp = (globals.is_summary_start_set() || globals.is_summary_stop_set() || globals.is_summary_step_set());
+ data.total_time = master_summary.total_time();
+ data.total_run_time = cpu_summary.total_run_time();
+ data.total_idle_time = cpu_summary.total_idle_time();
+ data.total_intr_time = cpu_summary.total_intr_time();
+ data.total_wanted_run_time = cpu_summary.total_future_run_time();
+ data.total_wallclock_run_time = cpu_summary.total_wallclock_run_time();
+ data.total_all_cpus_idle_time = cpu_summary.total_all_cpus_idle_time();
+ data.total_vm_fault_time = cpu_summary.total_vm_fault_time();
+ data.total_io_time = cpu_summary.total_io_time();
+ data.total_jetsam_time = cpu_summary.total_jetsam_time();
+ data.context_switches = cpu_summary.context_switches();
+ data.num_intr_events = cpu_summary.num_intr_events();
+ data.num_vm_fault_events = cpu_summary.num_vm_fault_events();
+ data.num_io_events = cpu_summary.num_io_events();
+ data.num_jetsam_pids = cpu_summary.num_processes_jetsammed();
+ data.io_bytes_completed = cpu_summary.io_bytes_completed();
+ data.type = SummaryLineData::SummaryType::CPU;
+
+ for (auto& process_summary : cpu_summary.process_summaries()) {
+
+ if (process_summary.total_run_time() > 0) {
+ data.actual_process_count++;
+ data.wanted_process_count++;
+ } else if (process_summary.total_future_run_time() > 0) {
+ data.wanted_process_count++;
+ } else {
+ // ASSERT(cpu_summary.total_vm_fault_time() > 0, "Process in summary no actual or wanted run time, and no vm_fault time");
+ }
+
+ for (auto& thread_summary : process_summary.thread_summaries()) {
+ if (thread_summary.total_run_time() > 0) {
+ data.actual_thread_count++;
+ data.wanted_thread_count++;
+ } else if (thread_summary.total_future_run_time() > 0) {
+ data.wanted_thread_count++;
+ } else {
+ // ASSERT((thread_summary.total_vm_fault_time() > 0) || (thread_summary.total_pgin_time() > 0), "Thread in summary no actual or wanted run time, and no vm_fault or pgin time");
+ }
+ }
+ }
+
+ data.percent_multiplier *= (double)master_summary.active_cpus();
+
+ print_summary_line(globals, machine, summary_interval, data);
+}
+
+template <typename SIZE>
+void print_process_summary_with_name_and_indent(const Globals& globals, const Machine<SIZE>& machine, AbsInterval summary_interval, const CPUSummary<SIZE>& master_summary, const ProcessSummary<SIZE>& process_summary, const char* name, uint32_t indent)
+{
+ struct SummaryLineData data(name, indent);
+
+ data.should_print_timestamp = (globals.is_summary_start_set() || globals.is_summary_stop_set() || globals.is_summary_step_set());
+ data.total_run_time = process_summary.total_run_time();
+ data.total_wanted_run_time = process_summary.total_future_run_time();
+ data.total_wallclock_run_time = process_summary.total_wallclock_run_time();
+ data.total_vm_fault_time = process_summary.total_vm_fault_time();
+ data.total_io_time = process_summary.total_io_time();
+ data.total_jetsam_time = process_summary.total_jetsam_time();
+ data.context_switches = process_summary.context_switches();
+ data.num_intr_events = process_summary.num_intr_events();
+ data.actual_process_count = 1;
+ data.wanted_process_count = 1;
+ data.num_vm_fault_events = process_summary.num_vm_fault_events();
+ data.num_io_events = process_summary.num_io_events();
+ data.num_jetsam_pids = process_summary.num_processes_jetsammed();
+ data.io_bytes_completed = process_summary.io_bytes_completed();
+ data.total_time = master_summary.total_time();
+ // This causes the line printer to put "-" in the idle and intr % columns.
+ data.type = SummaryLineData::SummaryType::Process;
+ data.percent_multiplier *= (double)master_summary.active_cpus();
+
+ // We have to walk the threads to decide actual vs wanted to run
+ for (auto& thread_summary : process_summary.thread_summaries()) {
+ if (thread_summary.total_run_time() > 0) {
+ data.actual_thread_count++;
+ data.wanted_thread_count++;
+ } else if (thread_summary.total_future_run_time() > 0) {
+ data.wanted_thread_count++;
+ } else {
+ // ASSERT(thread_summary.total_vm_fault_time() > 0, "Thread in summary no actual or wanted run time, and no vm_fault time");
+ }
+ }
+
+ print_summary_line(globals, machine, summary_interval, data);
+}
+
+template <typename SIZE>
+void print_thread_summary_with_name_and_indent(const Globals& globals, const Machine<SIZE>& machine, AbsInterval summary_interval, const CPUSummary<SIZE>& master_summary, const ThreadSummary<SIZE>& thread_summary, const char* name, uint32_t indent)
+{
+ struct SummaryLineData data(name, indent);
+
+ /*data.is_colored = true;
+ data.begin_color = TerminalColorStringFor(kTerminalColor::GREEN, true, false);
+ data.end_color = TerminalColorResetString();*/
+
+ data.should_print_timestamp = (globals.is_summary_start_set() || globals.is_summary_stop_set() || globals.is_summary_step_set());
+ data.total_run_time = thread_summary.total_run_time();
+ data.total_wanted_run_time = thread_summary.total_future_run_time();
+ data.total_vm_fault_time = thread_summary.total_vm_fault_time();
+ data.total_io_time = thread_summary.total_io_time();
+ data.total_jetsam_time = thread_summary.total_jetsam_time();
+ data.context_switches = thread_summary.context_switches();
+ data.num_intr_events = thread_summary.num_intr_events();
+ data.num_vm_fault_events = thread_summary.num_vm_fault_events();
+ data.num_io_events = thread_summary.num_io_events();
+ data.num_jetsam_pids = 0;
+ data.io_bytes_completed = thread_summary.io_bytes_completed();
+ data.total_time = master_summary.total_time();
+ data.percent_multiplier *= (double)master_summary.active_cpus();
+ data.actual_thread_count = 1;
+ data.wanted_thread_count = 1;
+
+ // This causes the line printer to put "-" in various columns that don't make sense for a thread summary
+ data.type = SummaryLineData::SummaryType::Thread;
+
+ print_summary_line(globals, machine, summary_interval, data);
+}
+
+template <typename SIZE>
+static void sort_processes(const Globals& globals, const CPUSummary<SIZE>& summary, std::vector<const MachineProcess<SIZE>*>& processes) {
+ switch (globals.sort_key()) {
+ case kSortKey::CPU:
+ // Sort by Actual CPU, Future CPU, pid
+ std::sort(processes.begin(), processes.end(), [&summary](const MachineProcess<SIZE>* p0, const MachineProcess<SIZE>* p1) -> bool {
+ auto p0_summary = summary.process_summary(p0);
+ auto p1_summary = summary.process_summary(p1);
+
+ AbsTime p0_run_time = p0_summary->total_run_time();
+ AbsTime p1_run_time = p1_summary->total_run_time();
+
+ if (p0_run_time == p1_run_time) {
+ AbsTime p0_future_run_time = p0_summary->total_future_run_time();
+ AbsTime p1_future_run_time = p1_summary->total_future_run_time();
+
+ if (p0_future_run_time == p1_future_run_time)
+ return p0->pid() < p1->pid();
+
+ return p1_future_run_time < p0_future_run_time;
+ }
+
+ return p1_run_time < p0_run_time;
+ });
+ break;
+
+ case kSortKey::VMFault:
+ // Sort by VMFault time, #-faults, pid
+ std::sort(processes.begin(), processes.end(), [&summary](const MachineProcess<SIZE>* p0, const MachineProcess<SIZE>* p1) -> bool {
+ auto p0_summary = summary.process_summary(p0);
+ auto p1_summary = summary.process_summary(p1);
+
+ AbsTime p0_vm_fault_time = p0_summary->total_vm_fault_time();
+ AbsTime p1_vm_fault_time = p1_summary->total_vm_fault_time();
+
+ if (p0_vm_fault_time == p1_vm_fault_time) {
+ uint32_t p0_vm_fault_count = p0_summary->num_vm_fault_events();
+ uint32_t p1_vm_fault_count = p1_summary->num_vm_fault_events();
+
+ if (p0_vm_fault_count == p1_vm_fault_count)
+ return p0->pid() < p1->pid();
+
+ return p1_vm_fault_count < p0_vm_fault_count;
+ }
+
+ return p1_vm_fault_time < p0_vm_fault_time;
+ });
+ break;
+
+ case kSortKey::IO_Wait:
+ // Sort by IO wait time, # IO ops, pid
+ std::sort(processes.begin(), processes.end(), [&summary](const MachineProcess<SIZE>* p0, const MachineProcess<SIZE>* p1) -> bool {
+ auto p0_summary = summary.process_summary(p0);
+ auto p1_summary = summary.process_summary(p1);
+
+ AbsTime p0_io_time = p0_summary->total_io_time();
+ AbsTime p1_io_time = p1_summary->total_io_time();
+
+ if (p0_io_time == p1_io_time) {
+ uint32_t p0_io_ops = p0_summary->num_io_events();
+ uint32_t p1_io_ops = p1_summary->num_io_events();
+
+ if (p0_io_ops == p1_io_ops)
+ return p0->pid() < p1->pid();
+
+ return p1_io_ops < p0_io_ops;
+ }
+
+ return p1_io_time < p0_io_time;
+ });
+ break;
+
+ case kSortKey::IO_Ops:
+ // Sort by # IO ops, IO wait time, pid
+ std::sort(processes.begin(), processes.end(), [&summary](const MachineProcess<SIZE>* p0, const MachineProcess<SIZE>* p1) -> bool {
+ auto p0_summary = summary.process_summary(p0);
+ auto p1_summary = summary.process_summary(p1);
+
+ uint32_t p0_io_ops = p0_summary->num_io_events();
+ uint32_t p1_io_ops = p1_summary->num_io_events();
+
+ if (p0_io_ops == p1_io_ops) {
+ AbsTime p0_io_time = p0_summary->total_io_time();
+ AbsTime p1_io_time = p1_summary->total_io_time();
+
+ if (p0_io_time == p1_io_time)
+ return p0->pid() < p1->pid();
+
+ return p1_io_time < p0_io_time;
+ }
+
+ return p1_io_ops < p0_io_ops;
+ });
+ break;
+
+ case kSortKey::IO_Size:
+ // Sort by IO bytes completed, IO wait time, pid
+ std::sort(processes.begin(), processes.end(), [&summary](const MachineProcess<SIZE>* p0, const MachineProcess<SIZE>* p1) -> bool {
+ auto p0_summary = summary.process_summary(p0);
+ auto p1_summary = summary.process_summary(p1);
+
+ uint64_t p0_io_bytes_completed = p0_summary->io_bytes_completed();
+ uint64_t p1_io_bytes_completed = p1_summary->io_bytes_completed();
+
+ if (p0_io_bytes_completed == p1_io_bytes_completed) {
+ AbsTime p0_io_time = p0_summary->total_io_time();
+ AbsTime p1_io_time = p1_summary->total_io_time();
+
+ if (p0_io_time == p1_io_time)
+ return p0->pid() < p1->pid();
+
+ return p1_io_time < p0_io_time;
+ }
+
+ return p1_io_bytes_completed < p0_io_bytes_completed;
+ });
+ break;
+
+ case kSortKey::ID:
+ // Sort by pid
+ std::sort(processes.begin(), processes.end(), [](const MachineProcess<SIZE>* p0, const MachineProcess<SIZE>* p1) -> bool {
+ return p0->pid() < p1->pid();
+ });
+ break;
+ }
+}
+
+template <typename SIZE>
+static void sort_threads(const Globals& globals, const ProcessSummary<SIZE>& summary, std::vector<const MachineThread<SIZE>*>& threads) {
+ switch (globals.sort_key()) {
+ case kSortKey::CPU:
+ std::sort(threads.begin(), threads.end(), [&summary](const MachineThread<SIZE>* t0, const MachineThread<SIZE>* t1) -> bool {
+ auto t0_summary = summary.thread_summary(t0);
+ auto t1_summary = summary.thread_summary(t1);
+
+ AbsTime t0_run_time = t0_summary->total_run_time();
+ AbsTime t1_run_time = t1_summary->total_run_time();
+
+ if (t0_run_time == t1_run_time) {
+ AbsTime t0_future_run_time = t0_summary->total_future_run_time();
+ AbsTime t1_future_run_time = t1_summary->total_future_run_time();
+
+ if (t0_future_run_time == t1_future_run_time)
+ return t0->tid() < t1->tid();
+
+ return t1_future_run_time < t0_future_run_time;
+ }
+
+ return t1_run_time < t0_run_time;
+ });
+ break;
+
+ case kSortKey::VMFault:
+ // Sort by VMFault time, #-faults, tid
+ std::sort(threads.begin(), threads.end(), [&summary](const MachineThread<SIZE>* t0, const MachineThread<SIZE>* t1) -> bool {
+ auto t0_summary = summary.thread_summary(t0);
+ auto t1_summary = summary.thread_summary(t1);
+
+ AbsTime t0_vm_fault_time = t0_summary->total_vm_fault_time();
+ AbsTime t1_vm_fault_time = t1_summary->total_vm_fault_time();
+
+ if (t0_vm_fault_time == t1_vm_fault_time) {
+ uint32_t t0_vm_fault_count = t0_summary->num_vm_fault_events();
+ uint32_t t1_vm_fault_count = t1_summary->num_vm_fault_events();
+
+ if (t0_vm_fault_count == t1_vm_fault_count)
+ return t0->tid() < t1->tid();
+
+ return t1_vm_fault_count < t0_vm_fault_count;
+ }
+
+ return t1_vm_fault_time < t0_vm_fault_time;
+ });
+ break;
+
+ case kSortKey::IO_Wait:
+ // Sort by IO wait time, # IO ops, tid
+ std::sort(threads.begin(), threads.end(), [&summary](const MachineThread<SIZE>* t0, const MachineThread<SIZE>* t1) -> bool {
+ auto t0_summary = summary.thread_summary(t0);
+ auto t1_summary = summary.thread_summary(t1);
+
+ AbsTime t0_io_time = t0_summary->total_io_time();
+ AbsTime t1_io_time = t1_summary->total_io_time();
+
+ if (t0_io_time == t1_io_time) {
+ uint32_t t0_io_ops = t0_summary->num_io_events();
+ uint32_t t1_io_ops = t1_summary->num_io_events();
+
+ if (t0_io_ops == t1_io_ops)
+ return t0->tid() < t1->tid();
+
+ return t1_io_ops < t0_io_ops;
+ }
+
+ return t1_io_time < t0_io_time;
+ });
+ break;
+
+ case kSortKey::IO_Ops:
+ // Sort by # IO ops, IO wait time, tid
+ std::sort(threads.begin(), threads.end(), [&summary](const MachineThread<SIZE>* t0, const MachineThread<SIZE>* t1) -> bool {
+ auto t0_summary = summary.thread_summary(t0);
+ auto t1_summary = summary.thread_summary(t1);
+
+ uint32_t t0_io_ops = t0_summary->num_io_events();
+ uint32_t t1_io_ops = t1_summary->num_io_events();
+
+ if (t0_io_ops == t1_io_ops) {
+ AbsTime t0_io_time = t0_summary->total_io_time();
+ AbsTime t1_io_time = t1_summary->total_io_time();
+
+ if (t0_io_time == t1_io_time)
+ return t0->tid() < t1->tid();
+
+ return t1_io_time < t0_io_time;
+ }
+
+ return t1_io_ops < t0_io_ops;
+ });
+ break;
+
+ case kSortKey::IO_Size:
+ // Sort by IO bytes completed, IO wait time, tid
+ std::sort(threads.begin(), threads.end(), [&summary](const MachineThread<SIZE>* t0, const MachineThread<SIZE>* t1) -> bool {
+ auto t0_summary = summary.thread_summary(t0);
+ auto t1_summary = summary.thread_summary(t1);
+
+ uint64_t t0_io_bytes_completed = t0_summary->io_bytes_completed();
+ uint64_t t1_io_bytes_completed = t1_summary->io_bytes_completed();
+
+ if (t0_io_bytes_completed == t1_io_bytes_completed) {
+ AbsTime t0_io_time = t0_summary->total_io_time();
+ AbsTime t1_io_time = t1_summary->total_io_time();
+
+ if (t0_io_time == t1_io_time)
+ return t0->tid() < t1->tid();
+
+ return t1_io_time < t0_io_time;
+ }
+
+ return t1_io_bytes_completed < t0_io_bytes_completed;
+ });
+ break;
+
+ case kSortKey::ID:
+ std::sort(threads.begin(), threads.end(), [](const MachineThread<SIZE>* t0, const MachineThread<SIZE>* t1) -> bool {
+ return t0->tid() < t1->tid();
+ });
+ break;
+ }
+}
+
+template <typename SIZE>
+void print_machine_summary(const Globals& globals, const Machine<SIZE>& machine) {
+ AbsInterval machine_timespan = machine.timespan();
+
+ AbsTime start(globals.summary_start(machine_timespan));
+ AbsTime stop(globals.summary_stop(machine_timespan));
+ AbsTime step(globals.summary_step(machine_timespan));
+
+ print_summary_header(globals);
+
+ AbsInterval start_stop_timespan(start, stop - start);
+ AbsInterval clipped_start_stop_timespan(start_stop_timespan.intersection_range(machine_timespan));
+
+ start = clipped_start_stop_timespan.location();
+ stop = clipped_start_stop_timespan.max();
+
+ while (start < stop) {
+ AbsInterval base_interval(start, step);
+ AbsInterval summary_interval(base_interval.intersection_range(clipped_start_stop_timespan));
+
+ //
+ // TOTAL summary
+ //
+ CPUSummary<SIZE> summary = machine.summary_for_timespan(summary_interval, NULL);
+
+ //
+ // We want the TOTAL to include the number of ms elapsed, so print a duration
+ //
+ char total_buffer[64];
+ if (globals.should_print_mach_absolute_timestamps()) {
+ if (globals.beginning_of_time().value() == 0)
+ snprintf(total_buffer, sizeof(total_buffer), "TOTAL (0x%llXmabs)", summary_interval.length().value());
+ else
+ snprintf(total_buffer, sizeof(total_buffer), "TOTAL (%llumabs)", summary_interval.length().value());
+ } else {
+ NanoTime ntime = summary_interval.length().nano_time(globals.timebase());
+ snprintf(total_buffer, sizeof(total_buffer), "TOTAL (%3.2fms)", (double)ntime.value() / 1000000.0);
+ }
+ print_cpu_summary_with_name_and_indent(globals, machine, summary_interval, summary, summary, total_buffer, 0);
+
+ std::vector<CPUSummary<SIZE>> per_cpu_summaries;
+
+ //
+ // TOTAL per cpu summary
+ //
+ if (globals.should_print_cpu_summaries()) {
+ // summary.cpus() is unordered; we want to display sorted by cpu id.
+ std::vector<const MachineCPU<SIZE>*> sorted_cpus;
+
+ for (auto& cpu : summary.cpus()) {
+ sorted_cpus.emplace_back(cpu);
+ }
+
+ std::sort(sorted_cpus.begin(), sorted_cpus.end(), [](MachineCPU<SIZE> const* cpu0, MachineCPU<SIZE> const* cpu1) -> bool {
+ return cpu0->id() < cpu1->id();
+ });
+
+ for (auto cpu : sorted_cpus) {
+ per_cpu_summaries.push_back(machine.summary_for_timespan(summary_interval, cpu));
+
+ char name[16];
+ snprintf(name, sizeof(name), "CPU%d", cpu->id());
+ print_cpu_summary_with_name_and_indent(globals, machine, summary_interval, summary, per_cpu_summaries.back(), name, 1);
+ }
+ }
+
+ //
+ // PER PROCESS summary
+ //
+ if (globals.should_print_process_summaries()) {
+ //
+ // Sort the process list by the user's chosen sort key (with pid as the tiebreaker) so rows display in a stable order.
+ //
+ std::vector<const MachineProcess<SIZE>*> sorted_processes;
+ for (auto& process_summary : summary.process_summaries()) {
+ sorted_processes.emplace_back(process_summary.process());
+ }
+
+ sort_processes(globals, summary, sorted_processes);
+
+ for (auto process : sorted_processes) {
+ ASSERT(summary.process_summary(process), "Unable to find process summary by pointer lookup");
+ if (const ProcessSummary<SIZE>* process_summary = summary.process_summary(process)) {
+ char name[32];
+ snprintf(name, sizeof(name), "%s (%d)%s", process->name(), process->pid(), process->is_exit_by_jetsam() ? " *" : "");
+ print_process_summary_with_name_and_indent(globals, machine, summary_interval, summary, *process_summary, name, 1);
+
+ if (globals.should_print_cpu_summaries()) {
+ //
+ // PER PROCESS per cpu summary
+ //
+ for (auto& cpu_summary : per_cpu_summaries) {
+ if (const ProcessSummary<SIZE>* per_cpu_process_summary = cpu_summary.process_summary(process)) {
+ char name[32];
+ snprintf(name, sizeof(name), "CPU%d %s (%d)", (*cpu_summary.cpus().begin())->id(), process->name(), process->pid());
+ print_process_summary_with_name_and_indent(globals, machine, summary_interval, summary, *per_cpu_process_summary, name, 2);
+ }
+ }
+ }
+
+ if (globals.should_print_thread_summaries()) {
+ //
+ // PER PROCESS per thread summary
+ //
+ std::vector<const MachineThread<SIZE>*> sorted_threads;
+ for (auto& thread_summary : process_summary->thread_summaries()) {
+ sorted_threads.emplace_back(thread_summary.thread());
+ }
+
+ sort_threads(globals, *process_summary, sorted_threads);
+
+ for (auto thread : sorted_threads) {
+ ASSERT(process_summary->thread_summary(thread), "Unable to find thread summary by pointer lookup");
+ if (const ThreadSummary<SIZE>* thread_summary = process_summary->thread_summary(thread)) {
+ char name[32];
+ snprintf(name, sizeof(name), "tid-%llX", (uint64_t)thread->tid());
+ print_thread_summary_with_name_and_indent(globals, machine, summary_interval, summary, *thread_summary, name, 2);
+
+ if (globals.should_print_cpu_summaries()) {
+ //
+ // PER PROCESS per thread per cpu summary
+ //
+ for (auto& cpu_summary : per_cpu_summaries) {
+ if (const ProcessSummary<SIZE>* per_cpu_process_summary = cpu_summary.process_summary(process)) {
+ if (const ThreadSummary<SIZE>* per_cpu_thread_summary = per_cpu_process_summary->thread_summary(thread)) {
+ char name[32];
+ snprintf(name, sizeof(name), "CPU%d tid-%llX", (*cpu_summary.cpus().begin())->id(), (uint64_t)thread->tid());
+ print_thread_summary_with_name_and_indent(globals, machine, summary_interval, summary, *per_cpu_thread_summary, name, 3);
+ }
+ }
+ }
+ }
+
+ }
+ }
+ }
+ }
+ }
+ }
+
+ start += step;
+ }
+}
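+
+//
+// Worked example of the stepping above (hypothetical values): for a machine
+// timespan of [1000, 9000)mabs with --start 2000, --stop 12000 and --step
+// 3000, the start/stop span clips to [2000, 9000), and the loop summarizes
+// [2000, 5000), [5000, 8000), then [8000, 11000) intersected down to
+// [8000, 9000), never stepping past the end of the trace.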
+
+
+template <typename SIZE>
+void print_machine_csv_summary_header(const Globals& globals,
+ const Machine<SIZE>& machine,
+ std::vector<const MachineCPU<SIZE>*>& all_cpus,
+ std::vector<const MachineProcess<SIZE>*>& all_processes,
+ std::unordered_map<const MachineProcess<SIZE>*, std::vector<const MachineThread<SIZE>*>>& all_threads,
+ const char* header_type)
+{
+ // Header is...
+ //
+ // "", header_type
+ //
+ // "", "TOTAL", "CPU0", "CPU1", "proc1", "proc1-tid1", "proc1-tid2", "proc2", etc..
+
+ //
+ // It turns out that calling dprintf is very expensive; we're going to
+ // accumulate to a string buffer and then flush once at the end.
+ //
+ char line[16384]; // Header lines can be big!
+ char* cursor = line;
+ char* line_end = line + sizeof(line);
+
+ //
+ // header + TOTAL
+ //
+ cursor += snprintf(cursor, line_end - cursor, "%s\n\nTIME, TOTAL", header_type);
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // TOTAL per cpu summary
+ //
+ if (globals.should_print_cpu_summaries()) {
+ for (auto cpu : all_cpus) {
+ cursor += snprintf(cursor, line_end - cursor, ", CPU%d", cpu->id());
+ if (cursor > line_end)
+ cursor = line_end;
+ }
+ }
+
+ //
+ // PER PROCESS summary
+ //
+ if (globals.should_print_process_summaries()) {
+ for (auto process : all_processes) {
+ cursor += snprintf(cursor, line_end - cursor, ", %s (%d)", process->name(), process->pid());
+ if (cursor > line_end)
+ cursor = line_end;
+
+ if (globals.should_print_cpu_summaries()) {
+ //
+ // PER PROCESS per cpu summary
+ //
+ for (auto cpu : all_cpus) {
+ cursor += snprintf(cursor, line_end - cursor, ", CPU%d %s (%d)", cpu->id(), process->name(), process->pid());
+ if (cursor > line_end)
+ cursor = line_end;
+ }
+ }
+
+ if (globals.should_print_thread_summaries()) {
+ //
+ // PER PROCESS per thread summary
+ //
+ for (auto thread : all_threads[process]) {
+ cursor += snprintf(cursor, line_end - cursor, ", tid-%llX", (uint64_t)thread->tid());
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // PER PROCESS per thread per cpu summary
+ //
+ for (auto cpu : all_cpus) {
+ cursor += snprintf(cursor, line_end - cursor, ", CPU%d tid-%llX", cpu->id(), (uint64_t)thread->tid());
+ if (cursor > line_end)
+ cursor = line_end;
+ }
+ }
+ }
+ }
+ }
+
+ dprintf(globals.output_fd(), "%s\n", line);
+}
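+
+//
+// For example (hypothetical values), with two CPUs and a single two-thread
+// process, the TIME header row emitted above (after the "Actual CPU/ms"
+// banner) and one data row from the line printers below might read:
+//
+// TIME, TOTAL, CPU0, CPU1, proc1 (12), CPU0 proc1 (12), CPU1 proc1 (12), tid-1A, CPU0 tid-1A, CPU1 tid-1A, tid-1B, CPU0 tid-1B, CPU1 tid-1B
+// 0.00, 12.34, 6.10, 6.24, 5.02, 2.50, 2.52, 3.00, 1.40, 1.60, 2.02, 1.10, 0.92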
+
+template <typename SIZE>
+void print_machine_csv_summary_actual_cpu_ms_line(const Globals& globals,
+ const Machine<SIZE>& machine,
+ AbsInterval summary_interval,
+ std::vector<const MachineCPU<SIZE>*>& all_cpus,
+ std::vector<const MachineProcess<SIZE>*>& all_processes,
+ std::unordered_map<const MachineProcess<SIZE>*, std::vector<const MachineThread<SIZE>*>>& all_threads,
+ CPUSummary<SIZE>& master_summary,
+ std::vector<CPUSummary<SIZE>>& per_cpu_summaries)
+{
+ char line[16384]; // Data lines can be big!
+ char* cursor = line;
+ char* line_end = line + sizeof(line);
+
+ //
+ // Time
+ //
+
+ if (globals.should_print_mach_absolute_timestamps()) {
+ if (globals.beginning_of_time().value() == 0)
+ cursor += snprintf(cursor, line_end - cursor, "%llX", (summary_interval.location() - globals.beginning_of_time()).value());
+ else
+ cursor += snprintf(cursor, line_end - cursor, "%llu", (summary_interval.location() - globals.beginning_of_time()).value());
+ } else {
+ NanoTime ntime = (summary_interval.location() - globals.beginning_of_time()).nano_time(globals.timebase());
+ cursor += snprintf(cursor, line_end - cursor, "%3.2f", (double)ntime.value() / 1000000.0);
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // TOTAL
+ //
+ cursor += snprintf(cursor, line_end - cursor, ", %3.2f",
+ (double)master_summary.total_run_time().nano_time(globals.timebase()).value() / 1000000.0);
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // TOTAL per cpu summary
+ //
+ if (globals.should_print_cpu_summaries()) {
+ for (auto& cpu_summary : per_cpu_summaries) {
+ cursor += snprintf(cursor, line_end - cursor, ", %3.2f",
+ (double)cpu_summary.total_run_time().nano_time(globals.timebase()).value() / 1000000.0);
+
+ if (cursor > line_end)
+ cursor = line_end;
+ }
+ }
+
+ //
+ // PER PROCESS summary
+ //
+ if (globals.should_print_process_summaries()) {
+ for (auto process : all_processes) {
+ const ProcessSummary<SIZE>* process_summary;
+
+ // Not all summaries will have a matching process entry!
+ if ((process_summary = master_summary.process_summary(process))) {
+ cursor += snprintf(cursor, line_end - cursor, ", %3.2f",
+ (double)process_summary->total_run_time().nano_time(globals.timebase()).value() / 1000000.0);
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, ",");
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ if (globals.should_print_cpu_summaries()) {
+ //
+ // PER PROCESS per cpu summary
+ //
+ for (auto& cpu_summary : per_cpu_summaries) {
+ if (const auto& process_summary = cpu_summary.process_summary(process)) {
+ cursor += snprintf(cursor, line_end - cursor, ", %3.2f",
+ (double)process_summary->total_run_time().nano_time(globals.timebase()).value() / 1000000.0);
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, ",");
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+ }
+ }
+
+ if (globals.should_print_thread_summaries()) {
+ //
+ // PER PROCESS per thread summary
+ //
+
+ //
+ // We again have to do a bit more work; sometimes a process is missing
+ // and we still need to print empty slots for its threads.
+ //
+
+ for (auto thread : all_threads[process]) {
+ if (process_summary) {
+ if (const auto& thread_summary = process_summary->thread_summary(thread)) {
+ cursor += snprintf(cursor, line_end - cursor, ", %3.2f",
+ (double)thread_summary->total_run_time().nano_time(globals.timebase()).value() / 1000000.0);
+ } else
+ cursor += snprintf(cursor, line_end - cursor, ",");
+ } else
+ cursor += snprintf(cursor, line_end - cursor, ",");
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+
+ if (globals.should_print_cpu_summaries()) {
+ //
+ // PER PROCESS per thread per cpu summary
+ //
+ for (auto& cpu_summary : per_cpu_summaries) {
+ if (const auto& per_cpu_process_summary = cpu_summary.process_summary(process)) {
+ if (const auto& per_cpu_thread_summary = per_cpu_process_summary->thread_summary(thread)) {
+ cursor += snprintf(cursor, line_end - cursor, ", %3.2f",
+ (double)per_cpu_thread_summary->total_run_time().nano_time(globals.timebase()).value() / 1000000.0);
+ } else
+ cursor += snprintf(cursor, line_end - cursor, ",");
+ } else
+ cursor += snprintf(cursor, line_end - cursor, ",");
+
+ if (cursor > line_end)
+ cursor = line_end;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ dprintf(globals.output_fd(), "%s\n", line);
+}
+
+template <typename SIZE>
+void print_machine_csv_summary_wanted_cpu_ms_line(const Globals& globals,
+ const Machine<SIZE>& machine,
+ AbsInterval summary_interval,
+ std::vector<const MachineCPU<SIZE>*>& all_cpus,
+ std::vector<const MachineProcess<SIZE>*>& all_processes,
+ std::unordered_map<const MachineProcess<SIZE>*, std::vector<const MachineThread<SIZE>*>>& all_threads,
+ CPUSummary<SIZE>& master_summary,
+ std::vector<CPUSummary<SIZE>>& per_cpu_summaries)
+{
+ char line[16384]; // Data lines can be big!
+ char* cursor = line;
+ char* line_end = line + sizeof(line);
+
+ //
+ // Time
+ //
+
+ if (globals.should_print_mach_absolute_timestamps()) {
+ if (globals.beginning_of_time().value() == 0)
+ cursor += snprintf(cursor, line_end - cursor, "%llX", (summary_interval.location() - globals.beginning_of_time()).value());
+ else
+ cursor += snprintf(cursor, line_end - cursor, "%llu", (summary_interval.location() - globals.beginning_of_time()).value());
+ } else {
+ NanoTime ntime = (summary_interval.location() - globals.beginning_of_time()).nano_time(globals.timebase());
+ cursor += snprintf(cursor, line_end - cursor, "%3.2f", (double)ntime.value() / 1000000.0);
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // TOTAL
+ //
+ cursor += snprintf(cursor, line_end - cursor, ", %3.2f",
+ (double)master_summary.total_future_run_time().nano_time(globals.timebase()).value() / 1000000.0);
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ //
+ // TOTAL per cpu summary
+ //
+ if (globals.should_print_cpu_summaries()) {
+ for (auto& cpu_summary : per_cpu_summaries) {
+ cursor += snprintf(cursor, line_end - cursor, ", %3.2f",
+ (double)cpu_summary.total_future_run_time().nano_time(globals.timebase()).value() / 1000000.0);
+
+ if (cursor > line_end)
+ cursor = line_end;
+ }
+ }
+
+ //
+ // PER PROCESS summary
+ //
+ if (globals.should_print_process_summaries()) {
+ for (auto process : all_processes) {
+ const ProcessSummary<SIZE>* process_summary;
+
+ // Not all summaries will have a matching process entry!
+ if ((process_summary = master_summary.process_summary(process))) {
+ cursor += snprintf(cursor, line_end - cursor, ", %3.2f",
+ (double)process_summary->total_future_run_time().nano_time(globals.timebase()).value() / 1000000.0);
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, ",");
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+ if (globals.should_print_cpu_summaries()) {
+ //
+ // PER PROCESS per cpu summary
+ //
+ for (auto& cpu_summary : per_cpu_summaries) {
+ if (const auto& process_summary = cpu_summary.process_summary(process)) {
+ cursor += snprintf(cursor, line_end - cursor, ", %3.2f",
+ (double)process_summary->total_future_run_time().nano_time(globals.timebase()).value() / 1000000.0);
+ } else {
+ cursor += snprintf(cursor, line_end - cursor, ",");
+ }
+
+ if (cursor > line_end)
+ cursor = line_end;
+ }
+ }
+
+ if (globals.should_print_thread_summaries()) {
+ //
+ // PER PROCESS per thread summary
+ //
+
+ //
+ // We again have to do a bit more work; sometimes a process is missing
+ // and we still need to print empty slots for its threads.
+ //
+
+ for (auto thread : all_threads[process]) {
+ if (process_summary) {
+ if (const auto& thread_summary = process_summary->thread_summary(thread)) {
+ cursor += snprintf(cursor, line_end - cursor, ", %3.2f",
+ (double)thread_summary->total_future_run_time().nano_time(globals.timebase()).value() / 1000000.0);
+ } else
+ cursor += snprintf(cursor, line_end - cursor, ",");
+ } else
+ cursor += snprintf(cursor, line_end - cursor, ",");
+
+ if (cursor > line_end)
+ cursor = line_end;
+
+
+ if (globals.should_print_cpu_summaries()) {
+ //
+ // PER PROCESS per thread per cpu summary
+ //
+ for (auto& cpu_summary : per_cpu_summaries) {
+ if (const auto& per_cpu_process_summary = cpu_summary.process_summary(process)) {
+ if (const auto& per_cpu_thread_summary = per_cpu_process_summary->thread_summary(thread)) {
+ cursor += snprintf(cursor, line_end - cursor, ", %3.2f",
+ (double)per_cpu_thread_summary->total_future_run_time().nano_time(globals.timebase()).value() / 1000000.0);
+ } else
+ cursor += snprintf(cursor, line_end - cursor, ",");
+ } else
+ cursor += snprintf(cursor, line_end - cursor, ",");
+
+ if (cursor > line_end)
+ cursor = line_end;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ dprintf(globals.output_fd(), "%s\n", line);
+}
+
+template <typename SIZE>
+void print_machine_csv_summary(const Globals& globals, const Machine<SIZE>& machine) {
+ AbsInterval machine_timespan = machine.timespan();
+
+ AbsTime start(globals.summary_start(machine_timespan));
+ AbsTime stop(globals.summary_stop(machine_timespan));
+ AbsTime step(globals.summary_step(machine_timespan));
+
+ AbsInterval start_stop_timespan(start, stop - start);
+ AbsInterval clipped_start_stop_timespan(start_stop_timespan.intersection_range(machine_timespan));
+
+ start = clipped_start_stop_timespan.location();
+ stop = clipped_start_stop_timespan.max();
+
+ //
+ // While printing a csv summary, we need to use the entire set of processes/threads/cpus
+ // from the range, even though they may not run in each sample. We first gather a summary
+ // for the entire time, to get the master list.
+ //
+ CPUSummary<SIZE> start_stop_summary = machine.summary_for_timespan(clipped_start_stop_timespan, NULL);
+
+ std::vector<const MachineProcess<SIZE>*> all_processes;
+ std::vector<const MachineCPU<SIZE>*> all_cpus;
+ std::unordered_map<const MachineProcess<SIZE>*, std::vector<const MachineThread<SIZE>*>> all_threads;
+
+ //
+ // gather all processes
+ //
+ {
+ for (auto& process_summary : start_stop_summary.process_summaries()) {
+ all_processes.emplace_back(process_summary.process());
+ }
+
+ sort_processes(globals, start_stop_summary, all_processes);
+ }
+
+ //
+ // gather all cpus
+ //
+ if (globals.should_print_cpu_summaries()) {
+ for (auto& cpu : start_stop_summary.cpus()) {
+ all_cpus.emplace_back(cpu);
+ }
+
+ std::sort(all_cpus.begin(), all_cpus.end(), [](MachineCPU<SIZE> const* cpu0, MachineCPU<SIZE> const* cpu1) -> bool {
+ return cpu0->id() < cpu1->id();
+ });
+ }
+
+ //
+ // gather all threads
+ //
+ if (globals.should_print_thread_summaries()) {
+ for (auto process : all_processes) {
+ ASSERT(start_stop_summary.process_summary(process), "Unable to find process summary by pointer lookup");
+ if (const ProcessSummary<SIZE>* process_summary = start_stop_summary.process_summary(process)) {
+ //
+ // PER PROCESS per thread summary
+ //
+ auto& sorted_threads = all_threads[process];
+ for (auto& thread_summary : process_summary->thread_summaries()) {
+ sorted_threads.emplace_back(thread_summary.thread());
+ }
+
+ sort_threads(globals, *process_summary, sorted_threads);
+ }
+ }
+ }
+
+ print_machine_csv_summary_header(globals, machine, all_cpus, all_processes, all_threads, "Actual CPU/ms");
+
+ while (start < stop) {
+ AbsInterval base_interval(start, step);
+ AbsInterval summary_interval(base_interval.intersection_range(clipped_start_stop_timespan));
+
+ //
+ // TOTAL summary
+ //
+ CPUSummary<SIZE> summary = machine.summary_for_timespan(summary_interval, NULL);
+
+ //
+ // Per CPU summaries...
+ //
+ std::vector<CPUSummary<SIZE>> per_cpu_summaries;
+ if (globals.should_print_cpu_summaries()) {
+ for (auto cpu : all_cpus) {
+ per_cpu_summaries.push_back(machine.summary_for_timespan(summary_interval, cpu));
+ }
+ }
+
+ print_machine_csv_summary_actual_cpu_ms_line(globals, machine, summary_interval, all_cpus, all_processes, all_threads, summary, per_cpu_summaries);
+
+ start += step;
+ }
+
+
+ //
+ // Now print Wanted CPU/ms
+ //
+ start = clipped_start_stop_timespan.location();
+ stop = clipped_start_stop_timespan.max();
+
+ dprintf(globals.output_fd(), "\n");
+ print_machine_csv_summary_header(globals, machine, all_cpus, all_processes, all_threads, "Wanted CPU/ms");
+
+ while (start < stop) {
+ AbsInterval base_interval(start, step);
+ AbsInterval summary_interval(base_interval.intersection_range(clipped_start_stop_timespan));
+
+ //
+ // TOTAL summary
+ //
+ CPUSummary<SIZE> summary = machine.summary_for_timespan(summary_interval, NULL);
+
+ //
+ // Per CPU summaries...
+ //
+ std::vector<CPUSummary<SIZE>> per_cpu_summaries;
+ if (globals.should_print_cpu_summaries()) {
+ for (auto cpu : all_cpus) {
+ per_cpu_summaries.push_back(machine.summary_for_timespan(summary_interval, cpu));
+ }
+ }
+
+ print_machine_csv_summary_wanted_cpu_ms_line(globals, machine, summary_interval, all_cpus, all_processes, all_threads, summary, per_cpu_summaries);
+
+ start += step;
+ }
+}
+
+template <typename SIZE>
+void print_process_start_stop_timestamps(const Globals& globals, const Machine<SIZE>& machine) {
+ for (auto process : machine.processes()) {
+
+ //
+ // Skip processes with no events
+ //
+
+ if (process->timespan().length() == 0) {
+ // Skip processes with nothing in them.
+ // The assert may be too strong.
+ ASSERT(process->is_created_by_thread_map(), "Expected a zero length process to be from the thread map");
+ continue;
+ }
+
+ //
+ // Don't print the kernel process; it will occupy the entire trace
+ //
+ if (process->is_kernel())
+ continue;
+
+ //
+ // Time
+ //
+ char time_buffer[64];
+ if (globals.beginning_of_time().value() == 0)
+ snprintf(time_buffer, sizeof(time_buffer), "%llumabs", process->timespan().location().value());
+ else
+ snprintf(time_buffer, sizeof(time_buffer), "%llumabs", (process->timespan().location() - globals.beginning_of_time()).value());
+
+ //
+ // End time
+ //
+ char end_time_buffer[64];
+ if (globals.beginning_of_time().value() == 0)
+ snprintf(end_time_buffer, sizeof(end_time_buffer), "%llumabs", process->timespan().max().value());
+ else
+ snprintf(end_time_buffer, sizeof(end_time_buffer), "%llumabs", (process->timespan().max() - globals.beginning_of_time()).value());
+
+ const char* create_reason;
+ if (process->is_created_by_thread_map())
+ create_reason = "Threadmap Entry";
+ else if (process->is_created_by_previous_machine_state())
+ create_reason = "Prev Machine State";
+ else if (process->is_created_by_fork_exec())
+ create_reason = "ForkExec";
+ else if (process->is_created_by_exec())
+ create_reason = "Exec";
+ else
+ create_reason = "???";
+
+ if (globals.is_verbose()) {
+ printf(" %30s (%6d) --start %-16s --stop %-16s\tCreated by %-18s %s\n",
+ process->name(),
+ process->pid(),
+ time_buffer,
+ end_time_buffer,
+ create_reason,
+ process->is_trace_terminated() ? "EXITED" : "");
+ } else {
+ printf(" %30s (%6d) --start %s --stop %s\n",
+ process->name(),
+ process->pid(),
+ time_buffer,
+ end_time_buffer);
+ }
+ }
+}
+
+template <typename SIZE>
+void print_verbose_machine_info(const Globals& globals, const Machine<SIZE>& machine, uint32_t threadmap_count, uint32_t cpumap_count) {
+ dprintf(globals.output_fd(), "\tEvent data is %s, and appears to be from %s\n", SIZE::is_64_bit ? "K64" : "K32", machine.is_ios() ? "iOS" : "OSX");
+ dprintf(globals.output_fd(), "\tUsing a%stimebase of %d/%d\n", globals.is_timebase_set() ? " [User Set] " : " ", globals.timebase().numer, globals.timebase().denom);
+
+ if (threadmap_count) {
+ dprintf(globals.output_fd(), "\tA threadmap is present, and contains %u entries\n", threadmap_count);
+ } else {
+ dprintf(globals.output_fd(), "\tA threadmap is not present");
+ }
+
+ if (cpumap_count) {
+ dprintf(globals.output_fd(), "\tA cpumap is present, and contains %u entries\n", cpumap_count);
+
+ } else {
+ dprintf(globals.output_fd(), "\tA cpumap is not present, the system provided a default with %u cpus and %u iops\n", globals.cpu_count(), globals.iop_count());
+ }
+
+ dprintf(globals.output_fd(), "\tFound %u active cpus in trace data\n", machine.active_cpus());
+
+ if (globals.is_summary_start_set()) {
+ AbsInterval machine_timespan = machine.timespan();
+
+ if (globals.should_print_mach_absolute_timestamps()) {
+ if (globals.beginning_of_time().value() == 0)
+ dprintf(globals.output_fd(), "\tUsing a --start value of 0x%llXmabs (raw)\n", globals.summary_start(machine_timespan).value());
+ else
+ dprintf(globals.output_fd(), "\tUsing a --start value of %llumabs\n", (globals.summary_start(machine_timespan) - machine_timespan.location()).value());
+ } else {
+ NanoTime ntime = (globals.summary_start(machine_timespan) - machine_timespan.location()).nano_time(globals.timebase());
+ dprintf(globals.output_fd(), "\tUsing a --start value of %3.2fms\n", (double)ntime.value() / 1000000.0);
+ }
+ }
+
+ if (globals.is_summary_stop_set()) {
+ AbsInterval machine_timespan = machine.timespan();
+
+ if (globals.should_print_mach_absolute_timestamps()) {
+ if (globals.beginning_of_time().value() == 0)
+ dprintf(globals.output_fd(), "\tUsing a --stop value of 0x%llXmabs (raw)\n", globals.summary_stop(machine_timespan).value());
+ else
+ dprintf(globals.output_fd(), "\tUsing a --stop value of %llumabs\n", (globals.summary_stop(machine_timespan) - machine_timespan.location()).value());
+ } else {
+ NanoTime ntime = (globals.summary_stop(machine_timespan) - machine_timespan.location()).nano_time(globals.timebase());
+ dprintf(globals.output_fd(), "\tUsing a --stop value of %3.2fms\n", (double)ntime.value() / 1000000.0);
+ }
+ }
+
+ if (globals.is_summary_step_set()) {
+ AbsInterval machine_timespan = machine.timespan();
+
+ if (globals.should_print_mach_absolute_timestamps()) {
+ if (globals.beginning_of_time().value() == 0)
+ dprintf(globals.output_fd(), "\tUsing a --step value of 0x%llXmabs (raw)\n", globals.summary_step(machine_timespan).value());
+ else
+ dprintf(globals.output_fd(), "\tUsing a --step value of %llumabs\n", globals.summary_step(machine_timespan).value());
+ } else {
+ NanoTime ntime = globals.summary_step(machine_timespan).nano_time(globals.timebase());
+ dprintf(globals.output_fd(), "\tUsing a --step value of %3.2fms\n", (double)ntime.value() / 1000000.0);
+ }
+ }
+}
+
+#endif
--- /dev/null
+//
+// TraceFileAction.cpp
+// kdprof
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+#if 0
+template <typename SIZE>
+static void execute_arch_specific(Globals& globals, std::string path)
+{
+ //
+ // Trace file looks roughly like:
+ //
+ // RAW_header
+ // threadmap[thread_count]
+ // wasted-space-to-align-to-next-4096-byte-boundary
+ // KDEvents[]
+ //
+
+ MappedFile trace_data(path.c_str());
+ if (TraceDataHeader<SIZE>* header = reinterpret_cast<TraceDataHeader<SIZE>*>(trace_data.address())) {
+
+ KDThreadMapEntry<SIZE>* threadmap = NULL;
+ uint32_t threadmap_count = 0;
+ KDCPUMapEntry* cpumap = NULL;
+ uint32_t cpumap_count = 0;
+ KDEvent<SIZE>* events = NULL;
+
+ if (header->version() != RAW_VERSION1) {
+ // If the header is not a RAW_VERSION1, we must assume it is a
+ // RAW_VERSION0. The difficulty here is that RAW_VERSION0 consists
+ // of 4 bytes, which are the thread_count. We can't do much
+ // sanity checking. The first four bytes are already read into
+ // the existing header, reuse them. We must also reset the file
+ // offset.
+
+ threadmap_count = header->version();
+ threadmap = reinterpret_cast<KDThreadMapEntry<SIZE>*>(trace_data.address() + 4);
+
+ // Event data starts immediately following the threadmap
+ size_t offset = 4 + threadmap_count * sizeof(KDThreadMapEntry<SIZE>);
+ events = reinterpret_cast<KDEvent<SIZE>*>(trace_data.address() + offset);
+ } else {
+ //
+ // RAW_VERSION1
+ //
+ threadmap_count = header->thread_count();
+ threadmap = reinterpret_cast<KDThreadMapEntry<SIZE>*>(trace_data.address() + sizeof(TraceDataHeader<SIZE>));
+
+ size_t threadmap_size_in_bytes = threadmap_count * sizeof(KDThreadMapEntry<SIZE>);
+ size_t offset_to_event_data = (sizeof(TraceDataHeader<SIZE>) + threadmap_size_in_bytes + 4095) & ~4095;
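+ // (x + 4095) & ~4095 rounds x up to the next 4096 byte boundary,
+ // e.g. 5000 -> 8192 and 4096 -> 4096, so event data is page aligned.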
+ size_t offset_to_cpumap_data = sizeof(TraceDataHeader<SIZE>) + threadmap_size_in_bytes;
+ size_t cpumap_bytes = offset_to_event_data - offset_to_cpumap_data;
+
+ //
+ // In a RAW_VERSION1, there *may* be a cpumap.
+ // If it exists, it will be between the header and the page aligned offset
+ // that event data begins at.
+ //
+ if (cpumap_bytes > sizeof(kd_cpumap_header) + sizeof(kd_cpumap)) {
+ kd_cpumap_header* cpumap_header = reinterpret_cast<kd_cpumap_header*>(trace_data.address() + offset_to_cpumap_data);
+ if (cpumap_header->version_no == RAW_VERSION1) {
+ cpumap = (KDCPUMapEntry*)&cpumap_header[1];
+ cpumap_count = cpumap_header->cpu_count;
+ }
+ }
+
+ // Event data starts at the next PAGE alignment boundary.
+ //
+ // Hmm, this could be pretty awful in iOS...
+ //
+ // Kernel page size is 4KB. Userspace page size is 16KB on 64-bit.
+ // Kernel writes the data. Unless the kernel call fails, then userspace writes the data. Blech.
+ events = reinterpret_cast<KDEvent<SIZE>*>(trace_data.address() + offset_to_event_data);
+ }
+
+ uintptr_t event_count = (uintptr_t)trace_data.size() - (reinterpret_cast<uintptr_t>(events) - reinterpret_cast<uintptr_t>(trace_data.address()));
+ if (event_count % sizeof(KDEvent<SIZE>) != 0) {
+ // We're probably looking at the wrong k32/k64. Throw and try the other size.
+ THROW("Bytes in file does not match an even multiple of Event struct");
+ }
+ event_count /= sizeof(KDEvent<SIZE>);
+
+ std::vector<KDCPUMapEntry> default_cpumap;
+
+ if (cpumap == NULL || cpumap_count == 0) {
+ // No cpumap found, we need to fake one up using the default values.
+ for (uint32_t i=0; i<globals.cpu_count(); ++i) {
+ default_cpumap.emplace_back(i, 0, "AP-???");
+ }
+ uint32_t iop_limit = globals.cpu_count() + globals.iop_count();
+ for (uint32_t i=globals.cpu_count(); i<iop_limit; ++i) {
+ default_cpumap.emplace_back(i, KDBG_CPUMAP_IS_IOP, "IOP-???");
+ }
+
+ cpumap = default_cpumap.data();
+ cpumap_count = (uint32_t)default_cpumap.size();
+ }
+
+ // IOPs have been producing .trace files with out-of-order events.
+ // This is a hack fix to work around that. It costs a full copy of the data!
+ MemoryBuffer<KDEvent<SIZE>> presorted_events;
+ if (globals.should_presort_events() && event_count) {
+ presorted_events.set_capacity(event_count);
+ memcpy(presorted_events.data(), events, event_count * sizeof(KDEvent<SIZE>));
+ events = presorted_events.data();
+ std::sort(events, events + event_count, [](KDEvent<SIZE> const& p0, KDEvent<SIZE> const& p1) -> bool {
+ return p0.timestamp() < p1.timestamp();
+ });
+ }
+
+ Machine<SIZE> machine(cpumap, cpumap_count, threadmap, threadmap_count, events, event_count);
+
+ if (!machine.lost_events()) {
+ if (globals.should_zero_base_timestamps() && event_count) {
+ globals.set_beginning_of_time(events[0].timestamp());
+ } else {
+ globals.set_beginning_of_time(AbsTime(0));
+ }
+
+ if (!globals.is_timebase_set()) {
+ if (machine.is_ios()) {
+ globals.set_timebase({ 125, 3 }, false);
+ } else {
+ globals.set_timebase({ 1, 1 }, false);
+ }
+ }
+
+ if (globals.is_verbose()) {
+ dprintf(globals.output_fd(), "\n%s\n", path.c_str());
+ print_verbose_machine_info(globals, machine, threadmap_count, (default_cpumap.empty()) ? cpumap_count : 0);
+ }
+
+ if (globals.should_print_events()) {
+ print_machine_events(globals, machine);
+ }
+
+ if (globals.should_print_summary()) {
+ print_machine_summary(globals, machine);
+ }
+
+ if (globals.should_print_csv_summary()) {
+ print_machine_csv_summary(globals, machine);
+ }
+
+ if (globals.should_print_process_start_stop_timestamps()) {
+ print_process_start_stop_timestamps(globals, machine);
+ }
+ } else {
+ log_msg(ASL_LEVEL_WARNING, "The trace data indicates that events were lost, the file cannot be processed\n");
+ }
+ } else {
+ log_msg(ASL_LEVEL_ERR, "Unable to read from %s\n", path.c_str());
+ exit(1);
+ }
+}
+
+void TraceFileAction::execute(Globals& globals) {
+ if (globals.is_kernel_size_set()) {
+ try {
+ if (globals.kernel_size() == KernelSize::k32)
+ execute_arch_specific<Kernel32>(globals, _path);
+ else
+ execute_arch_specific<Kernel64>(globals, _path);
+ } catch (Exception& e) {
+ log_msg(ASL_LEVEL_ERR, "An exception was raised: %s", e.what());
+ log_msg(ASL_LEVEL_ERR, "An explicit kernel size was set, you may want to try not forcing the size to a single value\n");
+ log_msg(ASL_LEVEL_ERR, "You may also want to check the number of cpus and iops configured if the file is from a device and does not have a cpumap\n");
+ }
+ } else {
+ // Try em both!
+ try {
+ execute_arch_specific<Kernel64>(globals, _path);
+ } catch (Exception& e) {
+ execute_arch_specific<Kernel32>(globals, _path);
+ }
+ }
+}
+
+#endif
+
+template <typename SIZE>
+static void execute_arch_specific(Globals& globals, TraceFile& file, std::string& path)
+{
+ Machine<SIZE> machine(file);
+
+ if (!machine.lost_events()) {
+ if (globals.should_zero_base_timestamps() && machine.event_count()) {
+ globals.set_beginning_of_time(machine.events()[0].timestamp());
+ } else {
+ globals.set_beginning_of_time(AbsTime(0));
+ }
+
+ if (!globals.is_timebase_set()) {
+ if (machine.is_ios()) {
+ globals.set_timebase({ 125, 3 }, false);
+ } else {
+ globals.set_timebase({ 1, 1 }, false);
+ }
+ }
+
+ if (globals.is_verbose()) {
+ dprintf(globals.output_fd(), "\n%s\n", path.c_str());
+ print_verbose_machine_info(globals, machine, file.threadmap_count(), file.cpumap_count());
+ }
+
+ if (globals.should_print_events()) {
+ print_machine_events(globals, machine);
+ }
+
+ if (globals.should_print_summary()) {
+ print_machine_summary(globals, machine);
+ }
+
+ if (globals.should_print_csv_summary()) {
+ print_machine_csv_summary(globals, machine);
+ }
+
+ if (globals.should_print_process_start_stop_timestamps()) {
+ print_process_start_stop_timestamps(globals, machine);
+ }
+ } else {
+ log_msg(ASL_LEVEL_WARNING, "The trace data indicates that events were lost, the file cannot be processed\n");
+ }
+}
+
+void TraceFileAction::execute(Globals& globals) {
+ TraceFile file(_path.c_str(), globals.should_presort_events(), globals.cpu_count(), globals.iop_count());
+ if (globals.is_kernel_size_set()) {
+ try {
+ if (globals.kernel_size() == KernelSize::k32)
+ execute_arch_specific<Kernel32>(globals, file, _path);
+ else
+ execute_arch_specific<Kernel64>(globals, file, _path);
+ } catch (Exception& e) {
+ log_msg(ASL_LEVEL_ERR, "An exception was raised: %s", e.what());
+ log_msg(ASL_LEVEL_ERR, "An explicit kernel size was set, you may want to try not forcing the size to a single value\n");
+ log_msg(ASL_LEVEL_ERR, "You may also want to check the number of cpus and iops configured if the file is from a device and does not have a cpumap\n");
+ }
+ } else {
+ if (file.is_valid()) {
+ if (file.is_64_bit()) {
+ execute_arch_specific<Kernel64>(globals, file, _path);
+ } else {
+ execute_arch_specific<Kernel32>(globals, file, _path);
+ }
+ } else {
+ if (file.mmap_failed()) {
+ log_msg(ASL_LEVEL_ERR, "Unable to mmap %s, it may exceed this devices memory limits\n", _path.c_str());
+ } else {
+ log_msg(ASL_LEVEL_ERR, "%s does not appear to be a valid trace file\n", _path.c_str());
+ }
+ }
+ }
+}
+
--- /dev/null
+//
+// TraceFileAction.h
+// kdprof
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef __kdprof__TraceFileAction__
+#define __kdprof__TraceFileAction__
+
+class TraceFileAction : public Action {
+ protected:
+ std::string _path;
+
+ public:
+ TraceFileAction(const char* path) : _path(path) {
+ ASSERT(Path::is_file(_path, true), "File must exist");
+ }
+
+ virtual void execute(Globals& globals);
+};
+
+#endif /* defined(__kdprof__TraceFileAction__) */
--- /dev/null
+//
+// global.h
+// kdprof
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#ifndef kdprof_global_h
+#define kdprof_global_h
+
+#include <CPPUtil/CPPUtil.h>
+
+using namespace util;
+
+#include "KDebug.h"
+
+#include <dispatch/dispatch.h>
+#include <libkern/OSAtomic.h>
+
+#include <vector>
+#include <unordered_map>
+#include <thread>
+
+#include "Globals.hpp"
+#include "EventPrinting.hpp"
+#include "SummaryPrinting.hpp"
+#include "Action.hpp"
+#include "InitializeAction.hpp"
+#include "TraceFileAction.hpp"
+#include "RemoveAction.hpp"
+#include "NoWrapAction.hpp"
+#include "PrintStateAction.hpp"
+#include "EnableAction.hpp"
+#include "DisableAction.hpp"
+#include "CollectAction.hpp"
+#include "SleepAction.hpp"
+#include "SaveTraceAction.hpp"
+
+__attribute__((noreturn)) void usage(const char *);
+
+#endif
--- /dev/null
+.Dd March 8, 2014
+.Dt kdprof 1
+.Os Darwin
+.Sh NAME
+.Nm kdprof
+.Nd kdebug profiler and event printer
+.Sh SYNOPSIS
+.Nm
+.Op Fl h, -help
+.Sh DESCRIPTION
+The help output for kdprof is more recent than this man page. Please run
+.Nm
+--help
+.Sh SEE ALSO
+.Xr lsmp 1 ,
+.Xr msa 1 ,
+.Xr trace 1
--- /dev/null
+//
+// main.cpp
+// kdprof
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2013 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+// Generated by agvtool
+extern const unsigned char __kdprofVersionString[];
+
+bool shouldPrintVersion = false;
+
+__attribute__((noreturn)) void usage(const char *errorMsg) {
+ if (errorMsg) {
+ fprintf(stderr, "%s\n", errorMsg);
+ exit(1);
+ }
+
+ const char* BOLD = "\033[1m";
+ const char* UNBOLD = "\033[0m";
+
+ // printf("01234567890123456789012345678901234567890123456789012345678901234567890123456789\n");
+ printf("kdprof [options] [path/trace.codes ...] [path/data.trace ...]\n\n");
+ printf(" GLOBAL OPTIONS\n\n");
+ printf(" -h, --help Print this message\n");
+ printf(" --version Print version info\n");
+ printf(" -v, --verbose Print additional information\n");
+ printf(" --presort-events Sort events before processing. IOP workaround\n");
+ printf(" -N, --no-default-codes Do not read the default trace codes\n");
+ printf("\n");
+ printf(" LIVE TRACING OPTIONS\n\n");
+ printf(" -i, --intialize [#] Initialize the trace buffer, with opt buf count\n");
+ printf(" -r, --remove Remove the trace buffer\n");
+ printf(" -n, --no-wrap Do not allow the trace buffer to wrap\n");
+ printf(" -g, --print-kdbg-state Print the current kdbg state\n");
+ printf(" -e, --enable Enable collection of events\n");
+ printf(" -d, --disable Disable collection of events\n");
+ printf(" -t, --collect Collect and print the trace buffer\n");
+ printf(" --save path Collect and save the trace buffer to path\n");
+ printf(" -S, --sleep # Wait for a specified interval\n");
+ printf("\n");
+ printf(" OUTPUT OPTIONS\n\n");
+ printf(" -o, --output path Print output to path\n");
+ printf(" --summary Print calculated data (default true)\n");
+ printf(" --no-summary Do not print calculated data\n");
+ printf(" --csv Print a csv formatted summary for use in numbers\n");
+ printf(" --no-csv Do not print a csv formatted summary\n");
+ printf(" --step # Step by # time units in summary output\n");
+ printf(" --process Include per-process summary data\n");
+ printf(" --no-process Do not include per-process summary data\n");
+ printf(" --thread Include per-thread summary data\n");
+ printf(" --cpu Include per-cpu summary data\n");
+ printf(" --sort-by-cpu Sort process/thread lists by cpu usage\n");
+ printf(" --sort-by-vmfault Sort process/thread lists by vmfault time\n");
+ printf(" --sort-by-io-wait Sort process/thread lists by IO time\n");
+ printf(" --sort-by-io-ops Sort process/thread lists by # IO Ops\n");
+ printf(" --sort-by-io-size Sort process/thread lists by IO bytes\n");
+ printf(" --sort-by-pid Sort process/thread lists by pid/tid\n");
+ printf(" --events Enable individual event printing\n");
+ printf(" --no-events Disable individual event printing\n");
+ printf(" --raw-timestamps Print timestamps as raw values, not deltas\n");
+ printf(" --mach-absolute-time Print timestamps in mach absolute time\n");
+ printf(" --event-index Print the index of each event\n");
+ printf(" --no-codes Print hex trace codes, not symbolic\n");
+ printf(" --process-start-stop Print start/stop information about each process\n");
+ printf("\n");
+ printf(" DEPRECATED OPTIONS\n\n");
+ printf(" -X, --k32 Trace data is from a 32 bit kernel\n");
+ printf(" --k64 Trace data is from a 64 bit kernel\n");
+ printf(" --codes path read trace codes from path\n");
+ printf(" --trace path read trace data from path\n");
+ printf(" --ios Treat data as coming from an iOS device\n");
+ printf(" --timebase #/# Set the mach_timebase\n");
+ printf(" --cpus # Set the # of cpus.\n");
+ printf(" --iops # Set the # of iops.\n");
+ printf("\n");
+ printf(" OPTION ARGUMENTS\n\n");
+ printf(" All arguments that specifiy a time value may use the following postfixes\n\n");
+ printf(" s Seconds\n");
+ printf(" ms Milliseconds\n");
+ printf(" us Microseconds\n");
+ printf(" ns Nanoseconds\n");
+ printf(" mabs Mach Absolute Time Units\n");
+ printf("\n");
+ // printf("01234567890123456789012345678901234567890123456789012345678901234567890123456789\n");
+ printf(" USAGE\n");
+ printf("\n");
+ printf(" Arguments are parsed in order. Long form flags are not case sensitive.\n");
+ printf(" Live tracing and trace file arguments are pushed onto an execution stack\n");
+ printf(" and processed after argument parsing has completed.\n");
+ printf("\n");
+ printf(" Files ending in .trace or .codes may omit the --trace or --codes flag\n");
+ printf(" In most cases, you do not need to specify a kernel size or timebase, it is\n");
+ printf(" determined automatically.\n");
+ printf("\n");
+ printf(" Modern trace(s) have an embedded ap/iop cpu count. If you need to parse\n");
+ printf(" an older file, you will want to set these. Typically you would set the AP\n");
+ printf(" cpu count to the number of active cpus, and the IOP cpu count to zero.\n");
+ printf("\n");
+ printf(" EXAMPLES\n");
+ printf("\n");
+ printf(" %skdprof InterestingData.trace%s\n", BOLD, UNBOLD);
+ printf(" Print a summary of per process cpu usage in InterestingData.trace\n");
+ printf("\n");
+ printf(" %skdprof --step 100ms InterestingData.trace%s\n", BOLD, UNBOLD);
+ printf(" Print summaries of the per process cpu usage in InterestingData.trace,\n");
+ printf(" one for each 100ms of time\n");
+ printf("\n");
+ printf(" %skdprof --thread --step 100ms InterestingData.trace%s\n", BOLD, UNBOLD);
+ printf(" Print summaries of the per process and per thread cpu usage in\n");
+ printf(" InterestingData.trace, one for each 100ms of time\n");
+ printf("\n");
+ printf(" %skdprof -r -i 100000 -e -S 1 -d -t%s\n", BOLD, UNBOLD);
+ printf(" Reinit the trace buffer with 100000 entries, enable it, wait 1 second,\n");
+ printf(" and then collect/print the trace buffer\n");
+ printf("\n");
+ printf(" %skdprof --events foo.trace%s\n", BOLD, UNBOLD);
+ printf(" Print the events in foo.trace\n");
+ printf("\n");
+ exit(1);
+}
+
+static void add_trace_codes_path(const char* path, Globals& globals) {
+ if (Path::is_file(path, true)) {
+ char resolved_path[PATH_MAX];
+ if (realpath(path, resolved_path)) {
+ globals.append_trace_codes_at_path(resolved_path);
+ return;
+ }
+ }
+ char* errmsg = NULL;
+ asprintf(&errmsg, "Trace codes path %s is not valid", path);
+ usage(errmsg);
+}
+
+static std::unique_ptr<Action> create_trace_file_action(const char* trace_file_path) {
+ if (Path::is_file(trace_file_path, true)) {
+ char resolved_path[PATH_MAX];
+ if (realpath(trace_file_path, resolved_path)) {
+ return std::make_unique<TraceFileAction>(resolved_path);
+ }
+ }
+ char* errmsg = NULL;
+ asprintf(&errmsg, "Trace data path %s is not valid", trace_file_path);
+ usage(errmsg);
+}
+
+//
+// Must take globals so it can do the timebase conversions for mabs values!
+//
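+// Examples: "10" and "10s" both mean 10 seconds, "250ms" is 250 milliseconds,
+// and "500000mabs" is converted to nanoseconds using the current timebase.
+//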
+static NanoTime parse_time(Globals& globals, const char* arg) {
+
+ char* units;
+ uint64_t value = strtoull(arg, &units, 0);
+
+ // Unspecified units are treated as seconds
+ if (*units == 0 || strcmp(units, "s") == 0) {
+ return NanoTime(value * NANOSECONDS_PER_SECOND);
+ }
+
+ if (strcmp(units, "ms") == 0)
+ return NanoTime(value * NANOSECONDS_PER_MILLISECOND);
+
+ if (strcmp(units, "us") == 0)
+ return NanoTime(value * NANOSECONDS_PER_MICROSECOND);
+
+ if (strcmp(units, "ns") == 0)
+ return NanoTime(value);
+
+ if (strcmp(units, "mabs") == 0) {
+ return AbsTime(value).nano_time(globals.timebase());
+ }
+
+ usage("Unable to parse units on time value");
+}
+
+static std::vector<std::unique_ptr<Action>> parse_arguments(int argc, const char* argv[], Globals& globals) {
+ int i = 1;
+ bool cpus_set = false;
+ bool iops_set = false;
+
+ std::vector<std::unique_ptr<Action>> actions;
+
+ while (i < argc) {
+ const char* arg = argv[i];
+ if ((strcmp(arg, "-h") == 0) || (strcasecmp(arg, "--help") == 0)) {
+ usage(NULL);
+ } else if ((strcasecmp(arg, "--version") == 0)) {
+ shouldPrintVersion = true;
+ } else if ((strcmp(arg, "-v") == 0) || strcasecmp(arg, "--verbose") == 0) {
+ globals.set_is_verbose(true);
+ } else if ((strcasecmp(arg, "--summary") == 0)) {
+ globals.set_should_print_summary(true);
+ } else if ((strcasecmp(arg, "--no-summary") == 0)) {
+ globals.set_should_print_summary(false);
+ } else if ((strcasecmp(arg, "--csv") == 0)) {
+ globals.set_should_print_csv_summary(true);
+ } else if ((strcasecmp(arg, "--no-csv") == 0)) {
+ globals.set_should_print_csv_summary(false);
+ } else if (strcasecmp(arg, "--step") == 0) {
+ if (++i >= argc)
+ usage("--step requires an argument");
+
+ globals.set_summary_step(argv[i]);
+ // Force a blow up now if the arg is unparseable
+ globals.summary_step(AbsInterval(AbsTime(1), AbsTime(1)));
+ } else if (strcasecmp(arg, "--start") == 0) {
+ if (++i >= argc)
+ usage("--start requires an argument");
+
+ globals.set_summary_start(argv[i]);
+ // Force a blow up now if the arg is unparseable
+ globals.summary_start(AbsInterval(AbsTime(1), AbsTime(1)));
+ } else if (strcasecmp(arg, "--stop") == 0) {
+ if (++i >= argc)
+ usage("--stop requires an argument");
+
+ globals.set_summary_stop(argv[i]);
+ // Force a blow up now if the arg is unparseable
+ globals.summary_stop(AbsInterval(AbsTime(1), AbsTime(1)));
+ } else if ((strcasecmp(arg, "--cpu") == 0)) {
+ globals.set_should_print_cpu_summaries(true);
+ } else if ((strcasecmp(arg, "--processes") == 0) || (strcasecmp(arg, "--process") == 0)) {
+ globals.set_should_print_process_summaries(true);
+ } else if ((strcasecmp(arg, "--no-processes") == 0) || (strcasecmp(arg, "--no-process") == 0)) {
+ globals.set_should_print_process_summaries(false);
+ } else if ((strcasecmp(arg, "--threads") == 0) || (strcasecmp(arg, "--thread") == 0)) {
+ globals.set_should_print_thread_summaries(true);
+ } else if ((strcasecmp(arg, "--sort-by-cpu") == 0)) {
+ globals.set_sort_key(kSortKey::CPU);
+ } else if ((strcasecmp(arg, "--sort-by-pid") == 0)) {
+ globals.set_sort_key(kSortKey::ID);
+ } else if ((strcasecmp(arg, "--sort-by-vmfault") == 0)) {
+ globals.set_sort_key(kSortKey::VMFault);
+ } else if ((strcasecmp(arg, "--sort-by-io") == 0)) {
+ globals.set_sort_key(kSortKey::IO_Wait);
+ } else if ((strcasecmp(arg, "--sort-by-io-wait") == 0)) {
+ globals.set_sort_key(kSortKey::IO_Wait);
+ } else if ((strcasecmp(arg, "--sort-by-io-ops") == 0)) {
+ globals.set_sort_key(kSortKey::IO_Ops);
+ } else if ((strcasecmp(arg, "--sort-by-io-size") == 0)) {
+ globals.set_sort_key(kSortKey::IO_Size);
+ } else if ((strcasecmp(arg, "--events") == 0)) {
+ globals.set_should_print_events(true);
+ } else if ((strcasecmp(arg, "--no-events") == 0)) {
+ globals.set_should_print_events(false);
+ } else if ((strcasecmp(arg, "--presort-events") == 0)) {
+ globals.set_should_presort_events(true);
+ } else if ((strcmp(arg, "-N") == 0) || strcasecmp(arg, "--no-default-codes") == 0) {
+ globals.set_should_read_default_trace_codes(false);
+ } else if (strcasecmp(arg, "--codes") == 0) {
+ if (++i >= argc)
+ usage("--codes requires an argument");
+ add_trace_codes_path(argv[i], globals);
+ } else if (strcasecmp(arg, "--trace") == 0) {
+ if (++i >= argc)
+ usage("--trace requires an argument");
+
+ actions.push_back(create_trace_file_action(argv[i]));
+ } else if ((strcmp(arg, "-i") == 0) || strcasecmp(arg, "--initialize") == 0) {
+ // The buffers argument is optional
+ uint32_t buffers_default = 0;
+
+ if (i + 1 < argc) {
+ arg = argv[i+1];
+ char* endptr;
+ uint32_t temp = (uint32_t)strtoul(arg, &endptr, 0);
+ if (*endptr == 0) {
+ // Consume the following argument if the conversion worked
+ buffers_default = temp;
+ i++;
+ }
+ }
+ actions.push_back(std::make_unique<InitializeAction>(buffers_default));
+ } else if ((strcmp(arg, "-r") == 0) || strcasecmp(arg, "--remove") == 0) {
+ actions.push_back(std::make_unique<RemoveAction>());
+ } else if ((strcmp(arg, "-n") == 0) || strcasecmp(arg, "--no-wrap") == 0) {
+ actions.push_back(std::make_unique<NoWrapAction>());
+ } else if ((strcmp(arg, "-g") == 0) || strcasecmp(arg, "--print-kdbg-state") == 0) {
+ actions.push_back(std::make_unique<PrintStateAction>());
+ } else if ((strcmp(arg, "-e") == 0) || strcasecmp(arg, "--enable") == 0) {
+ actions.push_back(std::make_unique<EnableAction>());
+ } else if ((strcmp(arg, "-d") == 0) || strcasecmp(arg, "--disable") == 0) {
+ actions.push_back(std::make_unique<DisableAction>());
+ } else if ((strcmp(arg, "-t") == 0) || strcasecmp(arg, "--collect") == 0) {
+ actions.push_back(std::make_unique<CollectAction>());
+ } else if (strcasecmp(arg, "--save") == 0) {
+ if (++i >= argc)
+ usage("--save requires an argument");
+
+ FileDescriptor desc(argv[i], O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (!desc.is_open()) {
+ char* errmsg = NULL;
+ asprintf(&errmsg, "Unable to create save file at %s", argv[i]);
+ usage(errmsg);
+ }
+ actions.push_back(std::make_unique<SaveTraceAction>(std::move(desc)));
+ } else if ((strcmp(arg, "-S") == 0) || strcasecmp(arg, "--sleep") == 0) {
+ if (++i >= argc)
+ usage("--sleep requires an argument");
+
+ actions.push_back(std::make_unique<SleepAction>(parse_time(globals, argv[i])));
+ } else if (strcasecmp(arg, "--ios") == 0) {
+ globals.set_timebase({ 125, 3 }, true);
+ /*
+ if (!cpus_set && !iops_set) {
+ globals.set_default_cpu_count(2); // Good guess for most devices
+ globals.set_default_iop_count(4); // Pure speculation...
+ }*/
+ } else if ((strcmp(arg, "-X") == 0) || strcasecmp(arg, "--k32") == 0) {
+ globals.set_kernel_size(KernelSize::k32);
+ } else if (strcasecmp(arg, "--k64") == 0) {
+ globals.set_kernel_size(KernelSize::k64);
+ } else if (strcasecmp(arg, "--timebase") == 0) {
+ if (++i >= argc)
+ usage("--timebase requires an argument");
+ arg = argv[i];
+
+ mach_timebase_info_data_t timebase;
+ if (sscanf(arg, "%u/%u", &timebase.numer, &timebase.denom) != 2) {
+ usage("Unable to parse --timebase argument");
+ }
+ globals.set_timebase(timebase, true);
+ } else if (strcasecmp(arg, "--cpus") == 0) {
+ cpus_set = true;
+ if (++i >= argc)
+ usage("--cpus requires an argument");
+ char* endptr;
+ uint32_t cpus = (uint32_t)strtoul(argv[i], &endptr, 0);
+ if (*endptr != 0)
+ usage("Unable to parse --cpus argument");
+ globals.set_cpu_count(cpus);
+ } else if (strcasecmp(arg, "--iops") == 0) {
+ iops_set = true;
+ if (++i >= argc)
+ usage("--iops requires an argument");
+ char* endptr;
+ uint32_t iops = (uint32_t)strtoul(argv[i], &endptr, 0);
+ if (*endptr != 0)
+ usage("Unable to parse --iops argument");
+ globals.set_iop_count(iops);
+ } else if (strcasecmp(arg, "--raw-timestamps") == 0) {
+ globals.set_should_zero_base_timestamps(false);
+ } else if (strcasecmp(arg, "--mach-absolute-time") == 0) {
+ globals.set_should_print_mach_absolute_timestamps(true);
+ } else if (strcasecmp(arg, "--event-index") == 0) {
+ globals.set_should_print_event_index(true);
+ } else if (strcasecmp(arg, "--no-codes") == 0) {
+ globals.set_should_print_symbolic_event_codes(false);
+ } else if ((strcasecmp(arg, "--process-start-stop") == 0) || (strcasecmp(arg, "--process-start-stops") == 0)) {
+ globals.set_should_print_process_start_stop_timestamps(true);
+ } else if ((strcmp(arg, "-o") == 0) || strcasecmp(arg, "--output") == 0) {
+ if (++i >= argc)
+ usage("--output requires an argument");
+
+ FileDescriptor desc(argv[i], O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (!desc.is_open()) {
+ char* errmsg = NULL;
+ asprintf(&errmsg, "Unable to create output file at %s", argv[i]);
+ usage(errmsg);
+ }
+ globals.set_output_fd(std::move(desc));
+ } else {
+ //
+ // Last attempts to divine argument type/intent.
+ //
+ std::string temp(arg);
+
+ // Is it a .codes file?
+ if (ends_with(temp, ".codes")) {
+ add_trace_codes_path(arg, globals);
+ goto no_error;
+ }
+
+ if (ends_with(temp, ".trace")) {
+ actions.push_back(create_trace_file_action(argv[i]));
+ goto no_error;
+ }
+
+ //
+ // ERROR!
+ //
+ char error_buffer[PATH_MAX];
+ snprintf(error_buffer, sizeof(error_buffer), "Unhandled argument: %s", arg);
+ usage(error_buffer);
+ }
+ no_error:
+
+ i++;
+ }
+
+ return actions;
+}
+
+int main (int argc, const char * argv[])
+{
+ //
+ // Use host values as defaults.
+ // User overrides as needed via flags.
+ //
+ Globals globals;
+ auto actions = parse_arguments(argc, argv, globals);
+
+ if (shouldPrintVersion) {
+ printf("%s version: %s", argv[0], __kdprofVersionString);
+ exit(0);
+ }
+
+ globals.resolve_trace_codes();
+
+ // 0x24000004 PPT_test
+
+ // Validate start/stop, if they are both set.
+ //
+ // The timebase isn't set for the tracefile at this point. This
+ // can sometimes fail when using a desktop timebase and mixed
+ // units (ms & mabs, for example)
+ if (globals.is_summary_start_set() && globals.is_summary_stop_set()) {
+ AbsInterval checker(AbsTime(1), AbsTime(1));
+ if (globals.summary_stop(checker) <= globals.summary_start(checker)) {
+ usage("The current --stop value is less than or equal to the --start value");
+ }
+ }
+
+ // If the user didn't ask for anything, set them up with a basic full trace summary
+ if (!globals.should_print_summary() &&
+ !globals.should_print_events() &&
+ !globals.should_print_csv_summary() &&
+ !globals.should_print_process_start_stop_timestamps() &&
+ !globals.is_should_print_summary_set())
+ {
+ globals.set_should_print_summary(true);
+ }
+
+ for (auto& action : actions) {
+ action->execute(globals);
+ }
+
+ return 0;
+}
--- /dev/null
+//
+// Action.hpp
+// msa
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef msa_Action_hpp
+#define msa_Action_hpp
+
+class Action {
+ public:
+ virtual void execute(Globals& globals) = 0;
+};
+
+#endif
--- /dev/null
+//
+// EventProcessing.hpp
+// msa
+//
+// Created by James McIlree on 2/5/14.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef msa_EventProcessing_hpp
+#define msa_EventProcessing_hpp
+
+template <typename SIZE>
+bool is_mach_msg_interesting(const Machine<SIZE>& machine, const MachineMachMsg<SIZE>* mach_msg)
+{
+ // If this message is carrying importance, it is interesting.
+ if ((mach_msg->has_sender() && MACH_MSGH_BITS_RAISED_IMPORTANCE(mach_msg->send_msgh_bits())) ||
+ (mach_msg->has_receiver() && MACH_MSGH_BITS_RAISED_IMPORTANCE(mach_msg->recv_msgh_bits())))
+ return true;
+
+ // If this message has a non-null voucher, it is interesting.
+ if ((mach_msg->has_sender() && !mach_msg->send_voucher()->is_null()) ||
+ (mach_msg->has_receiver() && !mach_msg->recv_voucher()->is_null()))
+ return true;
+
+ // If the message does NOT have a voucher, and the sender has a voucher set, it is interesting.
+ if (mach_msg->has_sender()) {
+ if (const MachineThread<SIZE>* sender_thread = machine.thread(mach_msg->send_tid(), mach_msg->send_time())) {
+ const MachineVoucher<SIZE>* sender_voucher = sender_thread->voucher(mach_msg->send_time());
+ if (!sender_voucher->is_unset() && !sender_voucher->is_null()) {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+template <typename SIZE>
+void reset_task_data_on_exec_or_exit(const MachineProcess<SIZE>& process,
+ std::unordered_map<pid_t, bool>& task_appnap_state,
+ std::unordered_map<pid_t, TaskRequestedPolicy>& task_requested_state,
+ std::unordered_map<pid_t, std::pair<TaskEffectivePolicy, uint32_t>>& task_effective_state,
+ std::unordered_map<pid_t, std::pair<uint32_t, uint32_t>>& task_boosts)
+{
+ ASSERT(!process.is_kernel(), "Kernel process should not ever exec or exit");
+ ASSERT(process.pid() > 0, "Process with pid less than 1 exec'd?");
+
+ if (pid_t pid = process.pid()) {
+ auto task_appnap_it = task_appnap_state.find(pid);
+ if (task_appnap_it != task_appnap_state.end()) {
+ task_appnap_state.erase(task_appnap_it);
+ }
+
+ auto task_requested_it = task_requested_state.find(pid);
+ if (task_requested_it != task_requested_state.end()) {
+ task_requested_state.erase(task_requested_it);
+ }
+
+ auto task_effective_it = task_effective_state.find(pid);
+ if (task_effective_it != task_effective_state.end()) {
+ task_effective_state.erase(task_effective_it);
+ }
+
+ auto task_boosts_it = task_boosts.find(pid);
+ if (task_boosts_it != task_boosts.end()) {
+ task_boosts.erase(task_boosts_it);
+ }
+ }
+}
+
+// From osfmk/kern/task.h
+#define TASK_POLICY_INTERNAL 0x0
+#define TASK_POLICY_EXTERNAL 0x1
+
+#define TASK_POLICY_TASK 0x4
+#define TASK_POLICY_THREAD 0x8
+
+template <typename SIZE>
+void process_events(Globals& globals,
+ const Machine<SIZE>& machine,
+ std::unordered_map<pid_t, bool>& task_appnap_state,
+ std::unordered_map<pid_t, TaskRequestedPolicy>& task_requested_state,
+ std::unordered_map<typename SIZE::ptr_t, TaskRequestedPolicy>& thread_requested_state,
+ std::unordered_map<pid_t, std::pair<TaskEffectivePolicy, uint32_t>>& task_effective_state,
+ std::unordered_map<typename SIZE::ptr_t, std::pair<TaskEffectivePolicy, uint32_t>>& thread_effective_state,
+ std::unordered_map<pid_t, std::pair<uint32_t, uint32_t>>& task_boosts)
+{
+ const KDEvent<SIZE>* events = machine.events();
+ uintptr_t count = machine.event_count();
+
+ ASSERT(count, "Expected at least one event");
+
+ //
+ // Filtering thoughts...
+ //
+ // Two levels of filtering.
+ //
+ // 1) global suppression of events that are "uninteresting".
+ //
+ // We filter on each event "class", with a keyword, so something like
+ //
+ // --lifecycle [ all | user | none ] ;; This is fork, exec, exit, thread-create, thread-exit
+ // --mach-msgs [ all | user | voucher | none ] ;; This is all mach msgs
+ //
+ // 2) targeted suppression of events that are not related to a user focus.
+ //
+ // We filter by process name/pid
+ //
+ // --track [ pid | name ]
+ //
+
+ PrintBuffer print_buffer(8192, 1024, globals.output_fd());
+
+ for (uintptr_t index=0; index < count; ++index) {
+ const KDEvent<SIZE>& event = events[index];
+
+ //
+ // Printing ...
+ //
+
+ switch (event.dbg_cooked()) {
+ case TRACE_DATA_EXEC: {
+ bool should_print = false;
+ if (globals.lifecycle_filter() >= kLifecycleFilter::User)
+ should_print = true;
+
+ if (should_print)
+ print_generic(print_buffer, globals, machine, event, index, "exec");
+
+ if (const MachineThread<SIZE>* exec_thread = machine.thread(event.tid(), event.timestamp())) {
+ reset_task_data_on_exec_or_exit(exec_thread->process(), task_appnap_state, task_requested_state, task_effective_state, task_boosts);
+ }
+ break;
+ }
+
+ case TRACE_DATA_NEWTHREAD: {
+ bool should_print = false;
+ auto new_thread_tid = (typename SIZE::ptr_t)event.arg1();
+ if (const MachineThread<SIZE>* new_thread = machine.thread(new_thread_tid, event.timestamp())) {
+ switch (globals.lifecycle_filter()) {
+ case kLifecycleFilter::None:
+ break;
+ case kLifecycleFilter::User:
+ if (!new_thread->process().is_kernel())
+ should_print = true;
+ break;
+ case kLifecycleFilter::All:
+ should_print = true;
+ break;
+ }
+
+ if (should_print) {
+ auto& new_process = new_thread->process();
+ ASSERT(new_process.pid() == (pid_t)event.arg2(), "Pid does not match");
+ if (new_process.timespan().location() == event.timestamp()) {
+ print_fork(print_buffer, globals, machine, event, index, new_process);
+ }
+
+ // We're not printing the actual event data, but instead the new thread's data:
+ print_base(print_buffer, globals, event.timestamp(), new_thread, event, index, "thread-create", true);
+ }
+ }
+ break;
+ }
+
+ case TRACEDBG_CODE(DBG_TRACE_DATA, TRACE_DATA_THREAD_TERMINATE): {
+ // This event may spawn two prints
+ //
+ // 1) thread termination
+ // 2) task termination
+ bool should_print = false;
+ typename SIZE::ptr_t terminated_tid = event.arg1();
+ if (const MachineThread<SIZE>* terminated_thread = machine.thread(terminated_tid, event.timestamp())) {
+ switch (globals.lifecycle_filter()) {
+ case kLifecycleFilter::None:
+ break;
+ case kLifecycleFilter::User:
+ if (!terminated_thread->process().is_kernel())
+ should_print = true;
+ break;
+ case kLifecycleFilter::All:
+ should_print = true;
+ break;
+ }
+
+ if (should_print) {
+ // We're not printing the actual event data, but instead the exiting thread's data:
+ print_base(print_buffer, globals, event.timestamp(), terminated_thread, event, index, "thread-exit", true);
+ }
+
+ // Was this the last thread in the process? (Do we also need to print a process exit?)
+ auto& terminated_process = terminated_thread->process();
+ if (terminated_process.is_trace_terminated()) {
+ if (event.timestamp() >= terminated_process.exit_timestamp()) {
+ if (should_print) {
+ print_exit(print_buffer, globals, event, terminated_thread, index);
+ }
+ reset_task_data_on_exec_or_exit(terminated_process, task_appnap_state, task_requested_state, task_effective_state, task_boosts);
+ }
+ }
+
+ auto thread_requested_it = thread_requested_state.find(terminated_tid);
+ if (thread_requested_it != thread_requested_state.end()) {
+ thread_requested_state.erase(thread_requested_it);
+ }
+
+ auto thread_effective_it = thread_effective_state.find(terminated_tid);
+ if (thread_effective_it != thread_effective_state.end()) {
+ thread_effective_state.erase(thread_effective_it);
+ }
+ } else
+ ASSERT(false, "Failed to find exit thread");
+ break;
+ }
+
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_SEND): {
+ // trace event data is:
+ // kmsg_addr, msgh_bits, msgh_id, voucher_addr,
+
+ // FIX ME!
+ //
+ // For now, we aren't recording mach msg's with endpoints in
+ // the kernel. If we don't find a mach msg, assume it's a kernel
+ // msg.
+ if (const MachineMachMsg<SIZE>* mach_msg = machine.mach_msg(index)) {
+ if (is_mach_msg_interesting(machine, mach_msg)) {
+ print_mach_msg(print_buffer, globals, machine, event, index, true, mach_msg);
+ }
+ }
+ break;
+ }
+
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV):
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV_VOUCHER_REFUSED): {
+ // trace event data is
+ // kmsg_addr, msgh_bits, msgh_id, recv_voucher_addr
+
+ // FIX ME!
+ //
+ // For now, we aren't recording mach msg's with endpoints in
+ // the kernel. If we don't find a mach msg, assume it's a kernel
+ // msg.
+ if (const MachineMachMsg<SIZE>* mach_msg = machine.mach_msg(index)) {
+ if (is_mach_msg_interesting(machine, mach_msg)) {
+ print_mach_msg(print_buffer, globals, machine, event, index, false, mach_msg);
+ }
+ }
+ break;
+ }
+
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_VOUCHER_CREATE): {
+ // trace event data is
+ // voucher address, voucher table size, system voucher count, voucher content bytes
+
+ if (auto voucher = machine.voucher(event.arg1(), event.timestamp())) {
+ print_voucher(print_buffer, globals, machine, event, index, "voucher_create", voucher, true);
+
+ if (voucher->has_valid_contents()) {
+ print_voucher_contents(print_buffer, globals, machine, event, index, voucher);
+ }
+ } else {
+ ASSERT(false, "Failed to find voucher");
+ }
+ break;
+ }
+
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_VOUCHER_DESTROY): {
+ // trace event data is
+ // voucher address, 0, system voucher count, 0
+ if (auto voucher = machine.voucher(event.arg1(), event.timestamp())) {
+ print_voucher(print_buffer, globals, machine, event, index, "voucher_destroy", voucher, false);
+ } else {
+ ASSERT(false, "Failed to find voucher");
+ }
+ break;
+ }
+
+ case MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SET_VOUCHER): {
+ print_generic(print_buffer, globals, machine, event, index, "thread_adopt_voucher");
+ break;
+ }
+
+ case IMPORTANCE_CODE(IMP_ASSERTION, IMP_EXTERN):
+ print_importance_assert(print_buffer, globals, machine, event, index, "externalize_importance", task_boosts);
+ break;
+
+ case IMPORTANCE_CODE(IMP_ASSERTION, IMP_HOLD | TASK_POLICY_EXTERNAL):
+ case IMPORTANCE_CODE(IMP_ASSERTION, IMP_HOLD | TASK_POLICY_INTERNAL):
+ print_importance_assert(print_buffer, globals, machine, event, index, "importance_hold", task_boosts);
+ break;
+
+ case IMPORTANCE_CODE(IMP_ASSERTION, IMP_DROP | TASK_POLICY_EXTERNAL):
+ case IMPORTANCE_CODE(IMP_ASSERTION, IMP_DROP | TASK_POLICY_INTERNAL):
+ print_importance_assert(print_buffer, globals, machine, event, index, "importance_drop", task_boosts);
+ break;
+
+ case IMPORTANCE_CODE(IMP_WATCHPORT, 0):
+ // trace data is
+ // proc_selfpid(), pid, boost, released_pid, 0);
+ if (event.arg3() > 0) {
+ print_watchport_importance_transfer(print_buffer, globals, machine, event, index);
+ }
+ break;
+
+ case IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, 0):
+ case IMPORTANCE_CODE(IMP_TASK_SUPPRESSION, 1):
+ // Trace data is
+ // self_pid, audit_token_pid_from_task(task), trequested_0(task, NULL), trequested_1(task, NULL)
+ print_appnap(print_buffer, globals, machine, event, index, (bool)event.dbg_code(), task_appnap_state, task_requested_state);
+ break;
+
+ case IMPORTANCE_CODE(IMP_BOOST, IMP_BOOSTED):
+ case IMPORTANCE_CODE(IMP_BOOST, IMP_UNBOOSTED):
+ // trace data is
+ // proc_selfpid(), audit_token_pid_from_task(task), trequested_0(task, NULL), trequested_1(task, NULL)
+ if (event.is_func_start()) {
+ print_boost(print_buffer, globals, machine, event, index, (pid_t)event.arg2(), (event.dbg_code() == IMP_BOOSTED));
+ }
+ break;
+
+ //
+ // IMP_TASK_APPTYPE trace args are:
+ //
+ // start:
+ // target_pid, trequested_0, trequested_1, apptype
+ // end:
+ // target_pid, trequested_0, trequested_1, is_importance_receiver
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_NONE):
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_DAEMON_INTERACTIVE):
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_DAEMON_STANDARD):
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_DAEMON_ADAPTIVE):
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_DAEMON_BACKGROUND):
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_APP_DEFAULT):
+ case IMPORTANCE_CODE(IMP_TASK_APPTYPE, TASK_APPTYPE_APP_TAL):
+ if (event.is_func_end()) {
+ print_importance_apptype(print_buffer, globals, machine, event, index);
+ }
+ // FIX ME, not handling trequested status.
+ //
+ // process_trequested_task(print_buffer, globals, machine, event, index, (pid_t)event.arg1(), event.arg2(), event.arg3(), task_requested_policies);
+ break;
+
+
+ case IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_TASK)):
+ // trace data is
+ // targetpid, teffective_0(task, NULL), teffective_1(task, NULL), tpriority(task, NULL)
+ print_importance_update_task(print_buffer, globals, machine, event, index, "imp_update_task_create", task_effective_state);
+ break;
+
+ case IMPORTANCE_CODE(IMP_UPDATE, (IMP_UPDATE_TASK_CREATE | TASK_POLICY_THREAD)):
+ // trace data is
+ // targettid, teffective_0(task, thread), teffective_1(task, thread), tpriority(thread, NULL)
+ print_importance_update_thread(print_buffer, globals, machine, event, index, "imp_update_thread_create", thread_effective_state);
+ break;
+
+ case IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_TASK):
+ // trace data is
+ // targetpid, teffective_0(task, NULL), teffective_1(task, NULL), tpriority(task, THREAD_NULL)
+ print_importance_update_task(print_buffer, globals, machine, event, index, "imp_update_task", task_effective_state);
+ break;
+
+ case IMPORTANCE_CODE(IMP_UPDATE, TASK_POLICY_THREAD):
+ // trace data is
+ // targettid, teffective_0(task, thread), teffective_1(task, thread), tpriority(task, THREAD_NULL)
+ print_importance_update_thread(print_buffer, globals, machine, event, index, "imp_update_thread", thread_effective_state);
+ break;
+
+ case IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND):
+ // trace data is
+ // current_pid, sender_pid, imp_msgh_id, (bool)importance_cleared
+
+ // NOTE! Only end events carry "importance cleared"
+ if (event.is_func_end() && (event.arg4() != 0)) {
+ print_importance_send_failed(print_buffer, globals, machine, event, index);
+ }
+ break;
+
+ case IMPORTANCE_CODE(IMP_MSG, IMP_MSG_DELV): {
+ // trace data is
+ // sending_pid, task_pid /* recv_pid?? */, msgh_id, impresult
+ //
+ // for impresult:
+ //
+ // 0: BOOST NOT APPLIED
+ // 1: BOOST EXTERNALIZED
+ // 2: LIVE_IMPORTANCE_LINKAGE!
+ print_impdelv(print_buffer, globals, machine, event, index, (pid_t)event.arg1(), (uint32_t)event.arg4());
+ break;
+ }
+
+ default:
+ if (event.dbg_class() == DBG_IMPORTANCE) {
+ //
+ // Every task policy set trace code carries "trequested" data, we would like to grab them all.
+ //
+ // This subclass spans the range of 0x20 through 0x3F
+ //
+
+ uint32_t subclass = event.dbg_subclass();
+ if (subclass >= 0x20 && subclass <= 0x3F) {
+ // Trace event data is
+ // targetid(task, thread), trequested_0(task, thread), trequested_1(task, thread), value
+
+ bool is_task_event = (event.dbg_code() & TASK_POLICY_TASK) > 0;
+
+ // Should not be both a task and thread event.
+ ASSERT(is_task_event != ((event.dbg_code() & TASK_POLICY_THREAD) > 0), "BEWM!");
+
+ if (is_task_event) {
+ // FIX ME, not handling trequested status.
+ //
+ // process_trequested_task(print_buffer, globals, machine, event, index, (pid_t)event.arg1(), event.arg2(), event.arg3(), task_requested_policies);
+ } else {
+ // FIX ME, not handling trequested status.
+ //
+ // process_trequested_thread(print_buffer, globals, machine, event, index, event.arg1(), event.arg2(), event.arg3(), task_requested_policies, thread_requested_policies);
+ }
+ }
+ }
+ break;
+ }
+ }
+}
+
+#endif
--- /dev/null
+//
+// EventRingBuffer.hpp
+// msa
+//
+// Created by James McIlree on 10/8/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef __msa__EventRingBuffer__
+#define __msa__EventRingBuffer__
+
+template <typename SIZE>
+class EventRingBuffer {
+ protected:
+ const Globals& _globals; // Used for printing the ringbuffer
+ std::vector<KDEvent<SIZE>> _events;
+ std::size_t _head;
+ std::size_t _tail;
+
+ public:
+ EventRingBuffer(const Globals& globals, std::size_t size);
+
+ // Returns:
+ //
+ // events, number_read, capacity
+ std::tuple<KDEvent<SIZE>*, std::size_t, std::size_t> read();
+
+ void print() const;
+ void print_event_index(std::size_t index) const;
+ void print_all_events() const;
+ void print_last_events(std::size_t lastN) const;
+ void print_from_timestamp(uint64_t timestamp) const;
+};
+
+template <typename SIZE>
+EventRingBuffer<SIZE>::EventRingBuffer(const Globals& globals, std::size_t size) :
+ _globals(globals),
+ _events(size),
+ _head(0),
+ _tail(0)
+{
+ ASSERT(size, "Sanity");
+
+ // Force all pages into memory so the first bazillion
+ // trace entries aren't VM_FAULT...
+ bzero(_events.data(), _events.size() * sizeof(KDEvent<SIZE>));
+}
+
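+// read() fills the contiguous run from the tail's slot to the end of the
+// backing vector, so draining a wrapped buffer can take more than one call.
+// On overflow, _head advances past the oldest (overwritten) events.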
+template <typename SIZE>
+std::tuple<KDEvent<SIZE>*, std::size_t, std::size_t> EventRingBuffer<SIZE>::read() {
+ std::size_t modulo_index = _tail % _events.size();
+ std::size_t count, capacity = _events.size() - modulo_index;
+ KDEvent<SIZE>* events = &_events.data()[modulo_index];
+
+ if ((count = KDBG::read(events, capacity * sizeof(KDEvent<SIZE>)))) {
+ // Update head/tail as soon as we have added data.
+ _tail += count;
+ if (_tail - _head > _events.size()) {
+ _head += count;
+ }
+ }
+
+ return std::make_tuple(events, count, capacity);
+}
+
+#if 0
+
+template <typename SIZE>
+void EventRingBuffer<SIZE>::print() const {
+ printf("%zu events in buffer [%zu -> %zu)\n", _tail - _head, _head, _tail);
+}
+
+template <typename SIZE>
+void EventRingBuffer<SIZE>::print_event_index(std::size_t index) const {
+ const KDEvent<SIZE>& event = _events[index % _events.size()];
+
+ const char* type = event.is_func_start() ? "beg" : (event.is_func_end() ? "end" : "---");
+ auto trace_code_it = _globals.trace_codes().find(event.dbg_cooked());
+
+ if (trace_code_it == _globals.trace_codes().end()) {
+ printf("event[%ld] { timestamp=%llx, arg1=%llx, arg2=%llx, arg3=%llx, arg4=%llx, tid=%llx, %4s %x, cpu=%u }\n", index, event.timestamp().value(),
+ (uint64_t)event.arg1(), (uint64_t)event.arg2(), (uint64_t)event.arg3(), (uint64_t)event.arg4(), (uint64_t)event.tid(), type, event.dbg_cooked(), event.cpu());
+ } else {
+ printf("event[%ld] { timestamp=%llx, arg1=%llx, arg2=%llx, arg3=%llx, arg4=%llx, tid=%llx, %4s %s, cpu=%u }\n", index, event.timestamp().value(),
+ (uint64_t)event.arg1(), (uint64_t)event.arg2(), (uint64_t)event.arg3(), (uint64_t)event.arg4(), (uint64_t)event.tid(), type, trace_code_it->second.c_str(), event.cpu());
+ }
+}
+
+template <typename SIZE>
+void EventRingBuffer<SIZE>::print_all_events() const {
+ std::size_t begin = _head;
+ while (begin < _tail) {
+ print_event_index(begin++);
+ }
+}
+
+template <typename SIZE>
+void EventRingBuffer<SIZE>::print_last_events(std::size_t lastN) const {
+ std::size_t length = std::min(lastN, _tail - _head);
+ std::size_t begin = _tail - length;
+ ASSERT(begin <= _tail, "Sanity");
+ while (begin < _tail) {
+ print_event_index(begin++);
+ }
+}
+
+template <typename SIZE>
+void EventRingBuffer<SIZE>::print_from_timestamp(uint64_t t) const {
+ std::size_t begin = _head;
+ while (begin < _tail) {
+ const KDEvent<SIZE>& event = _events[begin % _events.size()];
+ if (event.timestamp() >= t)
+ break;
+ begin++;
+ }
+
+ while (begin < _tail) {
+ print_event_index(begin++);
+ }
+}
+
+void PrintEventRingBuffer() {
+ // uint64_t _timestamp;
+ // uint64_t _arg1;
+ // uint64_t _arg2;
+ // uint64_t _arg3;
+ // uint64_t _arg4;
+ // uint64_t _thread;
+ // uint32_t _debugid;
+ // uint32_t _cpuid;
+
+ const KDEvent<Kernel64>* events = (const KDEvent<Kernel64>*)g_rb;
+ for (std::size_t i=ring_buffer_head_index; i<ring_buffer_tail_index; i++) {
+ const KDEvent<Kernel64>& event = events[i % g_rb_size];
+ printf("event[%ld] { timestamp=%llx, ", i, event.timestamp().value());
+ printf("arg1=%llx, ", event.arg1());
+ printf("arg2=%llx, ", event.arg2());
+ printf("arg3=%llx, ", event.arg3());
+ printf("arg4=%llx, ", event.arg4());
+ printf("tid=%llx, ", event.tid());
+ const char* type = event.is_func_start() ? "beg" : (event.is_func_end() ? "end" : "---");
+ auto trace_code_it = gglobals->trace_codes().find(event.dbg_cooked());
+ if (trace_code_it == gglobals->trace_codes().end()) {
+ printf("%4s %x, ", type, event.dbg_cooked());
+ } else {
+ printf("%4s %s, ", type, trace_code_it->second.c_str());
+ }
+ printf("cpu=%u }\n", event.cpu());
+ }
+ printf("%lu\n", ring_buffer_tail_index - ring_buffer_head_index);
+}
+#endif
+
+#endif /* defined(__msa__EventRingBuffer__) */
--- /dev/null
+//
+// MachineGlobals.cpp
+// msa
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+Globals::Globals() :
+ _cpu_count(0),
+ _iop_count(0),
+ _kernel_size(Kernel::is_64_bit() ? KernelSize::k64 : KernelSize::k32),
+ _live_update_interval("100ms"),
+ _is_cpu_count_set(false),
+ _is_iop_count_set(false),
+ _is_kernel_size_set(false),
+ _is_timebase_set(false),
+ _beginning_of_time(0),
+ _should_print_mach_absolute_timestamps(false),
+ _should_print_event_index(false),
+ _is_verbose(false),
+ _should_presort_events(false),
+ _should_zero_base_timestamps(true),
+ _should_trace_voucher_contents(true),
+ _lifecycle_filter(kLifecycleFilter::User),
+ _mach_msg_filter(kMachMsgFilter::Voucher)
+{
+ // Default to the current machine's values
+ mach_timebase_info(&_timebase_info);
+
+ for (auto& entry : KDBG::cpumap()) {
+ if (entry.is_iop())
+ _iop_count++;
+ else
+ _cpu_count++;
+ }
+
+ // If we are unable to get a cpumap,
+ // fall back on the current # of cpus.
+ if (_cpu_count == 0) {
+ _cpu_count = Kernel::active_cpu_count();
+ _iop_count = 0;
+ }
+
+ // This is only used as is for live tracing or capturing a trace,
+ // so we want to use the current # of cpus.
+ _trace_buffer_size = 250000 * _cpu_count;
+}
+
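+// Accepts the same time postfixes as kdprof's parse_time (s, ms, us, ns, mabs);
+// e.g. "100ms" is converted to AbsTime via the timebase, while "1000mabs" is
+// already in mach absolute time and is used directly.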
+static AbsTime parse_time(const char* arg, mach_timebase_info_data_t timebase_info) {
+
+ char* units;
+ uint64_t value = strtoull(arg, &units, 0);
+
+ // Unspecified units are treated as seconds
+ if (*units == 0 || strcmp(units, "s") == 0) {
+ return NanoTime(value * NANOSECONDS_PER_SECOND).abs_time(timebase_info);
+ }
+
+ if (strcmp(units, "ms") == 0)
+ return NanoTime(value * NANOSECONDS_PER_MILLISECOND).abs_time(timebase_info);
+
+ if (strcmp(units, "us") == 0)
+ return NanoTime(value * NANOSECONDS_PER_MICROSECOND).abs_time(timebase_info);
+
+ if (strcmp(units, "ns") == 0)
+ return NanoTime(value).abs_time(timebase_info);
+
+ if (strcmp(units, "mabs") == 0) {
+ return AbsTime(value);
+ }
+
+ usage("Unable to parse units on time value");
+}
+
+AbsTime Globals::live_update_interval() const {
+ return parse_time(_live_update_interval.c_str(), _timebase_info);
+}
--- /dev/null
+//
+// Globals.hpp
+// msa
+//
+// Created by James McIlree on 4/17/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef msa_Globals_hpp
+#define msa_Globals_hpp
+
+//
+// These are "global" values that control parsing and printing behavior.
+//
+
+enum class kLifecycleFilter : std::uint32_t {
+ None = 0,
+ User,
+ All
+};
+
+enum class kMachMsgFilter : std::uint32_t {
+ None = 0,
+ User,
+ Voucher,
+ All
+};
+
+class Globals {
+ protected:
+ // Default/unknown parsing values
+ uint32_t _cpu_count;
+ uint32_t _iop_count;
+ KernelSize _kernel_size;
+ std::string _live_update_interval;
+
+ bool _is_cpu_count_set;
+ bool _is_iop_count_set;
+ bool _is_kernel_size_set;
+ bool _is_timebase_set;
+
+ // Output, printing related.
+ AbsTime _beginning_of_time;
+ mach_timebase_info_data_t _timebase_info;
+ FileDescriptor _output_fd;
+ bool _should_print_mach_absolute_timestamps;
+ bool _should_print_event_index;
+ bool _is_verbose;
+ bool _should_presort_events;
+ bool _should_zero_base_timestamps;
+ bool _should_trace_voucher_contents;
+ uint32_t _trace_buffer_size;
+ kLifecycleFilter _lifecycle_filter;
+ kMachMsgFilter _mach_msg_filter;
+
+
+ public:
+ Globals();
+
+ uint32_t cpu_count() const { return _cpu_count; }
+ void set_cpu_count(uint32_t num) { _cpu_count = num; _is_cpu_count_set = true; }
+ bool is_cpu_count_set() const { return _is_cpu_count_set; }
+
+ uint32_t iop_count() const { return _iop_count; }
+ void set_iop_count(uint32_t num) { _iop_count = num; _is_iop_count_set = true; }
+ bool is_iop_count_set() const { return _is_iop_count_set; }
+
+ KernelSize kernel_size() const { return _kernel_size; }
+ void set_kernel_size(KernelSize size) { _kernel_size = size; _is_kernel_size_set = true; }
+ bool is_kernel_size_set() const { return _is_kernel_size_set; }
+
+ AbsTime beginning_of_time() const { return _beginning_of_time; }
+ void set_beginning_of_time(AbsTime t) { _beginning_of_time = t; }
+
+ mach_timebase_info_data_t timebase() const { return _timebase_info; }
+ void set_timebase(mach_timebase_info_data_t timebase, bool is_user_set) { _timebase_info = timebase; if (is_user_set) _is_timebase_set = true; }
+ bool is_timebase_set() const { return _is_timebase_set; }
+
+ int output_fd() const { return _output_fd.is_open() ? (int)_output_fd : STDOUT_FILENO; }
+
+ // Okay, this method caused enough pain to make the final resolution worth a comment.
+ //
+ // http://thbecker.net/articles/rvalue_references/section_05.html
+ //
+ // Things that are declared as rvalue reference can be lvalues or rvalues.
+ // The distinguishing criterion is: if it has a name, then it is an lvalue. Otherwise, it is an rvalue.
+ //
+ // In this case, you cannot call set_output_fd with an lvalue, but fd is STILL an lvalue.
+ // We must still explicitly use std::move on fd!
+ void set_output_fd(FileDescriptor&& fd) { _output_fd = std::move(fd); }
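+
+ // A minimal call-site sketch (assuming FileDescriptor wraps a raw fd
+ // from open(2)):
+ //
+ // FileDescriptor fd(open("/tmp/msa.out", O_WRONLY | O_CREAT, 0644));
+ // globals.set_output_fd(std::move(fd)); // named lvalue: must be moved
+ // globals.set_output_fd(FileDescriptor(open("/tmp/msa.out", O_WRONLY))); // unnamed temporary binds directly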
+
+ bool should_print_mach_absolute_timestamps() const { return _should_print_mach_absolute_timestamps; }
+ void set_should_print_mach_absolute_timestamps(bool value) { _should_print_mach_absolute_timestamps = value; }
+
+ bool should_print_event_index() const { return _should_print_event_index; }
+ void set_should_print_event_index(bool value) { _should_print_event_index = value; }
+
+ bool is_verbose() const { return _is_verbose; }
+ void set_is_verbose(bool value) { _is_verbose = value; }
+
+ bool should_presort_events() const { return _should_presort_events; }
+ void set_should_presort_events(bool value) { _should_presort_events = value; }
+
+ bool should_zero_base_timestamps() const { return _should_zero_base_timestamps; }
+ void set_should_zero_base_timestamps(bool value) { _should_zero_base_timestamps = value; }
+
+ bool should_trace_voucher_contents() const { return _should_trace_voucher_contents; }
+ void set_should_trace_voucher_contents(bool value) { _should_trace_voucher_contents = value; }
+
+ uint32_t trace_buffer_size() const { return _trace_buffer_size; }
+ void set_trace_buffer_size(uint32_t value) { _trace_buffer_size = value; }
+
+ AbsTime live_update_interval() const;
+ void set_live_update_interval(const char* value) { _live_update_interval = value; }
+
+ kLifecycleFilter lifecycle_filter() const { return _lifecycle_filter; }
+ void set_lifecycle_filter(kLifecycleFilter value) { _lifecycle_filter = value; }
+
+ kMachMsgFilter mach_msg_filter() const { return _mach_msg_filter; }
+ void set_mach_msg_filter(kMachMsgFilter value) { _mach_msg_filter = value; }
+};
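+
+// Typical configuration sketch (setters as declared above; the interval
+// string is parsed lazily, on each call to live_update_interval()):
+//
+// Globals globals;
+// globals.set_should_print_event_index(true);
+// globals.set_live_update_interval("10ms");
+// AbsTime interval = globals.live_update_interval();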
+
+#endif
--- /dev/null
+//
+// LiveTraceAction.cpp
+// msa
+//
+// Created by James McIlree on 2/4/14.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+// Force materialization of the ring buffer print methods,
+// so they can be called from the debugger.
+template class EventRingBuffer<Kernel32>;
+template class EventRingBuffer<Kernel64>;
+
+// These flags are written from the SIGINT handler, so use the
+// async-signal-safe type for them.
+static volatile sig_atomic_t shouldProcessEvents;
+static volatile sig_atomic_t sigintCount;
+
+static bool start_live_tracing(Globals& globals)
+{
+ if (!KDBG::reset()) return false;
+ if (!KDBG::set_buffer_capacity(globals.trace_buffer_size())) return false;
+ if (!KDBG::set_nowrap(false)) return false;
+ if (!KDBG::initialize_buffers()) return false;
+ if (!KDBG::set_enabled(KDEBUG_ENABLE_TRACE)) return false;
+
+ return true;
+}
+
+static void end_live_tracing(void)
+{
+ KDBG::reset();
+}
+
+static void signal_handler_ctrl_C(int sig)
+{
+ shouldProcessEvents = false;
+ if (++sigintCount >= 5) {
+ // Not responding, nuke it from orbit.
+ exit(1);
+ }
+}
+
+template <typename SIZE>
+static void live_trace_event_loop(Globals& globals)
+{
+ // Handle ctrl-C
+ shouldProcessEvents = true;
+ sigintCount = 0;
+
+ while (shouldProcessEvents) {
+ signal(SIGINT, signal_handler_ctrl_C);
+
+ EventRingBuffer<SIZE> ring_buffer(globals, globals.trace_buffer_size() * 2);
+
+ {
+ char buf[PATH_MAX];
+ char* buf_end = buf + sizeof(buf);
+ print_mach_msg_header(buf, buf_end, globals);
+ dprintf(globals.output_fd(), "%s", buf);
+ }
+
+ VoucherContentSysctl contents(globals.should_trace_voucher_contents());
+
+ if (start_live_tracing(globals)) {
+
+ // Okay, our goal is to hit specific timeposts.
+ // IOW, if our target is every 10ms, and we spend 3ms doing work,
+ // we sleep 7ms.
+ AbsTime traceUpdateIntervalAbs = globals.live_update_interval();
+ AbsTime now, next_trace_update = AbsTime::now();
+ std::unique_ptr<Machine<SIZE>> machine, last_machine;
+
+ std::unordered_map<pid_t, bool> task_appnap_state;
+ std::unordered_map<pid_t, TaskRequestedPolicy> task_requested_state;
+ std::unordered_map<typename SIZE::ptr_t, TaskRequestedPolicy> thread_requested_state;
+ std::unordered_map<pid_t, std::pair<TaskEffectivePolicy, uint32_t>> task_effective_state;
+ std::unordered_map<typename SIZE::ptr_t, std::pair<TaskEffectivePolicy, uint32_t>> thread_effective_state;
+ std::unordered_map<pid_t, std::pair<uint32_t, uint32_t>> task_boosts;
+
+ while (shouldProcessEvents) {
+ now = AbsTime::now();
+ if (now >= next_trace_update) {
+ std::size_t count, capacity;
+ KDEvent<SIZE>* events;
+
+ std::tie(events, count, capacity) = ring_buffer.read();
+ if (count) {
+ if (last_machine) {
+ machine = std::make_unique<Machine<SIZE>>(*last_machine, events, count);
+ } else {
+ auto state = KDBG::state();
+ auto threadmap = KDBG::threadmap<SIZE>(state);
+ auto cpumap = KDBG::cpumap();
+ machine = std::make_unique<Machine<SIZE>>(cpumap.data(), (uint32_t)cpumap.size(),
+ threadmap.data(), (uint32_t)threadmap.size(),
+ events, count);
+
+ if (globals.should_zero_base_timestamps() && count) {
+ globals.set_beginning_of_time(events[0].timestamp());
+ } else {
+ globals.set_beginning_of_time(AbsTime(0));
+ }
+ }
+
+ if (!machine->lost_events()) {
+ process_events(globals, *machine, task_appnap_state, task_requested_state, thread_requested_state, task_effective_state, thread_effective_state, task_boosts);
+
+ // If count == capacity, we read to the end of the ring buffer
+ // and more events may be pending. Do not risk an overflow;
+ // skip the sleep and re-read immediately.
+ if (count < capacity) {
+ next_trace_update += traceUpdateIntervalAbs;
+ if (next_trace_update <= now) {
+ printf("WARNING - falling behind on event processing\n");
+ // Reset so if we do catch up, we don't spin on a clock
+ // that has fallen seconds behind.
+ next_trace_update = AbsTime::now();
+ }
+ }
+ } else {
+ printf("LOST EVENTS, exiting...\n");
+ shouldProcessEvents = false;
+ }
+
+ last_machine = std::move(machine);
+ }
+ }
+
+ mach_wait_until(next_trace_update.value());
+ }
+ } else {
+ printf("Unable to enable tracing.\n");
+ shouldProcessEvents = false;
+ }
+
+ signal(SIGINT, SIG_DFL);
+ }
+
+ // Final cleanup here to make sure partial initialization is
+ // cleaned up.
+ end_live_tracing();
+}
+
+void LiveTraceAction::execute(Globals& globals) {
+ // Take an initial state snapshot: is another program using the trace buffer, etc.
+ try {
+ KDState state = KDBG::state();
+ if (state.is_initialized() || state.controlling_pid() > 0) {
+ if (state.controlling_pid() != getpid()) {
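+ // kill(pid, 0) delivers no signal; it only performs the existence
+ // and permission checks. Failing with ESRCH means the controlling
+ // process has died and the trace buffer can be reclaimed.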
+ if (state.controlling_pid() > 0 && kill(state.controlling_pid(), 0) == -1 && errno == ESRCH) {
+ if (globals.is_verbose()) {
+ printf("Reclaiming trace buffer control from pid %d\n", state.controlling_pid());
+ }
+ } else {
+ printf("Another process is using the trace facility, possibly pid %d\n", state.controlling_pid());
+ exit(1);
+ }
+ }
+ }
+
+ try {
+ if (state.is_lp64()) {
+ live_trace_event_loop<Kernel64>(globals);
+ } else {
+ live_trace_event_loop<Kernel32>(globals);
+ }
+ } catch (const std::exception& e) {
+ log_msg(ASL_LEVEL_WARNING, "Caught exception in %s:\n %s\n", __PRETTY_FUNCTION__, e.what());
+ KDBG::reset();
+ }
+
+ } catch (Exception& e) {
+ if (getuid() != 0) {
+ printf("Unable to acquire trace buffer state. You must be root.\n");
+ exit(1);
+ } else {
+ usage(e.what());
+ }
+ }
+}
--- /dev/null
+//
+// LiveTraceAction.h
+// msa
+//
+// Created by James McIlree on 2/4/14.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef __msa__LiveTraceAction__
+#define __msa__LiveTraceAction__
+
+class LiveTraceAction : public Action {
+ public:
+ LiveTraceAction() {}
+
+ virtual void execute(Globals& globals);
+};
+
+#endif /* defined(__msa__LiveTraceAction__) */
--- /dev/null
+//
+// PrintBuffer.hpp
+// system_cmds
+//
+// Created by James McIlree on 5/7/14.
+//
+//
+
+#ifndef __system_cmds__PrintBuffer__
+#define __system_cmds__PrintBuffer__
+
+//
+// Okay, here is how snprintf works.
+//
+// char buf[2];
+//
+// snprintf(buf, 0, "a"); // Returns 1, buf is unchanged.
+// snprintf(buf, 1, "a"); // Returns 1, buf = \0
+// snprintf(buf, 2, "a"); // Returns 1, buf = 'a', \0
+//
+// So... For a buffer of size N, a print fits if and only if it
+// consumes at most N-1 bytes, i.e. the return value is < N.
+//
+
+class PrintBuffer {
+ protected:
+ char* _buffer;
+ size_t _buffer_size;
+ size_t _buffer_capacity;
+ size_t _flush_boundary;
+ int _flush_fd;
+
+ public:
+ PrintBuffer(size_t capacity, size_t flush_boundary, int flush_fd) :
+ _buffer((char*)malloc(capacity)),
+ _buffer_size(0),
+ _buffer_capacity(capacity),
+ _flush_boundary(flush_boundary),
+ _flush_fd(flush_fd)
+ {
+ ASSERT(capacity > 0, "Sanity");
+ ASSERT(_buffer, "Sanity");
+ ASSERT(flush_boundary < capacity, "Sanity");
+ ASSERT(flush_fd != 0, "Must be a valid fd");
+ }
+
+ ~PrintBuffer() {
+ flush();
+ free(_buffer);
+ }
+
+ void set_capacity(size_t capacity) {
+ ASSERT(_buffer_size == 0, "Attempt to reallocate buffer while it still contains data");
+
+ if (_buffer) {
+ free(_buffer);
+ }
+
+ _buffer = (char*)malloc(capacity);
+ _buffer_size = 0;
+ _buffer_capacity = capacity;
+ }
+
+ void flush() {
+ if (_buffer_size) {
+ write(_flush_fd, _buffer, _buffer_size);
+ _buffer_size = 0;
+ }
+ }
+
+ void printf(const char* format, ...) __attribute__((format(printf, 2, 3))) {
+ repeat:
+ size_t remaining_bytes = _buffer_capacity - _buffer_size;
+
+ va_list list;
+ va_start(list, format);
+ int bytes_needed = vsnprintf(&_buffer[_buffer_size], remaining_bytes, format, list);
+ va_end(list);
+
+ // There are three levels of "end" detection.
+ //
+ // 1) If bytes_needed is >= capacity, we must flush, grow capacity, and repeat.
+ // 2) If bytes_needed is >= remaining_bytes, we must flush, and repeat.
+ // 3) If bytes_needed + _buffer_size comes within _flush_boundary bytes of the end, flush.
+ //
+ // NOTE snprintf behavior, we need bytes_needed+1 bytes
+ // to actually fully output all string characters.
+ //
+ // NOTE for any repeat condition, we do not commit the bytes that were written to the buffer.
+ //
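+ //
+ // Worked example (a sketch): capacity 16 with _buffer_size 10 leaves
+ // remaining_bytes == 6. A print needing 12 bytes returns 12 from
+ // vsnprintf, so Condition 2 flushes (12 >= 6) and repeats with all
+ // 16 bytes free; 12 < 16 lets the second pass commit. A print
+ // needing 20 bytes would also trigger Condition 1 and grow the
+ // buffer to 21 bytes.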
+
+ // vsnprintf returns a negative value on output errors; bail out
+ // rather than letting it convert to a huge unsigned value in the
+ // comparisons below.
+ if (bytes_needed < 0)
+ return;
+
+ // Condition 2
+ if ((size_t)bytes_needed >= remaining_bytes) {
+ flush();
+
+ // Save an if-test on the common path by checking this only inside Condition 2
+ //
+ // Condition 1
+ if ((size_t)bytes_needed >= _buffer_capacity) {
+ set_capacity(bytes_needed+1);
+ }
+
+ goto repeat;
+ }
+
+ // Commit the snprintf
+ _buffer_size += bytes_needed;
+
+ // Condition 3
+ if (remaining_bytes - bytes_needed <= _flush_boundary) {
+ flush();
+ }
+ }
+};
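+
+// A usage sketch. Writes accumulate in memory and are flushed to the fd
+// when the buffer fills, crosses the flush boundary, or is destroyed:
+//
+// PrintBuffer buffer(4096, 512, STDOUT_FILENO);
+// buffer.printf("%16.2f ", 14.11);
+// buffer.printf("%s\n", "mach_msg_send");
+// // falling out of scope flushes the remaining bytes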
+
+#endif /* defined(__system_cmds__PrintBuffer__) */
--- /dev/null
+//
+// MessagePrinting.cpp
+// msa
+//
+// Created by James McIlree on 2/5/14.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+const char* qos_to_string(uint32_t qos) {
+ static_assert(THREAD_QOS_LAST == 7, "QOS tiers need updating");
+
+ switch (qos) {
+ case THREAD_QOS_UNSPECIFIED:
+ return "unspecified";
+
+ case THREAD_QOS_MAINTENANCE:
+ return "maintenance";
+
+ case THREAD_QOS_BACKGROUND:
+ return "background";
+
+ case THREAD_QOS_UTILITY:
+ return "utility";
+
+ case THREAD_QOS_LEGACY:
+ return "legacy";
+
+ case THREAD_QOS_USER_INITIATED:
+ return "user-initiated";
+
+ case THREAD_QOS_USER_INTERACTIVE:
+ return "user-interactive";
+
+ default:
+ ASSERT(false, "Unhandled QoS");
+ return "QOS_???";
+ }
+}
+
+const char* qos_to_short_string(uint32_t qos) {
+ static_assert(THREAD_QOS_LAST == 7, "QOS tiers need updating");
+
+ switch (qos) {
+ case THREAD_QOS_UNSPECIFIED:
+ return "Unspec";
+
+ case THREAD_QOS_MAINTENANCE:
+ return "Maint";
+
+ case THREAD_QOS_BACKGROUND:
+ return "BG";
+
+ case THREAD_QOS_UTILITY:
+ return "Util";
+
+ case THREAD_QOS_LEGACY:
+ return "Legacy";
+
+ case THREAD_QOS_USER_INITIATED:
+ return "UInit";
+
+ case THREAD_QOS_USER_INTERACTIVE:
+ return "UI";
+
+ default:
+ ASSERT(false, "Unhandled QoS");
+ return "???";
+ }
+}
+
+const char* role_to_short_string(uint32_t role) {
+ switch (role) {
+ // This is seen when apps are terminating
+ case TASK_UNSPECIFIED:
+ return "unspec";
+
+ case TASK_FOREGROUND_APPLICATION:
+ return "fg";
+
+ case TASK_BACKGROUND_APPLICATION:
+ return "bg";
+
+ case TASK_CONTROL_APPLICATION:
+ case TASK_GRAPHICS_SERVER:
+ case TASK_THROTTLE_APPLICATION:
+ case TASK_NONUI_APPLICATION:
+ ASSERT(false, "These should be obsolete");
+ return "obsolete";
+
+ case TASK_DEFAULT_APPLICATION:
+ // Is this obsolete too?
+ return "defapp";
+
+ default:
+ ASSERT(false, "Unexpected app role");
+ return "???";
+ }
+}
+
+const char* role_to_string(uint32_t role) {
+ switch (role) {
+ // This is seen when apps are terminating
+ case TASK_UNSPECIFIED:
+ return "unspecified";
+
+ case TASK_FOREGROUND_APPLICATION:
+ return "foreground";
+
+ case TASK_BACKGROUND_APPLICATION:
+ return "background";
+
+ case TASK_CONTROL_APPLICATION:
+ return "control-application";
+
+ case TASK_GRAPHICS_SERVER:
+ return "graphics-server";
+
+ case TASK_THROTTLE_APPLICATION:
+ return "throttle-app";
+
+ case TASK_NONUI_APPLICATION:
+ return "nonui-app";
+
+ case TASK_DEFAULT_APPLICATION:
+ // Is this obsolete too?
+ return "default-app";
+
+ default:
+ ASSERT(false, "Unexpected app role");
+ return "???";
+ }
+}
+
+void print_base_empty(PrintBuffer& buffer,
+ const Globals& globals,
+ uintptr_t event_index,
+ const char* type,
+ bool should_newline)
+{
+ // Base Header is... (32)
+ //
+ // Time(µS) Type Thread ThreadVoucher AppType Process ;;
+ // 123456789abcdef0 1234567890123456789012 1234567890 123456789abcdef0 12345678901234567 123456789012345678901234 12
+ // 14.11 mach_msg_send 18FB voucher-133 AdaptiveDaemon TextEdit (231) ;;
+ // 18.11 mach_msg_recv 18FB 0 InteractiveDaemon configd (19981) ;;
+
+ // Base Header is... (64)
+ //
+ // Time(µS) Type Thread ThreadVoucher AppType Process ;;
+ // 123456789abcdef0 1234567890123456789012 1234567890 123456789abcdef0 12345678901234567 123456789012345678901234 12
+ // 14.11 mach_msg_send 18FB voucher-133 AdaptiveDaemon TextEdit (231) ;;
+ // 18.11 mach_msg_recv 18FB 0 InteractiveDaemon configd (19981) ;;
+
+ //
+ // [Index]
+ //
+ if (globals.should_print_event_index()) {
+ buffer.printf("%8llu ", (uint64_t)event_index);
+ }
+
+ //
+ // Time Type Code Thread ThreadVoucher AppType Process
+ //
+ // This assert doesn't handle utf8...
+ ASSERT(strlen(type) <= 22, "Sanity");
+
+ buffer.printf("%16s %3s %22s %10s %16s %17s %24s ;;", "-", "-", type, "-", "-", "-", "- (-)");
+
+ //
+ // Process
+ //
+ if (should_newline)
+ buffer.printf("\n");
+ else
+ buffer.printf(" ");
+}
+
+static char* print_base_header(char* buf, char* buf_end, const Globals& globals) {
+ // Base Header is... (32)
+ //
+ // Time(µS) Type Thread ThrVoucher Process ;;
+ // 123456789abcdef0 1234567890123456789012 1234567890 123456789a 123456789012345678901234 12
+ // 14.11 mach_msg_send 18FB FFFF8E44 TextEdit (231) ;;
+ // 18.11 mach_msg_recv 18FB 0 configd (19981) ;;
+
+ // Base Header is... (64)
+ //
+ // Time(µS) Type Thread ThreadVoucher Process ;;
+ // 123456789abcdef0 1234567890123456789012 1234567890 123456789abcdef0 123456789012345678901234 12
+ // 14.11 mach_msg_send 18FB BBBBAAEE55778234 TextEdit (231) ;;
+ // 18.11 mach_msg_recv 18FB 0 configd (19981) ;;
+
+ //
+ // If we cannot print successfully, we return the original pointer.
+ //
+ char* orig_buf = buf;
+
+ if (globals.should_print_event_index())
+ buf += snprintf(buf, buf_end - buf,"%8s ", "Event#");
+
+ if (buf >= buf_end)
+ return orig_buf;
+
+ // The character counting for "Time(µS)" is off by one: snprintf counts the
+ // µ as two characters (it is two bytes in UTF-8), so a plain %16s misaligns.
+ // We compensate by making the input string 16 printable chars long, which
+ // overflows the %16s to the correct visual width.
+ const char* time = globals.should_print_mach_absolute_timestamps() ? "Time(mach-abs)" : " Time(µS)";
+
+ if (globals.kernel_size() == KernelSize::k32)
+ buf += snprintf(buf, buf_end - buf, "%s %22s %10s %10s %24s ;; ", time, "Type", "Thread", "ThrVoucher", "Process");
+ else
+ buf += snprintf(buf, buf_end - buf, "%s %22s %10s %16s %24s ;; ", time, "Type", "Thread", "ThreadVoucher", "Process");
+
+ return (buf >= buf_end) ? orig_buf : buf;
+}
+
+char* print_mach_msg_header(char* buf, char* buf_end, const Globals& globals) {
+
+ // MachMsg Header is... (32)
+ //
+ // ;; Message From/To MsgID MsgVoucher DeliveryTime FLAGS
+ // 12 123456789012345678901234567 123456789 123456789a 1234567890123 ...
+ // ;; -> configd (19981) 55 - - ONEWAY, IMP-DONATING
+ // ;; <- TextEdit (231) 55 FFFF8E44 120080 VOUCHER-PROVIDED-BY-KERNEL, VOUCHER-REFUSED
+
+ // MachMsg Header is... (64)
+ //
+ // ;; Message From/To MsgID MsgVoucher DeliveryTime FLAGS
+ // 12 123456789012345678901234567 123456789 123456789abcdef0 1234567890123 ...
+ // ;; -> configd (19981) 55 - - ONEWAY, IMP-DONATING
+ // ;; <- TextEdit (231) 55 FFFFAAEE55778234 120080 VOUCHER-PROVIDED-BY-KERNEL, VOUCHER-REFUSED
+
+ char* orig_buf = buf;
+
+ //
+ // Base Header
+ //
+ buf = print_base_header(buf, buf_end, globals);
+
+ if (buf == orig_buf)
+ return orig_buf;
+
+ //
+ // Mach Msg Header
+ //
+ if (globals.kernel_size() == KernelSize::k32)
+ buf += snprintf(buf, buf_end - buf, "%-27s %9s %10s %13s %s\n", "Message-From/To", "MsgID", "MsgVoucher", "DeliveryTime", "FLAGS");
+ else
+ buf += snprintf(buf, buf_end - buf, "%-27s %9s %16s %13s %s\n", "Message-From/To", "MsgID", "MsgVoucher", "DeliveryTime", "FLAGS");
+
+ return (buf >= buf_end) ? orig_buf : buf;
+}
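+
+// Callers detect an over-full buffer by pointer equality with their original
+// cursor (a sketch; LiveTraceAction.cpp calls this with a PATH_MAX buffer):
+//
+// char buf[PATH_MAX];
+// char* end = print_mach_msg_header(buf, buf + sizeof(buf), globals);
+// if (end == buf) {
+// // nothing was printed; the buffer was too small
+// }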
--- /dev/null
+//
+// MessagePrinting.h
+// msa
+//
+// Created by James McIlree on 2/5/14.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef __msa__MessagePrinting__
+#define __msa__MessagePrinting__
+
+char* print_mach_msg_header(char*, char*, const Globals&);
+char* print_thread_set_voucher_header(char* buf, char* buf_end, const Globals& globals);
+const char* qos_to_string(uint32_t qos);
+const char* qos_to_short_string(uint32_t qos);
+const char* role_to_string(uint32_t role);
+const char* role_to_short_string(uint32_t role);
+void print_base_empty(PrintBuffer& buffer, const Globals& globals, uintptr_t event_index, const char* type, bool should_newline);
+
+template <typename SIZE>
+void print_base(PrintBuffer& buffer,
+ const Globals& globals,
+ AbsTime timestamp,
+ const MachineThread<SIZE>* thread,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ const char* type,
+ bool should_newline)
+{
+ // Base Header is... (32)
+ //
+ // Time(µS) Type Thread ThreadVoucher AppType Process ;;
+ // 123456789abcdef0 1234567890123456789012 1234567890 123456789abcdef0 12345678901234567 123456789012345678901234 12
+ // 14.11 mach_msg_send 18FB voucher-133 AdaptiveDaemon TextEdit (231) ;;
+ // 18.11 mach_msg_recv 18FB 0 InteractiveDaemon configd (19981) ;;
+
+ // Base Header is... (64)
+ //
+ // Time(µS) Type Thread ThreadVoucher AppType Process ;;
+ // 123456789abcdef0 1234567890123456789012 1234567890 123456789abcdef0 12345678901234567 123456789012345678901234 12
+ // 14.11 mach_msg_send 18FB voucher-133 AdaptiveDaemon TextEdit (231) ;;
+ // 18.11 mach_msg_recv 18FB 0 InteractiveDaemon configd (19981) ;;
+
+ //
+ // [Index]
+ //
+ if (globals.should_print_event_index()) {
+ buffer.printf("%8llu ", (uint64_t)event_index);
+ }
+
+ //
+ // Time
+ //
+ if (globals.should_print_mach_absolute_timestamps()) {
+ if (globals.beginning_of_time().value() == 0)
+ buffer.printf("%16llX ", (timestamp - globals.beginning_of_time()).value());
+ else
+ buffer.printf("%16llu ", (timestamp - globals.beginning_of_time()).value());
+ } else {
+ NanoTime ntime = (timestamp - globals.beginning_of_time()).nano_time(globals.timebase());
+ buffer.printf("%16.2f ", (double)ntime.value() / 1000.0);
+ }
+
+ //
+ // beg/end/---
+ //
+ buffer.printf("%3s ", event.is_func_start() ? "beg" : (event.is_func_end() ? "end" : "---"));
+
+ //
+ // Type Code, Thread
+ //
+
+ // This assert doesn't handle utf8...
+ ASSERT(strlen(type) <= 22, "Sanity");
+ // The old 32/64-bit split printed identically in both branches; use
+ // event.tid() so a NULL thread (guarded below) is never dereferenced.
+ buffer.printf("%22s %10llX ", type, (uint64_t)event.tid());
+
+ //
+ // ThreadVoucher
+ //
+ auto thread_voucher = (thread) ? thread->voucher(timestamp) : &Machine<SIZE>::UnsetVoucher;
+
+ if (thread_voucher->is_unset()) {
+ buffer.printf("%16s ", "-");
+ } else if (thread_voucher->is_null()) {
+ buffer.printf("%16s ", "0");
+ } else {
+ char voucher_id[32];
+ snprintf(voucher_id, sizeof(voucher_id), "voucher-%u", thread_voucher->id());
+ buffer.printf("%16s ", voucher_id);
+ }
+
+ //
+ // AppType
+ //
+ const char* apptype_string = nullptr;
+ switch (thread ? thread->process().apptype() : -1) {
+ case -1:
+ apptype_string = "-";
+ break;
+ case TASK_APPTYPE_NONE:
+ apptype_string = "None";
+ break;
+ case TASK_APPTYPE_DAEMON_INTERACTIVE:
+ apptype_string = "InteractiveDaemon";
+ break;
+ case TASK_APPTYPE_DAEMON_STANDARD:
+ apptype_string = "StandardDaemon";
+ break;
+ case TASK_APPTYPE_DAEMON_ADAPTIVE:
+ apptype_string = "AdaptiveDaemon";
+ break;
+ case TASK_APPTYPE_DAEMON_BACKGROUND:
+ apptype_string = "BackgroundDaemon";
+ break;
+ case TASK_APPTYPE_APP_DEFAULT:
+ apptype_string = "App";
+ break;
+ case TASK_APPTYPE_APP_TAL:
+ apptype_string = "TALApp";
+ break;
+ default:
+ apptype_string = "???";
+ break;
+ }
+ buffer.printf("%17s ", apptype_string);
+
+ //
+ // Process
+ //
+ char process_name[32];
+
+ // Should not ever fail, but...
+ if (thread) {
+ const MachineProcess<SIZE>& process = thread->process();
+ snprintf(process_name, sizeof(process_name), "%s (%d)", process.name(), process.pid());
+ } else {
+ snprintf(process_name, sizeof(process_name), "???");
+ }
+
+ if (should_newline)
+ buffer.printf("%24s ;;\n", process_name);
+ else
+ buffer.printf("%24s ;; ", process_name);
+}
+
+template <typename SIZE>
+void print_mach_msg(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ bool is_send,
+ const MachineMachMsg<SIZE>* mach_msg)
+{
+ // Mach Msg Header is... (32)
+ //
+ // ;; Message From/To MsgID MsgVoucher DeliveryTime FLAGS
+ // 12 123456789012345678901234567 123456789ab 123456789abcdef0 1234567890123 ...
+ // ;; -> configd (19981) 55 - - ONEWAY, IMP-DONATING
+ // ;; <- TextEdit (231) 55 voucher-133 120080 VOUCHER-PROVIDED-BY-KERNEL, VOUCHER-REFUSED
+
+ // Mach Msg Header is... (64)
+ //
+ // ;; Message From/To MsgID MsgVoucher DeliveryTime FLAGS
+ // 12 123456789012345678901234567 123456789ab 123456789abcdef0 1234567890123 ...
+ // ;; -> configd (19981) 55 - - ONEWAY, IMP-DONATING
+ // ;; <- TextEdit (231) 55 voucher-133 120080 VOUCHER-PROVIDED-BY-KERNEL, VOUCHER-REFUSED
+
+ print_base(buffer, globals, event.timestamp(), machine.thread(event.tid(), event.timestamp()), event, event_index, is_send ? "mach_msg_send" : "mach_msg_recv", false);
+
+ //
+ // Message From/To
+ //
+ {
+ char from_to_name[32];
+ const MachineThread<SIZE>* from_to_thread = NULL;
+ const char* from_to_direction;
+
+ if (is_send) {
+ from_to_direction = "->";
+ if (mach_msg->has_receiver())
+ from_to_thread = machine.thread(mach_msg->recv_tid(), mach_msg->recv_time());
+ } else {
+ from_to_direction = "<-";
+ if (mach_msg->has_sender())
+ from_to_thread = machine.thread(mach_msg->send_tid(), mach_msg->send_time());
+ }
+
+ if (from_to_thread) {
+ const MachineProcess<SIZE>& from_to_process = from_to_thread->process();
+ snprintf(from_to_name, sizeof(from_to_name), "%s %s (%d)", from_to_direction, from_to_process.name(), from_to_process.pid());
+ } else {
+ // (???) is a trigraph, break up by escaping one of the ?
+ snprintf(from_to_name, sizeof(from_to_name), "%s ??? (??\?)", from_to_direction);
+ }
+
+ buffer.printf("%-27s ", from_to_name);
+ }
+
+ //
+ // MsgID
+ //
+
+ char msg_id[32];
+ snprintf(msg_id, sizeof(msg_id), "msg-%u", mach_msg->id());
+ buffer.printf("%11s ", msg_id);
+
+ //
+ // MsgVoucher
+ //
+ // We want to differentiate between sending a NULL voucher and not having msgh_bits set.
+ // We will show a NULL voucher as 0, but if msgh_bits says no voucher was sent, we will show "-"
+ //
+
+ MachineVoucher<SIZE>* msg_voucher = (is_send) ? mach_msg->send_voucher() : mach_msg->recv_voucher();
+
+ if (msg_voucher->is_unset()) {
+ buffer.printf("%16s ", "-");
+ } else if (msg_voucher->is_null()) {
+ buffer.printf("%16s ", "0");
+ } else {
+ char voucher_id[32];
+ snprintf(voucher_id, sizeof(voucher_id), "voucher-%u", msg_voucher->id());
+ buffer.printf("%16s ", voucher_id);
+ }
+
+ //
+ // DeliveryTime
+ //
+
+ if (!is_send) {
+ if (mach_msg->has_sender()) {
+ NanoTime ntime = (mach_msg->recv_time() - mach_msg->send_time()).nano_time(globals.timebase());
+ buffer.printf("%13.2f ", (double)ntime.value() / 1000.0);
+ } else {
+ buffer.printf("%13s ", "?");
+ }
+ } else {
+ buffer.printf("%13s ", "-");
+ }
+
+ //
+ // FLAGS
+ //
+ const char* separator = "";
+
+ if (is_send) {
+ if (!MACH_MSGH_BITS_HAS_LOCAL(mach_msg->send_msgh_bits())) {
+ buffer.printf("%sONEWAY", separator);
+ separator = ", ";
+ }
+
+ if (MACH_MSGH_BITS_RAISED_IMPORTANCE(mach_msg->send_msgh_bits())) {
+ buffer.printf("%sMSGH_BITS_RAISED_IMPORTANCE", separator);
+ separator = ", ";
+ }
+
+ if (MACH_MSGH_BITS_HOLDS_IMPORTANCE_ASSERTION(mach_msg->send_msgh_bits())) {
+ buffer.printf("%sMSGH_BITS_HOLDS_IMPORTANCE_ASSERTION", separator);
+ separator = ", ";
+ }
+ } else {
+ if (mach_msg->is_voucher_refused()) {
+ // FIX ME!
+ // Need to test this... Can we tell if a voucher was refused without the
+ // send voucher?
+ //
+ if (mach_msg->has_non_null_send_voucher() || mach_msg->has_non_null_recv_voucher()) {
+ buffer.printf("%sVOUCHER-REFUSED", separator);
+ }
+
+ separator = ", ";
+ }
+ if (MACH_MSGH_BITS_RAISED_IMPORTANCE(mach_msg->recv_msgh_bits())) {
+ buffer.printf("%sMSGH_BITS_RAISED_IMPORTANCE", separator);
+ separator = ", ";
+ }
+
+ if (MACH_MSGH_BITS_HOLDS_IMPORTANCE_ASSERTION(mach_msg->recv_msgh_bits())) {
+ buffer.printf("%sMSGH_BITS_HOLDS_IMPORTANCE_ASSERTION", separator);
+ separator = ", ";
+ }
+ }
+
+ //
+ // MsgVoucher transformation
+ //
+ {
+ char transformed_voucher[32];
+
+ if (mach_msg->has_sender() && mach_msg->has_receiver()) {
+ auto send_voucher = mach_msg->send_voucher();
+ auto recv_voucher = mach_msg->recv_voucher();
+
+ if (send_voucher != recv_voucher) {
+ auto changed_voucher = (is_send) ? recv_voucher : send_voucher;
+ auto changed_tense = (is_send) ? "becomes" : "was";
+
+ if (changed_voucher->is_unset()) {
+ snprintf(transformed_voucher, sizeof(transformed_voucher), "(%s -)", changed_tense);
+ } else if (changed_voucher->is_null()) {
+ snprintf(transformed_voucher, sizeof(transformed_voucher), "(%s 0)", changed_tense);
+ } else {
+ snprintf(transformed_voucher, sizeof(transformed_voucher), "(%s voucher-%u)", changed_tense, changed_voucher->id());
+ }
+
+ buffer.printf("%sVOUCHER_CHANGED %s", separator, transformed_voucher);
+ }
+ }
+ }
+
+ buffer.printf("\n");
+}
+
+template <typename SIZE>
+void print_boost(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ pid_t boost_receiver_pid,
+ bool is_boost)
+{
+
+ // Base Header is... (32)
+ //
+ // ;;
+ // 12
+ // ;; BOOST foobard (338)
+
+ // Base Header is... (64)
+ //
+ // ;;
+ // 12
+ // ;; BOOST foobard (338)
+
+
+ print_base(buffer, globals, event.timestamp(), machine.thread(event.tid(), event.timestamp()), event, event_index, is_boost ? "boost" : "unboost", false);
+
+ //
+ // Boost target
+ //
+
+ const MachineProcess<SIZE>* target = machine.process(boost_receiver_pid, event.timestamp());
+ const char* target_name;
+
+ if (target) {
+ target_name = target->name();
+ } else {
+ target_name = "???";
+ }
+
+ const char* action = is_boost ? "BOOST" : "UNBOOST";
+
+ buffer.printf("%s %s (%d)\n", action, target_name, boost_receiver_pid);
+}
+
+template <typename SIZE>
+void print_impdelv(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ pid_t sender_pid,
+ uint32_t importance_delivery_result)
+{
+ print_base(buffer, globals, event.timestamp(), machine.thread(event.tid(), event.timestamp()), event, event_index, "importance_delivered", false);
+
+ //
+ // Importance sender
+ //
+ const char* sender_name = "???";
+ if (const MachineProcess<SIZE>* sender = machine.process(sender_pid, event.timestamp())) {
+ sender_name = sender->name();
+ }
+
+ // 0: BOOST NOT APPLIED
+ // 1: BOOST EXTERNALIZED
+ // 2: LIVE_IMPORTANCE_LINKAGE!
+
+ switch (importance_delivery_result) {
+ case 0:
+ buffer.printf("importance from %s (%d) was not applied\n", sender_name, sender_pid);
+ break;
+ case 1:
+ buffer.printf("importance from %s (%d) was externalized\n", sender_name, sender_pid);
+ break;
+ case 2:
+ buffer.printf("linked to %s (%d)'s live importance chain\n", sender_name, sender_pid);
+ break;
+
+ default:
+ ASSERT(false, "Unknown importance delivery result value");
+ buffer.printf("Unknown importance delivery result value\n");
+ break;
+ }
+}
+
+template <typename SIZE>
+void print_generic(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ const char* type)
+{
+ print_base(buffer, globals, event.timestamp(), machine.thread(event.tid(), event.timestamp()), event, event_index, type, true);
+}
+
+template <typename SIZE>
+void print_importance_assert(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ const char* type,
+ std::unordered_map<pid_t, std::pair<uint32_t, uint32_t>>& task_importance)
+{
+ // All callers must have the following trace event data:
+ //
+ // ignored, target_pid, internal_count, external_count
+
+ // First check if anything changed
+ pid_t target_pid = (pid_t)event.arg2();
+ if (target_pid < 1)
+ return;
+
+ bool must_print = false;
+ auto it = task_importance.find(target_pid);
+ if (it == task_importance.end()) {
+ it = task_importance.emplace(target_pid, std::pair<uint32_t, uint32_t>(0, 0)).first;
+ // The very first time we see data for an app, we always want to print it.
+ must_print = true;
+ }
+
+ auto old_importance = it->second;
+ auto new_importance = std::pair<uint32_t, uint32_t>((uint32_t)event.arg3(), (uint32_t)event.arg4());
+ if (must_print || old_importance != new_importance) {
+ const MachineThread<SIZE>* event_thread = machine.thread(event.tid(), event.timestamp());
+ print_base(buffer, globals, event.timestamp(), event_thread, event, event_index, type, false);
+
+ const MachineProcess<SIZE>* target = machine.process(target_pid, event.timestamp());
+ const char* target_name;
+
+ if (target) {
+ target_name = target->name();
+ } else {
+ target_name = "???";
+ }
+
+ int internal_delta = new_importance.first - old_importance.first;
+ int external_delta = new_importance.second - old_importance.second;
+
+ char internal_sign = internal_delta >= 0 ? '+' : '-';
+ char external_sign = external_delta >= 0 ? '+' : '-';
+
+ char internal_changed_buf[32];
+ char external_changed_buf[32];
+
+ if (internal_delta != 0) {
+ snprintf(internal_changed_buf, sizeof(internal_changed_buf), " (%c%u)", internal_sign, abs(internal_delta));
+ } else {
+ internal_changed_buf[0] = 0;
+ }
+
+ if (external_delta != 0) {
+ snprintf(external_changed_buf, sizeof(external_changed_buf), " (%c%u)", external_sign, abs(external_delta));
+ } else {
+ external_changed_buf[0] = 0;
+ }
+
+ buffer.printf("%s (%d) internal: %u%s external: %u%s\n",
+ target_name, target_pid,
+ new_importance.first, internal_changed_buf,
+ new_importance.second, external_changed_buf);
+
+ it->second = new_importance;
+ }
+}
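+
+// Sketch of the tail this emits after the print_base columns, with
+// hypothetical values:
+//
+// TextEdit (231) internal: 3 (+1) external: 2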
+
+template <typename SIZE>
+void print_watchport_importance_transfer(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index)
+{
+ // event data is
+ //
+ // proc_selfpid(), pid, boost, released_pid, 0);
+
+ // Did any importance transfer?
+ if (event.arg3() == 0)
+ return;
+
+ // Do we have a valid pid?
+ pid_t dest_pid = (pid_t)event.arg2();
+ if (dest_pid < 1)
+ return;
+
+ const MachineThread<SIZE>* event_thread = machine.thread(event.tid(), event.timestamp());
+ print_base(buffer, globals, event.timestamp(), event_thread, event, event_index, "importance_watchport", false);
+
+ const char* dest_name;
+ if (const MachineProcess<SIZE>* dest = machine.process(dest_pid, event.timestamp())) {
+ dest_name = dest->name();
+ } else {
+ dest_name = "???";
+ }
+
+ buffer.printf("%s (%d) receives %d importance via watchport\n",
+ dest_name, dest_pid, (int)event.arg3());
+}
+
+template <typename SIZE>
+void print_importance_send_failed(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index)
+{
+ print_base(buffer, globals, event.timestamp(), machine.thread(event.tid(), event.timestamp()), event, event_index, "impsend", false);
+
+ //
+ // Currently, the IMP_MSG_SEND trace data is not accurate.
+ //
+
+ buffer.printf("Backed out importance (may be resent) - TIMED_OUT, NO_BUFFER, or SEND_INTERRUPTED\n");
+}
+
+#if 0
+
+template <typename SIZE>
+void print_trequested_task(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ pid_t pid,
+ struct task_requested_policy new_task_requested,
+ struct task_requested_policy original_task_requested)
+{
+ // Many of these events would print nothing; make sure there is something to print first.
+
+ char description[512];
+ char* cursor = description;
+ char* cursor_end = cursor + sizeof(description);
+ uint32_t description_count = 0;
+
+ if (new_task_requested.t_role != original_task_requested.t_role) {
+ const char* role = "???";
+ switch (new_task_requested.t_role) {
+ // This is seen when apps are terminating
+ case TASK_UNSPECIFIED:
+ role = "unspecified";
+ break;
+
+ case TASK_FOREGROUND_APPLICATION:
+ role = "foreground";
+ break;
+
+ case TASK_BACKGROUND_APPLICATION:
+ role = "background";
+ break;
+
+ case TASK_CONTROL_APPLICATION:
+ role = "control-application";
+ break;
+
+ case TASK_GRAPHICS_SERVER:
+ role = "graphics-server";
+ break;
+
+ case TASK_THROTTLE_APPLICATION:
+ role = "throttle-application";
+ break;
+
+ case TASK_NONUI_APPLICATION:
+ role = "nonui-application";
+ break;
+
+ case TASK_DEFAULT_APPLICATION:
+ role = "default-application";
+ break;
+
+ default:
+ ASSERT(false, "Unexpected app role");
+ break;
+ }
+ cursor += snprintf(cursor, cursor_end - cursor, "%sROLE:%s", description_count++ == 0 ? "" : ", ", role);
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_task_requested.int_darwinbg != original_task_requested.int_darwinbg) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%s%sINT_DARWINBG", description_count++ == 0 ? "" : ", ", new_task_requested.int_darwinbg ? "" : "!");
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_task_requested.ext_darwinbg != original_task_requested.ext_darwinbg) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%s%sEXT_DARWINBG", description_count++ == 0 ? "" : ", ", new_task_requested.ext_darwinbg ? "" : "!");
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_task_requested.t_int_gpu_deny != original_task_requested.t_int_gpu_deny) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sINT_GPU_DENY", description_count++ == 0 ? "" : ", ");
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_task_requested.t_ext_gpu_deny != original_task_requested.t_ext_gpu_deny) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sEXT_GPU_DENY", description_count++ == 0 ? "" : ", ");
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_task_requested.t_tal_enabled != original_task_requested.t_tal_enabled) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sTAL_ENABLED", description_count++ == 0 ? "" : ", ");
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_task_requested.t_sfi_managed != original_task_requested.t_sfi_managed) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sSFI_MANAGED", description_count++ == 0 ? "" : ", ");
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_task_requested.t_sup_active != original_task_requested.t_sup_active) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sAPPNAP", description_count++ == 0 ? "" : ", ");
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_task_requested.t_base_latency_qos != original_task_requested.t_base_latency_qos) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sBASE_LATENCY_QOS:%s", description_count++ == 0 ? "" : ", ", qos_to_string(new_task_requested.t_base_latency_qos));
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_task_requested.t_over_latency_qos != original_task_requested.t_over_latency_qos) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sOVERRIDE_LATENCY_QOS:%s", description_count++ == 0 ? "" : ", ", qos_to_string(new_task_requested.t_over_latency_qos));
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_task_requested.t_base_through_qos != original_task_requested.t_base_through_qos) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sBASE_THROUGHPUT_QOS:%s", description_count++ == 0 ? "" : ", ", qos_to_string(new_task_requested.t_base_through_qos));
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_task_requested.t_over_through_qos != original_task_requested.t_over_through_qos) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sOVERRIDE_THROUGHPUT_QOS:%s", description_count++ == 0 ? "" : ", ", qos_to_string(new_task_requested.t_over_through_qos));
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_task_requested.t_qos_clamp != original_task_requested.t_qos_clamp) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sQOS_CLAMP:%s", description_count++ == 0 ? "" : ", ", qos_to_string(new_task_requested.t_qos_clamp));
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (description_count) {
+ print_base(buffer, globals, event.timestamp(), machine.thread(event.tid(), event.timestamp()), event, event_index, "task_trequested", false);
+
+ ASSERT(pid != -1, "Sanity");
+
+ const char* target_name;
+ if (const MachineProcess<SIZE>* target = machine.process(pid, event.timestamp())) {
+ target_name = target->name();
+ } else {
+ target_name = "???";
+ }
+
+ buffer.printf("%s (%d) requests %s\n", target_name, pid, description);
+ }
+}
+
+struct task_requested_policy {
+ /* Task and thread policy (inherited) */
+ uint64_t int_darwinbg :1, /* marked as darwinbg via setpriority */
+ ext_darwinbg :1,
+ int_iotier :2, /* IO throttle tier */
+ ext_iotier :2,
+ int_iopassive :1, /* should IOs cause lower tiers to be throttled */
+ ext_iopassive :1,
+ bg_iotier :2, /* what IO throttle tier should apply to me when I'm darwinbg? (pushed to threads) */
+ terminated :1, /* all throttles should be removed for quick exit or SIGTERM handling */
+
+ /* Thread only policy */
+ th_pidbind_bg :1, /* thread only: task i'm bound to is marked 'watchbg' */
+ th_workq_bg :1, /* thread only: currently running a background priority workqueue */
+ thrp_qos :3, /* thread only: thread qos class */
+ thrp_qos_relprio :4, /* thread only: thread qos relative priority (store as inverse, -10 -> 0xA) */
+ thrp_qos_override :3, /* thread only: thread qos class override */
+
+ /* Task only policy */
+ t_apptype :3, /* What apptype did launchd tell us this was (inherited) */
+ t_boosted :1, /* Has a non-zero importance assertion count */
+ t_int_gpu_deny :1, /* don't allow access to GPU */
+ t_ext_gpu_deny :1,
+ t_role :3, /* task's system role */
+ t_tal_enabled :1, /* TAL mode is enabled */
+ t_base_latency_qos :3, /* Timer latency QoS */
+ t_over_latency_qos :3, /* Timer latency QoS override */
+ t_base_through_qos :3, /* Computation throughput QoS */
+ t_over_through_qos :3, /* Computation throughput QoS override */
+ t_sfi_managed :1, /* SFI Managed task */
+ t_qos_clamp :3, /* task qos clamp */
+
+ /* Task only: suppression policies (non-embedded only) */
+ t_sup_active :1, /* Suppression is on */
+ t_sup_lowpri_cpu :1, /* Wants low priority CPU (MAXPRI_THROTTLE) */
+ t_sup_timer :3, /* Wanted timer throttling QoS tier */
+ t_sup_disk :1, /* Wants disk throttling */
+ t_sup_cpu_limit :1, /* Wants CPU limit (not hooked up yet)*/
+ t_sup_suspend :1, /* Wants to be suspended */
+ t_sup_throughput :3, /* Wants throughput QoS tier */
+ t_sup_cpu :1, /* Wants suppressed CPU priority (MAXPRI_SUPPRESSED) */
+ t_sup_bg_sockets :1, /* Wants background sockets */
+
+ reserved :2;
+};
+#endif
+
+template <typename SIZE>
+void print_appnap(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ bool is_appnap_active,
+ std::unordered_map<pid_t, bool>& task_appnap_state,
+ std::unordered_map<pid_t, TaskRequestedPolicy>& task_requested_state)
+{
+ //
+ // event args are:
+ //
+ // self_pid, audit_token_pid_from_task(task), trequested_0(task, NULL), trequested_1(task, NULL)
+ //
+ auto pid = (pid_t)event.arg2();
+ auto trequested_0 = event.arg3();
+ auto trequested_1 = event.arg4();
+ auto task_requested = (SIZE::is_64_bit) ? TaskRequestedPolicy(trequested_0) : TaskRequestedPolicy((Kernel32::ptr_t)trequested_0, (Kernel32::ptr_t)trequested_1);
+ auto should_print = false;
+
+ ASSERT(pid != -1, "Sanity");
+
+ // If the appnap state changed, we want to print this event.
+ auto appnap_it = task_appnap_state.find(pid);
+ if (appnap_it == task_appnap_state.end()) {
+ should_print = true;
+ task_appnap_state.emplace(pid, is_appnap_active);
+ } else {
+ if (appnap_it->second != is_appnap_active) {
+ should_print = true;
+ appnap_it->second = is_appnap_active;
+ }
+ }
+
+ // If the task_requested state changed, we want to print this event.
+ auto requested_it = task_requested_state.find(pid);
+ if (requested_it == task_requested_state.end()) {
+ should_print = true;
+ task_requested_state.emplace(pid, task_requested);
+ } else {
+ if (requested_it->second != task_requested) {
+ should_print = true;
+ requested_it->second = task_requested;
+ }
+ }
+
+ if (should_print) {
+ print_base(buffer, globals, event.timestamp(), machine.thread(event.tid(), event.timestamp()), event, event_index, "imp_suppression", false);
+
+ const char* name;
+ if (auto target = machine.process(pid, event.timestamp())) {
+ name = target->name();
+ } else {
+ name = "???";
+ }
+ buffer.printf("%s (%d) AppNap is %s\n", name, pid, is_appnap_active ? "ON" : "OFF");
+ print_trequested_task(buffer, globals, machine, event, event_index, pid, task_requested);
+ }
+}
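+
+// Sketch of the tail this emits after the print_base columns, with
+// hypothetical values:
+//
+// TextEdit (231) AppNap is ON
+//
+// ...followed by the two task_trequested lines from print_trequested_task().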
+
+template <typename SIZE>
+void print_trequested_task(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ pid_t pid,
+ TaskRequestedPolicy task_requested)
+{
+
+ ASSERT(pid != -1, "Sanity");
+ const char* target_name;
+ if (const MachineProcess<SIZE>* target = machine.process(pid, event.timestamp())) {
+ target_name = target->name();
+ } else {
+ target_name = "???";
+ }
+
+ struct task_requested_policy trp = task_requested.as_struct();
+
+ print_base_empty(buffer, globals, event_index, "task_trequested", false);
+ buffer.printf("%s (%d) requests%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ target_name, pid,
+ trp.int_darwinbg ? " IntDBG" : "",
+ trp.ext_darwinbg ? " ExtDBG" : "",
+ trp.int_iopassive ? " IntIOPass" : "",
+ trp.ext_iopassive ? " ExtIOPass" : "",
+ trp.terminated ? " Term" : "",
+ trp.t_boosted ? " Boost" : "",
+ trp.t_int_gpu_deny ? " IntDenyGPU" : "",
+ trp.t_ext_gpu_deny ? " ExtDenyGPU" : "",
+ trp.t_tal_enabled ? " TAL" : "",
+ trp.t_sfi_managed ? " SFI" : "",
+ // Below here is AppNap only...
+ trp.t_sup_active ? " AppNap" : "",
+ trp.t_sup_lowpri_cpu ? " SupLowPriCPU" : "",
+ trp.t_sup_disk ? " SupDisk" : "",
+ trp.t_sup_cpu_limit ? " SupCPULim" : "",
+ trp.t_sup_suspend ? " SupSusp" : "",
+ trp.t_sup_cpu ? " SupCPU" : "",
+ trp.t_sup_bg_sockets ? " SupBGSck" : "");
+
+ print_base_empty(buffer, globals, event_index, "task_trequested", false);
+ buffer.printf("%s (%d) requests QOS (SupTHR/SupTMR/LAT/OVERLAT/THR/OVERTHR/CLAMP) %s/%s/%s/%s/%s/%s/%s int_IOTier:%d ext_IOTier:%d bg_IOTier:%d\n",
+ target_name, pid,
+ qos_to_string(trp.t_sup_throughput),
+ qos_to_string(trp.t_sup_timer),
+ qos_to_string(trp.t_base_latency_qos),
+ qos_to_string(trp.t_over_latency_qos),
+ qos_to_string(trp.t_base_through_qos),
+ qos_to_string(trp.t_over_through_qos),
+ qos_to_string(trp.t_qos_clamp),
+ trp.int_iotier,
+ trp.ext_iotier,
+ trp.bg_iotier);
+}
+
+template <typename SIZE>
+void print_trequested_thread(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ const MachineThread<SIZE>* thread,
+ struct task_requested_policy new_thread_requested,
+ struct task_requested_policy original_thread_requested)
+{
+ ASSERT(thread, "Sanity");
+
+ // Many of these events would print nothing; make sure there is something to print first.
+
+ char description[512];
+ char* cursor = description;
+ char* cursor_end = cursor + sizeof(description);
+ uint32_t description_count = 0;
+
+ if (new_thread_requested.int_darwinbg != original_thread_requested.int_darwinbg) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sINT_DARWINBG", description_count++ == 0 ? "" : ", ");
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_thread_requested.ext_darwinbg != original_thread_requested.ext_darwinbg) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sEXT_DARWINBG", description_count++ == 0 ? "" : ", ");
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_thread_requested.th_pidbind_bg != original_thread_requested.th_pidbind_bg) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sPIDBIND_BG", description_count++ == 0 ? "" : ", ");
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_thread_requested.th_workq_bg != original_thread_requested.th_workq_bg) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sWORKQ_BG", description_count++ == 0 ? "" : ", ");
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_thread_requested.thrp_qos != original_thread_requested.thrp_qos) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sTHREAD_QOS:%s", description_count++ == 0 ? "" : ", ", qos_to_string(new_thread_requested.thrp_qos));
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_thread_requested.thrp_qos_relprio != original_thread_requested.thrp_qos_relprio) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sTHREAD_QOS_RELATIVE_PRIORITY:%d", description_count++ == 0 ? "" : ", ", -new_thread_requested.thrp_qos_relprio);
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (new_thread_requested.thrp_qos_override != original_thread_requested.thrp_qos_override) {
+ cursor += snprintf(cursor, cursor_end - cursor, "%sTHREAD_OVERRIDE_QOS:%s", description_count++ == 0 ? "" : ", ", qos_to_string(new_thread_requested.thrp_qos_override));
+ GUARANTEE(cursor < cursor_end);
+ }
+
+ if (description_count) {
+ print_base(buffer, globals, event.timestamp(), machine.thread(event.tid(), event.timestamp()), event, event_index, "thread_trequested", false);
+ ASSERT(thread->process().pid() != -1, "Sanity");
+ buffer.printf("%s (%d) %llX requests %s\n", thread->process().name(), thread->process().pid(), (uint64_t)thread->tid(), description);
+ }
+}
+
+template <typename SIZE>
+void print_teffective_task(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ pid_t pid,
+ TaskEffectivePolicy task_effective)
+{
+ ASSERT(pid != -1, "Sanity");
+ const char* target_name;
+ if (const MachineProcess<SIZE>* target = machine.process(pid, event.timestamp())) {
+ target_name = target->name();
+ } else {
+ target_name = "???";
+ }
+
+ struct task_effective_policy tep = task_effective.as_struct();
+
+ print_base_empty(buffer, globals, event_index, "task_teffective", false);
+ buffer.printf("%s (%d) is%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n",
+ target_name, pid,
+ tep.darwinbg ? " DarwinBG" : "",
+ tep.t_sup_active ? " AppNap" : "",
+ tep.lowpri_cpu ? " LowPri" : "",
+ tep.io_passive ? " IOPass" : "",
+ tep.all_sockets_bg ? " ASckBG" : "",
+ tep.new_sockets_bg ? " NSckBG" : "",
+ tep.terminated ? " Term" : "",
+ tep.qos_ui_is_urgent ? " QOSUiIsUrg" : "",
+ tep.t_gpu_deny ? " GPUDeny" : "",
+ tep.t_suspended ? " SupSusp" : "",
+ tep.t_watchers_bg ? " WchrsBG" : "",
+ tep.t_suppressed_cpu ? " SupCPU" : "",
+ tep.t_sfi_managed ? " SFI" : "",
+ tep.t_live_donor ? " LiveImpDnr" : "");
+
+ print_base_empty(buffer, globals, event_index, "task_teffective", false);
+ buffer.printf("%s (%d) is Role:%s LAT/THR/CLAMP/CEIL:%s/%s/%s/%s IOTier:%d BG_IOTier:%d\n",
+ target_name, pid,
+ role_to_string(tep.t_role),
+ qos_to_string(tep.t_latency_qos),
+ qos_to_string(tep.t_through_qos),
+ qos_to_string(tep.t_qos_clamp),
+ qos_to_string(tep.t_qos_ceiling),
+ tep.io_tier,
+ tep.bg_iotier);
+}
+
+template <typename SIZE>
+void print_teffective_thread(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ const MachineThread<SIZE>* thread,
+ TaskEffectivePolicy thread_effective)
+{
+ ASSERT(thread, "Sanity");
+
+ const char* target_name = thread->process().name();
+
+ struct task_effective_policy tep = thread_effective.as_struct();
+
+ print_base_empty(buffer, globals, event_index, "thread_teffective", false);
+ buffer.printf("%s (%d) %llX is%s%s%s%s%s%s%s%s\n",
+ target_name, thread->process().pid(), (uint64_t)thread->tid(),
+ tep.darwinbg ? " DarwinBG" : "",
+ tep.t_sup_active ? " AppNap" : "",
+ tep.lowpri_cpu ? " LowPri" : "",
+ tep.io_passive ? " IOPass" : "",
+ tep.all_sockets_bg ? " ASckBG" : "",
+ tep.new_sockets_bg ? " NSckBG" : "",
+ tep.terminated ? " Term" : "",
+ tep.qos_ui_is_urgent ? " QOSUiIsUrg" : "");
+
+ print_base_empty(buffer, globals, event_index, "thread_teffective", false);
+ buffer.printf("%s (%d) %llX is QOS:%s QOS_relprio:%d\n",
+ target_name, thread->process().pid(), (uint64_t)thread->tid(),
+ qos_to_string(tep.thep_qos),
+ tep.thep_qos_relprio);
+}
+
+template <typename SIZE>
+void print_importance_apptype(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index)
+{
+ print_base(buffer, globals, event.timestamp(), machine.thread(event.tid(), event.timestamp()), event, event_index, "set_apptype", false);
+
+ //
+ // trace args are:
+ //
+ // selfpid, targetpid, trequested(targetpid, NULL), is_importance_receiver
+
+ //
+ // QoS clamp
+ //
+ // Can only be determined on K64, the bits needed are trimmed off in
+ // K32 tracepoints.
+
+ char qos_clamp[32];
+ qos_clamp[0] = 0;
+ if (SIZE::is_64_bit) {
+ uint32_t qos_level = (event.arg3() & POLICY_REQ_QOS_CLAMP_MASK) >> POLICY_REQ_QOS_CLAMP_SHIFT;
+ if (qos_level != THREAD_QOS_UNSPECIFIED) {
+ snprintf(qos_clamp, sizeof(qos_clamp), ", clamped to %s", qos_to_string(qos_level));
+ }
+ }
+
+ pid_t target_pid = (pid_t)event.arg2();
+ const char* target_name;
+
+ if (target_pid != -1 ) {
+ if (const MachineProcess<SIZE>* target = machine.process(target_pid, event.timestamp())) {
+ target_name = target->name();
+ } else {
+ target_name = "???";
+ }
+ } else {
+ target_name = "NULL-Task";
+ }
+
+ const char* apptype = "???";
+ switch (event.dbg_code()) {
+ case TASK_APPTYPE_NONE:
+ apptype = "None";
+ break;
+ case TASK_APPTYPE_DAEMON_INTERACTIVE:
+ apptype = "InteractiveDaemon";
+ break;
+ case TASK_APPTYPE_DAEMON_STANDARD:
+ apptype = "StandardDaemon";
+ break;
+ case TASK_APPTYPE_DAEMON_ADAPTIVE:
+ apptype = "AdaptiveDaemon";
+ break;
+ case TASK_APPTYPE_DAEMON_BACKGROUND:
+ apptype = "BackgroundDaemon";
+ break;
+ case TASK_APPTYPE_APP_DEFAULT:
+ apptype = "App";
+ break;
+ case TASK_APPTYPE_APP_TAL:
+ apptype = "TALApp";
+ break;
+ default:
+ break;
+ }
+
+ const char* imp_recv = "";
+ if (event.arg4()) {
+ imp_recv = ", receives importance";
+ }
+ buffer.printf("Set %s (%d) to %s%s%s\n", target_name, target_pid, apptype, imp_recv, qos_clamp);
+}
+
+template <typename SIZE>
+void print_importance_update_task(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ const char* type,
+ std::unordered_map<pid_t, std::pair<TaskEffectivePolicy, uint32_t>>& task_effective_state)
+{
+ //
+ // event args are:
+ //
+ // targetpid, teffective_0(task, NULL), teffective_1(task, NULL), tpriority(task, THREAD_NULL)
+ //
+ auto pid = (pid_t)event.arg1();
+ auto teffective_0 = event.arg2();
+ auto teffective_1 = event.arg3();
+ auto priority = (uint32_t)event.arg4();
+ auto task_effective_policy = (SIZE::is_64_bit) ? TaskEffectivePolicy(teffective_0) : TaskEffectivePolicy((Kernel32::ptr_t)teffective_0, (Kernel32::ptr_t)teffective_1);
+ auto state = std::pair<TaskEffectivePolicy, uint32_t>(task_effective_policy, priority);
+ auto should_print = false;
+
+ ASSERT(pid != -1, "Sanity");
+
+ // Verify that some state changed before printing.
+ auto it = task_effective_state.find(pid);
+ if (it == task_effective_state.end()) {
+ should_print = true;
+ task_effective_state.emplace(pid, state);
+ } else {
+ if (it->second != state) {
+ should_print = true;
+ it->second = state;
+ }
+ }
+
+ if (should_print) {
+ print_base(buffer, globals, event.timestamp(), machine.thread(event.tid(), event.timestamp()), event, event_index, type, false);
+
+ const char* name;
+ if (auto target = machine.process(pid, event.timestamp())) {
+ name = target->name();
+ } else {
+ name = "???";
+ }
+
+ buffer.printf("%s (%d) base priority is %d\n", name, pid, priority);
+
+ print_teffective_task(buffer, globals, machine, event, event_index, pid, task_effective_policy);
+ }
+}
+
+template <typename SIZE>
+void print_importance_update_thread(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ const char* type,
+ std::unordered_map<typename SIZE::ptr_t, std::pair<TaskEffectivePolicy, uint32_t>>& thread_effective_state)
+{
+ //
+ // event args are:
+ //
+ // targettid, teffective_0(task, thread), teffective_1(task, thread), tpriority(task, thread)
+ //
+
+ if (const MachineThread<SIZE>* thread = machine.thread(event.arg1(), event.timestamp())) {
+ auto pid = thread->process().pid();
+ auto teffective_0 = event.arg2();
+ auto teffective_1 = event.arg3();
+ auto priority = (uint32_t)event.arg4();
+ auto thread_effective_policy = (SIZE::is_64_bit) ? TaskEffectivePolicy(teffective_1) : TaskEffectivePolicy((Kernel32::ptr_t)teffective_0, (Kernel32::ptr_t)teffective_1);
+ auto state = std::pair<TaskEffectivePolicy, uint32_t>(thread_effective_policy, priority);
+ auto should_print = false;
+
+ // Verify that some state changed before printing.
+ auto it = thread_effective_state.find(thread->tid());
+ if (it == thread_effective_state.end()) {
+ should_print = true;
+ thread_effective_state.emplace(thread->tid(), state);
+ } else {
+ if (it->second != state) {
+ should_print = true;
+ it->second = state;
+ }
+ }
+
+ if (should_print) {
+ print_base(buffer, globals, event.timestamp(), machine.thread(event.tid(), event.timestamp()), event, event_index, type, false);
+ buffer.printf("%s (%d) %llX base priority is %d\n", thread->process().name(), pid, (uint64_t)thread->tid(), priority);
+
+ print_teffective_thread(buffer, globals, machine, event, event_index, thread, thread_effective_policy);
+ }
+ }
+}
+
+template <typename SIZE>
+void print_fork(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ const MachineProcess<SIZE>& child_process)
+{
+ print_base(buffer, globals, event.timestamp(), machine.thread(event.tid(), event.timestamp()), event, event_index, "fork", false);
+
+ //
+ // Other process
+ //
+
+ buffer.printf("Create %s (%d)\n", child_process.name(), child_process.pid());
+}
+
+template <typename SIZE>
+void print_exit(PrintBuffer& buffer,
+ const Globals& globals,
+ const KDEvent<SIZE>& event,
+ const MachineThread<SIZE>* thread,
+ uintptr_t event_index)
+{
+ ASSERT(thread, "Sanity");
+
+ print_base(buffer, globals, event.timestamp(), thread, event, event_index, "exit", false);
+
+ //
+ // exit code
+ //
+
+ int exit_status = thread->process().exit_status();
+
+ if (WIFEXITED(exit_status)) {
+ buffer.printf("returned %d\n", WEXITSTATUS(exit_status));
+ } else if (WIFSIGNALED(exit_status)) {
+ buffer.printf("SIGNAL: %s\n", strsignal(WTERMSIG(exit_status)));
+ } else {
+ buffer.printf("Unhandled exit status %x\n", (uint32_t)exit_status);
+ }
+}
+
+template <typename SIZE>
+void print_voucher(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ const char* type,
+ const MachineVoucher<SIZE>* voucher,
+ bool is_create)
+{
+ print_base(buffer, globals, event.timestamp(), machine.thread(event.tid(), event.timestamp()), event, event_index, type, false);
+
+ //
+ // Calculate lifetime
+ //
+
+ char lifetime[32];
+ AbsInterval timespan = voucher->timespan();
+
+ //
+ // Vouchers created before the trace starts will have a starting time
+ // of 0; vouchers that are still alive will have a max of UINT64_MAX.
+ //
+ if (timespan.location() == AbsTime(0) || timespan.max() == AbsTime(UINT64_MAX)) {
+ snprintf(lifetime, sizeof(lifetime), "???");
+ } else {
+ NanoTime t1 = timespan.length().nano_time(globals.timebase());
+ snprintf(lifetime, sizeof(lifetime), "%0.2f", (double)t1.value() / NANOSECONDS_PER_MICROSECOND);
+ }
+
+ //
+ // Voucher addr
+ //
+ if (is_create) {
+ buffer.printf("Create voucher-%u @ %llX, lifetime will be %s µs, now %u vouchers\n", voucher->id(), (uint64_t)voucher->address(), lifetime, (uint32_t)event.arg3());
+ } else {
+ buffer.printf("Destroy voucher-%u @ %llX, lifetime was %s µs, now %u vouchers\n", voucher->id(), (uint64_t)voucher->address(), lifetime, (uint32_t)event.arg3());
+ }
+}
+
+template <typename SIZE>
+void print_voucher_contents(PrintBuffer& buffer,
+ const Globals& globals,
+ const Machine<SIZE>& machine,
+ const KDEvent<SIZE>& event,
+ uintptr_t event_index,
+ const MachineVoucher<SIZE>* voucher)
+{
+ const uint8_t* bytes = voucher->content_bytes();
+ uint32_t bytes_required = voucher->content_size();
+
+ ASSERT(bytes_required, "Printing empty voucher");
+
+ unsigned int used_size = 0;
+ mach_voucher_attr_recipe_t recipe = NULL;
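+ // The voucher content bytes are a packed sequence of recipes: each is a
+ // mach_voucher_attr_recipe_data_t header immediately followed by
+ // content_size bytes of payload.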
+ while (bytes_required > used_size) {
+ recipe = (mach_voucher_attr_recipe_t)&bytes[used_size];
+
+ switch (recipe->key) {
+ case MACH_VOUCHER_ATTR_KEY_NONE:
+ ASSERT(false, "No key in recipe");
+ break;
+
+ case MACH_VOUCHER_ATTR_KEY_ATM:
+ print_base_empty(buffer, globals, event_index, "voucher_create", false);
+ buffer.printf(" voucher-%u | ATM ID %llu\n", voucher->id(), *(uint64_t *)(uintptr_t)recipe->content);
+ break;
+
+ case MACH_VOUCHER_ATTR_KEY_IMPORTANCE:
+ print_base_empty(buffer, globals, event_index, "voucher_create", false);
+ buffer.printf(" voucher-%u | %s\n", voucher->id(), (char *)recipe->content);
+ break;
+
+ case MACH_VOUCHER_ATTR_KEY_BANK:
+ // The spacing and newline are different because that is how BANK formats it :-(
+ print_base_empty(buffer, globals, event_index, "voucher_create", false);
+ buffer.printf(" voucher-%u |%s", voucher->id(), (char *)recipe->content);
+ break;
+
+ case MACH_VOUCHER_ATTR_KEY_USER_DATA:
+ for (uint32_t offset=0; offset<recipe->content_size; offset += 16) {
+ uint8_t* data = ((uint8_t*)recipe->content) + offset;
+ size_t data_remaining = std::min(recipe->content_size - offset, (uint32_t)16);
+
+ print_base_empty(buffer, globals, event_index, "voucher_create", false);
+ buffer.printf(" voucher-%u | UserData: %04u ", voucher->id(), offset);
+
+ // 16 * 3 == 48, 16 chars to spare
+ char hex_buffer[64];
+ // Hex data.
+ for (uint32_t cursor = 0; cursor<data_remaining; cursor++) {
+ char* hex_buffer_tmp = &hex_buffer[cursor * 3];
+ size_t hex_buffer_tmp_size = sizeof(hex_buffer) - cursor * 3;
+ snprintf(hex_buffer_tmp, hex_buffer_tmp_size, "%02x ", data[cursor]);
+ }
+
+ char ascii_buffer[24];
+ for (uint32_t cursor = 0; cursor<data_remaining; cursor++) {
+ if (isprint(data[cursor]))
+ ascii_buffer[cursor] = data[cursor];
+ else
+ ascii_buffer[cursor] = '.';
+ }
+ ascii_buffer[data_remaining] = 0;
+
+ buffer.printf("%-48s %-16s\n", hex_buffer, ascii_buffer);
+ }
+ break;
+
+ default:
+ print_base_empty(buffer, globals, event_index, "voucher_create", false);
+ buffer.printf(" voucher-%u | UNKNOWN key-%u command-%u size-%u\n", voucher->id(), recipe->key, recipe->command, recipe->content_size);
+ break;
+ }
+
+ used_size += sizeof(mach_voucher_attr_recipe_data_t) + recipe->content_size;
+ }
+}
+
+#endif /* defined(__msa__MessagePrinting__) */
--- /dev/null
+//
+// ReadTraceFileAction.cpp
+// msa
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#include "global.h"
+
+template <typename SIZE>
+static void execute_arch_specific(Globals& globals, TraceFile& file)
+{
+ Machine<SIZE> machine(file);
+
+ if (!machine.lost_events()) {
+ if (globals.should_zero_base_timestamps() && machine.event_count()) {
+ globals.set_beginning_of_time(machine.events()[0].timestamp());
+ } else {
+ globals.set_beginning_of_time(AbsTime(0));
+ }
+
+ if (!globals.is_timebase_set()) {
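+ // No timebase was recorded in the file; fall back to assumed defaults
+ // (ARM based iOS devices historically report 125/3, Intel Macs 1/1).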
+ if (machine.is_ios()) {
+ globals.set_timebase({ 125, 3 }, false);
+ } else {
+ globals.set_timebase({ 1, 1 }, false);
+ }
+ }
+
+ char buf[PATH_MAX];
+ char* buf_end = buf + sizeof(buf);
+ print_mach_msg_header(buf, buf_end, globals);
+ dprintf(globals.output_fd(), "%s", buf);
+
+ std::unordered_map<pid_t, bool> task_appnap_state;
+ std::unordered_map<pid_t, TaskRequestedPolicy> task_requested_state;
+ std::unordered_map<typename SIZE::ptr_t, TaskRequestedPolicy> thread_requested_state;
+ std::unordered_map<pid_t, std::pair<TaskEffectivePolicy, uint32_t>> task_effective_state;
+ std::unordered_map<typename SIZE::ptr_t, std::pair<TaskEffectivePolicy, uint32_t>> thread_effective_state;
+ std::unordered_map<pid_t, std::pair<uint32_t, uint32_t>> task_boosts;
+
+ process_events(globals, machine, task_appnap_state, task_requested_state, thread_requested_state, task_effective_state, thread_effective_state, task_boosts);
+ } else {
+ log_msg(ASL_LEVEL_WARNING, "The trace data indicates that events were lost; the file cannot be processed\n");
+ }
+}
+
+void ReadTraceFileAction::execute(Globals& globals) {
+ TraceFile file(_path.c_str(), globals.should_presort_events(), globals.cpu_count(), globals.iop_count());
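+ // An explicitly set kernel size takes precedence; otherwise the pointer
+ // width is auto-detected from the trace file itself.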
+ if (globals.is_kernel_size_set()) {
+ try {
+ if (globals.kernel_size() == KernelSize::k32)
+ execute_arch_specific<Kernel32>(globals, file);
+ else
+ execute_arch_specific<Kernel64>(globals, file);
+ } catch (Exception& e) {
+ log_msg(ASL_LEVEL_ERR, "An exception was raised: %s", e.what());
+ log_msg(ASL_LEVEL_ERR, "An explicit kernel size was set, you may want to try not forcing the size to a single value\n");
+ log_msg(ASL_LEVEL_ERR, "You may also want to check the number of cpus and iops configured if the file is from a device and does not have a cpumap\n");
+ }
+ } else {
+ if (file.is_valid()) {
+ if (file.is_64_bit()) {
+ execute_arch_specific<Kernel64>(globals, file);
+ } else {
+ execute_arch_specific<Kernel32>(globals, file);
+ }
+ } else {
+ if (file.mmap_failed()) {
+ log_msg(ASL_LEVEL_ERR, "Unable to mmap %s, it may exceed this devices memory limits\n", _path.c_str());
+ } else {
+ log_msg(ASL_LEVEL_ERR, "%s does not appear to be a valid trace file\n", _path.c_str());
+ }
+ }
+ }
+}
--- /dev/null
+//
+// ReadTraceFileAction.hpp
+// msa
+//
+// Created by James McIlree on 4/15/13.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef __msa__TraceFileAction__
+#define __msa__TraceFileAction__
+
+class ReadTraceFileAction : public Action {
+ protected:
+ std::string _path;
+
+ public:
+ ReadTraceFileAction(const char* path) : _path(path) {
+ ASSERT(Path::is_file(_path, true), "File must exist");
+ }
+
+ virtual void execute(Globals& globals);
+};
+
+#endif /* defined(__msa__TraceFileAction__) */
--- /dev/null
+//
+// VoucherContentSysctl.cpp
+// system_cmds
+//
+// Created by James McIlree on 4/29/14.
+//
+//
+
+#include "global.h"
+
+VoucherContentSysctl::VoucherContentSysctl(bool is_enabled) :
+ _original_value(0),
+ _new_value(is_enabled ? 1 : 0)
+{
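+ // A single sysctlbyname(3) call reads the previous value and installs
+ // the new one, so the original setting can be restored in the destructor.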
+ size_t original_value_size = sizeof(_original_value);
+ if (sysctlbyname("kern.ipc_voucher_trace_contents", &_original_value, &original_value_size, &_new_value, sizeof(_new_value))) {
+ log_msg(ASL_LEVEL_ERR, "Unable to %s kern.ipc_voucher_trace_contents sysctl", is_enabled ? "set" : "clear");
+ }
+}
+
+VoucherContentSysctl::~VoucherContentSysctl() {
+ if (_original_value != _new_value) {
+ if (sysctlbyname("kern.ipc_voucher_trace_contents", NULL, 0, &_original_value, sizeof(_original_value))) {
+ log_msg(ASL_LEVEL_ERR, "Unable to restore original value of kern.ipc_voucher_trace_contents sysctl");
+ }
+ }
+}
--- /dev/null
+//
+// VoucherContentSysctl.hpp
+// system_cmds
+//
+// Created by James McIlree on 4/29/14.
+//
+//
+
+#ifndef __system_cmds__VoucherContentSysctl__
+#define __system_cmds__VoucherContentSysctl__
+
+//
+// This class is used to manage the voucher contents sysctl
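+//
+// A minimal usage sketch (the surrounding capture code is assumed):
+//
+//     {
+//         VoucherContentSysctl contents(true); // enables kern.ipc_voucher_trace_contents
+//         // ... capture trace events with voucher contents ...
+//     } // destructor restores the sysctl's original value
+//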
+class VoucherContentSysctl {
+ protected:
+ int _original_value;
+ int _new_value;
+
+ public:
+ VoucherContentSysctl(bool is_enabled);
+ ~VoucherContentSysctl();
+};
+
+#endif /* defined(__system_cmds__VoucherContentSysctl__) */
--- /dev/null
+//
+// WriteTraceFileAction.cpp
+// system_cmds
+//
+// Created by James McIlree on 4/29/14.
+//
+//
+
+#include "global.h"
+
+static bool shouldProcessEvents;
+static uint32_t sigintCount;
+
+static bool start_tracing(Globals& globals)
+{
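+ // Configure the kernel trace facility: clear any existing state, size
+ // and allocate the buffers (wrapping allowed), then enable tracing.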
+ if (!KDBG::reset()) return false;
+ if (!KDBG::set_buffer_capacity(globals.trace_buffer_size())) return false;
+ if (!KDBG::set_nowrap(false)) return false;
+ if (!KDBG::initialize_buffers()) return false;
+ if (!KDBG::set_enabled(KDEBUG_ENABLE_TRACE)) return false;
+
+ return true;
+}
+
+static void end_tracing(void)
+{
+ KDBG::reset();
+}
+
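+// Ctrl-C requests a clean stop of the capture loop; five or more Ctrl-Cs
+// force an immediate exit in case the writer is wedged.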
+static void signal_handler_ctrl_C(int sig)
+{
+ shouldProcessEvents = false;
+ if (++sigintCount >= 5) {
+ // Not responding, nuke it from orbit.
+ exit(1);
+ }
+}
+
+void WriteTraceFileAction::execute(Globals& globals) {
+ FileDescriptor fd(open(_path.c_str(), O_TRUNC|O_WRONLY|O_CREAT, 0777));
+ if (!fd.is_open()) {
+ log_msg(ASL_LEVEL_ERR, "Unable to write to %s\n", _path.c_str());
+ return;
+ }
+
+ shouldProcessEvents = true;
+ sigintCount = 0;
+
+ VoucherContentSysctl contents(globals.should_trace_voucher_contents());
+
+ AbsTime t1 = AbsTime::now();
+ if (start_tracing(globals)) {
+ // We cannot write the "maps" until after tracing has started.
+ if (KDBG::write_maps(fd)) {
+ signal(SIGINT, signal_handler_ctrl_C);
+
+ while (shouldProcessEvents) {
+ int events_written = KDBG::write_events(fd);
+ AbsTime t2 = AbsTime::now();
+ if (events_written != -1) {
+ printf("wrote %d events - elapsed time = %.1f secs\n", events_written, (double)(t2 - t1).nano_time().value() / (double)NANOSECONDS_PER_SECOND);
+ } else {
+ log_msg(ASL_LEVEL_WARNING, "write events returned -1\n");
+ break;
+ }
+ t1 = t2;
+ }
+
+ signal(SIGINT, SIG_DFL);
+ }
+ }
+
+ end_tracing();
+}
--- /dev/null
+//
+// WriteTraceFileAction.hpp
+// system_cmds
+//
+// Created by James McIlree on 4/29/14.
+//
+//
+
+#ifndef __system_cmds__WriteTraceFileAction__
+#define __system_cmds__WriteTraceFileAction__
+
+class WriteTraceFileAction : public Action {
+ protected:
+ std::string _path;
+
+ public:
+ WriteTraceFileAction(const char* path) :
+ _path(path)
+ {
+ ASSERT(path, "Sanity");
+ }
+
+ virtual void execute(Globals& globals);
+};
+
+#endif /* defined(__system_cmds__WriteTraceFileAction__) */
--- /dev/null
+//
+// global.h
+// msa
+//
+// Created by James McIlree on 2/1/14.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#ifndef msa_global_h
+#define msa_global_h
+
+#include <CPPUtil/CPPUtil.h>
+
+using namespace util;
+
+#include <KDBG/KDebug.h>
+
+#include <signal.h>
+
+#include <libkern/OSAtomic.h>
+
+#include <vector>
+#include <unordered_map>
+#include <thread>
+#include <tuple>
+
+__attribute__((noreturn)) void usage(const char *);
+
+#include "Globals.hpp"
+#include "EventRingBuffer.hpp"
+#include "PrintBuffer.hpp"
+#include "Action.hpp"
+#include "ReadTraceFileAction.hpp"
+#include "WriteTraceFileAction.hpp"
+#include "LiveTraceAction.hpp"
+#include "Printing.hpp"
+#include "EventProcessing.hpp"
+#include "VoucherContentSysctl.hpp"
+
+#endif
--- /dev/null
+//
+// main.cpp
+// msa
+//
+// Created by James McIlree on 1/30/14.
+// Copyright (c) 2014 Apple. All rights reserved.
+//
+
+#include <CPPUtil/CPPUtil.h>
+
+#include "global.h"
+
+bool isVerbose = true;
+bool shouldPrintVersion = true;
+std::vector<std::string> procsOfInterest;
+bool interestedInEverything = false;
+
+__attribute__((noreturn)) void usage(const char *errorMsg) {
+ if (errorMsg) {
+ printf("%s\n", errorMsg);
+ exit(1);
+ }
+
+ // const char* BOLD = "\033[1m";
+ // const char* UNBOLD = "\033[0m";
+
+ // printf("01234567890123456789012345678901234567890123456789012345678901234567890123456789\n");
+ printf("msa [options]\n\n");
+ printf(" GLOBAL OPTIONS\n\n");
+ printf(" -h, --help Print this message\n");
+ printf(" --verbose Print additional information\n");
+ printf(" --version Print version info\n");
+ printf("\n");
+ printf(" TRACE COLLECTION OPTIONS\n\n");
+ printf(" -i, --initialize # Set the size of the kernel trace buffer\n");
+ printf(" --no-voucher-contents Disable collecting voucher contents\n");
+ printf(" -L path Capture and save trace output to path\n");
+ printf("\n");
+ printf(" OUTPUT OPTIONS\n\n");
+ printf(" --lifecycle all|user|none\n");
+ printf(" Set filter level for lifecycle events\n");
+ printf(" --mach-msg all|user|voucher|none\n");
+ printf(" Set filter level for mach msg events\n");
+ printf(" -o, --output path Print output to path\n");
+ printf(" --raw-timestamps Print timestamps as raw values, not deltas\n");
+ printf(" --mach-absolute-time Print timestamps in mach absolute time\n");
+ printf(" --event-index Print the index of each event\n");
+ printf("\n");
+ exit(1);
+}
+
+template <typename SIZE>
+static bool check_interest_name(const MachineProcess<SIZE>& process) {
+ if (interestedInEverything)
+ return true;
+
+ const char* name = process.name();
+ for (auto& proc : procsOfInterest) {
+ if (strcmp(name, proc.c_str()) == 0)
+ return true;
+ }
+
+ return false;
+}
+
+static std::unique_ptr<Action> create_read_trace_file_action(const char* trace_file_path) {
+ if (Path::is_file(trace_file_path, true)) {
+ char resolved_path[PATH_MAX];
+ if (realpath(trace_file_path, resolved_path)) {
+ return std::make_unique<ReadTraceFileAction>(resolved_path);
+ }
+ }
+ char* errmsg = NULL;
+ asprintf(&errmsg, "%s does not exist or is not a file", trace_file_path);
+ usage(errmsg);
+}
+
+static std::vector<std::unique_ptr<Action>> parse_arguments(int argc, const char* argv[], Globals& globals) {
+ int i = 1;
+
+ std::vector<std::unique_ptr<Action>> actions;
+
+ while (i < argc) {
+ const char* arg = argv[i];
+ if ((strcmp(arg, "-h") == 0) || (strcasecmp(arg, "--help") == 0)) {
+ usage(NULL);
+ } else if ((strcmp(arg, "-v") == 0) || strcasecmp(arg, "--verbose") == 0) {
+ globals.set_is_verbose(true);
+ } else if (strcasecmp(arg, "--version") == 0) {
+ shouldPrintVersion = true;
+ } else if ((strcmp(arg, "-i") == 0) || strcasecmp(arg, "--initialize") == 0) {
+ if (++i >= argc)
+ usage("--initialize requires an argument");
+
+ arg = argv[i];
+ char* endptr;
+ uint32_t temp = (uint32_t)strtoul(arg, &endptr, 0);
+ if (*endptr == 0) {
+ globals.set_trace_buffer_size(temp);
+ } else {
+ usage("Unable to parse --initialize argument");
+ }
+ } else if (strcasecmp(arg, "--no-voucher-contents") == 0) {
+ globals.set_should_trace_voucher_contents(false);
+ } else if (strcasecmp(arg, "-L") == 0) {
+ if (++i >= argc)
+ usage("-L requires an argument");
+
+ arg = argv[i];
+ actions.push_back(std::make_unique<WriteTraceFileAction>(arg));
+ } else if (strcasecmp(arg, "--lifecycle") == 0) {
+ if (++i >= argc)
+ usage("--lifecycle requires an argument");
+
+ arg = argv[i];
+ if (strcasecmp(arg, "all") == 0) {
+ globals.set_lifecycle_filter(kLifecycleFilter::All);
+ } else if (strcasecmp(arg, "user") == 0) {
+ globals.set_lifecycle_filter(kLifecycleFilter::User);
+ } else if (strcasecmp(arg, "none") == 0) {
+ globals.set_lifecycle_filter(kLifecycleFilter::None);
+ } else {
+ usage("Unrecognized --lifecycle value");
+ }
+ } else if (strcasecmp(arg, "--mach-msg") == 0) {
+ if (++i >= argc)
+ usage("--mach-msg requires an argument");
+
+ arg = argv[i];
+ if (strcasecmp(arg, "all") == 0) {
+ globals.set_mach_msg_filter(kMachMsgFilter::All);
+ } else if (strcasecmp(arg, "user") == 0) {
+ globals.set_mach_msg_filter(kMachMsgFilter::User);
+ } else if (strcasecmp(arg, "voucher") == 0) {
+ globals.set_mach_msg_filter(kMachMsgFilter::Voucher);
+ } else if (strcasecmp(arg, "none") == 0) {
+ globals.set_mach_msg_filter(kMachMsgFilter::None);
+ } else {
+ usage("Unrecognized --mach-msg value");
+ }
+ } else if ((strcmp(arg, "-o") == 0) || strcasecmp(arg, "--output") == 0) {
+ if (++i >= argc)
+ usage("--output requires an argument");
+
+ FileDescriptor desc(argv[i], O_WRONLY | O_CREAT | O_TRUNC, 0644);
+ if (!desc.is_open()) {
+ char* errmsg = NULL;
+ asprintf(&errmsg, "Unable to create output file at %s", argv[i]);
+ usage(errmsg);
+ }
+ globals.set_output_fd(std::move(desc));
+ } else if (strcasecmp(arg, "--raw-timestamps") == 0) {
+ globals.set_should_zero_base_timestamps(false);
+ } else if (strcasecmp(arg, "--mach-absolute-time") == 0) {
+ globals.set_should_print_mach_absolute_timestamps(true);
+ } else if (strcasecmp(arg, "--event-index") == 0) {
+ globals.set_should_print_event_index(true);
+ } else {
+ //
+ // Last attempts to divine argument type/intent.
+ //
+ std::string temp(arg);
+
+ if (ends_with(temp, ".trace")) {
+ actions.push_back(create_read_trace_file_action(argv[i]));
+ goto no_error;
+ }
+
+ //
+ // ERROR!
+ //
+ char error_buffer[PATH_MAX];
+ snprintf(error_buffer, sizeof(error_buffer), "Unhandled argument: %s", arg);
+ usage(error_buffer);
+ }
+
+ no_error:
+
+ i++;
+ }
+
+ if (actions.empty()) {
+ actions.push_back(std::make_unique<LiveTraceAction>());
+ }
+
+ return actions;
+}
+
+int main(int argc, const char * argv[])
+{
+ //
+ // Use host values as defaults.
+ // User overrides as needed via flags.
+ //
+ Globals globals;
+ auto actions = parse_arguments(argc, argv, globals);
+
+ interestedInEverything = procsOfInterest.empty();
+
+ // globals.set_should_print_mach_absolute_timestamps(true);
+
+ for (auto& action : actions) {
+ action->execute(globals);
+ }
+
+ return 0;
+}
+
--- /dev/null
+.Dd 3/7/14
+.Dt msa 1
+.Os Darwin
+.Sh NAME
+.Nm msa
+.Nd Mach spy agency. Shows mach ipc, vouchers, importance boosts, etc.
+.Sh SYNOPSIS \" Section Header - required - don't modify
+.Nm
+.Op Fl ho
+.Op Fl -help
+.Op Fl -verbose
+.Op Fl -version
+.Op Fl -lifecycle Ar all | user | none
+.Op Fl -mach-msg Ar all | user | voucher | none
+.Op Fl -output Ar file
+.Op Fl -raw-timestamps
+.Op Fl -mach-absolute-time
+.Op Fl -event-index
+.Op Ar
+.Sh DESCRIPTION
+The
+.Nm
+command is used to observe voucher and importance propagation.
+.Nm
+displays mach message senders and receivers, message state (voucher, importance, reply expected, etc.) and receiver behavior such as refusing a voucher.
+.Nm
+shows process & thread lifecycle events, adoption & clearing of vouchers by threads, process importance count changes, and process DarwinBG state.
+.Nm
+uses the kernel trace facility and can be run live or against saved trace files.
+.Pp
+Options are as follows:
+.Pp
+
+.Bl -tag -width -indent
+.It Fl h, -help
+Print help.
+.It Fl -verbose
+Print additional details and output.
+.It Fl -version
+Print version info.
+.It Fl -lifecycle Ar all | user | none
+Set the process and thread lifecycle filter level. The default is "user".
+.Bl -tag -width -indent
+.It all
+Show kernel & userspace process and thread events
+.It user
+Show userspace process and thread events
+.It none
+Show no process and thread events
+.El
+.It Fl -mach-msg Ar all | user | voucher | none
+Set the mach message sender/receiver filter level. The default is "voucher".
+.Bl -tag -width -indent
+.It all
+Show all mach message senders/receivers
+.It user
+Show mach message senders/receivers where both the sender and receiver are in userspace
+.It voucher
+Show "interesting" mach message senders/receivers. An "interesting" message is any message containing a voucher, any message not containing a voucher if the sending thread has adopted a voucher, and any message carrying importance.
+.It none
+Show no mach message senders/receivers
+.El
+.It Fl o, -output Ar file
+Write output to
+.Ar file
+.It Fl -raw-timestamps
+Do not show time as a delta from the first trace event; print the actual time offsets.
+.It Fl -mach-absolute-time
+Do not translate time from mach absolute units to nanoseconds.
+.It Fl -event-index
+Print the index of each event.
+.El
+.Sh OUTPUT
+.Nm
+displays columns of data; the first five columns are fixed. The data after the fifth column depends on the type of event. Example output below (your terminal will need to be at least 200 characters wide to display this correctly):
+
+ Time(uS) Type Thread ThreadVoucher Process ;; Message-From/To MsgID MsgVoucher DeliveryTime FLAGS
+ 9304.56 send 8C2 - coreaudiod (236) ;; -> coreaudiod (236) 2 - - ONEWAY
+ 9346.52 impdelv 8A6 - coreaudiod (236) ;; linked to coreaudiod (236)'s live importance chain
+ 9349.02 recv 8A6 - coreaudiod (236) ;; <- coreaudiod (236) 2 5EBD6CB68FF6401F 44.46
+ 9361.50 adopt 8A6 5EBD6CB68FF6401F coreaudiod (236) ;;
+
+.Bl -tag -width -indent
+.It The column headers have the following meanings:
+.Bl -tag -width -indent
+.It Time
+The Time column displays the number of microseconds elapsed since
+.Nm
+started, or since the first event in the trace file. This value may be modified by
+.Fl -raw-timestamps
+or
+.Fl -mach-absolute-time .
+You might set these flags in order to correlate timestamps with output from another process that was printing mach_absolute_time() based timestamps.
+.It Type
+This describes the type of event being reported. Information to the right of the ';;' will vary based on this type.
+.It Thread
+The Thread column shows the thread id of the thread that generated the event. This is the thread id as displayed by
+.Xr trace 1
+and
+.Xr kdprof 1
+and may be cross-correlated with trace events shown by either. Note that in some cases a thread in process A may cause changes in process B, for example a thread in Safari might raise the importance of a daemon.
+.It ThreadVoucher
+The ThreadVoucher column shows the id of any voucher currently adopted by the thread. This voucher id is the same identifier as shown by
+.Xr lsmp 1 .
+A '-' means that
+.Nm
+has not yet seen a voucher adopt for the thread. A NULL voucher is displayed as '0'.
+.It Process
+The Process column shows the name and pid of the process executing. The name is limited to 16 characters and may appear truncated.
+.El
+.It Mach message send/recv have the following additional column headers:
+.Bl -tag -width -indent
+.It Message-From/To
+This field shows either the sender or the recipient of the message. The arrow at the beginning indicates the direction the message is flowing. A '->' means sent to, a '<-' means received from. The name of the sender or recipient is limited to 16 characters and may appear truncated. Rarely, you may see '???' as the name, which means
+.Nm
+was unable to determine the source or destination of the message.
+.It MsgID
+The MsgID is a unique identifier for each mach message. A mach message has exactly one sender and one receiver. However, a sending process may send several messages to a receiver before any are received. The MsgID allows disambiguation of exact message send and receipt times.
+.It MsgVoucher
+If this field is set, it shows the id of the voucher being carried by the mach message. Note that in some cases the sender will show no voucher, but the receiver will have a voucher. This is the kernel providing a voucher for a process sending "old style" importance to a process that wants to receive vouchers.
+.It DeliveryTime
+This is the time it took to deliver the message, in uS. If the time cannot be calculated, it will show as '-'.
+.It FLAGS
+The FLAGS field indicates various mach message behaviors:
+.Bl -tag -width -indent
+.It ONEWAY
+This message cannot be replied to
+.It MSGH_BITS_RAISED_IMPORTANCE
+This message carries "old style" importance
+.It VOUCHER-REFUSED
+The message carried a voucher, and the receiver refused to accept it
+.El
+.El
+.El
+.Sh EXAMPLES
+.Bl -tag -width -indent
+.It Here are several examples of usage:
+.Bl -tag -width -indent
+.It msa | grep BOOST
+This will show live boost/unboost behavior. Useful for watching which UI interactions cause boosting.
+.It msa | grep -e APP-NAME -e DAEMON-NAME -e OTHER-DAEMON-NAME
+This will restrict output to only those events dealing with an app and targeted daemons. This is useful to reduce the amount of data you need to watch.
+.It trace -L /tmp/temp.trace; msa /tmp/temp.trace
+This uses trace to capture a trace file for later analysis by msa.
+.El
+.El
+.Sh SEE ALSO
+.\" List links in ascending order by section, alphabetically within a section.
+.\" Please do not reference files that do not exist without filing a bug report
+.Xr kdprof 1 ,
+.Xr lsmp 1 ,
+.Xr trace 1
+.\" .Sh BUGS \" Document known, unremedied bugs
+.\" .Sh HISTORY \" Document history if command behaves in a unique manner
\ No newline at end of file