From patchwork Tue Jan 26 13:47:17 2016
X-Patchwork-Submitter: Paolo Bonzini
X-Patchwork-Id: 8123111
From: Paolo Bonzini
To: qemu-devel@nongnu.org
Date: Tue, 26 Jan 2016 14:47:17 +0100
Message-Id: <1453816041-36362-46-git-send-email-pbonzini@redhat.com>
In-Reply-To: <1453816041-36362-1-git-send-email-pbonzini@redhat.com>
References: <1453816041-36362-1-git-send-email-pbonzini@redhat.com>
Cc: Janosch Frank
Subject: [Qemu-devel] [PULL 45/49] scripts/dump-guest-memory.py: Make methods functions

From: Janosch Frank

The functions dealing with qemu components rarely used parts of the
class, so they were moved out of the class. As the uintptr_t variable
is needed both within and outside the class, it was made a constant
and moved to the top.
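The same pattern in miniature (illustrative only; the class, helper and
constant below are invented for this sketch and are not the patch's code): a
method that never touches instance state can be hoisted to a module-level
function, and a value that used to be set up per instance can become a module
constant.

# Before: the helper lives on the class although it never uses instance
# state, and the mask is stored per instance.
class Dumper(object):
    def __init__(self):
        self.page_mask = 0xFFFFFFFFFFFFF000

    def page_align(self, addr):
        return addr & self.page_mask

# After (replaces the definition above): module-level constant plus a free
# function, usable both from inside the class and from standalone code.
PAGE_MASK = 0xFFFFFFFFFFFFF000

def page_align(addr):
    return addr & PAGE_MASK

class Dumper(object):
    def dump_page(self, addr):
        return page_align(addr)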
Reviewed-by: Laszlo Ersek
Signed-off-by: Janosch Frank
Message-Id: <1453464520-3882-3-git-send-email-frankja@linux.vnet.ibm.com>
Signed-off-by: Paolo Bonzini
---
 scripts/dump-guest-memory.py | 184 ++++++++++++++++++++++---------------------
 1 file changed, 93 insertions(+), 91 deletions(-)

diff --git a/scripts/dump-guest-memory.py b/scripts/dump-guest-memory.py
index e49c835..d0b927a 100644
--- a/scripts/dump-guest-memory.py
+++ b/scripts/dump-guest-memory.py
@@ -17,6 +17,8 @@
 
 import struct
 
+UINTPTR_T = gdb.lookup_type("uintptr_t")
+
 TARGET_PAGE_SIZE = 0x1000
 TARGET_PAGE_MASK = 0xFFFFFFFFFFFFF000
 
@@ -66,6 +68,94 @@ ELF64_PHDR = ("I" # p_type
               "Q" # p_align
               )
 
+def int128_get64(val):
+    assert (val["hi"] == 0)
+    return val["lo"]
+
+def qlist_foreach(head, field_str):
+    var_p = head["lh_first"]
+    while (var_p != 0):
+        var = var_p.dereference()
+        yield var
+        var_p = var[field_str]["le_next"]
+
+def qemu_get_ram_block(ram_addr):
+    ram_blocks = gdb.parse_and_eval("ram_list.blocks")
+    for block in qlist_foreach(ram_blocks, "next"):
+        if (ram_addr - block["offset"] < block["used_length"]):
+            return block
+    raise gdb.GdbError("Bad ram offset %x" % ram_addr)
+
+def qemu_get_ram_ptr(ram_addr):
+    block = qemu_get_ram_block(ram_addr)
+    return block["host"] + (ram_addr - block["offset"])
+
+def memory_region_get_ram_ptr(mr):
+    if (mr["alias"] != 0):
+        return (memory_region_get_ram_ptr(mr["alias"].dereference()) +
+                mr["alias_offset"])
+    return qemu_get_ram_ptr(mr["ram_addr"] & TARGET_PAGE_MASK)
+
+def get_guest_phys_blocks():
+    guest_phys_blocks = []
+    print "guest RAM blocks:"
+    print ("target_start     target_end       host_addr        message "
+           "count")
+    print ("---------------- ---------------- ---------------- ------- "
+           "-----")
+
+    current_map_p = gdb.parse_and_eval("address_space_memory.current_map")
+    current_map = current_map_p.dereference()
+    for cur in range(current_map["nr"]):
+        flat_range = (current_map["ranges"] + cur).dereference()
+        mr = flat_range["mr"].dereference()
+
+        # we only care about RAM
+        if (not mr["ram"]):
+            continue
+
+        section_size = int128_get64(flat_range["addr"]["size"])
+        target_start = int128_get64(flat_range["addr"]["start"])
+        target_end = target_start + section_size
+        host_addr = (memory_region_get_ram_ptr(mr) +
+                     flat_range["offset_in_region"])
+        predecessor = None
+
+        # find continuity in guest physical address space
+        if (len(guest_phys_blocks) > 0):
+            predecessor = guest_phys_blocks[-1]
+            predecessor_size = (predecessor["target_end"] -
+                                predecessor["target_start"])
+
+            # the memory API guarantees monotonically increasing
+            # traversal
+            assert (predecessor["target_end"] <= target_start)
+
+            # we want continuity in both guest-physical and
+            # host-virtual memory
+            if (predecessor["target_end"] < target_start or
+                predecessor["host_addr"] + predecessor_size != host_addr):
+                predecessor = None
+
+        if (predecessor is None):
+            # isolated mapping, add it to the list
+            guest_phys_blocks.append({"target_start": target_start,
+                                      "target_end"  : target_end,
+                                      "host_addr"   : host_addr})
+            message = "added"
+        else:
+            # expand predecessor until @target_end; predecessor's
+            # start doesn't change
+            predecessor["target_end"] = target_end
+            message = "joined"
+
+        print ("%016x %016x %016x %-7s %5u" %
+               (target_start, target_end, host_addr.cast(UINTPTR_T),
+                message, len(guest_phys_blocks)))
+
+    return guest_phys_blocks
+
+
 class DumpGuestMemory(gdb.Command):
     """Extract guest vmcore from qemu process coredump.
@@ -100,96 +190,9 @@ shape and this command should mostly work."""
         super(DumpGuestMemory, self).__init__("dump-guest-memory",
                                               gdb.COMMAND_DATA,
                                               gdb.COMPLETE_FILENAME)
-        self.uintptr_t = gdb.lookup_type("uintptr_t")
         self.elf64_ehdr_le = struct.Struct("<%s" % ELF64_EHDR)
         self.elf64_phdr_le = struct.Struct("<%s" % ELF64_PHDR)
-
-    def int128_get64(self, val):
-        assert (val["hi"] == 0)
-        return val["lo"]
-
-    def qlist_foreach(self, head, field_str):
-        var_p = head["lh_first"]
-        while (var_p != 0):
-            var = var_p.dereference()
-            yield var
-            var_p = var[field_str]["le_next"]
-
-    def qemu_get_ram_block(self, ram_addr):
-        ram_blocks = gdb.parse_and_eval("ram_list.blocks")
-        for block in self.qlist_foreach(ram_blocks, "next"):
-            if (ram_addr - block["offset"] < block["used_length"]):
-                return block
-        raise gdb.GdbError("Bad ram offset %x" % ram_addr)
-
-    def qemu_get_ram_ptr(self, ram_addr):
-        block = self.qemu_get_ram_block(ram_addr)
-        return block["host"] + (ram_addr - block["offset"])
-
-    def memory_region_get_ram_ptr(self, mr):
-        if (mr["alias"] != 0):
-            return (self.memory_region_get_ram_ptr(mr["alias"].dereference()) +
-                    mr["alias_offset"])
-        return self.qemu_get_ram_ptr(mr["ram_addr"] & TARGET_PAGE_MASK)
-
-    def guest_phys_blocks_init(self):
-        self.guest_phys_blocks = []
-
-    def guest_phys_blocks_append(self):
-        print "guest RAM blocks:"
-        print ("target_start     target_end       host_addr        message "
-               "count")
-        print ("---------------- ---------------- ---------------- ------- "
-               "-----")
-
-        current_map_p = gdb.parse_and_eval("address_space_memory.current_map")
-        current_map = current_map_p.dereference()
-        for cur in range(current_map["nr"]):
-            flat_range = (current_map["ranges"] + cur).dereference()
-            mr = flat_range["mr"].dereference()
-
-            # we only care about RAM
-            if (not mr["ram"]):
-                continue
-
-            section_size = self.int128_get64(flat_range["addr"]["size"])
-            target_start = self.int128_get64(flat_range["addr"]["start"])
-            target_end = target_start + section_size
-            host_addr = (self.memory_region_get_ram_ptr(mr) +
-                         flat_range["offset_in_region"])
-            predecessor = None
-
-            # find continuity in guest physical address space
-            if (len(self.guest_phys_blocks) > 0):
-                predecessor = self.guest_phys_blocks[-1]
-                predecessor_size = (predecessor["target_end"] -
-                                    predecessor["target_start"])
-
-                # the memory API guarantees monotonically increasing
-                # traversal
-                assert (predecessor["target_end"] <= target_start)
-
-                # we want continuity in both guest-physical and
-                # host-virtual memory
-                if (predecessor["target_end"] < target_start or
-                    predecessor["host_addr"] + predecessor_size != host_addr):
-                    predecessor = None
-
-            if (predecessor is None):
-                # isolated mapping, add it to the list
-                self.guest_phys_blocks.append({"target_start": target_start,
-                                               "target_end"  : target_end,
-                                               "host_addr"   : host_addr})
-                message = "added"
-            else:
-                # expand predecessor until @target_end; predecessor's
-                # start doesn't change
-                predecessor["target_end"] = target_end
-                message = "joined"
-
-            print ("%016x %016x %016x %-7s %5u" %
-                   (target_start, target_end, host_addr.cast(self.uintptr_t),
-                    message, len(self.guest_phys_blocks)))
+        self.guest_phys_blocks = None
 
     def cpu_get_dump_info(self):
         # We can't synchronize the registers with KVM post-mortem, and
@@ -263,8 +266,7 @@ shape and this command should mostly work."""
                            len(name) + 1, len(desc), type, name, desc)
 
     def dump_init(self):
-        self.guest_phys_blocks_init()
-        self.guest_phys_blocks_append()
+        self.guest_phys_blocks = get_guest_phys_blocks()
         self.cpu_get_dump_info()
         # we have no way to retrieve the VCPU status from KVM
         # post-mortem
@@ -310,7 +312,7 @@ shape and this command should mostly work."""
             cur = block["host_addr"]
             left = block["target_end"] - block["target_start"]
             print ("dumping range at %016x for length %016x" %
-                   (cur.cast(self.uintptr_t), left))
+                   (cur.cast(UINTPTR_T), left))
             while (left > 0):
                 chunk_size = min(TARGET_PAGE_SIZE, left)
                 chunk = qemu_core.read_memory(cur, chunk_size)
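
For orientation only, not part of the patch: get_guest_phys_blocks() returns a
plain list of dicts, one per contiguous stretch of guest RAM, and the dump
loop in the last hunk walks each block in TARGET_PAGE_SIZE chunks. A
standalone sketch of that consumer loop, with invented addresses so it can run
outside gdb:

TARGET_PAGE_SIZE = 0x1000

# Same shape as the dicts built by get_guest_phys_blocks(); the addresses
# below are made up for illustration.
guest_phys_blocks = [
    {"target_start": 0x00000000, "target_end": 0x000a0000,
     "host_addr": 0x7f0000000000},
    {"target_start": 0x00100000, "target_end": 0x08000000,
     "host_addr": 0x7f0000100000},
]

for block in guest_phys_blocks:
    cur = block["host_addr"]
    left = block["target_end"] - block["target_start"]
    print("dumping range at %016x for length %016x" % (cur, left))
    while left > 0:
        chunk_size = min(TARGET_PAGE_SIZE, left)
        # the real script reads guest memory from the core file here:
        #   chunk = qemu_core.read_memory(cur, chunk_size)
        cur += chunk_size
        left -= chunk_size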