summaryrefslogtreecommitdiff
path: root/mytasks.py
blob: dcb53a8cc1cd4fabfe5ae2035320b9a86d030f5f (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
#!/usr/bin/env python

# RaidGuessFS, a FUSE pseudo-filesystem to guess RAID parameters of a damaged device
# Copyright (C) 2015 Ludovic Pouzenc <ludovic@pouzenc.fr>
#
# This file is part of RaidGuessFS.
#
# RaidGuessFS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RaidGuessFS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RaidGuessFS. If not, see <http://www.gnu.org/licenses/>

import os, multiprocessing, binascii, logging
import mydisks

def do_find_files(d,state):
    """Background task: scan every disk of *d* for 512-byte sectors that also
    occur in a set of reference files, reporting matches via *state*.

    d     -- mydisks-like object exposing disks (seekable binary file objects),
             disk_count, and disks_size (list of sizes, one per disk)
    state -- shared mapping (e.g. multiprocessing.Manager dict); reads
             'filepaths' (list of reference file paths), writes 'state',
             'progress' (0-100) and 'found' (list of tuples
             (ref_no, ref_offset, disk_no, offset)).

    Never raises: every exception is logged and swallowed so the worker
    process always exits cleanly.
    """
    logging.info("Enter do_find_files()")
    try:
        state['state'] = 'initializing'
        ref_paths = state['filepaths']
        ref_big_hash = {}
        # Index every non-empty, unique 512-byte sector of each reference file.
        for ref_no, path in enumerate(ref_paths):
            logging.debug("Try to open ref. file '%s'"%path)
            with open(path, 'rb') as fd:
                logging.info("Loading ref. file '%s'"%path)
                while True:
                    ref_offset = fd.tell()
                    data = fd.read(512)
                    if not data:
                        break
                    # BUG FIX: read() on a binary file returns bytes; the old
                    # comparison against a str ('\0'*512) never matched on
                    # Python 3, so all-zero sectors polluted the index.
                    if data == b'\0'*512:
                        logging.info("Ignoring empty sector in '%s'@0x%011x"%(path,ref_offset))
                    elif data in ref_big_hash:
                        (prev_ref_no, prev_ref_offset) = ref_big_hash[data]
                        # BUG FIX: log the file paths, as the '%s'@0x... wording
                        # implies, instead of bare ref indices.
                        logging.info("Non-unique sector found in ref. files ('%s'@0x%011x and '%s'@0x%011x)"%
                                (ref_paths[prev_ref_no], prev_ref_offset, path, ref_offset))
                    else:
                        ref_big_hash[data] = (ref_no, ref_offset)

        start = 0
        # Only scan the range readable on every disk.
        end = min(d.disks_size)
        # BUG FIX: use integer division (true division yields a float on
        # Python 3), round up to a sector multiple, and guard against zero so
        # the progress modulo below cannot raise ZeroDivisionError.
        one_percent = (end - start) // 100
        one_percent = one_percent + ( (-one_percent) % 512 )
        if one_percent == 0:
            one_percent = 512
        logging.debug("start/end/1pc : %i / %i / %i"%(start,end,one_percent))

        state['found'] = []
        state['progress'] = 0
        state['state'] = 'searching'
        for offset in range(start, end, 512):
            for disk_no in range(d.disk_count):
                d.disks[disk_no].seek(offset)
                data = d.disks[disk_no].read(512)
                if data in ref_big_hash:
                    f = state['found']
                    if len(f) < 200:
                        # TODO aggregate contiguous matches
                        (ref_no, ref_offset) = ref_big_hash[data]
                        f.append((ref_no,ref_offset,disk_no,offset))
                        # Re-assign so a Manager proxy dict sees the mutation.
                        state['found'] = f
                    else:
                        # Hard cap to keep the shared state small.
                        state['state'] = 'aborted'
                        raise Exception('Aborting after too many matches')
            if offset % one_percent == 0:
                state['progress'] = state['progress'] + 1

        ref_big_hash.clear()
        state['state'] = 'finished'
        state['progress'] = 100
    except Exception as e:
        logging.exception(e)
    logging.info("Exit. do_find_files()")


def do_find_bootsect(d,state):
    """Background task: scan every disk of *d* for sectors ending with the
    0x55AA boot signature, reporting hits via *state*.

    d     -- mydisks-like object exposing disks (seekable binary file objects),
             disk_count, and disks_size (list of sizes, one per disk)
    state -- shared mapping (e.g. multiprocessing.Manager dict); writes
             'state', 'progress' (0-100) and 'found' (list of (disk_no, offset)).

    Never raises: every exception is logged and swallowed so the worker
    process always exits cleanly.
    """
    logging.info("Enter do_find_bootsect()")
    try:
        state['state'] = 'initializing'
        # Boot-sector magic: last two bytes of the sector must be 55 AA.
        ref_sig = binascii.unhexlify('55AA')

        start = 0
        # Only scan the range readable on every disk.
        end = min(d.disks_size)
        # BUG FIX: use integer division (true division yields a float on
        # Python 3), round up to a sector multiple, and guard against zero so
        # the progress modulo below cannot raise ZeroDivisionError.
        one_percent = (end - start) // 100
        one_percent = one_percent + ( (-one_percent) % 512 )
        if one_percent == 0:
            one_percent = 512
        logging.debug("start/end/1pc : %i / %i / %i"%(start,end,one_percent))

        state['found'] = []
        state['progress'] = 0
        state['state'] = 'searching'
        for offset in range(start, end, 512):
            for disk_no in range(d.disk_count):
                d.disks[disk_no].seek(offset)
                data = d.disks[disk_no].read(512)
                sig = data[510:]
                if sig == ref_sig:
                    f = state['found']
                    if len(f) < 200:
                        f.append((disk_no,offset))
                        # Re-assign so a Manager proxy dict sees the mutation.
                        state['found'] = f
                    else:
                        # Hard cap to keep the shared state small.
                        state['state'] = 'aborted'
                        raise Exception('Aborting after too many matches')

            if offset % one_percent == 0:
                state['progress'] = state['progress'] + 1

        state['progress'] = 100
        state['state'] = 'finished'
    except Exception as e:
        logging.exception(e)
    logging.info("Exit. do_find_bootsect()")



class MyTasks():
    """Manage long-running background tasks (boot-sector and file searches).

    Each task runs in its own multiprocessing.Process and reports progress
    through a Manager-backed shared dict.
    """

    # Names accepted by task_start() / task_kill().
    TASK_NAMES = ['find_bootsect', 'find_files']

    def __init__(self, mydisks):
        self.tasks = []
        self.d = mydisks
        self.find_files_pathlist = []
        manager = multiprocessing.Manager()
        self.find_bootsect_state = manager.dict()
        self.find_bootsect_process = None
        self.find_files_state = manager.dict()
        self.find_files_process = None

    def get_find_files_pathlist(self):
        """Return the list of reference file paths for the find_files task."""
        return self.find_files_pathlist

    def get_find_files_pathlist_str(self):
        """Return the reference path list as one newline-separated string."""
        return '\n'.join(self.find_files_pathlist)

    def task_start(self, task_name):
        """Spawn the worker process for *task_name*.

        Raises ValueError for any name not in TASK_NAMES.
        """
        if task_name == 'find_files':
            # Snapshot the path list into the shared dict for the worker.
            self.find_files_state['filepaths'] = list(self.find_files_pathlist)
            proc = multiprocessing.Process(
                    target=do_find_files,
                    args=(self.d, self.find_files_state))
            self.find_files_process = proc
            proc.start()
        elif task_name == 'find_bootsect':
            proc = multiprocessing.Process(
                    target=do_find_bootsect,
                    args=(self.d, self.find_bootsect_state))
            self.find_bootsect_process = proc
            proc.start()
        else:
            raise ValueError('Valid task names are : %s'%','.join(MyTasks.TASK_NAMES))

    def task_kill(self, task_name):
        """Terminate the given task's worker process if it is running.

        Raises ValueError for any name not in TASK_NAMES.
        """
        if task_name == 'find_bootsect':
            proc = self.find_bootsect_process
        elif task_name == 'find_files':
            proc = self.find_files_process
        else:
            raise ValueError('Valid task names are : %s'%','.join(MyTasks.TASK_NAMES))
        if proc is not None and proc.is_alive():
            proc.terminate()

    def append_find_files_pathlist(self, pathlist):
        """Append newline-separated paths to the find_files reference list."""
        # TODO : should receive a list, make changes in raidguessfs.py
        self.find_files_pathlist.extend(pathlist.split('\n'))

    def set_find_files_pathlist(self, new_find_files_pathlist):
        """Replace the find_files reference path list."""
        self.find_files_pathlist = new_find_files_pathlist

    def read_find_bootsect(self):
        """Render the find_bootsect state, or a placeholder if never started."""
        if self.find_bootsect_process is None:
            return 'This task has never been started\n'
        return '%s\n'%self.find_bootsect_state

    def read_find_files(self):
        """Render the find_files state, or a placeholder if never started."""
        if self.find_files_process is None:
            return 'This task has never been started\n'
        return '%s\n'%self.find_files_state