From 08106a353fab69ee93e02a7d674c2597c0e37061 Mon Sep 17 00:00:00 2001
From: Ludovic Pouzenc
Date: Sun, 30 Aug 2015 19:04:19 +0200
Subject: raid_type as a config attr + code refactoring

---
 myraid.py | 83 ++++++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 50 insertions(+), 33 deletions(-)

diff --git a/myraid.py b/myraid.py
index 5847e6e..35c1e2a 100644
--- a/myraid.py
+++ b/myraid.py
@@ -19,24 +19,24 @@
 # along with RaidGuessFS. If not, see <http://www.gnu.org/licenses/>
 
 import logging
-from myraidmaths import MyRaidMaths
+import myraidmaths
 
 class MyRaid():
     """Auxiliary class, managing RAID layer"""
     RAID_TYPES = [ '0', '1', '5', '5+0' ]
     RAID5_LAYOUTS = [ 'la', 'ra', 'ls', 'rs' ]
 
-    def __init__(self, *args, **kwargs):
-        self.d = None
+    def __init__(self, mydisks):
+        self.d = mydisks
         self.raid_start = 0
         self.raid_end = 0
-        self.raid_sector_size = 512 # TODO : should be self.d.sector_size
         self.raid_chunk_size = 65536
         self.raid_disk_order = []
         self.raid_disk_count = 0
+        self.raid_type = '5'
         self.raid_layout = 'ls'
         self.raid_disks = []
-        self.nested_subraid = 2
+        self.raid_subraid_count = 2
 
     def get_raid_start(self):
         return self.raid_start
@@ -53,9 +53,15 @@ class MyRaid():
     def get_raid_disk_order_str(self):
         return ' '.join(map(str,self.raid_disk_order))
 
+    def get_raid_type(self):
+        return self.raid_type
+
     def get_raid_layout(self):
         return self.raid_layout
 
+    def get_raid_subraid_count(self):
+        return self.raid_subraid_count
+
     def set_disks(self, new_mydisks):
         # FIXME : self.d don't need to be updaed (pass on __init__)
         self.d = new_mydisks
@@ -89,75 +95,86 @@ class MyRaid():
         self.raid_disk_order = new_raid_disk_order
         self.raid_disks = [ self.d.disks[i] for i in self.raid_disk_order ]
 
+    def set_raid_type(self, new_raid_type):
+        if new_raid_type in MyRaid.RAID_TYPES:
+            self.raid_type = new_raid_type
+        else:
+            raise ValueError('raid_type has to be one of %s'%' '.join(MyRaid.RAID_TYPES))
+
+    def set_raid_subraid_count(self, new_raid_subraid_count):
+        """Update the number of subcomponents in nested RAID levels"""
+        self.raid_subraid_count = new_raid_subraid_count
+
     def set_raid_layout(self, new_raid_layout):
+        """Update the kind of data/parity block layout for RAID5 family"""
         if new_raid_layout in MyRaid.RAID5_LAYOUTS:
             self.raid_layout = new_raid_layout
         else:
-            raise ValueError('raid_layout has to be one of %s'%' '.join(RAID_LAYOUTS))
+            raise ValueError('raid_layout has to be one of %s'%' '.join(MyRaid.RAID_LAYOUTS))
 
-    def sizeof_raid_result(self, raid_type):
+    def sizeof_raid_result(self):
         size = max(0, self.raid_end - self.raid_start)
         return {
             '0' : size * self.raid_disk_count,
             '1' : size if self.raid_disk_count == 2 else 0,
             '5' : size * (self.raid_disk_count - 1) if self.raid_disk_count >= 3 else 0,
             '5+0': size * (self.raid_disk_count - 2) if self.raid_disk_count >= 6 and self.raid_disk_count % 2 == 0 else 0,
-        }[raid_type]
+        }[self.raid_type]
 
-    def sizeof_disk_xor(self, raid_type):
+    def sizeof_disk_xor(self):
         return max(0, self.raid_end - self.raid_start)
 
-    def sizeof_disk_parity(self, raid_type):
-        size = max(0, self.raid_end - self.raid_start) / self.raid_sector_size * 16
+    def sizeof_disk_parity(self):
+        size = max(0, self.raid_end - self.raid_start) / self.d.sector_size * 16
         return {
             '0' : 64,
             '1' : size if self.raid_disk_count == 2 else 64,
             '5' : size if self.raid_disk_count >= 3 else 64,
             '5+0': size if self.raid_disk_count >= 6 and self.raid_disk_count % 2 == 0 else 64,
-        }[raid_type]
+        }[self.raid_type]
 
-    def read_disk_xor(self,raid_type,offset,size):
+    def read_disk_xor(self,offset,size):
         """Returns raw bitwise XOR against a bunch of disks slice"""
-        return MyRaidMaths.xor_blocks(self.raid_disks,offset,size)[1]
+        return myraidmaths.MyRaidMaths.xor_blocks(self.raid_disks,offset,size)[1]
 
-    def read_disk_parity(self,raid_type,offset,size):
+    def read_disk_parity(self,offset,size):
         """Returns textual information about parity status of each sector"""
-        logging.warn("Enter read_disk_parity(%s,%d,%d)"%(raid_type,offset,size))
+        logging.warn("Enter read_disk_parity(%d,%d)"%(offset,size))
         msg = {
             '0' : 'There no notion of parity in RAID 0 mode\n',
             '1' : None if self.raid_disk_count == 2 else 'Wrong disk count (should be 2)\n',
             '5' : None if self.raid_disk_count >= 3 else 'Wrong disk count (should be >=3)\n',
             '5+0': None if self.raid_disk_count >= 6 and self.raid_disk_count % 2 == 0 else 'Wrong disk count (should be >=6 and even)\n',
-        }[raid_type]
+        }[self.raid_type]
 
         if msg:
             return msg[offset:offset+size]
 
-        start = self.raid_start + offset * self.raid_sector_size / 16
-        end = start + size * self.raid_sector_size / 16
+        start = self.raid_start + offset * self.d.sector_size / 16
+        end = start + size * self.d.sector_size / 16
 
         #TODO : improove for nested levels
-        if raid_type in ['1','5', '5+0']:
+        if self.raid_type in ['1','5', '5+0']:
             result = ''.join(
-                [ '0x%011x %c\n'%( addr, MyRaidMaths.xor_blocks(self.raid_disks, addr, self.raid_sector_size)[0])
-                    for addr in xrange(start, end, self.raid_sector_size)
+                [ '0x%011x %c\n'%( addr, myraidmaths.MyRaidMaths.xor_blocks(self.raid_disks, addr, self.d.sector_size)[0])
+                    for addr in xrange(start, end, self.d.sector_size)
                 ])
         else:
             result = None
 
-        logging.warn("Exit. read_disk_parity(%s,%d,%d)"%(raid_type,offset,size))
+        logging.warn("Exit. read_disk_parity(%d,%d)"%(offset,size))
         return result
 
-    def read_raid_result(self,raid_type,offset,size):
+    def read_raid_result(self,offset,size):
         """Returns actual RAID data"""
-        res = MyRaidMaths.apply_raid_layout(offset, size, raid_type, self.raid_layout, self.raid_chunk_size, self.raid_disk_count, self.raid_start, self.nested_subraid);
+        res = myraidmaths.MyRaidMaths.apply_raid_layout(offset, size, self.raid_type, self.raid_layout, self.raid_chunk_size, self.raid_disk_count, self.raid_start, self.raid_subraid_count);
         (segment_no, segment_off, stripe_no, subraid_no, par_disk, data_disk, off_disk, aligned_read_size) = res
 
         logging.debug("raid.read_result(%s): offset=%d,segment_no=%d,segment_off=%d,stripe_no=%d,subraid_no=%d,par_disk=%d(disk%02d),data_disk=%d(disk%02d),off_disk=%d,aligned_read_size=%d,segment_off+aligned_read_size=%d"
-            % (raid_type,offset,segment_no,segment_off,stripe_no,subraid_no,par_disk,self.raid_disk_order[par_disk],data_disk,self.raid_disk_order[data_disk],off_disk,aligned_read_size,segment_off+aligned_read_size) )
+            % (self.raid_type,offset,segment_no,segment_off,stripe_no,subraid_no,par_disk,self.raid_disk_order[par_disk],data_disk,self.raid_disk_order[data_disk],off_disk,aligned_read_size,segment_off+aligned_read_size) )
 
         data_fd = self.raid_disks[data_disk]
 
@@ -176,29 +193,29 @@ class MyRaid():
         other_fds.remove(data_fd)
 
         data_arr = []
-        for s in xrange(off_disk, off_disk+aligned_read_size, self.raid_sector_size):
-            if self.d.is_readable(self.raid_disk_order[data_disk],s,self.raid_sector_size):
+        for s in xrange(off_disk, off_disk+aligned_read_size, self.d.sector_size):
+            if self.d.is_readable(self.raid_disk_order[data_disk],s,self.d.sector_size):
                 # Current sector is readable from data disk, read it
                 logging.debug('-> 0x%011x : readable'%s)
                 data_fd.seek(off_disk)
-                data_arr.append(data_fd.read(self.raid_sector_size))
+                data_arr.append(data_fd.read(self.d.sector_size))
             else:
                 # Current sector is dead on data disk, recover it if possible
                 recoverable = reduce(lambda a,b: a and b, [
-                    self.d.is_readable(other_disk,off_disk,self.raid_sector_size) for other_disk in other_disks
+                    self.d.is_readable(other_disk,off_disk,self.d.sector_size) for other_disk in other_disks
                 ])
                 if recoverable:
                     logging.info('-> 0x%011x : recoverable'%s)
-                    data_arr.append( MyRaidMaths.xor_blocks(other_fds, s,self.raid_sector_size)[1] )
+                    data_arr.append( myraidmaths.MyRaidMaths.xor_blocks(other_fds, s,self.d.sector_size)[1] )
                 else:
                     logging.warn('-> 0x%011x : unrecoverable'%s)
-                    data_arr.append( '\0' * self.raid_sector_size)
+                    data_arr.append( '\0' * self.d.sector_size)
         data = ''.join(data_arr)
 
         # Prevent short reads, seems mandatory for losetup'ing raid_result but kills performance
         #TODO : make it activable per config
         if aligned_read_size < size:
-            return ''.join( (data, self.read_raid_result(raid_type,offset+aligned_read_size,size-aligned_read_size) ) )
+            return ''.join( (data, self.read_raid_result(offset + aligned_read_size, size - aligned_read_size) ) )
 
         return data
-- 
cgit v1.2.3
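The patch above turns raid_type into a configured attribute with a validating setter instead of a per-call argument. The following is a minimal usage sketch of the resulting interface; it is not part of the commit, it assumes myraid.py (and its myraidmaths dependency) is importable from the RaidGuessFS tree, and it stubs the disk-set constructor argument with None because only the configuration calls are exercised.

import myraid

raid = myraid.MyRaid(None)        # the disk set is now passed at construction time (stubbed here)

raid.set_raid_type('5')           # validated against MyRaid.RAID_TYPES
raid.set_raid_layout('ls')        # validated against MyRaid.RAID5_LAYOUTS
raid.set_raid_subraid_count(2)    # only meaningful for nested levels such as '5+0'

# The sizeof_*/read_* helpers no longer take a raid_type argument;
# they consult the configured self.raid_type instead.
print(raid.get_raid_type())       # -> 5
print(raid.sizeof_raid_result())  # -> 0 until raid_start/raid_end and the disk count are set

try:
    raid.set_raid_type('6')       # not in RAID_TYPES, so a ValueError is raised
except ValueError as err:
    print(err)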