Diffstat (limited to 'myraidmaths.py')
-rw-r--r--  myraidmaths.py  139
1 files changed, 139 insertions, 0 deletions
diff --git a/myraidmaths.py b/myraidmaths.py
new file mode 100644
index 0000000..c0d3b8b
--- /dev/null
+++ b/myraidmaths.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+
+# RaidGuessFS, a FUSE pseudo-filesystem to guess RAID parameters of a damaged device
+# Copyright (C) 2015 Ludovic Pouzenc <ludovic@pouzenc.fr>
+#
+# This file is part of RaidGuessFS.
+#
+# RaidGuessFS is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# RaidGuessFS is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with RaidGuessFS. If not, see <http://www.gnu.org/licenses/>
+
+import logging
+import numpy
+
+class MyRaidMaths():
+    """Auxiliary class holding the RAID-layout mathematics helpers"""
+
+    @staticmethod
+    def xor_blocks(fd_list, offset, size):
+        """Compute the bitwise XOR of same-sized slices read from a list of disks"""
+        logging.info("Enter xor_blocks(fd_list(%i),0x%011x,%d)" % (len(fd_list), offset, size))
+
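+        # The XOR is computed on 64-bit words via numpy, so the slice size
+        # must map exactly onto whole 8-byte words.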
+        if size % 8 != 0:
+            raise ValueError('xor_blocks: size must be a multiple of 8')
+        dt = numpy.dtype('<u8')  # little-endian unsigned 64-bit words
+
+        fd_list[0].seek(offset)
+        str_b1 = fd_list[0].read(size)
+        numpy_b1 = numpy.fromstring(str_b1, dtype=dt)
+        all_zero = (numpy.count_nonzero(numpy_b1) == 0)
+        any_zero = all_zero
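+        # all_zero stays True only while every block read so far is entirely zero;
+        # any_zero becomes True as soon as at least one block is entirely zero.
+        # Both flags feed the result classification below.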
+
+        for fd in fd_list[1:]:
+            fd.seek(offset)
+            str_b2 = fd.read(size)
+            numpy_b2 = numpy.fromstring(str_b2, dtype=dt)
+            b2_zero = (numpy.count_nonzero(numpy_b2) == 0)
+            if all_zero:
+                all_zero = b2_zero
+            if not any_zero:
+                any_zero = b2_zero
+
+            numpy.bitwise_xor(numpy_b1, numpy_b2, numpy_b1)  # accumulate the XOR in place
+
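+        # Result codes:
+        #   'z' - every block was entirely zero (no information)
+        #   'G' - XOR of all blocks is zero and no block was all-zero (parity matches)
+        #   'g' - XOR is zero but at least one block was all-zero (weaker evidence)
+        #   'b' - XOR is non-zero (parity does not match)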
+        if all_zero:
+            result = 'z'
+        elif numpy.count_nonzero(numpy_b1) == 0:
+            if any_zero:
+                result = 'g'
+            else:
+                result = 'G'
+        else:
+            result = 'b'
+
+        #import sys, binascii
+        #print sys.stderr.write(binascii.hexlify(numpy_b1))
+        return (result, numpy_b1.tostring())
+
+    @staticmethod
+    def apply_raid_layout(wanted_raid_offset, wanted_read_size, raid_type, raid_layout, raid_chunk_size, raid_disk_count, raid_start, nested_subraid):
+        """Return disk numbers, on-disk offset and aligned read size locating the data for a given RAID offset/size and layout"""
+
+        if raid_type == '0':
+            segment_no = wanted_raid_offset // raid_chunk_size
+            segment_off = wanted_raid_offset % raid_chunk_size
+            stripe_no = segment_no // raid_disk_count
+            subraid_no = -1
+            par_disk = -1
+            data_disk = segment_no % raid_disk_count
+            off_disk = raid_start + stripe_no * raid_chunk_size + segment_off
+            aligned_read_size = min(wanted_read_size, (segment_no+1) * raid_chunk_size - wanted_raid_offset)
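+            # Illustration (4 disks, 64 KiB chunks): RAID offset 0x50000 is segment 5,
+            # stripe 1, so the data lives on disk 1 at raid_start + 0x10000.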
+
+        elif raid_type == '1':
+            segment_no = -1
+            segment_off = -1
+            stripe_no = -1
+            subraid_no = -1
+            par_disk = 1
+            data_disk = 0
+            off_disk = raid_start + wanted_raid_offset
+            aligned_read_size = wanted_read_size
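+            # RAID-1 mirrors the whole device: data is read from disk 0 at
+            # raid_start + offset, disk 1 holds the copy, and the chunk/stripe
+            # fields do not apply (hence -1).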
+
+        elif raid_type == '5':
+            segment_no = wanted_raid_offset // raid_chunk_size
+            segment_off = wanted_raid_offset % raid_chunk_size
+            stripe_no = segment_no // (raid_disk_count-1)
+            subraid_no = -1
+
+            if raid_layout in ['ls','la']:
+                par_disk = (raid_disk_count-1) - (stripe_no % raid_disk_count)
+            else: # raid_layout in ['rs','ra']:
+                par_disk = stripe_no % raid_disk_count
+
+            if raid_layout in ['ls','rs']:
+                data_disk = (par_disk+1 + (segment_no % (raid_disk_count-1))) % raid_disk_count
+            else: # raid_layout in ['la','ra']:
+                data_disk = segment_no % (raid_disk_count-1)
+                if data_disk >= par_disk:
+                    data_disk = data_disk + 1
+
+            off_disk = raid_start + stripe_no * raid_chunk_size + segment_off
+            # Note: the read returned may be shorter than requested (but still error-free);
+            # this nudges the caller toward chunk-aligned reads, which is good for performance.
+            aligned_read_size = min(wanted_read_size, (segment_no+1) * raid_chunk_size - wanted_raid_offset)
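+            # Illustration (4 disks, layout 'ls'): stripe 0 puts parity on disk 3 and
+            # data segments 0-2 on disks 0-2; stripe 1 puts parity on disk 2 and data
+            # segments 3-5 on disks 3, 0 and 1, and so on.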
+
+        elif raid_type == '5+0':
+            subraid_disk_count = raid_disk_count // nested_subraid
+            segment_no = wanted_raid_offset // raid_chunk_size
+            segment_off = wanted_raid_offset % raid_chunk_size
+            stripe_no = segment_no // (raid_disk_count - nested_subraid)
+            subraid_no = (segment_no // (subraid_disk_count-1)) % nested_subraid
+
+            if raid_layout in ['ls','la']:
+                subraid_par_disk = (subraid_disk_count-1) - (stripe_no % subraid_disk_count)
+            else: # raid_layout in ['rs','ra']:
+                subraid_par_disk = stripe_no % subraid_disk_count
+
+            if raid_layout in ['ls','rs']:
+                subraid_data_disk = (subraid_par_disk+1 + (segment_no % (subraid_disk_count-1))) % subraid_disk_count
+            else: # raid_layout in ['la','ra']:
+                subraid_data_disk = segment_no % (subraid_disk_count-1)
+                if subraid_data_disk >= subraid_par_disk:
+                    subraid_data_disk = subraid_data_disk + 1
+
+            # Map the per-sub-RAID indices back to global disk numbers
+            par_disk = subraid_no * subraid_disk_count + subraid_par_disk
+            data_disk = subraid_no * subraid_disk_count + subraid_data_disk
+
+            off_disk = raid_start + stripe_no * raid_chunk_size + segment_off
+            # Note: the read returned may be shorter than requested (but still error-free);
+            # this nudges the caller toward chunk-aligned reads, which is good for performance.
+            aligned_read_size = min(wanted_read_size, (segment_no+1) * raid_chunk_size - wanted_raid_offset)
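+            # Illustration (8 disks, nested_subraid=2, layout 'ls'): stripe 0 maps
+            # data segments 0-2 to disks 0-2 with parity on disk 3 (sub-RAID 0) and
+            # segments 3-5 to disks 4-6 with parity on disk 7 (sub-RAID 1).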
+
+        return (segment_no, segment_off, stripe_no, subraid_no, par_disk, data_disk, off_disk, aligned_read_size)