diff --git a/src/rockstor/fs/btrfs.py b/src/rockstor/fs/btrfs.py index d3a3beb2d..1aab32e51 100644 --- a/src/rockstor/fs/btrfs.py +++ b/src/rockstor/fs/btrfs.py @@ -16,10 +16,6 @@ along with this program. If not, see . """ -""" -system level helper methods to interact with the btrfs filesystem -""" - import re import time import os @@ -29,8 +25,12 @@ from system.exceptions import (CommandException) from pool_scrub import PoolScrub from django_ztask.decorators import task - import logging + +""" +system level helper methods to interact with the btrfs filesystem +""" + logger = logging.getLogger(__name__) MKFS_BTRFS = '/sbin/mkfs.btrfs' @@ -95,7 +95,7 @@ def get_pool_info(disk): """ cmd = [BTRFS, 'fi', 'show', '/dev/disk/by-id/%s' % disk] o, e, rc = run_command(cmd) - pool_info = {'disks': [],} + pool_info = {'disks': [], } for l in o: if (re.match('Label', l) is not None): fields = l.split() @@ -107,11 +107,11 @@ def get_pool_info(disk): # it's by-id references as that is the new format for Disks.name. # Original sda extraction: # pool_info['disks'].append(l.split()[-1].split('/')[-1]) - # Updated '/dev/sda' extraction to save on a split we no longer need - # and use this 'now' name to get our by-id name with path removed. - # This is required as that is how device names are stored in the - # db Disk.name so that we can locate a drive and update it's pool - # field reference. + # Updated '/dev/sda' extraction to save on a split we no longer + # need and use this 'now' name to get our by-id name with path + # removed. This is required as that is how device names are stored + # in the db Disk.name so that we can locate a drive and update it's + # pool field reference. dev_byid, is_byid = get_dev_byid_name(l.split()[-1], True) pool_info['disks'].append(dev_byid) return pool_info @@ -127,7 +127,7 @@ def pool_raid(mnt_pt): if (len(fields) > 1): block = fields[0][:-1].lower() raid = fields[1][:-1].lower() - if not block in raid_d and raid is not 'DUP': + if block not in raid_d and raid is not 'DUP': raid_d[block] = raid if (raid_d['metadata'] == 'single'): raid_d['data'] = raid_d['metadata'] @@ -171,12 +171,13 @@ def resize_pool(pool, dev_list_byid, add=True): If any device in the supplied dev_list fails this test then no command is executed and None is returned. :param pool: btrfs pool name - :param dev_list_byid: list of devices to add/delete in by-id (without path). + :param dev_list_byid: list of devices to add/delete in by-id (without + path). :param add: when true (default) or not specified then attempt to add - dev_list devices to pool, or when specified as True attempt to delete - dev_list devices from pool. + dev_list devices to pool, or when specified as True attempt to delete + dev_list devices from pool. :return: Tuple of results from run_command(generated command) or None if - the device member/pool sanity check fails. + the device member/pool sanity check fails. """ dev_list_byid = ['/dev/disk/by-id/' + d for d in dev_list_byid] root_mnt_pt = mount_root(pool) @@ -204,9 +205,9 @@ def mount_root(pool): """ Mounts a given pool at the default mount root (usually /mnt2/) using the pool.name as the final path entry. Ie pool.name = test-pool will be mounted - at /mnt2/test-pool. Any mount options held in pool.mnt_options will be added - to the mount command via the -o option as will a compress=pool.compression - entry. + at /mnt2/test-pool. Any mount options held in pool.mnt_options will be + added to the mount command via the -o option as will a compress = + pool.compression entry. N.B. 
Initially the mount target is defined by /dev/disk/by-label/pool.name, if this fails then an attempt to mount by each member of /dev/disk/by-id/pool.disk_set.all() but only if there are any members. @@ -235,10 +236,12 @@ def mount_root(pool): mnt_cmd.extend(['-o', mnt_options]) run_command(mnt_cmd) return root_pool_mnt + # If we cannot mount by-label, let's try mounting by device; one by one # until we get our first success. if (pool.disk_set.count() < 1): - raise Exception('Cannot mount Pool(%s) as it has no disks in it.' % pool.name) + raise Exception('Cannot mount Pool(%s) as it has no disks in it.' + % pool.name) last_device = pool.disk_set.last() for device in pool.disk_set.all(): mnt_device = ('/dev/disk/by-id/%s' % device.name) @@ -249,13 +252,15 @@ def mount_root(pool): try: run_command(mnt_cmd) return root_pool_mnt - except Exception, e: + except Exception as e: if (device.name == last_device.name): # exhausted mounting using all devices in the pool raise e - logger.error('Error mounting: %s. Will try using another device.' % mnt_cmd) + logger.error('Error mouting: %s. ' + 'Will try using another device.' % mnt_cmd) logger.exception(e) - raise Exception('Failed to mount Pool(%s) due to an unknown reason.' % pool.name) + raise Exception('Failed to mount Pool(%s) due to an unknown reason.' + % pool.name) def umount_root(root_pool_mnt): @@ -263,7 +268,7 @@ def umount_root(root_pool_mnt): return try: o, e, rc = run_command([UMOUNT, '-l', root_pool_mnt]) - except CommandException, ce: + except CommandException as ce: if (ce.rc == 32): for l in ce.err: l = l.strip() @@ -357,7 +362,7 @@ def subvol_list_helper(mnt_pt): while (True): try: return run_command([BTRFS, 'subvolume', 'list', mnt_pt]) - except CommandException, ce: + except CommandException as ce: if (ce.rc != 19): # rc == 19 is due to the slow kernel cleanup thread. It should # eventually succeed. @@ -381,12 +386,12 @@ def shares_info(pool): # useful to gather names of all shares in a pool try: mnt_pt = mount_root(pool) - except CommandException, e: + except CommandException as e: if (e.rc == 32): - #mount failed, so we just assume that something has gone wrong at a - #lower level, like a device failure. Return empty share map. - #application state can be removed. If the low level failure is - #recovered, state gets reconstructed anyway. + # mount failed, so we just assume that something has gone wrong at + # a lower level, like a device failure. Return empty share map. + # application state can be removed. If the low level failure is + # recovered, state gets reconstructed anyway. 
return {} raise o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-s', mnt_pt]) @@ -411,7 +416,8 @@ def shares_info(pool): clone = False if (len(snap_idmap[vol_id].split('/')) == 1): o, e, rc = run_command([BTRFS, 'property', 'get', - '%s/%s' % (mnt_pt, snap_idmap[vol_id])]) + '%s/%s' % (mnt_pt, + snap_idmap[vol_id])]) for l in o: if (l == 'ro=false'): clone = True @@ -431,6 +437,7 @@ def shares_info(pool): share_ids.append(vol_id) return shares_d + def parse_snap_details(mnt_pt, fields): writable = True snap_name = None @@ -448,8 +455,10 @@ def parse_snap_details(mnt_pt, fields): snap_name = fields[-1].split('/')[-1] return snap_name, writable + def snaps_info(mnt_pt, share_name): - o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-u', '-p', '-q', mnt_pt]) + o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-u', '-p', '-q', + mnt_pt]) share_id = share_uuid = None for l in o: if (re.match('ID ', l) is not None): @@ -457,7 +466,8 @@ def snaps_info(mnt_pt, share_name): if (fields[-1] == share_name): share_id = fields[1] share_uuid = fields[12] - if (share_id is None): return {} + if (share_id is None): + return {} o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-s', '-p', '-q', '-u', mnt_pt]) @@ -468,15 +478,15 @@ def snaps_info(mnt_pt, share_name): fields = l.split() # parent uuid must be share_uuid or another snapshot's uuid if (fields[7] != share_id and fields[15] != share_uuid and - fields[15] not in snap_uuids): + fields[15] not in snap_uuids): continue snap_name, writable = parse_snap_details(mnt_pt, fields) if (snap_name is not None): snaps_d[snap_name] = ('0/%s' % fields[1], writable, ) - # we rely on the observation that child snaps are listed after their - # parents, so no need to iterate through results separately. - # Instead, we add the uuid of a snap to the list and look up if - # it's a parent of subsequent entries. + # we rely on the observation that child snaps are listed after + # their parents, so no need to iterate through results + # separately. Instead, we add the uuid of a snap to the list + # and look up if it's a parent of subsequent entries. snap_uuids.append(fields[17]) return snaps_d @@ -492,7 +502,8 @@ def share_id(pool, share_name): 'ID 257 gen 13616 top level 5 path rock-ons-root' :param pool: a pool object. :param share_name: target share name to find - :return: the id for the given share_name or an Exception stating no id found + :return: the id for the given share_name or an Exception stating no id + found """ root_pool_mnt = mount_root(pool) out, err, rc = subvol_list_helper(root_pool_mnt) @@ -521,7 +532,8 @@ def remove_share(pool, share_name, pqgroup, force=False): if (not is_subvol(subvol_mnt_pt)): return if (force): - o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-o', subvol_mnt_pt]) + o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-o', + subvol_mnt_pt]) for l in o: if (re.match('ID ', l) is not None): subvol = root_pool_mnt + '/' + l.split()[-1] @@ -532,6 +544,7 @@ def remove_share(pool, share_name, pqgroup, force=False): qgroup_destroy(qgroup, root_pool_mnt) return qgroup_destroy(pqgroup, root_pool_mnt) + def remove_snap(pool, share_name, snap_name): root_mnt = mount_root(pool) snap_path = ('%s/.snapshots/%s/%s' % @@ -545,10 +558,11 @@ def remove_snap(pool, share_name, snap_name): else: o, e, rc = run_command([BTRFS, 'subvolume', 'list', '-s', root_mnt]) for l in o: - #just give the first match. + # just give the first match. 
if (re.match('ID.*%s$' % snap_name, l) is not None): snap = '%s/%s' % (root_mnt, l.split()[-1]) - return run_command([BTRFS, 'subvolume', 'delete', snap], log=True) + return run_command([BTRFS, 'subvolume', 'delete', snap], + log=True) def add_snap_helper(orig, snap, writable): @@ -557,7 +571,7 @@ def add_snap_helper(orig, snap, writable): cmd.insert(3, '-r') try: return run_command(cmd) - except CommandException, ce: + except CommandException as ce: if (ce.rc != 19): # rc == 19 is due to the slow kernel cleanup thread. snapshot gets # created just fine. lookup is delayed arbitrarily. @@ -633,6 +647,7 @@ def qgroup_id(pool, share_name): sid = share_id(pool, share_name) return '0/' + sid + def qgroup_max(mnt_pt): o, e, rc = run_command([BTRFS, 'qgroup', 'show', mnt_pt], log=True) res = 0 @@ -643,6 +658,7 @@ def qgroup_max(mnt_pt): res = cid return res + def qgroup_create(pool): # mount pool mnt_pt = mount_root(pool) @@ -667,11 +683,12 @@ def qgroup_is_assigned(qid, pqid, mnt_pt): for l in o: fields = l.split() if (len(fields) > 3 and - fields[0] == qid and - fields[3] == pqid): + fields[0] == qid and + fields[3] == pqid): return True return False + def qgroup_assign(qid, pqid, mnt_pt): if (qgroup_is_assigned(qid, pqid, mnt_pt)): return True @@ -681,24 +698,26 @@ def qgroup_assign(qid, pqid, mnt_pt): # exit code 1. try: run_command([BTRFS, 'qgroup', 'assign', qid, pqid, mnt_pt]) - except CommandException, e: + except CommandException as e: wmsg = 'WARNING: quotas may be inconsistent, rescan needed' if (e.rc == 1 and e.err[0] == wmsg): - #schedule a rescan if one is not currently running. + # schedule a rescan if one is not currently running. dmsg = ('Quota inconsistency while assigning %s. Rescan scheduled.' % qid) try: run_command([BTRFS, 'quota', 'rescan', mnt_pt]) return logger.debug(dmsg) - except CommandException, e2: + except CommandException as e2: emsg = 'ERROR: quota rescan failed: Operation now in progress' if (e2.rc == 1 and e2.err[0] == emsg): - return logger.debug('%s.. Another rescan already in progress.' % dmsg) + return logger.debug('%s.. Another rescan already in ' + 'progress.' % dmsg) logger.exception(e2) raise e2 logger.exception(e) raise e + def update_quota(pool, qgroup, size_bytes): root_pool_mnt = mount_root(pool) # Until btrfs adds better support for qgroup limits. We'll not set limits. @@ -773,49 +792,92 @@ def shares_usage(pool, share_map, snap_map): def pool_usage(mnt_pt): - # @todo: remove temporary raid5/6 custom logic once fi usage - # supports raid5/6. + """Return used space of the storage pool mounted at mnt_pt. + + Used space is considered to be: + - All space currently used by data; + - All space currently allocated for metadata and system data. 
+ """ cmd = [BTRFS, 'fi', 'usage', '-b', mnt_pt] - total = 0 - inuse = 0 - free = 0 - data_ratio = 1 - raid56 = False - parity = 1 - disks = set() out, err, rc = run_command(cmd) - for e in err: - e = e.strip() - if (re.match('WARNING: RAID56', e) is not None): - raid56 = True - - for o in out: - o = o.strip() - if (raid56 is True and re.match('/dev/', o) is not None): - disks.add(o.split()[0]) - elif (raid56 is True and re.match('Data,RAID', o) is not None): - if (o[5:10] == 'RAID6'): - parity = 2 - elif (re.match('Device size:', o) is not None): - total = int(o.split()[2]) / 1024 - elif (re.match('Used:', o) is not None): - inuse = int(o.split()[1]) / 1024 - elif (re.match('Free ', o) is not None): - free = int(o.split()[2]) / 1024 - elif (re.match('Data ratio:', o) is not None): - data_ratio = float(o.split()[2]) - if (data_ratio < 0.01): - data_ratio = 0.01 - if (raid56 is True): - num_disks = len(disks) - if (num_disks > 0): - per_disk = total / num_disks - total = (num_disks - parity) * per_disk - else: - total = total / data_ratio - inuse = inuse / data_ratio - free = total - inuse - return (total, inuse, free) + + used = 0 + for line in out: + fields = re.split('\W+', line) + if line.startswith('Data'): + used += int(fields[5]) + elif re.search('Size', line): + used += int(fields[3]) + + return used / 1024 + + +def usage_bound(disk_sizes, num_devices, raid_level): + """Return the total amount of storage possible within this pool's set + of disks, in bytes. + + Algorithm adapted from Hugo Mills' implementation at: + http://carfax.org.uk/btrfs-usage/js/btrfs-usage.js + """ + # Determine RAID parameters + data_ratio = 1 + stripes = 1 + parity = 0 + + # Number of chunks to write at a time: as many as possible within the + # number of stripes + chunks = num_devices + + if raid_level == 'single': + chunks = 1 + elif raid_level == 'raid0': + stripes = 2 + elif raid_level == 'raid1': + data_ratio = 2 + chunks = 2 + elif raid_level == 'raid10': + data_ratio = 2 + stripes = max(2, int(num_devices / 2)) + elif raid_level == 'raid5': + parity = 1 + elif raid_level == 'raid6': + parity = 2 + + # Round down so that we have an exact number of duplicate copies + chunks -= chunks % data_ratio + + # Check for feasibility at the lower end + if num_devices < data_ratio * (stripes + parity): + return 0 + + # Compute the trivial bound + bound = int(sum(disk_sizes) / chunks) + + # For each partition point q, compute B_q (the test predicate) and + # modify the trivial bound if it passes. + bounding_q = -1 + for q in range(chunks - 1): + slice = sum(disk_sizes[q + 1:]) + b = int(slice / (chunks - q - 1)) + if disk_sizes[q] >= b and b < bound: + bound = b + bounding_q = q + + # The bound is the number of allocations we can make in total. If we + # have no bounding_q, then we have hit the trivial bound, and exhausted + # all space, so we can return immediately. + if bounding_q == -1: + return bound * ((chunks / data_ratio) - parity) + + # If we have a bounding_q, then all the devices past q are full, and + # we can remove them. The devices up to q have been used in every one + # of the allocations, so we can just reduce them by bound. 
+ disk_sizes = [size - bound for index, size in enumerate(disk_sizes) + if index <= bounding_q] + + new_bound = usage_bound(disk_sizes, bounding_q + 1, raid_level) + + return bound * ((chunks / data_ratio) - parity) + new_bound def scrub_start(pool, force=False): @@ -927,7 +989,8 @@ def set_property(mnt_pt, name, val, mount=True): def get_snap(subvol_path, oldest=False, num_retain=None, regex=None): - if (not os.path.isdir(subvol_path)): return None + if (not os.path.isdir(subvol_path)): + return None share_name = subvol_path.split('/')[-1] cmd = [BTRFS, 'subvol', 'list', '-o', subvol_path] o, e, rc = run_command(cmd) @@ -936,12 +999,12 @@ def get_snap(subvol_path, oldest=False, num_retain=None, regex=None): fields = l.split() if (len(fields) > 0): snap_fields = fields[-1].split('/') - if (len(snap_fields) != 3 or - snap_fields[1] != share_name): - #not the Share we are interested in. + if (len(snap_fields) != 3 or snap_fields[1] != share_name): + # not the Share we are interested in. continue - if (regex is not None and re.search(regex, snap_fields[2]) is None): - #regex not in the name + if (regex is not None and + re.search(regex, snap_fields[2]) is None): + # regex not in the name continue snaps[int(fields[1])] = snap_fields[2] snap_ids = sorted(snaps.keys()) @@ -954,7 +1017,8 @@ def get_snap(subvol_path, oldest=False, num_retain=None, regex=None): def get_oldest_snap(subvol_path, num_retain, regex=None): - return get_snap(subvol_path, oldest=True, num_retain=num_retain, regex=regex) + return get_snap(subvol_path, oldest=True, num_retain=num_retain, + regex=regex) def get_lastest_snap(subvol_path, regex=None): diff --git a/src/rockstor/storageadmin/models/pool.py b/src/rockstor/storageadmin/models/pool.py index fd65644b2..5efc00444 100644 --- a/src/rockstor/storageadmin/models/pool.py +++ b/src/rockstor/storageadmin/models/pool.py @@ -1,5 +1,5 @@ """ -Copyright (c) 2012-2013 RockStor, Inc. +Copyright (c) 2012-2016 RockStor, Inc. This file is part of RockStor. RockStor is free software; you can redistribute it and/or modify @@ -19,7 +19,8 @@ from django.db import models from django.conf import settings from smart_manager.models import PoolUsage -from fs.btrfs import pool_usage +from fs.btrfs import pool_usage, usage_bound + class Pool(models.Model): """Name of the pool""" @@ -37,19 +38,22 @@ class Pool(models.Model): @property def free(self, *args, **kwargs): - #why do we compute pool usage on the fly like this and not like - #share usage as part of state refresh? This is a lot simpler and - #less code. For share usage, this type of logic could slow things - #down quite a bit because there can be 100's of Shares, but number - #of Pools even on a large instance is usually no more than a few. - try: - return pool_usage('%s%s' % (settings.MNT_PT, self.name))[2] - except: - return self.size + # Why do we compute pool usage on the fly like this and not like + # share usage as part of state refresh? This is a lot simpler and + # less code. For share usage, this type of logic could slow things + # down quite a bit because there can be 100's of Shares, but number + # of Pools even on a large instance is usually no more than a few. 
+ return self.size - pool_usage('%s%s' % (settings.MNT_PT, self.name)) @property def reclaimable(self, *args, **kwargs): return 0 + def usage_bound(self): + disk_sizes = [int(size) for size in self.disk_set + .values_list('size', flat=True) + .order_by('-size')] + return usage_bound(disk_sizes, len(disk_sizes), self.raid) + class Meta: app_label = 'storageadmin' diff --git a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/add_pool_template.jst b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/add_pool_template.jst index f1160e1fb..dc1e6d645 100644 --- a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/add_pool_template.jst +++ b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/add_pool_template.jst @@ -1,105 +1,109 @@
[add_pool_template.jst markup hunk: the HTML in both the old and new versions of this template was lost in extraction. The surviving fragments, the "Create Pool" heading, the {{#each disks}} ... {{/each}} disk-selection loop and the Cancel control, show the form body being re-indented and restructured here, but the full markup is not recoverable.]
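Before the front-end pieces below, a quick sanity check of the usage_bound() helper added to fs/btrfs.py above. This is not part of the patch: it is a minimal sketch that assumes src/rockstor is on PYTHONPATH (the same import path storageadmin/models/pool.py uses), and the disk sizes are made-up illustration values; the helper itself is unit-agnostic.

    # Worked example for the new capacity-bound algorithm (illustration only).
    from fs.btrfs import usage_bound

    # Three hypothetical disks of 4000, 2000 and 1000 units, largest first,
    # the same ordering Pool.usage_bound() produces via order_by('-size').
    disk_sizes = [4000, 2000, 1000]

    for raid_level in ('single', 'raid0', 'raid1', 'raid5', 'raid6', 'raid10'):
        print('%s: %d' % (raid_level,
                          usage_bound(disk_sizes, len(disk_sizes), raid_level)))

    # Expected, following the algorithm above:
    #   single: 7000  (every unit of every disk is usable)
    #   raid0:  5000  (striping needs a second device with free space, so the
    #                  last 2000 of the largest disk is stranded)
    #   raid1:  3000  (the two smaller disks mirror onto the largest)
    #   raid5:  3000  (one member of every stripe goes to parity)
    #   raid6:  1000  (two parity members per stripe, and only the first 1000
    #                  can use all three disks)
    #   raid10: 0     (fewer than the four devices raid10 requires)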
diff --git a/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/selected_disks.jst b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/selected_disks.jst new file mode 100644 index 000000000..5f599e7a6 --- /dev/null +++ b/src/rockstor/storageadmin/static/storageadmin/js/templates/pool/selected_disks.jst @@ -0,0 +1,41 @@ +{{!}} + + +{{#if data}} + {{#each data}} + + {{count}} x {{size}} + {{sum}} + + {{/each}} + + Total Raw Capacity + {{total}} + + + Total Usable Capacity + Retrieving... + +{{else}} + No disks selected. +{{/if}} + diff --git a/src/rockstor/storageadmin/static/storageadmin/js/views/add_pool.js b/src/rockstor/storageadmin/static/storageadmin/js/views/add_pool.js index 453f51b96..33e86c824 100644 --- a/src/rockstor/storageadmin/static/storageadmin/js/views/add_pool.js +++ b/src/rockstor/storageadmin/static/storageadmin/js/views/add_pool.js @@ -3,7 +3,7 @@ * @licstart The following is the entire license notice for the * JavaScript code in this page. * - * Copyright (c) 2012-2013 RockStor, Inc. + * Copyright (c) 2012-2016 RockStor, Inc. * This file is part of RockStor. * * RockStor is free software; you can redistribute it and/or modify @@ -26,26 +26,64 @@ AddPoolView = Backbone.View.extend({ events: { - "click #js-cancel": "cancel", - 'click [class="disk"]': 'clickCheckbox', - "click #checkAll": "selectAllCheckboxes", - "change #raid_level": "clickCheckbox" + 'click #js-cancel': 'cancel', + 'change [class="disk"]': 'updateSelection', + 'change #checkAll': 'selectAllCheckboxes', + 'change #raid_level': 'renderSummary' }, initialize: function () { - this.template = window.JST.pool_add_pool_template; // dont paginate disk selection table for now //this.pagination_template = window.JST.common_pagination; - this.collection = new DiskCollection(); // we do this as a workaround until we fix the add pool form properly. // with default page size, only upto 15 drives are shown. // @todo: fix this properly. 
+ this.collection = new DiskCollection(); this.collection.setPageSize(100); this.filteredCollection = new DiskCollection(); - this.collection.on("reset", this.renderDisks, this); + this.collection.on('reset', this.renderDisks, this); this.initHandlebarHelpers(); + _.bindAll(this, 'submit'); + + // Also respond to selection changes using a collection: + this.selectedDisks = new DiskCollection(); + this.selectedDisks.on('reset', this.renderSummary, this); + + // Validation + var err_msg = 'Incorrect number of disks.'; + var raid_err_msg = function() { + return err_msg; + }; + + $.validator.addMethod('validatePoolName', function (pool_name) { + if (/^[A-Za-z0-9_.-]+$/.test(pool_name) == false) { + err_msg = 'Invalid characters in Pool name.'; + return false; + } + return true; + }, raid_err_msg); + + $.validator.addMethod('validateRaid', function (raid_level) { + var n = $("input:checked.disk").length; + var min = 1; + if (raid_level == 'single') { + err_msg = 'At least one disk is required.'; + } else { + if (_.contains(['raid0', 'raid1', 'raid5'], raid_level)) + min = 2; + else if (raid_level == 'raid6') + min = 3; + else if (raid_level == 'raid10') + min = 4; + err_msg = $.validator.format( + 'At least {0} disks are required for {1} mode.', + min, raid_level + ); + } + return n >= min; + }, raid_err_msg); }, render: function () { @@ -84,60 +122,7 @@ AddPoolView = Backbone.View.extend({ $(_this.el).append(_this.template({ disks: this.collection.toJSON(), })); - - var err_msg = 'Incorrect number of disks'; - var raid_err_msg = function () { - return err_msg; - } - - $.validator.addMethod('validatePoolName', function (value) { - var pool_name = $('#pool_name').val(); - if (/^[A-Za-z0-9_.-]+$/.test(pool_name) == false) { - err_msg = 'Invalid characters in Pool name.'; - return false; - } - return true - }, raid_err_msg); - - - $.validator.addMethod('validateRaid', function (value) { - var raid_level = $('#raid_level').val(); - var n = $("input:checked.disk").length; - if (raid_level == 'single') { - if (n < 1) { - err_msg = 'At least one disk must be selected'; - return false; - } - } else if (raid_level == 'raid0') { - if (n < 2) { - err_msg = 'Raid0 requires at least 2 disks to be selected'; - return false; - } - } else if (raid_level == 'raid1') { - if (n < 2) { - err_msg = 'Raid1 requires at least 2 disks to be selected'; - return false; - } - } else if (raid_level == 'raid5') { - if (n < 2) { - err_msg = 'Raid5 requires at least 2 disks to be selected'; - return false; - } - } else if (raid_level == 'raid6') { - if (n < 3) { - err_msg = 'Raid6 requires at least 3 disks to be selected'; - return false; - } - } else if (raid_level == 'raid10') { - - if (n < 4) { - err_msg = 'Raid10 requires at least 4 disks to be selected'; - return false; - } - } - return true; - }, raid_err_msg); - + this.renderSummary(); this.$("#disks-table").tablesorter({ headers: { @@ -168,7 +153,6 @@ AddPoolView = Backbone.View.extend({ title: "Choose a compression algorithm for this Pool.
zlib: slower but higher compression ratio.
lzo: faster compression/decompression, but ratio smaller than zlib.
Enabling compression at the pool level applies to all Shares carved out of this Pool.
Don't enable compression here if you like to have finer control at the Share level.
You can change the algorithm, disable or enable it later, if necessary." }); - $('#add-pool-form').validate({ onfocusout: false, onkeyup: false, @@ -176,171 +160,125 @@ AddPoolView = Backbone.View.extend({ pool_name: "validatePoolName", raid_level: "validateRaid" }, - - submitHandler: function () { - var button = $('#create_pool'); - if (buttonDisabled(button)) return false; - disableButton(button); - var pool_name = $('#pool_name').val(); - var raid_level = $('#raid_level').val(); - var compression = $('#compression').val(); - if (compression == 'no') { - compression = null; - } - var mnt_options = $('#mnt_options').val(); - if (mnt_options == '') { - mnt_options = null; - } - var disk_names = []; - var n = $("input:checked.disk").length; - $("input:checked.disk").each(function (i) { - if (i < n) { - disk_names.push($(this).val()); - } - }); - - var jqxhr = $.ajax({ - url: '/api/pools', - type: 'POST', - dataType: 'json', - contentType: 'application/json', - data: JSON.stringify({ - "disks": disk_names, "raid_level": raid_level, - "pname": pool_name, "compression": compression, - "mnt_options": mnt_options, - }), - }); - - jqxhr.done(function () { - enableButton(button); - - _this.$('#add-pool-form input').tooltip('hide'); - app_router.navigate('pools', {trigger: true}) - }); - - jqxhr.fail(function (xhr, status, error) { - enableButton(button); - }); - - } - + submitHandler: this.submit }); return this; }, - cancel: function (event) { - event.preventDefault(); - this.$('#add-pool-form :input').tooltip('hide'); - app_router.navigate('pools', {trigger: true}); - }, - - selectAllCheckboxes: function (event) { - $("#checkAll").change(function () { - $("input:checkbox").prop('checked', $(this).prop("checked")); - $("input:checkbox").closest("tr").toggleClass("row-highlight", this.checked); + renderSummary: function() { + // Extract various data from currently selected disks for display + var diskSizes = this.selectedDisks.map(function(disk) { + return disk.get('size') * 1024; + }); + var total = _.reduce(diskSizes, function(total, element) { + return total + element; + }); + var sizeCounts = _.countBy(diskSizes, function(size) { + return size; }); - if ($('#checkAll').prop("checked")) { - var _this = this; - var allDisks = {}; - _this.collection.each(function (disk, index) { - var capacity = disk.get('size') * 1024; - if (capacity in allDisks) { - allDisks[capacity] += 1; + var data = _.map(sizeCounts, function(count, size) { + return { + count: count, + size: humanize.filesize(size), + sum: humanize.filesize(count * size) + }; + }); + // Render + this.$('#selected-disks-table').html( + window.JST.pool_selected_disks({ + data: data, + total: humanize.filesize(total) + }) + ); + // Request usage bound calculation + if (diskSizes) { + $.ajax({ + url: '/api/pools/usage_bound', + contentType: 'application/json', + data: { + disk_sizes: diskSizes, + raid_level: $('#raid_level').val() + } + }) + .done(function(result) { + target = $('#usable > b'); + if (result) { + target.text(humanize.filesize(result)); + target.css('color', 'green'); } else { - allDisks[capacity] = 1; + target.text('Not enough disks selected.'); + target.css('color', 'red'); } }); - var diskSummary = this.diskSummaryTable(allDisks); - $("#SelectedDisksTable").html(diskSummary); - } else { - $("#SelectedDisksTable").empty(); } + return this; }, - clickCheckbox: function (event) { - $("input:checkbox").change(function () { - $(this).closest("tr").toggleClass("row-highlight", this.checked); - }); - var _this = this; - var n = 
$("input:checked.disk").length; - var singleDisk = {}; - $("input:checked.disk").each(function (index) { - var capacity = _this.collection.get(this.id).get('size') * 1024; - if (capacity in singleDisk) { - singleDisk[capacity] += 1; - } else { - singleDisk[capacity] = 1; - } + updateSelection: function(event) { + if (!event.currentTarget.checked) + $('#checkAll').prop('checked', false); + var checkboxes = $('input:checkbox.disk'); + checkboxes.each(function() { + $(this).closest('tr').toggleClass('row-highlight', this.checked); }); - var diskSummary = this.diskSummaryTable(singleDisk); - if (n > 0) { - $("#SelectedDisksTable").html(diskSummary); - } else { - $("#SelectedDisksTable").empty(); - } + var diskIds = checkboxes.filter(':checked').map(function() { + return this.id; + }).get(); + var disks = _.map(diskIds, function(id) { + return this.collection.get(id); + }, this); + + // Update and trigger re-validation of selected raid level + this.selectedDisks.reset(disks); + $('#raid_level').valid(); }, - diskSummaryTable: function (diskObj) { - var formStyle = "
"; - var diskSummary = formStyle + ""; - var grandTotal = 0; - for (var key in diskObj) { - var readableCapacity = humanize.filesize(key); - var totalCapacity = key * diskObj[key]; - diskSummary += ""; - diskSummary += ""; - diskSummary += ""; - diskSummary += ""; - grandTotal += totalCapacity; + submit: function() { + var button = $('#create_pool'); + if (buttonDisabled(button)) + return false; + disableButton(button); + var compression = $('#compression').val(); + if (compression == 'no') { + compression = null; } - var diskUsableCapacity = this.getUsableCapacity(diskObj, humanize.filesize(grandTotal)); - diskSummary += ""; - diskSummary += "" + ""; - diskSummary += ""; - return diskSummary; - }, - - getUsableCapacity: function (diskObject, rawCapacity) { - var raidConfig = $('#raid_level').val(); - - //get all the keys and convert them to numbers to get the disk size correctly. - var keysArr = Object.keys(diskObject); - var numericKeysArr = keysArr.map(function (key) { - return Number(key); - }); - //calculate least disk size from all the keys. - var minDiskSize = Math.min.apply(Math, numericKeysArr); - - var totalSelectedDisks = 0; - for (var key in diskObject) { - totalSelectedDisks += diskObject[key]; - } - - var usableCapacity; - switch (raidConfig) { - case "single": - usableCapacity = rawCapacity; - break; - case "raid0": - usableCapacity = humanize.filesize(minDiskSize * totalSelectedDisks); - break; - case "raid1": - usableCapacity = humanize.filesize(minDiskSize * totalSelectedDisks / 2); - break; - case "raid5": - usableCapacity = humanize.filesize(minDiskSize * (totalSelectedDisks - 1)); - break; - case "raid6": - usableCapacity = humanize.filesize(minDiskSize * (totalSelectedDisks - 2)); - break; - case "raid10": - usableCapacity = humanize.filesize(minDiskSize * totalSelectedDisks / 2); - break; + var mnt_options = $('#mnt_options').val(); + if (mnt_options == '') { + mnt_options = null; } + $.ajax({ + url: '/api/pools', + type: 'POST', + dataType: 'json', + contentType: 'application/json', + data: JSON.stringify({ + disks: this.selectedDisks.pluck('name'), + raid_level: $('#raid_level').val(), + pname: $('#pool_name').val(), + compression: compression, + mnt_options: mnt_options + }) + }) + .done(function() { + enableButton(button); + $('#add-pool-form input').tooltip('hide'); + app_router.navigate('pools', {trigger: true}) + }) + .fail(function() { + enableButton(button); + }); + }, - return usableCapacity; + cancel: function (event) { + event.preventDefault(); + this.$('#add-pool-form :input').tooltip('hide'); + app_router.navigate('pools', {trigger: true}); + }, + selectAllCheckboxes: function(event) { + $('input:checkbox').prop('checked', $('#checkAll').prop('checked')); + this.updateSelection(event); }, initHandlebarHelpers: function () { @@ -352,7 +290,6 @@ AddPoolView = Backbone.View.extend({ return humanize.filesize(diskSize * 1024); }); } - }); //Add pagination diff --git a/src/rockstor/storageadmin/templates/storageadmin/base.html b/src/rockstor/storageadmin/templates/storageadmin/base.html index 145133602..d3f79f906 100644 --- a/src/rockstor/storageadmin/templates/storageadmin/base.html +++ b/src/rockstor/storageadmin/templates/storageadmin/base.html @@ -79,7 +79,7 @@ - + diff --git a/src/rockstor/storageadmin/urls/pools.py b/src/rockstor/storageadmin/urls/pools.py index 256787a6e..cf1ef9ff5 100644 --- a/src/rockstor/storageadmin/urls/pools.py +++ b/src/rockstor/storageadmin/urls/pools.py @@ -18,7 +18,7 @@ from django.conf.urls import patterns, url from 
storageadmin.views import (PoolListView, PoolDetailView, PoolScrubView, - PoolBalanceView) + PoolBalanceView, get_usage_bound) from django.conf import settings pool_regex = settings.POOL_REGEX @@ -26,6 +26,7 @@ urlpatterns = patterns( '', url(r'^$', PoolListView.as_view(), name='pool-view'), + url(r'^/usage_bound$', get_usage_bound), url(r'^/(?P%s)$' % pool_regex, PoolDetailView.as_view(),), url(r'^/(?P%s)/balance$' % pool_regex, PoolBalanceView.as_view(),), url(r'^/(?P%s)/balance/(?P.*)$' % pool_regex, @@ -33,5 +34,6 @@ url(r'^/(?P%s)/scrub$' % pool_regex, PoolScrubView.as_view(),), url(r'^/(?P%s)/scrub/(?P.*)$' % pool_regex, PoolScrubView.as_view(),), - url(r'^/(?P%s)/(?P.*)$' % pool_regex, PoolDetailView.as_view(),), + url(r'^/(?P%s)/(?P.*)$' % pool_regex, + PoolDetailView.as_view(),) ) diff --git a/src/rockstor/storageadmin/views/__init__.py b/src/rockstor/storageadmin/views/__init__.py index d32d5e226..3d3024edf 100644 --- a/src/rockstor/storageadmin/views/__init__.py +++ b/src/rockstor/storageadmin/views/__init__.py @@ -21,7 +21,7 @@ from snapshot import SnapshotView from share import (ShareListView, ShareDetailView) from disk import (DiskMixin, DiskListView, DiskDetailView) -from pool import (PoolListView, PoolDetailView) +from pool import (PoolListView, PoolDetailView, get_usage_bound) from command import CommandView from appliances import (ApplianceListView, ApplianceDetailView) from login import LoginView diff --git a/src/rockstor/storageadmin/views/command.py b/src/rockstor/storageadmin/views/command.py index 48aa3cd89..01c0a10b1 100644 --- a/src/rockstor/storageadmin/views/command.py +++ b/src/rockstor/storageadmin/views/command.py @@ -67,7 +67,7 @@ def _refresh_pool_state(): pool_info = get_pool_info(fd.name) p.name = pool_info['label'] p.raid = pool_raid('%s%s' % (settings.MNT_PT, p.name))['data'] - p.size = pool_usage('%s%s' % (settings.MNT_PT, p.name))[0] + p.size = p.usage_bound() p.save() except Exception, e: logger.error('Exception while refreshing state for ' diff --git a/src/rockstor/storageadmin/views/disk.py b/src/rockstor/storageadmin/views/disk.py index 5df5c492e..f0a3c0db8 100644 --- a/src/rockstor/storageadmin/views/disk.py +++ b/src/rockstor/storageadmin/views/disk.py @@ -213,7 +213,7 @@ def _update_disk_state(): # update disk db object to reflect special root pool status dob.pool = p dob.save() - p.size = pool_usage(mount_root(p))[0] + p.size = p.usage_bound() enable_quota(p) p.uuid = btrfs_uuid(dob.name) p.save() @@ -379,7 +379,7 @@ def _btrfs_disk_import(self, dname, request): do.save() mount_root(po) po.raid = pool_raid('%s%s' % (settings.MNT_PT, po.name))['data'] - po.size = pool_usage('%s%s' % (settings.MNT_PT, po.name))[0] + po.size = po.usage_bound() po.save() enable_quota(po) import_shares(po, request) diff --git a/src/rockstor/storageadmin/views/pool.py b/src/rockstor/storageadmin/views/pool.py index 28491229a..228a1af62 100644 --- a/src/rockstor/storageadmin/views/pool.py +++ b/src/rockstor/storageadmin/views/pool.py @@ -24,12 +24,13 @@ import time from rest_framework.response import Response from rest_framework import status +from rest_framework.decorators import api_view from django.db import transaction from storageadmin.serializers import PoolInfoSerializer from storageadmin.models import (Disk, Pool, Share, PoolBalance) from storageadmin.views import DiskMixin from fs.btrfs import (add_pool, pool_usage, resize_pool, umount_root, - btrfs_uuid, mount_root, start_balance) + btrfs_uuid, mount_root, start_balance, usage_bound) from system.osi import 
remount from storageadmin.util import handle_exception from django.conf import settings @@ -162,7 +163,7 @@ def _remount(cls, request, pool): for m in mount_map[share]: try: remount(m, mnt_options) - except Exception, e: + except Exception as e: logger.exception(e) failed_remounts.append(m) if (len(failed_remounts) > 0): @@ -272,7 +273,7 @@ def post(self, request): d.pool = p d.save() add_pool(p, dnames) - p.size = pool_usage(mount_root(p))[0] + p.size = p.usage_bound() p.uuid = btrfs_uuid(dnames[0]) p.save() return Response(PoolInfoSerializer(p).data) @@ -415,11 +416,11 @@ def put(self, request, pname, command): size_cut = 0 for d in disks: size_cut += d.size - if (size_cut >= usage[2]): + if (size_cut >= usage): e_msg = ('Removing these(%s) disks may shrink the pool by ' '%dKB, which is greater than available free space' ' %dKB. This is not supported.' % - (dnames, size_cut, usage[2])) + (dnames, size_cut, usage)) handle_exception(Exception(e_msg), request) resize_pool(pool, dnames, add=False) @@ -434,8 +435,7 @@ def put(self, request, pname, command): else: e_msg = ('command(%s) is not supported.' % command) handle_exception(Exception(e_msg), request) - usage = pool_usage('/%s/%s' % (settings.MNT_PT, pool.name)) - pool.size = usage[0] + pool.size = pool.usage_bound() pool.save() return Response(PoolInfoSerializer(pool).data) @@ -462,6 +462,15 @@ def delete(self, request, pname): pool.delete() try: self._update_disk_state() - except Exception, e: + except Exception as e: logger.error('Exception while updating disk state: %s' % e.__str__()) return Response() + + +@api_view() +def get_usage_bound(request): + """Simple view to relay the computed usage bound to the front end.""" + disk_sizes = [int(size) for size in + request.query_params.getlist('disk_sizes[]')] + raid_level = request.query_params.get('raid_level', 'single') + return Response(usage_bound(disk_sizes, len(disk_sizes), raid_level))
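The new get_usage_bound() view is what the reworked renderSummary() in add_pool.js calls at /api/pools/usage_bound (wired up in urls/pools.py above). A rough sketch of exercising it directly follows, assuming a reachable Rockstor instance and valid credentials; the host name, token placeholder and sizes are illustrative, not taken from the patch.

    import requests

    # 'disk_sizes[]' matches the key the view reads via
    # request.query_params.getlist('disk_sizes[]'); the calculation is
    # unit-agnostic (the UI happens to send byte values).
    params = [('disk_sizes[]', 4000),
              ('disk_sizes[]', 2000),
              ('raid_level', 'raid1')]

    resp = requests.get('https://rockstor.local/api/pools/usage_bound',
                        params=params,
                        headers={'Authorization': 'Bearer <token>'},  # placeholder auth
                        verify=False)  # self-signed certificate on a test box
    print(resp.json())  # -> 2000: a two-disk raid1 is bounded by the smaller disk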