
Commit 51b18a1
Added logic to save/read whole object as single json
tendrl-spec: Tendrl/specifications#172
Signed-off-by: Shubhendu <shtripat@redhat.com>
Shubhendu committed Aug 30, 2017
1 parent 5c72dc3 commit 51b18a1
Showing 6 changed files with 48 additions and 92 deletions.
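
The thrust of the commit: volume options stop being a separate VolumeOptions object, persisted key by key under .../Volumes/$Volume.vol_id/options, and instead ride along as a dict attribute on the Volume object, which is saved and read back whole as a single JSON value. A minimal standalone sketch of the idea against python-etcd, the client library this codebase uses; the key layout and field values here are illustrative, not the exact keys Tendrl writes:

import json

import etcd  # python-etcd

client = etcd.Client(host="127.0.0.1", port=2379)

# Illustrative volume state; field names mirror the Volume object changed below.
volume = {
    "vol_id": "1234",
    "name": "vol1",
    "status": "Started",
    "options": {"cluster.eager-lock": "on"},
}

# Save the whole object as one JSON value under a single key ...
key = "clusters/<integration_id>/Volumes/%s/data" % volume["vol_id"]
client.write(key, json.dumps(volume))

# ... and read it back in a single round trip.
loaded = json.loads(client.read(key).value)
assert loaded["options"]["cluster.eager-lock"] == "on"
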
15 changes: 0 additions & 15 deletions tendrl/gluster_integration/objects/definition/gluster.yaml
@@ -141,21 +141,6 @@ namespace.gluster:
       value: clusters/$TendrlContext.integration_id/raw_map/
       list: clusters/TendrlContext.integration_id/TendrlContext/raw_map/
       help: gluster cluster details
-    VolumeOptions:
-      attrs:
-        cluster_id:
-          help: "Tendrl managed/generated cluster id for the sds being managed by Tendrl"
-          type: String
-        vol_id:
-          help: Volume id
-          type: String
-        options:
-          help: options
-          type: dict
-      enabled: true
-      value: clusters/$TendrlContext.integration_id/Volumes/$Volume.vol_id/options
-      list: clusters/$TendrlContext.integration_id/Volumes/$Volume.vol_id/options
-      help: gluster volume options
     RebalanceDetails:
       attrs:
         vol_id:
2 changes: 2 additions & 0 deletions tendrl/gluster_integration/objects/volume/__init__.py
@@ -29,6 +29,7 @@ def __init__(
             profiling_enabled=None,
             client_count=None,
             rebal_estimated_time=None,
+            options=None,
             *args,
             **kwargs
     ):
@@ -59,6 +60,7 @@ def __init__(
         self.profiling_enabled = profiling_enabled
         self.client_count = client_count
         self.rebal_estimated_time = rebal_estimated_time
+        self.options = options
         self.value = 'clusters/{0}/Volumes/{1}'
 
     def render(self):
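
With VolumeOptions gone (its yaml definition above and its module below are deleted), options becomes an ordinary constructor argument defaulting to None. A toy stand-in, not the tendrl BaseObject, showing the consequence for callers: a constructed-but-not-loaded object carries options=None, so readers should normalize before iterating:

class Volume(object):
    # Hypothetical stand-in for NS.gluster.objects.Volume.
    def __init__(self, vol_id=None, options=None, **kwargs):
        self.vol_id = vol_id
        self.options = options  # None until set or loaded from the store

vol = Volume(vol_id="1234")
for name, value in (vol.options or {}).items():  # tolerate the None default
    print(name, value)
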
21 changes: 0 additions & 21 deletions tendrl/gluster_integration/objects/volume_options/__init__.py

This file was deleted.

80 changes: 35 additions & 45 deletions tendrl/gluster_integration/sds_sync/__init__.py
@@ -49,12 +49,11 @@ def _run(self):
             gluster_brick_dir = NS.gluster.objects.GlusterBrickDir()
             gluster_brick_dir.save()
 
-            try:
-                etcd_utils.read(
-                    "clusters/%s/"
-                    "cluster_network" % NS.tendrl_context.integration_id
-                )
-            except etcd.EtcdKeyNotFound:
+            cluster = NS.tendrl.objects.Cluster(
+                integration_id=NS.tendrl_context.integration_id
+            ).load()
+            if cluster.cluster_network is None or\
+                    cluster.cluster_network == "":
                 try:
                     node_networks = etcd_utils.read(
                         "nodes/%s/Networks" % NS.node_context.node_id
@@ -64,9 +63,6 @@ def _run(self):
                     node_network = NS.tendrl.objects.NodeNetwork(
                         interface=node_networks.leaves.next().key.split('/')[-1]
                     ).load()
-                    cluster = NS.tendrl.objects.Cluster(
-                        integration_id=NS.tendrl_context.integration_id
-                    ).load()
                     cluster.cluster_network = node_network.subnet
                     cluster.save()
                 except etcd.EtcdKeyNotFound as ex:
@@ -81,18 +77,18 @@ def _run(self):
                     )
                     raise ex
 
+        NS.node_context = NS.tendrl.objects.NodeContext().load()
         while not self._complete.is_set():
             try:
                 gevent.sleep(
                     int(NS.config.data.get("sync_interval", 10))
                 )
                 try:
-                    NS._int.wclient.write(
-                        "clusters/%s/"
-                        "sync_status" % NS.tendrl_context.integration_id,
-                        "in_progress",
-                        prevExist=False
-                    )
+                    cluster = NS.tendrl.objects.Cluster(
+                        integration_id=NS.tendrl_context.integration_id
+                    ).load()
+                    cluster.sync_status = "in_progress"
+                    cluster.save()
                 except (etcd.EtcdAlreadyExist, etcd.EtcdCompareFailed) as ex:
                     pass
 
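A behavioral nuance in the sync_status hunk above: the old raw write with prevExist=False doubled as a lock, raising etcd.EtcdAlreadyExist when a prior sync was still marked in progress, which is exactly what the except arm catches. The replacement load/mutate/save updates the field unconditionally, so the except arm matters only if the commons save() itself performs a compare-and-swap. Both shapes sketched against a bare python-etcd client (key name illustrative):

import etcd

client = etcd.Client()
key = "clusters/<integration_id>/sync_status"

# Old shape: create-only write, fails if the key already exists.
try:
    client.write(key, "in_progress", prevExist=False)
except etcd.EtcdAlreadyExist:
    pass  # another sync already in flight

# New shape (roughly): unconditional upsert of the field.
client.write(key, "in_progress")
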
@@ -184,10 +180,13 @@ def _run(self):
                         if k.startswith('%s.options' % volname):
                             dict1['.'.join(k.split(".")[2:])] = v
                             options.pop(k, None)
-                    NS.gluster.objects.VolumeOptions(
-                        vol_id=vol_id,
-                        options=dict1
-                    ).save()
+                    volume = NS.gluster.objects.Volume(
+                        vol_id=vol_id
+                    ).load()
+                    dest = dict(volume.options)
+                    dest.update(dict1)
+                    volume.options = dest
+                    volume.save()
 
                 # Sync cluster global details
                 if "provisioner/%s" % NS.tendrl_context.integration_id \
@@ -208,7 +207,7 @@ def _run(self):
 
                 _cluster = NS.tendrl.objects.Cluster(
                     integration_id=NS.tendrl_context.integration_id
-                )
+                ).load()
                 if _cluster.exists():
                     _cluster.sync_status = "done"
                     _cluster.last_sync = str(tendrl_now())
@@ -293,25 +292,22 @@ def sync_volumes(volumes, index, vol_options):
             devicetree = b.devicetree
 
     SYNC_TTL = int(NS.config.data.get("sync_interval", 10)) + 250
-    node_context = NS.node_context.load()
-    tag_list = node_context.tags
+    NS.node_context = NS.tendrl.objects.NodeContext().load()
+    tag_list = NS.node_context.tags
     # Raise alerts for volume state change.
     cluster_provisioner = "provisioner/%s" % NS.tendrl_context.integration_id
     if cluster_provisioner in tag_list:
         try:
-            stored_volume_status = NS._int.client.read(
-                "clusters/%s/Volumes/%s/status" % (
-                    NS.tendrl_context.integration_id,
-                    volumes['volume%s.id' % index]
-                )
-            ).value
+            stored_volume = NS.gluster.objects.Volume(
+                vol_id=volumes['volume%s.id' % index]
+            ).load()
             current_status = volumes['volume%s.status' % index]
-            if stored_volume_status != "" and \
-                    current_status != stored_volume_status:
+            if stored_volume.status and stored_volume.status != "" \
+                    and current_status != stored_volume.status:
                 msg = "Status of volume: %s " + \
                     "changed from %s to %s" % (
                         volumes['volume%s.name' % index],
-                        stored_volume_status,
+                        stored_volume.status,
                         current_status
                     )
                 instance = "volume_%s" % volumes[
@@ -359,10 +355,8 @@ def sync_volumes(volumes, index, vol_options):
                 ] = vol_options[
                     'volume%s.options.value%s' % (index, opt_count)
                 ]
-            NS.gluster.objects.VolumeOptions(
-                vol_id=volume.vol_id,
-                options=vol_opt_dict
-            ).save(ttl=SYNC_TTL)
+            volume.options = vol_opt_dict
+            volume.save()
 
             rebal_det = NS.gluster.objects.RebalanceDetails(
                 vol_id=volumes['volume%s.id' % index],
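
Also visible in this hunk: the deleted VolumeOptions record was written with save(ttl=SYNC_TTL), so stale entries aged out of etcd between syncs, while the replacement volume.save() passes no TTL and the merged object persists until overwritten. For reference, per-key expiry in python-etcd looks like this; whether the commons save() here still accepts a ttl is an assumption, not something this commit shows:

import etcd

client = etcd.Client()
# The key disappears on its own once the ttl (seconds) elapses.
client.write("illustrative/key", "value", ttl=260)
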
@@ -429,18 +423,14 @@ def sync_volumes(volumes, index, vol_options):
 
                 # Raise alerts if the brick path changes
                 try:
-                    sbs = NS._int.client.read(
-                        "clusters/%s/Bricks/all/"
-                        "%s/status" % (
-                            NS.tendrl_context.
-                            integration_id,
-                            brick_name
-                        )
-                    ).value
+                    stored_brick = NS.gluster.objects.Brick(
+                        name=brick_name
+                    ).load()
                     current_status = volumes.get(
                         'volume%s.brick%s.status' % (index, b_index)
                     )
-                    if current_status != sbs:
+                    if stored_brick.status and \
+                            current_status != stored_brick.status:
                         msg = "Status of brick: %s " + \
                             "under volume %s chan" + \
                             "ged from %s to %s" % (
@@ -449,7 +439,7 @@ def sync_volumes(volumes, index, vol_options):
                                 b_index
                             )],
                             volumes['volume%s.' 'name' % index],
-                            sbs,
+                            stored_brick.status,
                             current_status
                         )
                     instance = "volume_%s|brick_%s" % (
19 changes: 9 additions & 10 deletions tendrl/gluster_integration/sds_sync/brick_device_details.py
@@ -54,14 +54,13 @@ def update_brick_device_details(brick_name, brick_path, devicetree):
         pvs = [dev.path for dev in device.disks]
 
     brick = NS.gluster.objects.Brick(
-        brick_name,
-        devices=disks,
-        mount_path=mount_point,
-        lv=lv,
-        vg=vg,
-        pool=pool,
-        pv=pvs,
-        size=size
-    )
-
+        name=brick_name
+    ).load()
+    brick.devices = disks
+    brick.mount_path = mount_point
+    brick.lv = lv
+    brick.vg = vg
+    brick.pool = pool
+    brick.pv = pvs
+    brick.size = size
     brick.save()
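
The rewrite above trades construct-with-every-kwarg for load, mutate only the device fields, save. The difference matters when other fields (status, client_count, and so on) are already persisted: a blind construct-and-save can clobber them with None. A toy illustration with a hypothetical in-memory store, not the tendrl objects:

class Store(object):
    # Toy stand-in for etcd-backed object persistence.
    data = {}

    @classmethod
    def load(cls, name):
        return dict(cls.data.get(name, {"name": name}))

    @classmethod
    def save(cls, record):
        cls.data[record["name"]] = record

Store.save({"name": "brick1", "status": "Started", "lv": None})

brick = Store.load("brick1")       # load-modify-save ...
brick["lv"] = "/dev/mapper/vg-lv"
Store.save(brick)
assert Store.data["brick1"]["status"] == "Started"  # ... keeps other fields
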
3 changes: 2 additions & 1 deletion tendrl/gluster_integration/sds_sync/client_connections.py
@@ -19,7 +19,8 @@ def sync_volume_connections(volumes):
             fetched_brick = NS.gluster.objects.Brick(
                 name=brick_name
             ).load()
-            vol_connections += 0 if fetched_brick.client_count == '' \
+            vol_connections += 0 if (fetched_brick.client_count == ''
+                or fetched_brick.client_count is None) \
                 else int(fetched_brick.client_count)
             subvol_count += 1
     except etcd.EtcdKeyNotFound:
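
The added is None check matters because a Brick that has never had client_count synced loads it as None, and int(None) raises TypeError (int('') likewise raises ValueError), either of which would abort the whole connection count. The same guard as a small hypothetical helper:

def as_count(value):
    # Coerce a possibly-unset field ('' or None) to an int, defaulting to 0.
    return int(value) if value not in ('', None) else 0

assert as_count(None) == 0
assert as_count('') == 0
assert as_count('7') == 7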
