From 505d030cd6a253d1dd12d0d02a2c20f200596fa6 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Wed, 14 Jun 2017 10:15:56 -0400 Subject: [PATCH 01/26] remove old demo from tests directory --- tests/demo/requirements.txt | 5 - tests/demo/static/index.js | 51 ------ tests/demo/static/milligram.min.css | 12 -- tests/demo/templates/consul.html | 46 ------ tests/demo/templates/docker.html | 14 -- tests/demo/templates/index.html | 52 ------ tests/demo/templates/mysql.html | 1 - tests/demo/ui.py | 243 ---------------------------- 8 files changed, 424 deletions(-) delete mode 100644 tests/demo/requirements.txt delete mode 100644 tests/demo/static/index.js delete mode 100755 tests/demo/static/milligram.min.css delete mode 100644 tests/demo/templates/consul.html delete mode 100644 tests/demo/templates/docker.html delete mode 100644 tests/demo/templates/index.html delete mode 100644 tests/demo/templates/mysql.html delete mode 100644 tests/demo/ui.py diff --git a/tests/demo/requirements.txt b/tests/demo/requirements.txt deleted file mode 100644 index f9e82a2..0000000 --- a/tests/demo/requirements.txt +++ /dev/null @@ -1,5 +0,0 @@ -Flask==0.10.1 -manta==2.5.0 -PyMySQL==0.6.7 -python-consul==0.4.7 -requests==2.9.0 diff --git a/tests/demo/static/index.js b/tests/demo/static/index.js deleted file mode 100644 index 7f3eae8..0000000 --- a/tests/demo/static/index.js +++ /dev/null @@ -1,51 +0,0 @@ -var hideLogSection = function(e) { - e.parentNode.parentNode.style.display = "none"; -} - -var replaceTable = function(name) { - return function(data) { - thisDiv = document.getElementById(name); - thisDiv.innerHTML = data; - }; -}; - -var appendTable = function(name) { - return function(data) { - thisDiv = document.getElementById(name); - if (data.length > 0) { - data = thisDiv.innerHTML + data; - thisDiv.innerHTML = data.substr(data.length-1000); // truncate old - thisDiv.scrollTop = thisDiv.scrollHeight; - thisDiv.parentNode.style.display = "block"; - } - }; -}; - -var fillTable = function(fillFn, url) { - var req = new XMLHttpRequest(); - req.onreadystatechange = function() { - if (req.readyState == XMLHttpRequest.DONE) { - var data = ""; - if (req.status === 200) { - data = req.responseText; - } - fillFn(data); - } - } - req.open('GET', url, true); - req.send(null); -} - -var scrollBottom = function(divName) { - var thisDiv = document.getElementById(divName); -} - -window.onload = function() { - window.setInterval(function () { - fillTable(replaceTable("dockerTable"), "docker"); - fillTable(replaceTable("consulTable"), "consul"); - fillTable(appendTable("mysql_1"), "mysql/1"); - fillTable(appendTable("mysql_2"), "mysql/2"); - fillTable(appendTable("mysql_3"), "mysql/3"); - }, 1000); -}; diff --git a/tests/demo/static/milligram.min.css b/tests/demo/static/milligram.min.css deleted file mode 100755 index c9d7206..0000000 --- a/tests/demo/static/milligram.min.css +++ /dev/null @@ -1,12 +0,0 @@ -/*! 
- * Milligram v1.1.0 - * http://milligram.github.io - * - * Copyright (c) 2016 CJ Patoilo - * Licensed under the MIT license -*/ - - -html{box-sizing:border-box;font-size:62.5%}body{color:#606c76;font-family:"Roboto","Helvetica Neue","Helvetica","Arial",sans-serif;font-size:1.6em;font-weight:300;letter-spacing:.01em;line-height:1.6}*,*:after,*:before{box-sizing:inherit}blockquote{border-left:.3rem solid #d1d1d1;margin-left:0;margin-right:0;padding:1rem 1.5rem}blockquote *:last-child{margin:0}.button,button,input[type='button'],input[type='reset'],input[type='submit']{background-color:#9b4dca;border:.1rem solid #9b4dca;border-radius:.4rem;color:#fff;cursor:pointer;display:inline-block;font-size:1.1rem;font-weight:700;height:3.8rem;letter-spacing:.1rem;line-height:3.8rem;padding:0 3rem;text-align:center;text-decoration:none;text-transform:uppercase;white-space:nowrap}.button:hover,.button:focus,button:hover,button:focus,input[type='button']:hover,input[type='button']:focus,input[type='reset']:hover,input[type='reset']:focus,input[type='submit']:hover,input[type='submit']:focus{background-color:#606c76;border-color:#606c76;color:#fff;outline:0}.button.button-disabled,.button[disabled],button.button-disabled,button[disabled],input[type='button'].button-disabled,input[type='button'][disabled],input[type='reset'].button-disabled,input[type='reset'][disabled],input[type='submit'].button-disabled,input[type='submit'][disabled]{opacity:.5;cursor:default}.button.button-disabled:hover,.button.button-disabled:focus,.button[disabled]:hover,.button[disabled]:focus,button.button-disabled:hover,button.button-disabled:focus,button[disabled]:hover,button[disabled]:focus,input[type='button'].button-disabled:hover,input[type='button'].button-disabled:focus,input[type='button'][disabled]:hover,input[type='button'][disabled]:focus,input[type='reset'].button-disabled:hover,input[type='reset'].button-disabled:focus,input[type='reset'][disabled]:hover,input[type='reset'][disabled]:focus,input[type='submit'].button-disabled:hover,input[type='submit'].button-disabled:focus,input[type='submit'][disabled]:hover,input[type='submit'][disabled]:focus{background-color:#9b4dca;border-color:#9b4dca}.button.button-outline,button.button-outline,input[type='button'].button-outline,input[type='reset'].button-outline,input[type='submit'].button-outline{color:#9b4dca;background-color:transparent}.button.button-outline:hover,.button.button-outline:focus,button.button-outline:hover,button.button-outline:focus,input[type='button'].button-outline:hover,input[type='button'].button-outline:focus,input[type='reset'].button-outline:hover,input[type='reset'].button-outline:focus,input[type='submit'].button-outline:hover,input[type='submit'].button-outline:focus{color:#606c76;background-color:transparent;border-color:#606c76}.button.button-outline.button-disabled:hover,.button.button-outline.button-disabled:focus,.button.button-outline[disabled]:hover,.button.button-outline[disabled]:focus,button.button-outline.button-disabled:hover,button.button-outline.button-disabled:focus,button.button-outline[disabled]:hover,button.button-outline[disabled]:focus,input[type='button'].button-outline.button-disabled:hover,input[type='button'].button-outline.button-disabled:focus,input[type='button'].button-outline[disabled]:hover,input[type='button'].button-outline[disabled]:focus,input[type='reset'].button-outline.button-disabled:hover,input[type='reset'].button-outline.button-disabled:focus,input[type='reset'].button-outline[disabled]:hover,input[type='
reset'].button-outline[disabled]:focus,input[type='submit'].button-outline.button-disabled:hover,input[type='submit'].button-outline.button-disabled:focus,input[type='submit'].button-outline[disabled]:hover,input[type='submit'].button-outline[disabled]:focus{color:#9b4dca;border-color:inherit}.button.button-clear,button.button-clear,input[type='button'].button-clear,input[type='reset'].button-clear,input[type='submit'].button-clear{color:#9b4dca;background-color:transparent;border-color:transparent}.button.button-clear:hover,.button.button-clear:focus,button.button-clear:hover,button.button-clear:focus,input[type='button'].button-clear:hover,input[type='button'].button-clear:focus,input[type='reset'].button-clear:hover,input[type='reset'].button-clear:focus,input[type='submit'].button-clear:hover,input[type='submit'].button-clear:focus{color:#606c76;background-color:transparent;border-color:transparent}.button.button-clear.button-disabled:hover,.button.button-clear.button-disabled:focus,.button.button-clear[disabled]:hover,.button.button-clear[disabled]:focus,button.button-clear.button-disabled:hover,button.button-clear.button-disabled:focus,button.button-clear[disabled]:hover,button.button-clear[disabled]:focus,input[type='button'].button-clear.button-disabled:hover,input[type='button'].button-clear.button-disabled:focus,input[type='button'].button-clear[disabled]:hover,input[type='button'].button-clear[disabled]:focus,input[type='reset'].button-clear.button-disabled:hover,input[type='reset'].button-clear.button-disabled:focus,input[type='reset'].button-clear[disabled]:hover,input[type='reset'].button-clear[disabled]:focus,input[type='submit'].button-clear.button-disabled:hover,input[type='submit'].button-clear.button-disabled:focus,input[type='submit'].button-clear[disabled]:hover,input[type='submit'].button-clear[disabled]:focus{color:#9b4dca}code{background:#f4f5f6;border-radius:.4rem;font-size:86%;padding:.2rem .5rem;margin:0 .2rem;white-space:nowrap}pre{background:#f4f5f6;border-left:.3rem solid #9b4dca;font-family:"Menlo","Consolas","Bitstream Vera Sans Mono","DejaVu Sans Mono","Monaco",monospace}pre>code{background:transparent;border-radius:0;display:block;padding:1rem 1.5rem;white-space:pre}hr{border:0;border-top:.1rem solid #f4f5f6;margin-bottom:3.5rem;margin-top:3rem}input[type='email'],input[type='number'],input[type='password'],input[type='search'],input[type='tel'],input[type='text'],input[type='url'],textarea,select{-webkit-appearance:none;-moz-appearance:none;appearance:none;background-color:transparent;border:.1rem solid #d1d1d1;border-radius:.4rem;box-shadow:none;height:3.8rem;padding:.6rem 1rem;width:100%}input[type='email']:focus,input[type='number']:focus,input[type='password']:focus,input[type='search']:focus,input[type='tel']:focus,input[type='text']:focus,input[type='url']:focus,textarea:focus,select:focus{border:.1rem solid #9b4dca;outline:0}select{padding:.6rem 3rem .6rem 
1rem;background:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiIHN0YW5kYWxvbmU9Im5vIj8+PHN2ZyAgIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgICB4bWxuczpjYz0iaHR0cDovL2NyZWF0aXZlY29tbW9ucy5vcmcvbnMjIiAgIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyIgICB4bWxuczpzdmc9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiAgIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgICB4bWxuczpzb2RpcG9kaT0iaHR0cDovL3NvZGlwb2RpLnNvdXJjZWZvcmdlLm5ldC9EVEQvc29kaXBvZGktMC5kdGQiICAgeG1sbnM6aW5rc2NhcGU9Imh0dHA6Ly93d3cuaW5rc2NhcGUub3JnL25hbWVzcGFjZXMvaW5rc2NhcGUiICAgZW5hYmxlLWJhY2tncm91bmQ9Im5ldyAwIDAgMjkgMTQiICAgaGVpZ2h0PSIxNHB4IiAgIGlkPSJMYXllcl8xIiAgIHZlcnNpb249IjEuMSIgICB2aWV3Qm94PSIwIDAgMjkgMTQiICAgd2lkdGg9IjI5cHgiICAgeG1sOnNwYWNlPSJwcmVzZXJ2ZSIgICBpbmtzY2FwZTp2ZXJzaW9uPSIwLjQ4LjQgcjk5MzkiICAgc29kaXBvZGk6ZG9jbmFtZT0iY2FyZXQtZ3JheS5zdmciPjxtZXRhZGF0YSAgICAgaWQ9Im1ldGFkYXRhMzAzOSI+PHJkZjpSREY+PGNjOldvcmsgICAgICAgICByZGY6YWJvdXQ9IiI+PGRjOmZvcm1hdD5pbWFnZS9zdmcreG1sPC9kYzpmb3JtYXQ+PGRjOnR5cGUgICAgICAgICAgIHJkZjpyZXNvdXJjZT0iaHR0cDovL3B1cmwub3JnL2RjL2RjbWl0eXBlL1N0aWxsSW1hZ2UiIC8+PC9jYzpXb3JrPjwvcmRmOlJERj48L21ldGFkYXRhPjxkZWZzICAgICBpZD0iZGVmczMwMzciIC8+PHNvZGlwb2RpOm5hbWVkdmlldyAgICAgcGFnZWNvbG9yPSIjZmZmZmZmIiAgICAgYm9yZGVyY29sb3I9IiM2NjY2NjYiICAgICBib3JkZXJvcGFjaXR5PSIxIiAgICAgb2JqZWN0dG9sZXJhbmNlPSIxMCIgICAgIGdyaWR0b2xlcmFuY2U9IjEwIiAgICAgZ3VpZGV0b2xlcmFuY2U9IjEwIiAgICAgaW5rc2NhcGU6cGFnZW9wYWNpdHk9IjAiICAgICBpbmtzY2FwZTpwYWdlc2hhZG93PSIyIiAgICAgaW5rc2NhcGU6d2luZG93LXdpZHRoPSI5MDMiICAgICBpbmtzY2FwZTp3aW5kb3ctaGVpZ2h0PSI1OTQiICAgICBpZD0ibmFtZWR2aWV3MzAzNSIgICAgIHNob3dncmlkPSJ0cnVlIiAgICAgaW5rc2NhcGU6em9vbT0iMTIuMTM3OTMxIiAgICAgaW5rc2NhcGU6Y3g9Ii00LjExOTMxODJlLTA4IiAgICAgaW5rc2NhcGU6Y3k9IjciICAgICBpbmtzY2FwZTp3aW5kb3cteD0iNTAyIiAgICAgaW5rc2NhcGU6d2luZG93LXk9IjMwMiIgICAgIGlua3NjYXBlOndpbmRvdy1tYXhpbWl6ZWQ9IjAiICAgICBpbmtzY2FwZTpjdXJyZW50LWxheWVyPSJMYXllcl8xIj48aW5rc2NhcGU6Z3JpZCAgICAgICB0eXBlPSJ4eWdyaWQiICAgICAgIGlkPSJncmlkMzA0MSIgLz48L3NvZGlwb2RpOm5hbWVkdmlldz48cG9seWdvbiAgICAgcG9pbnRzPSIwLjE1LDAgMTQuNSwxNC4zNSAyOC44NSwwICIgICAgIGlkPSJwb2x5Z29uMzAzMyIgICAgIHRyYW5zZm9ybT0ibWF0cml4KDAuMzU0MTEzODcsMCwwLDAuNDgzMjkxMSw5LjMyNDE1NDUsMy42MjQ5OTkyKSIgICAgIHN0eWxlPSJmaWxsOiNkMWQxZDE7ZmlsbC1vcGFjaXR5OjEiIC8+PC9zdmc+) center right 
no-repeat}select:focus{background-image:url(data:image/svg+xml;base64,PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiIHN0YW5kYWxvbmU9Im5vIj8+PHN2ZyAgIHhtbG5zOmRjPSJodHRwOi8vcHVybC5vcmcvZGMvZWxlbWVudHMvMS4xLyIgICB4bWxuczpjYz0iaHR0cDovL2NyZWF0aXZlY29tbW9ucy5vcmcvbnMjIiAgIHhtbG5zOnJkZj0iaHR0cDovL3d3dy53My5vcmcvMTk5OS8wMi8yMi1yZGYtc3ludGF4LW5zIyIgICB4bWxuczpzdmc9Imh0dHA6Ly93d3cudzMub3JnLzIwMDAvc3ZnIiAgIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyIgICB4bWxuczpzb2RpcG9kaT0iaHR0cDovL3NvZGlwb2RpLnNvdXJjZWZvcmdlLm5ldC9EVEQvc29kaXBvZGktMC5kdGQiICAgeG1sbnM6aW5rc2NhcGU9Imh0dHA6Ly93d3cuaW5rc2NhcGUub3JnL25hbWVzcGFjZXMvaW5rc2NhcGUiICAgZW5hYmxlLWJhY2tncm91bmQ9Im5ldyAwIDAgMjkgMTQiICAgaGVpZ2h0PSIxNHB4IiAgIGlkPSJMYXllcl8xIiAgIHZlcnNpb249IjEuMSIgICB2aWV3Qm94PSIwIDAgMjkgMTQiICAgd2lkdGg9IjI5cHgiICAgeG1sOnNwYWNlPSJwcmVzZXJ2ZSIgICBpbmtzY2FwZTp2ZXJzaW9uPSIwLjQ4LjQgcjk5MzkiICAgc29kaXBvZGk6ZG9jbmFtZT0iY2FyZXQuc3ZnIj48bWV0YWRhdGEgICAgIGlkPSJtZXRhZGF0YTMwMzkiPjxyZGY6UkRGPjxjYzpXb3JrICAgICAgICAgcmRmOmFib3V0PSIiPjxkYzpmb3JtYXQ+aW1hZ2Uvc3ZnK3htbDwvZGM6Zm9ybWF0PjxkYzp0eXBlICAgICAgICAgICByZGY6cmVzb3VyY2U9Imh0dHA6Ly9wdXJsLm9yZy9kYy9kY21pdHlwZS9TdGlsbEltYWdlIiAvPjwvY2M6V29yaz48L3JkZjpSREY+PC9tZXRhZGF0YT48ZGVmcyAgICAgaWQ9ImRlZnMzMDM3IiAvPjxzb2RpcG9kaTpuYW1lZHZpZXcgICAgIHBhZ2Vjb2xvcj0iI2ZmZmZmZiIgICAgIGJvcmRlcmNvbG9yPSIjNjY2NjY2IiAgICAgYm9yZGVyb3BhY2l0eT0iMSIgICAgIG9iamVjdHRvbGVyYW5jZT0iMTAiICAgICBncmlkdG9sZXJhbmNlPSIxMCIgICAgIGd1aWRldG9sZXJhbmNlPSIxMCIgICAgIGlua3NjYXBlOnBhZ2VvcGFjaXR5PSIwIiAgICAgaW5rc2NhcGU6cGFnZXNoYWRvdz0iMiIgICAgIGlua3NjYXBlOndpbmRvdy13aWR0aD0iOTAzIiAgICAgaW5rc2NhcGU6d2luZG93LWhlaWdodD0iNTk0IiAgICAgaWQ9Im5hbWVkdmlldzMwMzUiICAgICBzaG93Z3JpZD0idHJ1ZSIgICAgIGlua3NjYXBlOnpvb209IjEyLjEzNzkzMSIgICAgIGlua3NjYXBlOmN4PSItNC4xMTkzMTgyZS0wOCIgICAgIGlua3NjYXBlOmN5PSI3IiAgICAgaW5rc2NhcGU6d2luZG93LXg9IjUwMiIgICAgIGlua3NjYXBlOndpbmRvdy15PSIzMDIiICAgICBpbmtzY2FwZTp3aW5kb3ctbWF4aW1pemVkPSIwIiAgICAgaW5rc2NhcGU6Y3VycmVudC1sYXllcj0iTGF5ZXJfMSI+PGlua3NjYXBlOmdyaWQgICAgICAgdHlwZT0ieHlncmlkIiAgICAgICBpZD0iZ3JpZDMwNDEiIC8+PC9zb2RpcG9kaTpuYW1lZHZpZXc+PHBvbHlnb24gICAgIHBvaW50cz0iMjguODUsMCAwLjE1LDAgMTQuNSwxNC4zNSAiICAgICBpZD0icG9seWdvbjMwMzMiICAgICB0cmFuc2Zvcm09Im1hdHJpeCgwLjM1NDExMzg3LDAsMCwwLjQ4MzI5MTEsOS4zMjQxNTUzLDMuNjI1KSIgICAgIHN0eWxlPSJmaWxsOiM5YjRkY2Y7ZmlsbC1vcGFjaXR5OjEiIC8+PC9zdmc+)}textarea{padding-bottom:.6rem;padding-top:.6rem;min-height:6.5rem}label,legend{font-size:1.6rem;font-weight:700;display:block;margin-bottom:.5rem}fieldset{border-width:0;padding:0}input[type='checkbox'],input[type='radio']{display:inline}.label-inline{font-weight:normal;display:inline-block;margin-left:.5rem}.container{margin:0 auto;max-width:112rem;padding:0 2rem;position:relative;width:100%}.row{display:flex;flex-direction:column;padding:0;width:100%}.row .row-wrap{flex-wrap:wrap}.row .row-no-padding{padding:0}.row .row-no-padding>.column{padding:0}.row .row-top{align-items:flex-start}.row .row-bottom{align-items:flex-end}.row .row-center{align-items:center}.row .row-stretch{align-items:stretch}.row .row-baseline{align-items:baseline}.row .column{display:block;flex:1;margin-left:0;max-width:100%;width:100%}.row .column .col-top{align-self:flex-start}.row .column .col-bottom{align-self:flex-end}.row .column .col-center{align-self:center}.row .column.column-offset-10{margin-left:10%}.row .column.column-offset-20{margin-left:20%}.row .column.column-offset-25{margin-left:25%}.row .column.column-offset-33,.row .column.column-offset-34{margin-left:33.3333%}.row .column.column-offset-50{margin-left:50%}.row .column.column-offset-66,.row 
.column.column-offset-67{margin-left:66.6666%}.row .column.column-offset-75{margin-left:75%}.row .column.column-offset-80{margin-left:80%}.row .column.column-offset-90{margin-left:90%}.row .column.column-10{flex:0 0 10%;max-width:10%}.row .column.column-20{flex:0 0 20%;max-width:20%}.row .column.column-25{flex:0 0 25%;max-width:25%}.row .column.column-33,.row .column.column-34{flex:0 0 33.3333%;max-width:33.3333%}.row .column.column-40{flex:0 0 40%;max-width:40%}.row .column.column-50{flex:0 0 50%;max-width:50%}.row .column.column-60{flex:0 0 60%;max-width:60%}.row .column.column-66,.row .column.column-67{flex:0 0 66.6666%;max-width:66.6666%}.row .column.column-75{flex:0 0 75%;max-width:75%}.row .column.column-80{flex:0 0 80%;max-width:80%}.row .column.column-90{flex:0 0 90%;max-width:90%}@media (min-width: 40rem){.row{flex-direction:row;margin-left:-1rem;width:calc(100% + 2.0rem)}.row .column{margin-bottom:inherit;padding:0 1rem}}a{color:#9b4dca;text-decoration:none}a:hover{color:#606c76}dl,ol,ul{margin-top:0;padding-left:0}dl ul,dl ol,ol ul,ol ol,ul ul,ul ol{font-size:90%;margin:1.5rem 0 1.5rem 3rem}dl{list-style:none}ul{list-style:circle inside}ol{list-style:decimal inside}dt,dd,li{margin-bottom:1rem}.button,button{margin-bottom:1rem}input,textarea,select,fieldset{margin-bottom:1.5rem}pre,blockquote,dl,figure,table,p,ul,ol,form{margin-bottom:2.5rem}table{width:100%}th,td{border-bottom:.1rem solid #e1e1e1;padding:1.2rem 1.5rem;text-align:left}th:first-child,td:first-child{padding-left:0}th:last-child,td:last-child{padding-right:0}p{margin-top:0}h1,h2,h3,h4,h5,h6{font-weight:300;margin-bottom:2rem;margin-top:0}h1{font-size:4rem;letter-spacing:-0.1rem;line-height:1.2}h2{font-size:3.6rem;letter-spacing:-0.1rem;line-height:1.25}h3{font-size:3rem;letter-spacing:-0.1rem;line-height:1.3}h4{font-size:2.4rem;letter-spacing:-0.08rem;line-height:1.35}h5{font-size:1.8rem;letter-spacing:-0.05rem;line-height:1.5}h6{font-size:1.6rem;letter-spacing:0;line-height:1.4}@media (min-width: 40rem){h1{font-size:5rem}h2{font-size:4.2rem}h3{font-size:3.6rem}h4{font-size:3rem}h5{font-size:2.4rem}h6{font-size:1.5rem}}.float-right{float:right}.float-left{float:left}.clearfix{*zoom:1}.clearfix:after,.clearfix:before{content:"";display:table}.clearfix:after{clear:both} - -/*# sourceMappingURL=milligram.min.css.map */ \ No newline at end of file diff --git a/tests/demo/templates/consul.html b/tests/demo/templates/consul.html deleted file mode 100644 index 83f41a3..0000000 --- a/tests/demo/templates/consul.html +++ /dev/null @@ -1,46 +0,0 @@ -
-
- 
-
-ID
-
-Address
-
-Status
-
-
-
-Primary:
- {% if consul.primary %} -
-{{ consul.primary.name }}
-
-{{ consul.primary.ip}}:{{consul.primary.port}}
-
-{{ consul.primary.health|colorize|safe}}
- {% endif %} -
- -{% for replica in consul.replicas %} -
-
-{% if loop.first %}Replicas:{% endif %}
-
-{{ replica.name }}
-
-{{ replica.ip}}:{{replica.port}}
-
-{{ replica.health|colorize|safe }}
-
-
- 
- -
-
-Lock by Container:
- {% if consul.lock %} -
-{{ consul.lock.host }}
- {% endif %} -
-
-
-Lock Session:
- {% if consul.lock %} -
-{{ consul.lock.lock_session_id }} w/ TTL: {{ consul.lock.ttl }}
- {% endif %} -
- -
-
-Last Backup:
-
-{{ consul.last_backup }}
-
-
-
-Last Binlog:
-
-{{ consul.last_binlog }}
-
diff --git a/tests/demo/templates/docker.html b/tests/demo/templates/docker.html deleted file mode 100644 index 822133a..0000000 --- a/tests/demo/templates/docker.html +++ /dev/null @@ -1,14 +0,0 @@ -
-
-ID
-
-Name
-
-Command
-
-Status
-
-
- -{% for row in docker %} -
-
-{{ row.id }}
-
-{{ row.name }}
-
-{{ row.command }}
-
-Up {{ row.uptime }}
-
-{% endfor %} diff --git a/tests/demo/templates/index.html b/tests/demo/templates/index.html deleted file mode 100644 index 321f762..0000000 --- a/tests/demo/templates/index.html +++ /dev/null @@ -1,52 +0,0 @@ - - - - - - - - - - - -
- -
-Docker
-
-
-
-ID
-
-Name
-
-Command
-
-Status
-
-
-
-
-Consul
-
-
- -
-
-my_mysql_1
-
-
-
- -
-
-my_mysql_2
-
-
-
- -
-
-my_mysql_3
-
-
-
- -
- - - diff --git a/tests/demo/templates/mysql.html b/tests/demo/templates/mysql.html deleted file mode 100644 index 07db892..0000000 --- a/tests/demo/templates/mysql.html +++ /dev/null @@ -1 +0,0 @@ -{% for log in logs %}{{ log }}
{% endfor %} diff --git a/tests/demo/ui.py b/tests/demo/ui.py deleted file mode 100644 index 2f90ea9..0000000 --- a/tests/demo/ui.py +++ /dev/null @@ -1,243 +0,0 @@ -from __future__ import print_function -import datetime as dt -import json -import os -import subprocess -import time - -import consul as pyconsul -from requests.exceptions import ConnectionError - -from flask import Flask, render_template -app = Flask(__name__) - - -class Pane(object): - - def __init__(self, name=''): - self.name = name - self.last_timestamp = None - - def fetch(self): - raise NotImplementedError() - -class DockerPane(Pane): - - def fetch(self): - """ Use the Docker client to run `docker ps` and returns a list """ - try: - output = subprocess.check_output([ - 'docker', 'ps', - "--format='table {{.ID}};{{.Names}};{{.Command}};{{.RunningFor}}'"]) - out = [] - except subprocess.CalledProcessError: - return {} - - for line in output.splitlines()[1:]: - fields = line.split(';') - out.append({ - "id": fields[0], - "name": fields[1], - "command": fields[2], - "uptime": fields[3], - }) - return out - - -class ConsulPane(Pane): - - _consul = None - - @property - def consul(self): - if self._consul: - return self._consul - - docker = os.environ['DOCKER_HOST'] # let this crash if not defined - docker = docker.replace('tcp://', '').replace(':2376','') - if '192.' in docker: - docker_host = docker - else: - try: - output = subprocess.check_output([ - 'docker', 'inspect', 'my_consul_1' - ]) - data = json.loads(output) - docker_host = data[0]['NetworkSettings']['IPAddress'] - except (subprocess.CalledProcessError, KeyError): - return None - - if docker_host: - self._consul = pyconsul.Consul(host=docker_host) - return self._consul - - - def fetch(self): - """ - Makes queries to Consul about the health of the mysql instances - and status of locks/backups used for coordination. 
- """ - try: - if not self.consul: - raise Exception("No Consul container running.") - return { - "primary": self._fetch_primary(), - "replicas": self._fetch_replicas(), - "lock": self._fetch_lock(), - "last_backup": self._fetch_last_backup(), - "last_binlog": self._fetch_last_binlog() - } - except (ConnectionError, pyconsul.ConsulException): - return {"error": "Could not connect to Consul"} - except Exception as ex: - return {"error": ex.message} - - def _fetch_primary(self): - """ Get status of the primary """ - try: - primary_node = self.consul.catalog.service('mysql-primary')[1][0] - primary_name = primary_node['ServiceID'].replace('mysql-primary-', '') - primary_ip = primary_node['Address'] - primary_port = primary_node['ServicePort'] - try: - primary_health = self.consul.health.checks('mysql-primary')[1][0]['Status'] - except (IndexError, TypeError): - primary_health = 'unknown' - - return { - "name": primary_name, - "ip": primary_ip, - "port": primary_port, - "health": primary_health - } - - except (IndexError, TypeError): - out = {} - return out - - def _fetch_replicas(self): - """ Get status of the replicas """ - try: - replica_nodes = self.consul.catalog.service('mysql')[1] - replica_health = {n['Name']: n['Status'] - for n in self.consul.health.checks('mysql')[1]} - replicas = [] - for replica in replica_nodes: - service_id = replica['ServiceID'] - name = service_id.replace('mysql-', '') - ip = replica['Address'] - port = replica['ServicePort'] - health = replica_health.get(service_id, 'unknown') - replicas.append({ - "name": name, - "ip": ip, - "port": port, - "health": health - }) - return replicas - except IndexError: - return [] - - def _fetch_lock(self): - """ Get information about the session lock we have for the primary""" - try: - lock_val = self.consul.kv.get('mysql-primary')[1] - lock_host = lock_val['Value'] - lock_session_id = lock_val['Session'] - lock_ttl = self.consul.session.info(lock_session_id)[1]['TTL'] - return { - "host": lock_host, - "ttl": lock_ttl, - "lock_session_id": lock_session_id - } - except: - return {} - - def _fetch_last_backup(self): - try: - return self.consul.kv.get('mysql-last-backup')[1]['Value']\ - .replace('mysql-backup-', '') - except (IndexError, TypeError): - return '' - - def _fetch_last_binlog(self): - try: - return self.consul.kv.get('mysql-last-binlog')[1]['Value'] - except (IndexError, TypeError): - return '' - - -class LogsPane(Pane): - """ - A LogsPane streams its data from a subprocess so the paint method is - completely different and we need to make sure we clean up files and - processes left over. 
- """ - def fetch(self): - - args = ['docker', 'logs', '-t'] - if self.last_timestamp: - args.extend(['--since', self.last_timestamp]) - args.append(self.name) - - try: - output = subprocess.check_output(args, stderr=subprocess.STDOUT).splitlines() - except subprocess.CalledProcessError: - return [] - - if len(output) > 0: - # we get back the UTC timestamp from the server but the client - # uses the local TZ for the timestamp for some unholy reason, - # so we need to add an offset - ts = output[-1].split()[0] - utc_dt = dt.datetime.strptime(ts[:19], '%Y-%m-%dT%H:%M:%S') - now = time.time() - offset = dt.datetime.fromtimestamp(now) - dt.datetime.utcfromtimestamp(now) - utc_dt = utc_dt + offset - self.last_timestamp = str(int(time.mktime(utc_dt.timetuple()))) - - # trim out the `docker logs` timestamps b/c we have the timestamps - # from the servers - output = [line[31:] for line in output] - return output - -@app.template_filter('colorize') -def colorize(value): - if value == 'critical': - return 'critical' - elif value == 'passing': - return 'passing' - else: - return value - - -@app.route('/') -def index(): - return render_template('index.html') - -@app.route('/docker') -def docker_handler(): - resp = docker_pane.fetch() - return render_template('docker.html', docker=resp) - -@app.route('/consul') -def consul_handler(): - resp = consul_pane.fetch() - return render_template('consul.html', consul=resp) - -@app.route('/mysql/') -def mysql_handler(mysql_id): - resp = mysql_panes[mysql_id - 1].fetch() - return render_template('mysql.html', logs=resp) - - -if __name__ == '__main__': - docker_pane = DockerPane(name='Docker') - consul_pane = ConsulPane(name='Consul') - mysql_panes = [ - LogsPane(name='my_mysql_1'), - LogsPane(name='my_mysql_2'), - LogsPane(name='my_mysql_3'), - ] - - app.run(threaded=True, debug=True) From 3e17b42b4b96d4c8a58b898146c0293ae1670a54 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Wed, 14 Jun 2017 10:23:15 -0400 Subject: [PATCH 02/26] remove autopilotpattern/testing submodule --- .gitmodules | 3 --- tests/testing | 1 - 2 files changed, 4 deletions(-) delete mode 160000 tests/testing diff --git a/.gitmodules b/.gitmodules index 0405ae1..e69de29 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,3 +0,0 @@ -[submodule "tests/testing"] - path = tests/testing - url = https://github.com/autopilotpattern/testing.git diff --git a/tests/testing b/tests/testing deleted file mode 160000 index 196e0ed..0000000 --- a/tests/testing +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 196e0ed2aa017be9850b0b2aaa61aa756fde6f5b From 413898d8624b31f3c21ca2e052c942bbe20a651f Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Wed, 14 Jun 2017 10:25:09 -0400 Subject: [PATCH 03/26] add triton-docker-cli submodule --- .gitmodules | 3 +++ tests/triton-docker-cli | 1 + 2 files changed, 4 insertions(+) create mode 160000 tests/triton-docker-cli diff --git a/.gitmodules b/.gitmodules index e69de29..d5478d7 100644 --- a/.gitmodules +++ b/.gitmodules @@ -0,0 +1,3 @@ +[submodule "tests/triton-docker-cli"] + path = tests/triton-docker-cli + url = https://github.com/joyent/triton-docker-cli diff --git a/tests/triton-docker-cli b/tests/triton-docker-cli new file mode 160000 index 0000000..031e98e --- /dev/null +++ b/tests/triton-docker-cli @@ -0,0 +1 @@ +Subproject commit 031e98e50a9bf32c7d1138a9089aa3835410ce52 From c49d479f3d82f483e2e0ca4ce48d1dd1d0c2706b Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Wed, 14 Jun 2017 14:13:08 -0400 Subject: [PATCH 04/26] test/deploy improvements --- 
examples/compose/docker-compose.yml | 36 +++ examples/compose/setup.sh | 0 .../triton/docker-compose.yml | 1 + setup.sh => examples/triton/setup.sh | 6 - local-compose.yml | 23 -- makefile | 77 +++-- tests/Dockerfile | 41 ++- tests/compose.sh | 0 tests/triton.sh | 280 ++++++++++++++++++ 9 files changed, 382 insertions(+), 82 deletions(-) create mode 100644 examples/compose/docker-compose.yml create mode 100644 examples/compose/setup.sh rename docker-compose.yml => examples/triton/docker-compose.yml (92%) rename setup.sh => examples/triton/setup.sh (95%) delete mode 100644 local-compose.yml create mode 100644 tests/compose.sh create mode 100755 tests/triton.sh diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml new file mode 100644 index 0000000..9874b10 --- /dev/null +++ b/examples/compose/docker-compose.yml @@ -0,0 +1,36 @@ +version: '2.1' + +services: + mysql: + image: autopilotpattern/mysql:${TAG:-latest} + mem_limit: 512m + restart: always + expose: + - 3306 + env_file: _env + network_mode: bridge + environment: + - CONSUL_AGENT=1 + - LOG_LEVEL=DEBUG + - CONSUL=consul + links: + - consul:consul + + consul: + image: consul:0.7.1 + command: > + agent -server -client=0.0.0.0 -bootstrap -ui + restart: always + mem_limit: 128m + ports: + - 8500:8500 + expose: + - 53 + - 8300 + - 8301 + - 8302 + - 8400 + - 8500 + network_mode: bridge + dns: + - 127.0.0.1 diff --git a/examples/compose/setup.sh b/examples/compose/setup.sh new file mode 100644 index 0000000..e69de29 diff --git a/docker-compose.yml b/examples/triton/docker-compose.yml similarity index 92% rename from docker-compose.yml rename to examples/triton/docker-compose.yml index d04a836..af389c5 100644 --- a/docker-compose.yml +++ b/examples/triton/docker-compose.yml @@ -17,6 +17,7 @@ services: - CONSUL_AGENT=1 - LOG_LEVEL=INFO - SERVICE_NAME=mysql + - CONSUL=mc.svc.${TRITON_CNS_SEARCH_DOMAIN_PRIVATE} consul: image: consul:0.7.1 diff --git a/setup.sh b/examples/triton/setup.sh similarity index 95% rename from setup.sh rename to examples/triton/setup.sh index 661f516..0ef3a6b 100755 --- a/setup.sh +++ b/examples/triton/setup.sh @@ -20,7 +20,6 @@ help() { # populated by `check` function whenever we're using Triton TRITON_USER= TRITON_DC= -TRITON_ACCOUNT= # --------------------------------------------------- # Top-level commands @@ -85,7 +84,6 @@ envcheck() { local docker_dc=$(echo $DOCKER_HOST | awk -F"/" '{print $3}' | awk -F'.' '{print $1}') TRITON_USER=$(triton profile get | awk -F": " '/account:/{print $2}') TRITON_DC=$(triton profile get | awk -F"/" '/url:/{print $3}' | awk -F'.' '{print $1}') - TRITON_ACCOUNT=$(triton account get | awk -F": " '/id:/{print $2}') if [ ! "$docker_user" = "$TRITON_USER" ] || [ ! 
"$docker_dc" = "$TRITON_DC" ]; then echo tput rev # reverse @@ -145,10 +143,6 @@ envcheck() { echo MANTA_PRIVATE_KEY=$(cat ${MANTA_PRIVATE_KEY_PATH} | tr '\n' '#') >> _env echo >> _env - echo '# Consul discovery via Triton CNS' >> _env - echo CONSUL=mysql-consul.svc.${TRITON_ACCOUNT}.${TRITON_DC}.cns.joyent.com >> _env - echo >> _env - echo 'Edit the _env file with your desired MYSQL_* and MANTA_* config' else echo 'Existing _env file found, exiting' diff --git a/local-compose.yml b/local-compose.yml deleted file mode 100644 index 9ea3685..0000000 --- a/local-compose.yml +++ /dev/null @@ -1,23 +0,0 @@ -version: '2.1' - -services: - mysql: - extends: - file: docker-compose.yml - service: mysql - mem_limit: 512m - environment: - - CONSUL_AGENT=1 - - CONSUL=consul - - LOG_LEVEL=DEBUG - links: - - consul:consul - ports: - - 3306 - - consul: - extends: - file: docker-compose.yml - service: consul - ports: - - 8500:8500 diff --git a/makefile b/makefile index 19982c4..09c617c 100644 --- a/makefile +++ b/makefile @@ -22,8 +22,6 @@ help: # ------------------------------------------------ # Target environment configuration -dockerLocal := DOCKER_HOST= DOCKER_TLS_VERIFY= DOCKER_CERT_PATH= docker - # if you pass `TRACE=1` into the call to `make` then the Python tests will # run under the `trace` module (provides detailed call logging) ifndef TRACE @@ -36,25 +34,25 @@ endif # Container builds ## Builds the application container image locally -build: test-runner - $(dockerLocal) build -t=$(image):$(tag) . +build: build/tester + docker build -t=$(image):$(tag) . ## Build the test running container -test-runner: - $(dockerLocal) build -f tests/Dockerfile -t=$(test_image):$(tag) . +build/tester: + docker build -f tests/Dockerfile -t=$(test_image):$(tag) . ## Push the current application container images to the Docker Hub push: - $(dockerLocal) push $(image):$(tag) - $(dockerLocal) push $(test_image):$(tag) + docker push $(image):$(tag) + docker push $(test_image):$(tag) ## Tag the current images as 'latest' and push them to the Docker Hub ship: - $(dockerLocal) tag $(image):$(tag) $(image):latest - $(dockerLocal) tag $(test_image):$(tag) $(test_image):latest - $(dockerLocal) tag $(image):$(tag) $(image):latest - $(dockerLocal) push $(image):$(tag) - $(dockerLocal) push $(image):latest + docker tag $(image):$(tag) $(image):latest + docker tag $(test_image):$(tag) $(test_image):latest + docker tag $(image):$(tag) $(image):latest + docker push $(image):$(tag) + docker push $(image):latest # ------------------------------------------------ @@ -64,16 +62,19 @@ ship: pull: docker pull $(image):$(tag) +## Run all tests +test: test/unit test/triton # test/compose + ## Run the unit tests inside the mysql container -test: - $(dockerLocal) run --rm -w /usr/local/bin \ +test/unit: + docker run --rm -w /usr/local/bin \ -e LOG_LEVEL=DEBUG \ $(image):$(tag) \ $(python) test.py ## Run the unit tests with source mounted to the container for local dev -test-src: - $(dockerLocal) run --rm -w /usr/local/bin \ +test/unit-src: + docker run --rm -w /usr/local/bin \ -v $(shell pwd)/bin/manager:/usr/local/bin/manager \ -v $(shell pwd)/bin/manage.py:/usr/local/bin/manage.py \ -v $(shell pwd)/bin/test.py:/usr/local/bin/test.py \ @@ -81,32 +82,29 @@ test-src: $(image):$(tag) \ $(python) test.py -$(DOCKER_CERT_PATH)/key.pub: - ssh-keygen -y -f $(DOCKER_CERT_PATH)/key.pem > $(DOCKER_CERT_PATH)/key.pub - -# For Jenkins test runner only: make sure we have public keys available -SDC_KEYS_VOL ?= -v $(DOCKER_CERT_PATH):$(DOCKER_CERT_PATH) 
-MANTA_KEY_ID ?= $(shell ssh-keygen -l -f $(DOCKER_CERT_PATH)/key.pub | awk '{print $$2}') -keys: $(DOCKER_CERT_PATH)/key.pub +# TODO: add once we can run with non-Manta storage backend +# Run the integration test runner against Compose locally. +# test/compose: +# docker run --rm \ +# -e TAG=$(tag) \ +# -e GIT_BRANCH=$(GIT_BRANCH) \ +# --network=bridge \ +# -v /var/run/docker.sock:/var/run/docker.sock \ +# -w /src \ +# $(testImage):$(tag) /src/compose.sh ## Run the integration test runner. Runs locally but targets Triton. -integration-test: - $(call check_var, TRITON_ACCOUNT TRITON_DC, \ +test/triton: + $(call check_var, TRITON_PROFILE, \ required to run integration tests on Triton.) - $(dockerLocal) run --rm \ + docker run --rm \ -e TAG=$(tag) \ - -e COMPOSE_HTTP_TIMEOUT=300 \ - -e DOCKER_HOST=$(DOCKER_HOST) \ - -e DOCKER_TLS_VERIFY=1 \ - -e DOCKER_CERT_PATH=$(DOCKER_CERT_PATH) \ - -e MANTA_KEY_ID=$(MANTA_KEY_ID) \ - -e MANTA_URL=$(MANTA_URL) \ - -e MANTA_USER=$(MANTA_USER) \ - -e MANTA_SUBUSER=$(MANTA_SUBUSER) \ - -e MANTA_ROLE=$(MANTA_ROLE) \ - -e CONSUL=mc.svc.$(TRITON_ACCOUNT).$(TRITON_DC).cns.joyent.com \ - $(SDC_KEYS_VOL) -w /src \ - $(test_image):$(tag) python3 tests.py + -e TRITON_PROFILE=$(TRITON_PROFILE) \ + -e GIT_BRANCH=$(GIT_BRANCH) \ + -v ~/.ssh:/root/.ssh:ro \ + -v ~/.triton/profiles.d:/root/.triton/profiles.d:ro \ + -w /src \ + $(testImage):$(tag) /src/triton.sh # ------------------------------------------------------- @@ -174,7 +172,6 @@ debug: @echo tag=$(tag) @echo image=$(image) @echo test_image=$(test_image) - @echo python=$(python) check_var = $(foreach 1,$1,$(__check_var)) __check_var = $(if $(value $1),,\ diff --git a/tests/Dockerfile b/tests/Dockerfile index 7aa41fb..4d4bc9d 100644 --- a/tests/Dockerfile +++ b/tests/Dockerfile @@ -1,17 +1,32 @@ -FROM alpine:3.4 +# NOTE: this Dockerfile needs to be run from one-level up so that +# we get the examples docker-compose.yml files. 
Use 'make build/tester' +# in the makefile at the root of this repo and everything will work + +FROM alpine:3.5 RUN apk update \ - && apk add nodejs docker python3 -RUN npm install -g triton json + && apk add nodejs python3 openssl bash curl docker +RUN npm install -g triton manta json + +# the Compose package in the public releases doesn't work on Alpine +RUN pip3 install docker-compose==1.10.0 + +# install specific version of Docker and Compose client +COPY test/triton-docker-cli/triton-docker /usr/local/bin/triton-docker +RUN sed -i 's/1.9.0/1.10.0/' /usr/local/bin/triton-docker \ + && ln -s /usr/local/bin/triton-docker /usr/local/bin/triton-compose \ + && ln -s /usr/local/bin/triton-docker /usr/local/bin/triton-docker-install \ + && /usr/local/bin/triton-docker-install \ + && rm /usr/local/bin/triton-compose-helper \ + && ln -s /usr/bin/docker-compose /usr/local/bin/triton-compose-helper + -# install dependencies -RUN pip3 install \ - docker-compose==1.10.0 \ - python-Consul==0.4.7 \ - IPy==0.83 +# install test targets +COPY examples/triton/docker-compose.yml /src/examples/triton/docker-compose.yml +COPY examples/triton/setup.sh /src/examples/triton/setup.sh +COPY examples/compose/docker-compose.yml /src/examples/compose/docker-compose.yml +COPY examples/compose/setup.sh /src/examples/compose/setup.sh -# install tests -COPY tests/testing/testcases.py /src/testcases.py -COPY tests/tests.py /src/tests.py -COPY docker-compose.yml /src/docker-compose.yml -COPY setup.sh /src/setup.sh +# install test code +COPY test/triton.sh /src/triton.sh +COPY test/compose.sh /src/compose.sh diff --git a/tests/compose.sh b/tests/compose.sh new file mode 100644 index 0000000..e69de29 diff --git a/tests/triton.sh b/tests/triton.sh new file mode 100755 index 0000000..b52e9c9 --- /dev/null +++ b/tests/triton.sh @@ -0,0 +1,280 @@ +#!/bin/bash +set -e + +export GIT_BRANCH="${GIT_BRANCH:-$(git rev-parse --abbrev-ref HEAD)}" +export TAG="${TAG:-branch-$(basename "$GIT_BRANCH")}" +export COMPOSE_PROJECT="${COMPOSE_PROJECT_NAME:-my}" +export COMPOSE_FILE="${COMPOSE_FILE:-./examples/triton/docker-compose.yml}" + +user=${MYSQL_USER:-mytestuser} +passwd=${MYSQL_PASSWORD:-password1} +db=${MYSQL_DATABASE:-mytestdb} +repl_user=${MYSQL_REPL_USER:-myrepluser} +repl_passwd=${MYSQL_REPL_PASSWORD:-password2} + +manta_bucket=${MANTA_BUCKET:-"~~/stor/triton_mysql"} +manta_url=${MANTA_URL:-https://us-east.manta.joyent.com} +manta_user=${MANTA_USER:-triton_mysql} +manta_subuser=${MANTA_SUBUSER:-triton_mysql} +manta_role=${MANTA_ROLE:-triton_mysql} + +project="$COMPOSE_PROJECT" +manifest="$COMPOSE_FILE" + +fail() { + echo + echo '------------------------------------------------' + echo 'FAILED: dumping logs' + echo '------------------------------------------------' + docker-compose -p "$project" -f "$manifest" ps + docker-compose -p "$project" -f "$manifest" logs + echo '------------------------------------------------' + echo 'FAILED' + echo "$1" + echo '------------------------------------------------' + exit 1 +} + +pass() { + teardown + echo + echo '------------------------------------------------' + echo 'PASSED!' + echo + exit 0 +} + +function finish { + result=$? 
+ if [ $result -ne 0 ]; then fail "unexpected error"; fi + pass +} +trap finish EXIT + + + +# -------------------------------------------------------------------- +# Helpers + +profile() { + echo + echo '------------------------------------------------' + echo 'setting up profile for tests' + echo '------------------------------------------------' + echo + export TRITON_PROFILE="${TRITON_PROFILE:-us-east-1}" + set +e + # if we're already set up for Docker this will fail noisily + triton profile docker-setup -y "$TRITON_PROFILE" > /dev/null 2>&1 + set -e + triton profile set-current "$TRITON_PROFILE" + eval "$(triton env)" + + # print out for profile debugging + env | grep DOCKER + env | grep SDC + env | grep TRITON + + local manta_key + manta_key=$(tr '\n' '#' < "${DOCKER_CERT_PATH}/key/pem") + { + echo "MYSQL_USER=${user}" + echo "MYSQL_PASSWORD=${passwd}" + echo "MYSQL_REPL_USER=${repl_user}" + echo "MYSQL_REPL_PASSWORD=$repl_passwd" + echo "MYSQL_DATABASE=$db" + + echo "MANTA_URL=$manta_url" + echo "MANTA_BUCKET=$manta_bucket" + echo "MANTA_USER=$manta_user" + echo "MANTA_SUBUSER=$manta_subuser" + echo "MANTA_ROLE=$manta_role" + echo "MANTA_KEY=$manta_key" + } > ./examples/triton/_env +} + +# asserts that 'count' MySQL instances are running and marked as Up +# by Docker. fails after the timeout. +wait_for_containers() { + local count timeout i got + count="$1" + timeout="${3:-120}" # default 120sec + i=0 + echo "waiting for $count MySQL containers to be Up..." + while [ $i -lt "$timeout" ]; do + got=$(docker-compose -p "$project" -f "$manifest" ps mysql) + got=$(echo "$got" | grep -c "Up") + if [ "$got" -eq "$count" ]; then + echo "$count instances reported Up in <= $i seconds" + return + fi + i=$((i+1)) + sleep 1 + done + fail "$count instances did not report Up within $timeout seconds" +} + +# asserts that the application has registered at least n instances with +# Consul. fails after the timeout. +wait_for_service() { + local service count timeout i got consul_ip + service="$1" + count="$2" + timeout="${3:-300}" # default 300sec + i=0 + echo "waiting for $count instances of $service to be registered with Consul..." 
+ consul_ip=$(triton ip "${project}_consul_1") + while [ $i -lt "$timeout" ]; do + got=$(curl -s "http://${consul_ip}:8500/v1/health/service/${service}?passing" \ + | json -a Service.Address | wc -l | tr -d ' ') + if [ "$got" -eq "$count" ]; then + echo "$service registered in <= $i seconds" + return + fi + i=$((i+1)) + sleep 1 + done + fail "waited for service $service for $timeout seconds but it was not registed with Consul" +} + +# gets the container that's currently primary in Consul +get_primary() { + local got consul_ip + consul_ip=$(triton ip "${project}_consul_1") + got=$(curl -s "http://${consul_ip}:8500/v1/health/service/mysql-primary?passing" \ + | json -a Node.Node | wc -l | tr -d ' ') + echo "$got" +} + +# gets a container that's currently a replica in Consul +get_replica() { + local got consul_ip + consul_ip=$(triton ip "${project}_consul_1") + got=$(curl -s "http://${consul_ip}:8500/v1/health/service/mysql?passing" \ + | json -a Node.Node | wc -l | tr -d ' ') + echo "$got" +} + +# creates a table on the first instance, which will be replicated to +# the other nodes +create_table() { + echo 'creating test table' + exec_query "${project}_mysql_1" 'CREATE TABLE tbl1 (field1 INT, field2 VARCHAR(36));' +} + +check_replication() { + echo 'checking replication' + local primary="$1" + local replica="$2" + local testkey="$3" + local testval="$3" + exec_query "$primary" "INSERT INTO tbl1 (field1, field2) VALUES ($testkey, \"$testval\");" + + # check the replica, giving it a few seconds to catch up + local timeout i + timeout=5 + i=0 + while [ $i -lt "$timeout" ]; do + got=$(exec_query "$replica" "SELECT * FROM tbl1 WHERE field1=$testkey;") + if [ "$got" -eq "$testval" ]; then + return + fi + i=$((i+1)) + sleep 1 + done + fail "failed to replicate write from $primary to $replica; query got $got" +} + +# runs a SQL statement on the node via docker exec. normally this method +# would be subject to SQL injection but we control all inputs and we don't +# want to have to ship a mysql client in this test rig. +exec_query() { + local node="$1" + local query="$2" + out=$(triton-docker exec "$node" \ + "mysql -u $user -p${passwd} --vertical -e '$query' $db") + echo "$out" +} + +restart() { + node="${project}_$1" + docker restart "$node" +} + +stop() { + node="${project}_$1" + docker stop "$node" +} + +run() { + echo + echo '* cleaning up previous test run' + echo + docker-compose -p "$project" -f "$manifest" stop + docker-compose -p "$project" -f "$manifest" rm -f + + echo + echo '* standing up initial test targets' + echo + docker-compose -p "$project" -f "$manifest" up -d +} + +teardown() { + echo + echo '* tearing down containers' + echo + docker-compose -p "$project" -f "$manifest" stop + docker-compose -p "$project" -f "$manifest" rm -f + + # TODO: cleanup Manta directory too + # echo '* cleaning up Manta directory' + # mrm ...? 
+} + +scale() { + count="$1" + echo + echo '* scaling up cluster' + echo + docker-compose -p "$project" -f "$manifest" scale mysql="$count" +} + + +# -------------------------------------------------------------------- +# Test sections + +test-failover() { + echo + echo '------------------------------------------------' + echo 'executing failover test' + echo '------------------------------------------------' + + # stand up and setup + run + wait_for_containers 1 + wait_for_service 'mysql-primary' 1 + scale 3 + wait_for_containers 3 + wait_for_service 'mysql' 2 + create_table + + # verify working + check_replication 'mysql_1' 'mysql_2' "1" "a" + + # force failover and verify again + stop "mysql_1" + wait_for_containers 2 + wait_for_service 'mysql-primary' 1 + wait_for_service 'mysql' 1 + + local primary replica + primary=$(get_primary) + replica=$(get_replica) + check_replication "$primary" "$replica" "2" "b" +} + +# -------------------------------------------------------------------- +# Main loop + +profile +test-failover From 34deae99bd5d76cec47c2e59c1d984fc3e572e57 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Wed, 14 Jun 2017 15:03:32 -0400 Subject: [PATCH 05/26] fix paths in triton.sh tests --- makefile | 10 +++++----- tests/Dockerfile | 6 +++--- tests/triton.sh | 24 ++++++++++++------------ 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/makefile b/makefile index 09c617c..cdc9cd5 100644 --- a/makefile +++ b/makefile @@ -13,7 +13,7 @@ GIT_BRANCH ?= $(shell git rev-parse --abbrev-ref HEAD) namespace ?= autopilotpattern tag := branch-$(shell basename $(GIT_BRANCH)) image := $(namespace)/mysql -test_image := $(namespace)/mysql-testrunner +testImage := $(namespace)/mysql-testrunner ## Display this help message help: @@ -39,17 +39,17 @@ build: build/tester ## Build the test running container build/tester: - docker build -f tests/Dockerfile -t=$(test_image):$(tag) . + docker build -f tests/Dockerfile -t=$(testImage):$(tag) . 
## Push the current application container images to the Docker Hub push: docker push $(image):$(tag) - docker push $(test_image):$(tag) + docker push $(testImage):$(tag) ## Tag the current images as 'latest' and push them to the Docker Hub ship: docker tag $(image):$(tag) $(image):latest - docker tag $(test_image):$(tag) $(test_image):latest + docker tag $(testImage):$(tag) $(testImage):latest docker tag $(image):$(tag) $(image):latest docker push $(image):$(tag) docker push $(image):latest @@ -171,7 +171,7 @@ debug: @echo namespace=$(namespace) @echo tag=$(tag) @echo image=$(image) - @echo test_image=$(test_image) + @echo testImage=$(testImage) check_var = $(foreach 1,$1,$(__check_var)) __check_var = $(if $(value $1),,\ diff --git a/tests/Dockerfile b/tests/Dockerfile index 4d4bc9d..b7bc84f 100644 --- a/tests/Dockerfile +++ b/tests/Dockerfile @@ -12,7 +12,7 @@ RUN npm install -g triton manta json RUN pip3 install docker-compose==1.10.0 # install specific version of Docker and Compose client -COPY test/triton-docker-cli/triton-docker /usr/local/bin/triton-docker +COPY tests/triton-docker-cli/triton-docker /usr/local/bin/triton-docker RUN sed -i 's/1.9.0/1.10.0/' /usr/local/bin/triton-docker \ && ln -s /usr/local/bin/triton-docker /usr/local/bin/triton-compose \ && ln -s /usr/local/bin/triton-docker /usr/local/bin/triton-docker-install \ @@ -28,5 +28,5 @@ COPY examples/compose/docker-compose.yml /src/examples/compose/docker-compose.ym COPY examples/compose/setup.sh /src/examples/compose/setup.sh # install test code -COPY test/triton.sh /src/triton.sh -COPY test/compose.sh /src/compose.sh +COPY tests/triton.sh /src/triton.sh +COPY tests/compose.sh /src/compose.sh diff --git a/tests/triton.sh b/tests/triton.sh index b52e9c9..a656e43 100755 --- a/tests/triton.sh +++ b/tests/triton.sh @@ -26,8 +26,8 @@ fail() { echo '------------------------------------------------' echo 'FAILED: dumping logs' echo '------------------------------------------------' - docker-compose -p "$project" -f "$manifest" ps - docker-compose -p "$project" -f "$manifest" logs + triton-compose -p "$project" -f "$manifest" ps + triton-compose -p "$project" -f "$manifest" logs echo '------------------------------------------------' echo 'FAILED' echo "$1" @@ -76,7 +76,7 @@ profile() { env | grep TRITON local manta_key - manta_key=$(tr '\n' '#' < "${DOCKER_CERT_PATH}/key/pem") + manta_key=$(tr '\n' '#' < "${DOCKER_CERT_PATH}/key.pem") { echo "MYSQL_USER=${user}" echo "MYSQL_PASSWORD=${passwd}" @@ -102,7 +102,7 @@ wait_for_containers() { i=0 echo "waiting for $count MySQL containers to be Up..." 
while [ $i -lt "$timeout" ]; do - got=$(docker-compose -p "$project" -f "$manifest" ps mysql) + got=$(triton-compose -p "$project" -f "$manifest" ps mysql) got=$(echo "$got" | grep -c "Up") if [ "$got" -eq "$count" ]; then echo "$count instances reported Up in <= $i seconds" @@ -198,33 +198,33 @@ exec_query() { restart() { node="${project}_$1" - docker restart "$node" + triton-docker restart "$node" } stop() { node="${project}_$1" - docker stop "$node" + triton-docker stop "$node" } run() { echo echo '* cleaning up previous test run' echo - docker-compose -p "$project" -f "$manifest" stop - docker-compose -p "$project" -f "$manifest" rm -f + triton-compose -p "$project" -f "$manifest" stop + triton-compose -p "$project" -f "$manifest" rm -f echo echo '* standing up initial test targets' echo - docker-compose -p "$project" -f "$manifest" up -d + triton-compose -p "$project" -f "$manifest" up -d } teardown() { echo echo '* tearing down containers' echo - docker-compose -p "$project" -f "$manifest" stop - docker-compose -p "$project" -f "$manifest" rm -f + triton-compose -p "$project" -f "$manifest" stop + triton-compose -p "$project" -f "$manifest" rm -f # TODO: cleanup Manta directory too # echo '* cleaning up Manta directory' @@ -236,7 +236,7 @@ scale() { echo echo '* scaling up cluster' echo - docker-compose -p "$project" -f "$manifest" scale mysql="$count" + triton-compose -p "$project" -f "$manifest" scale mysql="$count" } From f5aaf44f7b5bfbe48e9a954f7fd364d66363b60f Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Thu, 15 Jun 2017 10:08:41 -0400 Subject: [PATCH 06/26] move consul-agent join to its own health check --- etc/containerpilot.json5 | 37 +++++++++++++++++++++++-------------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/etc/containerpilot.json5 b/etc/containerpilot.json5 index a69b244..e7e2a86 100644 --- a/etc/containerpilot.json5 +++ b/etc/containerpilot.json5 @@ -6,16 +6,22 @@ jobs: [ { name: "preStart", - exec: "python /usr/local/bin/manage.py" + exec: "python /usr/local/bin/manage.py", + when: { + source: "consul-agent", + once: "healthy" + } }, { name: '{{ .SERVICE_NAME | default "mysql" }}', - exec: ["mysqld", - "--console", - "--log-bin=mysql-bin", - "--log_slave_updates=ON", - "--gtid-mode=ON", - "--enforce-gtid-consistency=ON"], + exec: [ + "mysqld", + "--console", + "--log-bin=mysql-bin", + "--log_slave_updates=ON", + "--gtid-mode=ON", + "--enforce-gtid-consistency=ON" + ], port: 3306, when: { source: "preStart", @@ -46,13 +52,16 @@ {{ if .CONSUL_AGENT }}{ name: "consul-agent", restarts: "unlimited", - exec: ["/usr/local/bin/consul", "agent", - "-data-dir=/data", - "-config-dir=/config", - "-rejoin", - "-retry-join", '{{ .CONSUL | default "consul"}}', - "-retry-max", "10", - "-retry-interval", "10s"] + exec: [ + "/usr/local/bin/consul", "agent", + "-data-dir=/data", + "-config-dir=/config" + ], + health: { + exec: 'consul join {{ .CONSUL | default "consul"}}', + interval: 5, + ttl: 10 + } }{{ end }} ], watches: [ From 8066e90d437b9dfe7eafb2c78f7dded155715ac8 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Thu, 15 Jun 2017 10:09:21 -0400 Subject: [PATCH 07/26] remove ~~ spec from manta path b/c python-manta seems to choke on it --- makefile | 3 ++- tests/triton.sh | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/makefile b/makefile index cdc9cd5..2a50d71 100644 --- a/makefile +++ b/makefile @@ -95,11 +95,12 @@ test/unit-src: ## Run the integration test runner. Runs locally but targets Triton. 
test/triton: - $(call check_var, TRITON_PROFILE, \ + $(call check_var, TRITON_PROFILE, MANTA_USER \ required to run integration tests on Triton.) docker run --rm \ -e TAG=$(tag) \ -e TRITON_PROFILE=$(TRITON_PROFILE) \ + -e MANTA_USER=$(MANTA_USER) \ -e GIT_BRANCH=$(GIT_BRANCH) \ -v ~/.ssh:/root/.ssh:ro \ -v ~/.triton/profiles.d:/root/.triton/profiles.d:ro \ diff --git a/tests/triton.sh b/tests/triton.sh index a656e43..c860dd8 100755 --- a/tests/triton.sh +++ b/tests/triton.sh @@ -12,11 +12,11 @@ db=${MYSQL_DATABASE:-mytestdb} repl_user=${MYSQL_REPL_USER:-myrepluser} repl_passwd=${MYSQL_REPL_PASSWORD:-password2} -manta_bucket=${MANTA_BUCKET:-"~~/stor/triton_mysql"} manta_url=${MANTA_URL:-https://us-east.manta.joyent.com} manta_user=${MANTA_USER:-triton_mysql} manta_subuser=${MANTA_SUBUSER:-triton_mysql} manta_role=${MANTA_ROLE:-triton_mysql} +manta_bucket=${MANTA_BUCKET:-"${manta_user}/stor/triton_mysql"} project="$COMPOSE_PROJECT" manifest="$COMPOSE_FILE" From 10dd442d86e6e5af221ae825dca6260e51a433d0 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Fri, 23 Jun 2017 16:07:40 -0400 Subject: [PATCH 08/26] bump Consul to 0.8.4, ContainerPilot to 3.1.0 --- Dockerfile | 10 +++++----- bin/test.py | 10 ++++------ examples/triton/docker-compose.yml | 4 ++-- 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/Dockerfile b/Dockerfile index 38dda85..4a59ea0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,6 @@ FROM percona:5.6 -ENV CONTAINERPILOT_VER 3.0.0 +ENV CONTAINERPILOT_VER 3.1.0 ENV CONTAINERPILOT /etc/containerpilot.json5 # By keeping a lot of discrete steps in a single RUN we can clean up after @@ -20,15 +20,15 @@ RUN set -ex \ && curl -Lvo get-pip.py https://bootstrap.pypa.io/get-pip.py \ && python get-pip.py \ && pip install \ - python-Consul==0.4.7 \ + python-Consul==0.7.0 \ manta==2.5.0 \ mock==2.0.0 \ json5==0.2.4 \ # \ # Add Consul from https://releases.hashicorp.com/consul \ # \ - && export CHECKSUM=5dbfc555352bded8a39c7a8bf28b5d7cf47dec493bc0496e21603c84dfe41b4b \ - && curl -Lvo /tmp/consul.zip https://releases.hashicorp.com/consul/0.7.1/consul_0.7.1_linux_amd64.zip \ + && export CHECKSUM=c8859a0a34c50115cdff147f998b2b63226f5f052e50f342209142420d1c2668 \ + && curl -Lvo /tmp/consul.zip https://releases.hashicorp.com/consul/0.8.4/consul_0.8.4_linux_amd64.zip \ && echo "${CHECKSUM} /tmp/consul.zip" | sha256sum -c \ && unzip /tmp/consul.zip -d /usr/local/bin \ && rm /tmp/consul.zip \ @@ -36,7 +36,7 @@ RUN set -ex \ # \ # Add ContainerPilot and set its configuration file path \ # \ - && export CONTAINERPILOT_CHECKSUM=6da4a4ab3dd92d8fd009cdb81a4d4002a90c8b7c \ + && export CONTAINERPILOT_CHECKSUM=d06e289e6e0ca82156d77cea36ff0f0246fcca60 \ && curl -Lvo /tmp/containerpilot.tar.gz "https://github.com/joyent/containerpilot/releases/download/${CONTAINERPILOT_VER}/containerpilot-${CONTAINERPILOT_VER}.tar.gz" \ && echo "${CONTAINERPILOT_CHECKSUM} /tmp/containerpilot.tar.gz" | sha1sum -c \ && tar zxf /tmp/containerpilot.tar.gz -C /usr/local/bin \ diff --git a/bin/test.py b/bin/test.py index 8cf5912..9252a4d 100644 --- a/bin/test.py +++ b/bin/test.py @@ -837,9 +837,8 @@ def test_parse_with_consul_agent(self): cp.load(envs=self.environ) self.assertEqual(cp.config['consul'], 'localhost:8500') - cmd = cp.config['jobs'][4]['exec'] - host_cfg_idx = cmd.index('-retry-join') + 1 - self.assertEqual(cmd[host_cfg_idx], 'my.consul.example.com') + health_check_exec = cp.config['jobs'][4]['health']['exec'] + self.assertIn('my.consul.example.com', health_check_exec) self.assertEqual(cp.state, 
UNASSIGNED) def test_parse_without_consul_agent(self): @@ -870,9 +869,8 @@ def test_update(self): with open(temp_file.name, 'r') as updated: config = json5.loads(updated.read()) self.assertEqual(config['consul'], 'localhost:8500') - cmd = config['jobs'][4]['exec'] - host_cfg_idx = cmd.index('-retry-join') + 1 - self.assertEqual(cmd[host_cfg_idx], 'my.consul.example.com') + health_check_exec = config['jobs'][4]['health']['exec'] + self.assertIn('my.consul.example.com', health_check_exec) class TestMantaConfig(unittest.TestCase): diff --git a/examples/triton/docker-compose.yml b/examples/triton/docker-compose.yml index af389c5..dcfb346 100644 --- a/examples/triton/docker-compose.yml +++ b/examples/triton/docker-compose.yml @@ -15,12 +15,12 @@ services: network_mode: bridge environment: - CONSUL_AGENT=1 - - LOG_LEVEL=INFO + - LOG_LEVEL=DEBUG - SERVICE_NAME=mysql - CONSUL=mc.svc.${TRITON_CNS_SEARCH_DOMAIN_PRIVATE} consul: - image: consul:0.7.1 + image: consul:0.8.4 command: > agent -server -client=0.0.0.0 -bootstrap -ui restart: always From ddb4f12ec4f39d64b27f7e554255734769d6ee47 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 26 Jun 2017 13:27:41 -0400 Subject: [PATCH 09/26] rearrange manager application to allow for multiple storage backends --- bin/manage.py | 16 ++--- bin/manager/{libmysql.py => client.py} | 5 +- bin/manager/{containerpilot.py => config.py} | 4 +- bin/manager/{libconsul.py => discovery.py} | 6 +- bin/manager/env.py | 47 +++++++++++++++ bin/manager/network.py | 18 ++++++ bin/manager/storage/__init__.py | 0 bin/manager/storage/local.py | 0 .../{libmanta.py => storage/manta_stor.py} | 4 +- bin/manager/utils.py | 60 ------------------- bin/test.py | 11 ++-- 11 files changed, 94 insertions(+), 77 deletions(-) rename bin/manager/{libmysql.py => client.py} (99%) rename bin/manager/{containerpilot.py => config.py} (96%) rename bin/manager/{libconsul.py => discovery.py} (98%) create mode 100644 bin/manager/env.py create mode 100644 bin/manager/network.py create mode 100644 bin/manager/storage/__init__.py create mode 100644 bin/manager/storage/local.py rename bin/manager/{libmanta.py => storage/manta_stor.py} (96%) diff --git a/bin/manage.py b/bin/manage.py index a4ea7b8..5fb8f0d 100644 --- a/bin/manage.py +++ b/bin/manage.py @@ -7,15 +7,15 @@ import sys # pylint: disable=invalid-name,no-self-use,dangerous-default-value -from manager.containerpilot import ContainerPilot -from manager.libconsul import Consul -from manager.libmanta import Manta -from manager.libmysql import MySQL, MySQLError -from manager.utils import \ - log, get_ip, debug, \ - UnknownPrimary, WaitTimeoutError, \ +from manager.client import MySQL, MySQLError +from manager.config import ContainerPilot +from manager.discovery import Consul +from manager.env import PRIMARY_KEY, BACKUP_NAME +from manager.network import get_ip +from manager.storage.manta_stor import Manta +from manager.utils import log, debug, \ PRIMARY, REPLICA, UNASSIGNED, \ - PRIMARY_KEY, BACKUP_NAME + UnknownPrimary, WaitTimeoutError class Node(object): diff --git a/bin/manager/libmysql.py b/bin/manager/client.py similarity index 99% rename from bin/manager/libmysql.py rename to bin/manager/client.py index 38e778a..c3e1b94 100644 --- a/bin/manager/libmysql.py +++ b/bin/manager/client.py @@ -7,7 +7,10 @@ import subprocess import string import time -from manager.utils import debug, env, log, get_ip, to_flag, \ + +from manager.env import env, to_flag +from manager.network import get_ip +from manager.utils import debug, log, \ WaitTimeoutError, 
UnknownPrimary # pylint: disable=import-error,no-self-use,invalid-name,dangerous-default-value diff --git a/bin/manager/containerpilot.py b/bin/manager/config.py similarity index 96% rename from bin/manager/containerpilot.py rename to bin/manager/config.py index 45ec063..9db8efa 100644 --- a/bin/manager/containerpilot.py +++ b/bin/manager/config.py @@ -5,7 +5,9 @@ import json5 -from manager.utils import debug, env, to_flag, log, UNASSIGNED +from manager.env import env, to_flag +from manager.utils import debug, log, UNASSIGNED + # pylint: disable=invalid-name,no-self-use,dangerous-default-value diff --git a/bin/manager/libconsul.py b/bin/manager/discovery.py similarity index 98% rename from bin/manager/libconsul.py rename to bin/manager/discovery.py index c30bd9b..8b83962 100644 --- a/bin/manager/libconsul.py +++ b/bin/manager/discovery.py @@ -5,9 +5,11 @@ import os import time -from manager.utils import debug, env, log, to_flag, \ - WaitTimeoutError, UnknownPrimary, PRIMARY_KEY, LAST_BACKUP_KEY, \ +from manager.env import env, to_flag, \ + PRIMARY_KEY, LAST_BACKUP_KEY, \ BACKUP_TTL, BACKUP_LOCK_KEY, LAST_BINLOG_KEY +from manager.utils import debug, log, \ + WaitTimeoutError, UnknownPrimary # pylint: disable=import-error,invalid-name,dangerous-default-value import consul as pyconsul diff --git a/bin/manager/env.py b/bin/manager/env.py new file mode 100644 index 0000000..612eb7a --- /dev/null +++ b/bin/manager/env.py @@ -0,0 +1,47 @@ +""" environment functions """ +import os + +# pylint: disable=invalid-name,no-self-use,dangerous-default-value + +# --------------------------------------------------------- +# misc utility functions for setting up environment + +def env(key, default, environ=os.environ, fn=None): + """ + Gets an environment variable, trims away comments and whitespace, + and expands other environment variables. + """ + val = environ.get(key, default) + try: + val = val.split('#')[0] + val = val.strip() + val = os.path.expandvars(val) + except (AttributeError, IndexError): + # just swallow AttributeErrors for non-strings + pass + if fn: # transformation function + val = fn(val) + return val + +def to_flag(val): + """ + Parse environment variable strings like "yes/no", "on/off", + "true/false", "1/0" into a bool. + """ + try: + return bool(int(val)) + except ValueError: + val = val.lower() + if val in ('false', 'off', 'no', 'n'): + return False + # non-"1" or "0" string, we'll treat as truthy + return bool(val) + + +# env values for keys +PRIMARY_KEY = env('PRIMARY_KEY', env('SERVICE_NAME','mysql')+'-primary') +LAST_BACKUP_KEY = env('LAST_BACKUP_KEY', 'mysql-last-backup') +BACKUP_LOCK_KEY = env('BACKUP_LOCK_KEY', 'mysql-backup-running') +LAST_BINLOG_KEY = env('LAST_BINLOG_KEY', 'mysql-last-binlog') +BACKUP_NAME = env('BACKUP_NAME', 'mysql-backup-%Y-%m-%dT%H-%M-%SZ') +BACKUP_TTL = env('BACKUP_TTL', 86400, fn='{}s'.format) # every 24 hours diff --git a/bin/manager/network.py b/bin/manager/network.py new file mode 100644 index 0000000..c2b9d49 --- /dev/null +++ b/bin/manager/network.py @@ -0,0 +1,18 @@ +""" network functions """ +import fcntl +import socket +import struct + + +def get_ip(iface='eth0'): + """ + Use Linux SIOCGIFADDR ioctl to get the IP for the interface. 
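The new manager/env.py consolidates the environment-parsing helpers that previously lived in manager/utils.py. A short sketch of how env() and to_flag() behave; the variable names and values below are invented for illustration:

    from manager.env import env, to_flag

    environ = {
        'BACKUP_TTL': '600  # run every ten minutes',
        'USE_TLS': 'no',
        'SNAP_DIR': '$HOME/snapshots',
    }

    env('BACKUP_TTL', 86400, environ, fn='{}s'.format)  # -> '600s': comment trimmed, then fn applied
    env('USE_TLS', 'on', environ, fn=to_flag)           # -> False: 'no', 'n', 'off', 'false', '0' are falsey
    env('SNAP_DIR', '/tmp/snapshots', environ)          # -> '$HOME' expanded via os.path.expandvars
    env('MISSING', '/tmp/snapshots', environ)           # -> '/tmp/snapshots', the default

Anything else ('yes', 'on', '1', or any other non-empty string) parses as True under to_flag.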
+ ref http://code.activestate.com/recipes/439094-get-the-ip-address\ + -associated-with-a-network-inter/ + """ + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + return socket.inet_ntoa(fcntl.ioctl( + sock.fileno(), + 0x8915, # SIOCGIFADDR + struct.pack('256s', iface[:15]) + )[20:24]) diff --git a/bin/manager/storage/__init__.py b/bin/manager/storage/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/bin/manager/storage/local.py b/bin/manager/storage/local.py new file mode 100644 index 0000000..e69de29 diff --git a/bin/manager/libmanta.py b/bin/manager/storage/manta_stor.py similarity index 96% rename from bin/manager/libmanta.py rename to bin/manager/storage/manta_stor.py index 5a29355..46c34d4 100644 --- a/bin/manager/libmanta.py +++ b/bin/manager/storage/manta_stor.py @@ -1,7 +1,9 @@ """ Module for Manta client wrapper and related tooling. """ import logging import os -from manager.utils import debug, env, to_flag + +from manager.env import env, to_flag +from manager.utils import debug # pylint: disable=import-error,dangerous-default-value,invalid-name import manta as pymanta diff --git a/bin/manager/utils.py b/bin/manager/utils.py index d9ed6a5..55dabec 100644 --- a/bin/manager/utils.py +++ b/bin/manager/utils.py @@ -1,10 +1,7 @@ """ utility functions """ -import fcntl from functools import wraps import logging import os -import socket -import struct import sys # pylint: disable=invalid-name,no-self-use,dangerous-default-value @@ -71,60 +68,3 @@ def wrapper(*args, **kwargs): if fn: return _decorate(fn) return _decorate - - -# --------------------------------------------------------- -# misc utility functions for setting up environment - -def env(key, default, environ=os.environ, fn=None): - """ - Gets an environment variable, trims away comments and whitespace, - and expands other environment variables. - """ - val = environ.get(key, default) - try: - val = val.split('#')[0] - val = val.strip() - val = os.path.expandvars(val) - except (AttributeError, IndexError): - # just swallow AttributeErrors for non-strings - pass - if fn: # transformation function - val = fn(val) - return val - -def to_flag(val): - """ - Parse environment variable strings like "yes/no", "on/off", - "true/false", "1/0" into a bool. - """ - try: - return bool(int(val)) - except ValueError: - val = val.lower() - if val in ('false', 'off', 'no', 'n'): - return False - # non-"1" or "0" string, we'll treat as truthy - return bool(val) - - -# env values for keys -PRIMARY_KEY = env('PRIMARY_KEY', env('SERVICE_NAME','mysql')+'-primary') -LAST_BACKUP_KEY = env('LAST_BACKUP_KEY', 'mysql-last-backup') -BACKUP_LOCK_KEY = env('BACKUP_LOCK_KEY', 'mysql-backup-running') -LAST_BINLOG_KEY = env('LAST_BINLOG_KEY', 'mysql-last-binlog') -BACKUP_NAME = env('BACKUP_NAME', 'mysql-backup-%Y-%m-%dT%H-%M-%SZ') -BACKUP_TTL = env('BACKUP_TTL', 86400, fn='{}s'.format) # every 24 hours - -def get_ip(iface='eth0'): - """ - Use Linux SIOCGIFADDR ioctl to get the IP for the interface. 
- ref http://code.activestate.com/recipes/439094-get-the-ip-address\ - -associated-with-a-network-inter/ - """ - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - return socket.inet_ntoa(fcntl.ioctl( - sock.fileno(), - 0x8915, # SIOCGIFADDR - struct.pack('256s', iface[:15]) - )[20:24]) diff --git a/bin/test.py b/bin/test.py index 9252a4d..1e173da 100644 --- a/bin/test.py +++ b/bin/test.py @@ -11,10 +11,13 @@ import mock import manage -from manager.containerpilot import ContainerPilot -from manager.libconsul import Consul -from manager.libmanta import Manta -from manager.libmysql import MySQL +# pylint: disable=invalid-name,no-self-use,dangerous-default-value +from manager.client import MySQL +from manager.config import ContainerPilot +from manager.discovery import Consul +from manager.env import * +from manager.network import * +from manager.storage.manta_stor import Manta from manager.utils import * From 7702ecf2f51a392c58d5502aabc89804fc721273 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 26 Jun 2017 14:06:53 -0400 Subject: [PATCH 10/26] add local storage option for snapshots --- bin/manage.py | 26 ++++++++++++++-------- bin/manager/storage/__init__.py | 27 +++++++++++++++++++++++ bin/manager/storage/local.py | 38 +++++++++++++++++++++++++++++++++ bin/test.py | 10 ++++----- 4 files changed, 87 insertions(+), 14 deletions(-) diff --git a/bin/manage.py b/bin/manage.py index 5fb8f0d..77750fb 100644 --- a/bin/manage.py +++ b/bin/manage.py @@ -12,7 +12,10 @@ from manager.discovery import Consul from manager.env import PRIMARY_KEY, BACKUP_NAME from manager.network import get_ip + from manager.storage.manta_stor import Manta +from manager.storage.local import Local + from manager.utils import log, debug, \ PRIMARY, REPLICA, UNASSIGNED, \ UnknownPrimary, WaitTimeoutError @@ -21,12 +24,12 @@ class Node(object): """ Node represents the state of our running container and carries - around the MySQL config, and clients for Consul and Manta. + around the MySQL config, and clients for Consul and Snapshots. """ - def __init__(self, mysql=None, cp=None, consul=None, manta=None): + def __init__(self, mysql=None, cp=None, consul=None, snaps=None): self.mysql = mysql self.consul = consul - self.manta = manta + self.snaps = snaps self.cp = cp self.hostname = socket.gethostname() @@ -110,7 +113,7 @@ def pre_start(node): if not os.path.isdir(os.path.join(my.datadir, 'mysql')): last_backup = node.consul.has_snapshot() if last_backup: - node.manta.get_backup(last_backup) + node.snaps.get_backup(last_backup) my.restore_from_snapshot(last_backup) else: if not my.initialize_db(): @@ -249,7 +252,7 @@ def snapshot_task(node): def write_snapshot(node): """ Calls out to innobackupex to snapshot the DB, then pushes the file - to Manta and writes that the work is completed in Consul. + to Snapshot storage and writes that the work is completed in Consul. """ now = datetime.utcnow() # we don't want .isoformat() here because of URL encoding @@ -265,8 +268,8 @@ def write_snapshot(node): '--stream=tar', '/tmp/backup'], stdout=f) log.info('snapshot completed, uploading to object store') - node.manta.put_backup(backup_id, '/tmp/backup.tar') - log.info('snapshot uploaded to %s/%s', node.manta.bucket, backup_id) + node.snaps.put_backup(backup_id, '/tmp/backup.tar') + log.info('snapshot uploaded to %s/%s', node.snaps.bucket, backup_id) # write the filename of the binlog to Consul so that we know if # we've rotated since the last backup. 
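write_snapshot builds the backup id from the current UTC time; the comment about avoiding .isoformat() is there because ':' characters would need URL encoding in object-store paths, which is exactly what the BACKUP_NAME default in manager/env.py is shaped to avoid. A quick illustration, assuming the id is produced by strftime over that pattern:

    from datetime import datetime

    # default pattern from manager/env.py; '-' where isoformat() would emit ':'
    BACKUP_NAME = 'mysql-backup-%Y-%m-%dT%H-%M-%SZ'
    backup_id = datetime.utcnow().strftime(BACKUP_NAME)
    # e.g. 'mysql-backup-2017-06-26T14-07-02Z', safe to use verbatim as a
    # Manta path segment or a local filename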
@@ -396,10 +399,15 @@ def main(): sys.exit(1) my = MySQL() - manta = Manta() + + if os.environ.get('SNAPSHOT_BACKEND', 'manta') == 'local': + snaps = Local() + else: + snaps = Manta() + cp = ContainerPilot() cp.load() - node = Node(mysql=my, consul=consul, manta=manta, cp=cp) + node = Node(mysql=my, consul=consul, snaps=snaps, cp=cp) cmd(node) diff --git a/bin/manager/storage/__init__.py b/bin/manager/storage/__init__.py index e69de29..acfdc56 100644 --- a/bin/manager/storage/__init__.py +++ b/bin/manager/storage/__init__.py @@ -0,0 +1,27 @@ +import os +from manager.utils import debug + + +class SnapshotBackup(object): + """ + The SnapshotBackup class defines an expected interface to the + backup storage, where we'll put our MySQL snapshots. + """ + def __init__(self, envs=os.environ): + raise NotImplementedError + + @debug + def get_backup(self, backup_id): + """ + fetch the snapshot file from the storage location, allowing + exceptions to bubble up to the caller + """ + raise NotImplementedError + + @debug + def put_backup(self, backup_id, infile): + """ + store the snapshot file to the expected path, allowing + exceptions to bubble up to the caller. + """ + raise NotImplementedError diff --git a/bin/manager/storage/local.py b/bin/manager/storage/local.py index e69de29..622b2a9 100644 --- a/bin/manager/storage/local.py +++ b/bin/manager/storage/local.py @@ -0,0 +1,38 @@ +""" Module for storing snapshots in shared local disk """ +import os +from shutil import copyfile + +from manager.env import env +from manager.utils import debug + +class Local(object): + """ + + The Manta class wraps access to the Manta object store, where we'll put + our MySQL backups. + """ + def __init__(self, envs=os.environ): + self.dir = env('STORAGE_DIR', '/tmp/snapshots', envs) + + @debug + def get_backup(self, backup_id): + """ + copies snapshot from 'STORAGE_DIR' location to a working + directory so it can be loaded into the DB without worrying + about other processes writing to the snapshot. 
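storage/__init__.py pins down the interface every snapshot backend is expected to provide: a constructor that reads its own settings from the environment (the base __init__ deliberately raises NotImplementedError), plus get_backup() and put_backup(). The Local class below satisfies it by duck-typing rather than subclassing. A minimal sketch of what a hypothetical extra backend could look like; the Scp name and SCP_HOST variable are invented here:

    import os

    from manager.env import env
    from manager.storage import SnapshotBackup
    from manager.utils import debug


    class Scp(SnapshotBackup):
        """ Hypothetical backend that would ship snapshots to a remote host. """

        def __init__(self, envs=os.environ):
            # the base __init__ raises NotImplementedError, so each backend
            # supplies its own constructor and pulls config out of `envs`
            self.host = env('SCP_HOST', 'backups.example.com', envs)

        @debug
        def get_backup(self, backup_id):
            # fetch the snapshot somewhere the restore step can find it;
            # Local, for comparison, copies it to /tmp/backup/<backup_id>
            pass

        @debug
        def put_backup(self, backup_id, infile):
            # push `infile` out to storage; exceptions bubble up to the caller
            pass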
+ """ + try: + os.mkdir(self.dir, 0770) + except OSError: + pass + + dst = '/tmp/backup/{}'.format(backup_id) + src = '{}/{}'.format(self.dir, backup_id) + copyfile(src, dst) + + def put_backup(self, backup_id, src): + """ + copies snapshot to 'STORAGE_DIR' + """ + dst = '{}/{}'.format(self.dir, backup_id) + copyfile(src, dst) diff --git a/bin/test.py b/bin/test.py index 1e173da..5f0d0c9 100644 --- a/bin/test.py +++ b/bin/test.py @@ -29,7 +29,7 @@ def setUp(self): manta = mock.MagicMock() my = mock.MagicMock() my.datadir = tempfile.mkdtemp() - self.node = manage.Node(consul=consul, manta=manta, mysql=my) + self.node = manage.Node(consul=consul, snaps=manta, mysql=my) def tearDown(self): logging.getLogger().setLevel(logging.DEBUG) @@ -42,7 +42,7 @@ def test_pre_start_first_node(self): manage.pre_start(self.node) self.node.consul.has_snapshot.assert_called_once() self.node.mysql.initialize_db.assert_called_once() - self.assertFalse(self.node.manta.get_backup.called) + self.assertFalse(self.node.snaps.get_backup.called) self.assertFalse(self.node.mysql.restore_from_snapshot.called) def test_pre_start_snapshot_complete(self): @@ -53,7 +53,7 @@ def test_pre_start_snapshot_complete(self): self.node.consul.has_snapshot.return_value = True manage.pre_start(self.node) self.node.consul.has_snapshot.assert_called_once() - self.node.manta.get_backup.assert_called_once() + self.node.snaps.get_backup.assert_called_once() self.node.mysql.restore_from_snapshot.assert_called_once() self.assertFalse(self.node.mysql.initialize_db.called) @@ -82,7 +82,7 @@ def kv_gets(*args, **kwargs): self.node.consul.client.kv.get.side_effect = kv_gets() manage.pre_start(self.node) - self.node.manta.get_backup.assert_called_once() + self.node.snaps.get_backup.assert_called_once() self.assertEqual(self.node.consul.client.kv.get.call_count, 2) self.node.mysql.restore_from_snapshot.assert_called_once() self.assertFalse(self.node.mysql.initialize_db.called) @@ -604,7 +604,7 @@ def setUp(self): my.datadir = tempfile.mkdtemp() cp.state = PRIMARY my.datadir = tempfile.mkdtemp() - self.node = manage.Node(consul=consul, cp=cp, manta=manta, mysql=my) + self.node = manage.Node(consul=consul, cp=cp, snaps=manta, mysql=my) def tearDown(self): logging.getLogger().setLevel(logging.DEBUG) From a93ae19f366d346b239d3a1d3a80696ce259c4dd Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 26 Jun 2017 14:07:02 -0400 Subject: [PATCH 11/26] update compose example to use local storage --- examples/compose/docker-compose.yml | 19 ++++++++++++++++--- examples/compose/setup.sh | 0 makefile | 6 ++++++ 3 files changed, 22 insertions(+), 3 deletions(-) delete mode 100644 examples/compose/setup.sh diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml index 9874b10..b0d9105 100644 --- a/examples/compose/docker-compose.yml +++ b/examples/compose/docker-compose.yml @@ -7,23 +7,36 @@ services: restart: always expose: - 3306 - env_file: _env network_mode: bridge environment: + - MYSQL_USER=dbuser + - MYSQL_PASSWORD=seekretPassword + - MYSQL_REPL_USER=repluser + - MYSQL_REPL_PASSWORD=seekretReplPassword + - MYSQL_DATABASE=demodb + - BACKUP_TTL=120 - CONSUL_AGENT=1 - LOG_LEVEL=DEBUG - CONSUL=consul + - SNAPSHOT_BACKEND=local + - SNAPSHOT_DIR=/tmp/snapshots + volumes: + # shared storage location for snapshots + - ../../tmp:/tmp/snapshots + - ../../bin/manage.py:/usr/local/bin/manage.py + - ../../bin/test.py:/usr/local/bin/test.py + - ../../bin/manager:/usr/local/bin/manager links: - consul:consul consul: - image: consul:0.7.1 + 
image: consul:0.8.4 command: > agent -server -client=0.0.0.0 -bootstrap -ui restart: always mem_limit: 128m ports: - - 8500:8500 + - 8500 expose: - 53 - 8300 diff --git a/examples/compose/setup.sh b/examples/compose/setup.sh deleted file mode 100644 index e69de29..0000000 diff --git a/makefile b/makefile index 2a50d71..1d7f12c 100644 --- a/makefile +++ b/makefile @@ -55,6 +55,12 @@ ship: docker push $(image):latest +# ------------------------------------------------ +# Run the example stack + +run/compose: + cd examples/compose && TAG=$(tag) docker-compose -p my up -d + # ------------------------------------------------ # Test running From e60681ddcf32949069a758701bdf2e55532aa360 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Mon, 26 Jun 2017 14:09:05 -0400 Subject: [PATCH 12/26] add compose/scale makefile target --- makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/makefile b/makefile index 1d7f12c..682a5ff 100644 --- a/makefile +++ b/makefile @@ -58,9 +58,14 @@ ship: # ------------------------------------------------ # Run the example stack +## Run the stack under local Compose run/compose: cd examples/compose && TAG=$(tag) docker-compose -p my up -d +## Scale up the local Compose stack +run/compose/scale: + cd examples/compose && TAG=$(tag) docker-compose -p my scale mysql=2 + # ------------------------------------------------ # Test running From 428661775a677dd9cec9aa30e1daf681f6104e1e Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 27 Jun 2017 09:33:27 -0400 Subject: [PATCH 13/26] update to ContainerPilot v3.1.1 to fix pre_start bug --- Dockerfile | 30 ++++++++++++++--------------- examples/compose/docker-compose.yml | 2 +- makefile | 1 + tests/Dockerfile | 2 +- 4 files changed, 18 insertions(+), 17 deletions(-) diff --git a/Dockerfile b/Dockerfile index 4a59ea0..8bee80e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,8 +1,5 @@ FROM percona:5.6 -ENV CONTAINERPILOT_VER 3.1.0 -ENV CONTAINERPILOT /etc/containerpilot.json5 - # By keeping a lot of discrete steps in a single RUN we can clean up after # ourselves in the same layer. 
This is gross but it saves ~100MB in the image RUN set -ex \ @@ -13,11 +10,11 @@ RUN set -ex \ # \ # get Python drivers MySQL, Consul, and Manta \ # \ - && curl -Lvo /tmp/mysql-connector.deb http://dev.mysql.com/get/Downloads/Connector-Python/mysql-connector-python_2.1.3-1debian8.2_all.deb \ + && curl -Lsfo /tmp/mysql-connector.deb http://dev.mysql.com/get/Downloads/Connector-Python/mysql-connector-python_2.1.3-1debian8.2_all.deb \ && dpkg -i /tmp/mysql-connector.deb \ - && curl -v -Lo /tmp/mysql-utils.deb http://dev.mysql.com/get/Downloads/MySQLGUITools/mysql-utilities_1.5.6-1debian8_all.deb \ + && curl -Lsfo /tmp/mysql-utils.deb http://dev.mysql.com/get/Downloads/MySQLGUITools/mysql-utilities_1.5.6-1debian8_all.deb \ && dpkg -i /tmp/mysql-utils.deb \ - && curl -Lvo get-pip.py https://bootstrap.pypa.io/get-pip.py \ + && curl -Lsfo get-pip.py https://bootstrap.pypa.io/get-pip.py \ && python get-pip.py \ && pip install \ python-Consul==0.7.0 \ @@ -28,20 +25,12 @@ RUN set -ex \ # Add Consul from https://releases.hashicorp.com/consul \ # \ && export CHECKSUM=c8859a0a34c50115cdff147f998b2b63226f5f052e50f342209142420d1c2668 \ - && curl -Lvo /tmp/consul.zip https://releases.hashicorp.com/consul/0.8.4/consul_0.8.4_linux_amd64.zip \ + && curl -Lsfo /tmp/consul.zip https://releases.hashicorp.com/consul/0.8.4/consul_0.8.4_linux_amd64.zip \ && echo "${CHECKSUM} /tmp/consul.zip" | sha256sum -c \ && unzip /tmp/consul.zip -d /usr/local/bin \ && rm /tmp/consul.zip \ && mkdir /config \ # \ - # Add ContainerPilot and set its configuration file path \ - # \ - && export CONTAINERPILOT_CHECKSUM=d06e289e6e0ca82156d77cea36ff0f0246fcca60 \ - && curl -Lvo /tmp/containerpilot.tar.gz "https://github.com/joyent/containerpilot/releases/download/${CONTAINERPILOT_VER}/containerpilot-${CONTAINERPILOT_VER}.tar.gz" \ - && echo "${CONTAINERPILOT_CHECKSUM} /tmp/containerpilot.tar.gz" | sha1sum -c \ - && tar zxf /tmp/containerpilot.tar.gz -C /usr/local/bin \ - && rm /tmp/containerpilot.tar.gz \ - # \ # clean up to minimize image layer size \ # \ && rm -rf /var/lib/apt/lists/* \ @@ -52,6 +41,17 @@ RUN set -ex \ && rm /docker-entrypoint.sh +ENV CONTAINERPILOT_VER 3.1.1 +ENV CONTAINERPILOT /etc/containerpilot.json5 + +# Add ContainerPilot +RUN set -ex \ + && export CONTAINERPILOT_CHECKSUM=1f159207c7dc2b622f693754f6dda77c82a88263 \ + && curl -Lsfo /tmp/containerpilot.tar.gz "https://github.com/joyent/containerpilot/releases/download/${CONTAINERPILOT_VER}/containerpilot-${CONTAINERPILOT_VER}.tar.gz" \ + && echo "${CONTAINERPILOT_CHECKSUM} /tmp/containerpilot.tar.gz" | sha1sum -c \ + && tar zxf /tmp/containerpilot.tar.gz -C /usr/local/bin \ + && rm /tmp/containerpilot.tar.gz + # configure ContainerPilot and MySQL COPY etc/* /etc/ COPY bin/manager /usr/local/bin/manager diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml index b0d9105..fe6aee7 100644 --- a/examples/compose/docker-compose.yml +++ b/examples/compose/docker-compose.yml @@ -36,7 +36,7 @@ services: restart: always mem_limit: 128m ports: - - 8500 + - 8500:8500 expose: - 53 - 8300 diff --git a/makefile b/makefile index 682a5ff..58d9d89 100644 --- a/makefile +++ b/makefile @@ -61,6 +61,7 @@ ship: ## Run the stack under local Compose run/compose: cd examples/compose && TAG=$(tag) docker-compose -p my up -d + cd examples/compose && TAG=$(tag) docker-compose -p my logs -f mysql ## Scale up the local Compose stack run/compose/scale: diff --git a/tests/Dockerfile b/tests/Dockerfile index b7bc84f..d317866 100644 --- a/tests/Dockerfile +++ 
b/tests/Dockerfile @@ -25,7 +25,7 @@ RUN sed -i 's/1.9.0/1.10.0/' /usr/local/bin/triton-docker \ COPY examples/triton/docker-compose.yml /src/examples/triton/docker-compose.yml COPY examples/triton/setup.sh /src/examples/triton/setup.sh COPY examples/compose/docker-compose.yml /src/examples/compose/docker-compose.yml -COPY examples/compose/setup.sh /src/examples/compose/setup.sh +#COPY examples/compose/setup.sh /src/examples/compose/setup.sh # install test code COPY tests/triton.sh /src/triton.sh From dfa90c5ae038ad07453387524cd1030fbfc12010 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 27 Jun 2017 09:38:24 -0400 Subject: [PATCH 14/26] add tmp/ dir to .gitignore so local compose backups are committed --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 9b26d80..e2d7a7e 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ manta.pub # temp python-manta/ +tmp/ # macos frustration .DS_Store From 01431ff737639267da5fa01a9503d0e3d7ff77a9 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 27 Jun 2017 10:10:39 -0400 Subject: [PATCH 15/26] avoid AttributeException in log output from snapshot storage --- bin/manage.py | 6 ++++-- bin/manager/storage/local.py | 1 + bin/manager/storage/manta_stor.py | 1 + 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/bin/manage.py b/bin/manage.py index 77750fb..4118fad 100644 --- a/bin/manage.py +++ b/bin/manage.py @@ -44,6 +44,7 @@ def is_primary(self): the node `state` field needs to be set to UNASSIGNED if you want to force a check of Consul, etc. """ + log.debug('state: %s' % self.cp.state) if self.cp.state != UNASSIGNED: return self.cp.state == PRIMARY @@ -77,6 +78,7 @@ def is_primary(self): # am I listed in the Consul PRIMARY_KEY?? _, primary_name = self.consul.read_lock(PRIMARY_KEY) + log.debug('DEBUG: primary_name: %s' % primary_name) if primary_name == self.name: self.cp.state = PRIMARY return True @@ -268,8 +270,8 @@ def write_snapshot(node): '--stream=tar', '/tmp/backup'], stdout=f) log.info('snapshot completed, uploading to object store') - node.snaps.put_backup(backup_id, '/tmp/backup.tar') - log.info('snapshot uploaded to %s/%s', node.snaps.bucket, backup_id) + out = node.snaps.put_backup(backup_id, '/tmp/backup.tar') + log.info('snapshot uploaded to %s', out) # write the filename of the binlog to Consul so that we know if # we've rotated since the last backup. 
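write_snapshot now logs whatever the backend's put_backup returns, and the two hunks below update Local and Manta to hand back the destination they wrote to. A unittest-style sketch (not part of bin/test.py) of that contract for the Local backend, using throwaway temp directories:

    import os
    import tempfile
    import unittest

    from manager.storage.local import Local


    class TestLocalPutBackup(unittest.TestCase):

        def test_put_backup_returns_destination(self):
            workdir = tempfile.mkdtemp()
            src = os.path.join(workdir, 'backup.tar')
            with open(src, 'w') as f:
                f.write('fake snapshot bytes')

            snaps = Local(envs={'STORAGE_DIR': workdir})
            dst = snaps.put_backup('mysql-backup-test', src)

            self.assertEqual(dst, '{}/mysql-backup-test'.format(workdir))
            self.assertTrue(os.path.isfile(dst))


    if __name__ == '__main__':
        unittest.main()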
diff --git a/bin/manager/storage/local.py b/bin/manager/storage/local.py index 622b2a9..25e9f08 100644 --- a/bin/manager/storage/local.py +++ b/bin/manager/storage/local.py @@ -36,3 +36,4 @@ def put_backup(self, backup_id, src): """ dst = '{}/{}'.format(self.dir, backup_id) copyfile(src, dst) + return dst diff --git a/bin/manager/storage/manta_stor.py b/bin/manager/storage/manta_stor.py index 46c34d4..984f4fa 100644 --- a/bin/manager/storage/manta_stor.py +++ b/bin/manager/storage/manta_stor.py @@ -57,3 +57,4 @@ def put_backup(self, backup_id, infile): mpath = '{}/{}'.format(self.bucket, backup_id) with open(infile, 'r') as f: self.client.put_object(mpath, file=f) + return mpath From 2fd191bb437553898c007c46b3b63f743f0121f3 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 27 Jun 2017 10:26:01 -0400 Subject: [PATCH 16/26] add clean target to makefile to remove test debris --- makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/makefile b/makefile index 58d9d89..4b14d59 100644 --- a/makefile +++ b/makefile @@ -177,6 +177,11 @@ cleanup: # ------------------------------------------------------- # helper functions for testing if variables are defined +## Cleanup local backups and log debris +clean: + rm -rf tmp/ + find . -name '*.log' -delete + ## Print environment for build debugging debug: @echo GIT_COMMIT=$(GIT_COMMIT) From fc87263e2081d92dfdbae619de9fcbfb4d8f0a12 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 27 Jun 2017 10:26:39 -0400 Subject: [PATCH 17/26] make sure /tmp/backup destination exists in storage.Local --- bin/manage.py | 2 +- bin/manager/storage/local.py | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/bin/manage.py b/bin/manage.py index 4118fad..63496c3 100644 --- a/bin/manage.py +++ b/bin/manage.py @@ -78,7 +78,7 @@ def is_primary(self): # am I listed in the Consul PRIMARY_KEY?? 
_, primary_name = self.consul.read_lock(PRIMARY_KEY) - log.debug('DEBUG: primary_name: %s' % primary_name) + log.debug('primary_name: %s' % primary_name) if primary_name == self.name: self.cp.state = PRIMARY return True diff --git a/bin/manager/storage/local.py b/bin/manager/storage/local.py index 25e9f08..7b7db7c 100644 --- a/bin/manager/storage/local.py +++ b/bin/manager/storage/local.py @@ -25,6 +25,10 @@ def get_backup(self, backup_id): os.mkdir(self.dir, 0770) except OSError: pass + try: + os.mkdir('/tmp/backup', 0770) + except OSError: + pass dst = '/tmp/backup/{}'.format(backup_id) src = '{}/{}'.format(self.dir, backup_id) From 01df63575099e9d368275830dbc40e9510f583eb Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 27 Jun 2017 10:27:09 -0400 Subject: [PATCH 18/26] local compose can't use the Consul agent --- etc/containerpilot.json5 | 4 ++-- examples/compose/docker-compose.yml | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/etc/containerpilot.json5 b/etc/containerpilot.json5 index e7e2a86..b14398a 100644 --- a/etc/containerpilot.json5 +++ b/etc/containerpilot.json5 @@ -7,10 +7,10 @@ { name: "preStart", exec: "python /usr/local/bin/manage.py", - when: { + {{ if .CONSUL_AGENT }}when: { source: "consul-agent", once: "healthy" - } + }{{ end }} }, { name: '{{ .SERVICE_NAME | default "mysql" }}', diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml index fe6aee7..a1d9ee9 100644 --- a/examples/compose/docker-compose.yml +++ b/examples/compose/docker-compose.yml @@ -15,11 +15,9 @@ services: - MYSQL_REPL_PASSWORD=seekretReplPassword - MYSQL_DATABASE=demodb - BACKUP_TTL=120 - - CONSUL_AGENT=1 - LOG_LEVEL=DEBUG - CONSUL=consul - SNAPSHOT_BACKEND=local - - SNAPSHOT_DIR=/tmp/snapshots volumes: # shared storage location for snapshots - ../../tmp:/tmp/snapshots From 36c2e49e4d1a1046caee380a25e63e0f5efef773 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 27 Jun 2017 11:50:17 -0400 Subject: [PATCH 19/26] add MANTA_KEY_ID to required test inputs --- makefile | 3 ++- tests/triton.sh | 4 +++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/makefile b/makefile index 4b14d59..09e88c1 100644 --- a/makefile +++ b/makefile @@ -107,12 +107,13 @@ test/unit-src: ## Run the integration test runner. Runs locally but targets Triton. test/triton: - $(call check_var, TRITON_PROFILE, MANTA_USER \ + $(call check_var, TRITON_PROFILE, MANTA_USER, MANTA_KEY_ID \ required to run integration tests on Triton.) 
docker run --rm \ -e TAG=$(tag) \ -e TRITON_PROFILE=$(TRITON_PROFILE) \ -e MANTA_USER=$(MANTA_USER) \ + -e MANTA_KEY_ID=$(MANTA_KEY_ID) \ -e GIT_BRANCH=$(GIT_BRANCH) \ -v ~/.ssh:/root/.ssh:ro \ -v ~/.triton/profiles.d:/root/.triton/profiles.d:ro \ diff --git a/tests/triton.sh b/tests/triton.sh index c860dd8..01739f7 100755 --- a/tests/triton.sh +++ b/tests/triton.sh @@ -16,7 +16,8 @@ manta_url=${MANTA_URL:-https://us-east.manta.joyent.com} manta_user=${MANTA_USER:-triton_mysql} manta_subuser=${MANTA_SUBUSER:-triton_mysql} manta_role=${MANTA_ROLE:-triton_mysql} -manta_bucket=${MANTA_BUCKET:-"${manta_user}/stor/triton_mysql"} +manta_bucket=${MANTA_BUCKET:-"/${manta_user}/stor/triton_mysql"} +manta_key_id=${MANTA_KEY_ID} project="$COMPOSE_PROJECT" manifest="$COMPOSE_FILE" @@ -90,6 +91,7 @@ profile() { echo "MANTA_SUBUSER=$manta_subuser" echo "MANTA_ROLE=$manta_role" echo "MANTA_KEY=$manta_key" + echo "MANTA_KEY_ID=$manta_key_id" } > ./examples/triton/_env } From d5eaec79d415d9e96db540e9fb82fdc5ecb1bd41 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Tue, 27 Jun 2017 16:51:25 -0400 Subject: [PATCH 20/26] integration test for Compose example --- examples/compose/docker-compose.yml | 7 +- makefile | 33 ++-- tests/compose.sh | 235 ++++++++++++++++++++++++++++ 3 files changed, 260 insertions(+), 15 deletions(-) mode change 100644 => 100755 tests/compose.sh diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml index a1d9ee9..a5ec51f 100644 --- a/examples/compose/docker-compose.yml +++ b/examples/compose/docker-compose.yml @@ -20,17 +20,14 @@ services: - SNAPSHOT_BACKEND=local volumes: # shared storage location for snapshots - - ../../tmp:/tmp/snapshots - - ../../bin/manage.py:/usr/local/bin/manage.py - - ../../bin/test.py:/usr/local/bin/test.py - - ../../bin/manager:/usr/local/bin/manager + - ${WORK_DIR:-../..}/tmp:/tmp/snapshots links: - consul:consul consul: image: consul:0.8.4 command: > - agent -server -client=0.0.0.0 -bootstrap -ui + agent -server -client=0.0.0.0 -dev -ui restart: always mem_limit: 128m ports: diff --git a/makefile b/makefile index 09e88c1..e4793f7 100644 --- a/makefile +++ b/makefile @@ -94,16 +94,29 @@ test/unit-src: $(image):$(tag) \ $(python) test.py -# TODO: add once we can run with non-Manta storage backend -# Run the integration test runner against Compose locally. -# test/compose: -# docker run --rm \ -# -e TAG=$(tag) \ -# -e GIT_BRANCH=$(GIT_BRANCH) \ -# --network=bridge \ -# -v /var/run/docker.sock:/var/run/docker.sock \ -# -w /src \ -# $(testImage):$(tag) /src/compose.sh +## Run the integration test runner against Compose locally. +test/compose: + docker run --rm \ + -e TAG=$(tag) \ + -e GIT_BRANCH=$(GIT_BRANCH) \ + -e WORK_DIR=/src \ + --network=bridge \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v $(shell pwd)/tests/compose.sh:/src/compose.sh \ + -w /src \ + $(testImage):$(tag) /src/compose.sh + + +test/shell: + docker run --rm -it \ + -e TAG=$(tag) \ + -e GIT_BRANCH=$(GIT_BRANCH) \ + -e WORK_DIR=/src \ + --network=bridge \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -v $(shell pwd)/tests/compose.sh:/src/compose.sh \ + -w /src \ + $(testImage):$(tag) /bin/bash ## Run the integration test runner. Runs locally but targets Triton. 
test/triton: diff --git a/tests/compose.sh b/tests/compose.sh old mode 100644 new mode 100755 index e69de29..51d6622 --- a/tests/compose.sh +++ b/tests/compose.sh @@ -0,0 +1,235 @@ +#!/bin/bash +set -e + +export GIT_BRANCH="${GIT_BRANCH:-$(git rev-parse --abbrev-ref HEAD)}" +export TAG="${TAG:-branch-$(basename "$GIT_BRANCH")}" +export COMPOSE_PROJECT="${COMPOSE_PROJECT_NAME:-my}" +export COMPOSE_FILE="${COMPOSE_FILE:-./examples/compose/docker-compose.yml}" + +user=${MYSQL_USER:-dbuser} +passwd=${MYSQL_PASSWORD:-seekretPassword} +db=${MYSQL_DATABASE:-demodb} + +project="$COMPOSE_PROJECT" +manifest="$COMPOSE_FILE" + + +fail() { + echo + echo '------------------------------------------------' + echo 'FAILED: dumping logs' + echo '------------------------------------------------' + docker-compose -p "$project" -f "$manifest" ps + docker-compose -p "$project" -f "$manifest" logs > compose.log + echo '------------------------------------------------' + echo 'FAILED' + echo "$1" + echo '------------------------------------------------' + exit 1 +} + +pass() { + teardown + echo + echo '------------------------------------------------' + echo 'PASSED!' + echo + exit 0 +} + +function finish { + result=$? + if [ $result -ne 0 ]; then fail "unexpected error"; fi + pass +} +trap finish EXIT + + + +# -------------------------------------------------------------------- +# Helpers + +# asserts that 'count' MySQL instances are running and marked as Up +# by Docker. fails after the timeout. +wait_for_containers() { + local count timeout i got + count="$1" + timeout="${3:-120}" # default 120sec + i=0 + echo "waiting for $count MySQL containers to be Up..." + while [ $i -lt "$timeout" ]; do + got=$(docker-compose -p "$project" -f "$manifest" ps mysql) + got=$(echo "$got" | grep -c "Up") + if [ "$got" -eq "$count" ]; then + echo "$count instances reported Up in <= $i seconds" + return + fi + i=$((i+1)) + sleep 1 + done + fail "$count instances did not report Up within $timeout seconds" +} + +# asserts that the application has registered at least n instances with +# Consul. fails after the timeout. +wait_for_service() { + local service count timeout i got consul_ip + service="$1" + count="$2" + timeout="${3:-300}" # default 300sec + i=0 + echo "waiting for $count instances of $service to be registered with Consul..." 
+ consul_ip=$(docker inspect "${project}_consul_1" | json -a NetworkSettings.IPAddress) + while [ $i -lt "$timeout" ]; do + got=$(curl -s "http://${consul_ip}:8500/v1/health/service/${service}?passing" \ + | json -a Service.Address | wc -l | tr -d ' ') + if [ "$got" -eq "$count" ]; then + echo "$service registered in <= $i seconds" + return + fi + i=$((i+1)) + sleep 1 + done + fail "waited for service $service for $timeout seconds but it was not registed with Consul" +} + +# gets the container that's currently primary in Consul +get_primary() { + local got consul_ip + consul_ip=$(docker inspect "${project}_consul_1" | json -a NetworkSettings.IPAddress) + got=$(curl -s "http://${consul_ip}:8500/v1/health/service/mysql-primary?passing" \ + | json -a Node.Node | wc -l | tr -d ' ') + echo "$got" +} + +# gets a container that's currently a replica in Consul +get_replica() { + local got consul_ip + consul_ip=$(docker inspect "${project}_consul_1" | json -a NetworkSettings.IPAddress) + got=$(curl -s "http://${consul_ip}:8500/v1/health/service/mysql?passing" \ + | json -a Node.Node | wc -l | tr -d ' ') + echo "$got" +} + +# creates a table on the first instance, which will be replicated to +# the other nodes +create_table() { + echo 'creating test table' + exec_query "${project}_mysql_1" 'CREATE TABLE tbl1 (field1 INT, field2 VARCHAR(36));' +} + +check_replication() { + echo 'checking replication' + local primary="$1" + local replica="$2" + local testkey="$3" + local testval="$3" + echo "writing to $primary" + exec_query "$primary" "INSERT INTO tbl1 (field1, field2) VALUES ($testkey, \"$testval\");" + + # check the replica, giving it a few seconds to catch up + local timeout i + timeout=5 + i=0 + echo "checking read from $replica" + while [ $i -lt "$timeout" ]; do + got=$(exec_query "$replica" "SELECT * FROM tbl1 WHERE field1=$testkey;") + got=$(echo "$got" | grep -c "$testkey: $testval") + if [ "$got" -eq 1 ]; then + return + fi + i=$((i+1)) + sleep 1 + done + fail "failed to replicate write from $primary to $replica; query got $got" +} + +# runs a SQL statement on the node via docker exec. normally this method +# would be subject to SQL injection but we control all inputs and we don't +# want to have to ship a mysql client in this test rig. 
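The helpers above lean on Consul's health API: GET /v1/health/service/<name>?passing returns one entry per passing instance, each carrying Node and Service blocks, which is what the json/wc pipelines are counting. A rough Python equivalent of wait_for_service, assuming the requests library and a reachable Consul agent (names here are illustrative, not part of the test rig):

    import time

    import requests


    def wait_for_service(consul_ip, service, count, timeout=300):
        """ Poll Consul until `count` passing instances of `service` show up. """
        url = 'http://{}:8500/v1/health/service/{}'.format(consul_ip, service)
        for _ in range(timeout):
            passing = requests.get(url, params={'passing': '1'}).json()
            addresses = [entry['Service']['Address'] for entry in passing]
            if len(addresses) >= count:
                return addresses
            time.sleep(1)
        raise RuntimeError('{} never reported {} passing instances'.format(service, count))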
+exec_query() { + local node="$1" + local query="$2" + echo "$node" + out=$(docker exec -i "$node" \ + mysql -u "$user" "-p${passwd}" --vertical -e "$query" "$db") + echo "$out" +} + +restart() { + node="${project}_$1" + docker restart "$node" +} + +stop() { + node="${project}_$1" + docker stop "$node" +} + +run() { + echo + echo '* cleaning up previous test run' + echo + docker-compose -p "$project" -f "$manifest" stop + docker-compose -p "$project" -f "$manifest" rm -f + + echo + echo '* standing up initial test targets' + echo + docker-compose -p "$project" -f "$manifest" up -d +} + +teardown() { + echo + echo '* tearing down containers' + echo + docker-compose -p "$project" -f "$manifest" stop + docker-compose -p "$project" -f "$manifest" rm -f +} + +scale() { + count="$1" + echo + echo '* scaling up cluster' + echo + docker-compose -p "$project" -f "$manifest" scale mysql="$count" +} + + +# -------------------------------------------------------------------- +# Test sections + +test-failover() { + echo + echo '------------------------------------------------' + echo 'executing failover test' + echo '------------------------------------------------' + + # stand up and setup + run + wait_for_containers 1 + wait_for_service 'mysql-primary' 1 + scale 3 + wait_for_containers 3 + wait_for_service 'mysql' 2 + create_table + + # verify working + check_replication "${project}_mysql_1" "${project}_mysql_2" "1" "a" + + # force failover and verify again + stop "mysql_1" + wait_for_containers 2 + wait_for_service 'mysql-primary' 1 + wait_for_service 'mysql' 1 + + local primary replica + primary=$(get_primary) + replica=$(get_replica) + check_replication "$primary" "$replica" "2" "b" +} + +# -------------------------------------------------------------------- +# Main loop + +test-failover From e89c7f15c9dff842841114ca3146cf67c0be97c9 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Wed, 28 Jun 2017 11:07:14 -0400 Subject: [PATCH 21/26] add delay to test for initial watch to register --- tests/compose.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/compose.sh b/tests/compose.sh index 51d6622..d90f093 100755 --- a/tests/compose.sh +++ b/tests/compose.sh @@ -98,7 +98,7 @@ get_primary() { local got consul_ip consul_ip=$(docker inspect "${project}_consul_1" | json -a NetworkSettings.IPAddress) got=$(curl -s "http://${consul_ip}:8500/v1/health/service/mysql-primary?passing" \ - | json -a Node.Node | wc -l | tr -d ' ') + | json -a Node.Address) echo "$got" } @@ -107,7 +107,7 @@ get_replica() { local got consul_ip consul_ip=$(docker inspect "${project}_consul_1" | json -a NetworkSettings.IPAddress) got=$(curl -s "http://${consul_ip}:8500/v1/health/service/mysql?passing" \ - | json -a Node.Node | wc -l | tr -d ' ') + | json -a Node.Address) echo "$got" } @@ -217,6 +217,8 @@ test-failover() { # verify working check_replication "${project}_mysql_1" "${project}_mysql_2" "1" "a" + sleep 15 + # force failover and verify again stop "mysql_1" wait_for_containers 2 From 1db1d9064cb26a5340bc6882d498ad2d4fbf4613 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Wed, 28 Jun 2017 11:09:36 -0400 Subject: [PATCH 22/26] update triton tests to match fixes in compose tests --- tests/triton.sh | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/triton.sh b/tests/triton.sh index 01739f7..fde1c3f 100755 --- a/tests/triton.sh +++ b/tests/triton.sh @@ -144,7 +144,7 @@ get_primary() { local got consul_ip consul_ip=$(triton ip "${project}_consul_1") got=$(curl -s 
"http://${consul_ip}:8500/v1/health/service/mysql-primary?passing" \ - | json -a Node.Node | wc -l | tr -d ' ') + | json -a Node.Address) echo "$got" } @@ -153,7 +153,7 @@ get_replica() { local got consul_ip consul_ip=$(triton ip "${project}_consul_1") got=$(curl -s "http://${consul_ip}:8500/v1/health/service/mysql?passing" \ - | json -a Node.Node | wc -l | tr -d ' ') + | json -a Node.Address) echo "$got" } @@ -178,7 +178,8 @@ check_replication() { i=0 while [ $i -lt "$timeout" ]; do got=$(exec_query "$replica" "SELECT * FROM tbl1 WHERE field1=$testkey;") - if [ "$got" -eq "$testval" ]; then + got=$(echo "$got" | grep -c "$testkey: $testval") + if [ "$got" -eq 1 ]; then return fi i=$((i+1)) @@ -193,8 +194,9 @@ check_replication() { exec_query() { local node="$1" local query="$2" - out=$(triton-docker exec "$node" \ - "mysql -u $user -p${passwd} --vertical -e '$query' $db") + echo "$node" + out=$(triton-docker exec -i "$node" \ + mysql -u "$user" "-p${passwd}" --vertical -e "$query" "$db") echo "$out" } From 01a84622e9f3cb4fd8140adcb586c68be7872338 Mon Sep 17 00:00:00 2001 From: Moghedrin Date: Mon, 10 Jul 2017 17:12:11 -0700 Subject: [PATCH 23/26] Adds initial Minio snapshotter --- Dockerfile | 1 + bin/manage.py | 1 + bin/manager/storage/minio_stor.py | 45 +++++++++++++++++++++++++++++++ 3 files changed, 47 insertions(+) create mode 100644 bin/manager/storage/minio_stor.py diff --git a/Dockerfile b/Dockerfile index 8bee80e..cf7549a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -19,6 +19,7 @@ RUN set -ex \ && pip install \ python-Consul==0.7.0 \ manta==2.5.0 \ + minio==2.2.4 \ mock==2.0.0 \ json5==0.2.4 \ # \ diff --git a/bin/manage.py b/bin/manage.py index 63496c3..30ed7de 100644 --- a/bin/manage.py +++ b/bin/manage.py @@ -14,6 +14,7 @@ from manager.network import get_ip from manager.storage.manta_stor import Manta +from manager.storage.minio_stor import Minio from manager.storage.local import Local from manager.utils import log, debug, \ diff --git a/bin/manager/storage/minio_stor.py b/bin/manager/storage/minio_stor.py new file mode 100644 index 0000000..7eae054 --- /dev/null +++ b/bin/manager/storage/minio_stor.py @@ -0,0 +1,45 @@ +""" Module for storing snapshots in shared local disk """ +import os +from shutil import copyfile + +from manager.env import env +from manager.utils import debug +from minio import Minio as pyminio + +class Minio(object): + """ + + The Minio class wraps access to the Minio object store, where we'll put + our MySQL backups. + """ + def __init__(self, envs=os.environ): + self.access_key = env('MINIO_ACCESS_KEY', None, envs) + self.secret_key = env('MINIO_SECRET_KEY', None, envs) + self.bucket = env('MINIO_BUCKET', 'backups', envs) + self.location = env('MINIO_LOCATION', 'us-east-1', envs) + self.url = env('MINIO_URL', 'minio:9000') + is_tls - env('MINIO_TLS_INSECURE', False, envs, fn=to_flag) + + self.client = pyminio( + self.url, + access_key=self.access_key, + secret_key=self.secret_key, + secure=is_tls) + + try: + self.client.make_bucket(self.bucket, location=self.location) + except: + raise + + @debug + def get_backup(self, backup_id): + """ + Download file from Manta, allowing exceptions to bubble up. + """ + return NotImplementedError + + def put_backup(self, backup_id, src): + """ + Upload the backup file to the expected path. 
+ """ + return NotImplementedError From 691077dc4c94bcdf10ce50ad78647c994812992b Mon Sep 17 00:00:00 2001 From: Moghedrin Date: Tue, 11 Jul 2017 13:16:14 -0700 Subject: [PATCH 24/26] Minio storage working, using instead of local for run/compose --- bin/manage.py | 5 ++++- bin/manager/storage/minio_stor.py | 28 ++++++++++++++++++---------- examples/compose/docker-compose.yml | 16 +++++++++++++++- 3 files changed, 37 insertions(+), 12 deletions(-) diff --git a/bin/manage.py b/bin/manage.py index 30ed7de..3a68239 100644 --- a/bin/manage.py +++ b/bin/manage.py @@ -403,8 +403,11 @@ def main(): my = MySQL() - if os.environ.get('SNAPSHOT_BACKEND', 'manta') == 'local': + snapshot_backend = os.environ.get('SNAPSHOT_BACKEND', 'manta') + if snapshot_backend == 'local': snaps = Local() + elif snapshot_backend == 'minio': + snaps = Minio() else: snaps = Manta() diff --git a/bin/manager/storage/minio_stor.py b/bin/manager/storage/minio_stor.py index 7eae054..629fc74 100644 --- a/bin/manager/storage/minio_stor.py +++ b/bin/manager/storage/minio_stor.py @@ -1,10 +1,13 @@ """ Module for storing snapshots in shared local disk """ +import logging import os from shutil import copyfile -from manager.env import env +from manager.env import env, to_flag from manager.utils import debug -from minio import Minio as pyminio +from minio import Minio as pyminio, error as minioerror + +logging.getLogger('manta').setLevel(logging.INFO) class Minio(object): """ @@ -18,28 +21,33 @@ def __init__(self, envs=os.environ): self.bucket = env('MINIO_BUCKET', 'backups', envs) self.location = env('MINIO_LOCATION', 'us-east-1', envs) self.url = env('MINIO_URL', 'minio:9000') - is_tls - env('MINIO_TLS_INSECURE', False, envs, fn=to_flag) + is_tls = env('MINIO_TLS_SECURE', False, envs, fn=to_flag) self.client = pyminio( self.url, access_key=self.access_key, secret_key=self.secret_key, secure=is_tls) - try: self.client.make_bucket(self.bucket, location=self.location) - except: - raise + except minioerror.BucketAlreadyOwnedByYou: + pass @debug def get_backup(self, backup_id): """ - Download file from Manta, allowing exceptions to bubble up. + Download file from Minio, allowing exceptions to bubble up. """ - return NotImplementedError + try: + os.mkdir('/tmp/backup', 0770) + except OSError: + pass + outfile = '/tmp/backup/{}'.format(backup_id) + self.client.fget_object(self.bucket, backup_id, outfile) - def put_backup(self, backup_id, src): + def put_backup(self, backup_id, infile): """ Upload the backup file to the expected path. 
""" - return NotImplementedError + self.client.fput_object(self.bucket, backup_id, infile) + return backup_id diff --git a/examples/compose/docker-compose.yml b/examples/compose/docker-compose.yml index a5ec51f..c062254 100644 --- a/examples/compose/docker-compose.yml +++ b/examples/compose/docker-compose.yml @@ -17,12 +17,15 @@ services: - BACKUP_TTL=120 - LOG_LEVEL=DEBUG - CONSUL=consul - - SNAPSHOT_BACKEND=local + - SNAPSHOT_BACKEND=minio + - MINIO_ACCESS_KEY=supersecretaccesskey + - MINIO_SECRET_KEY=supersecretsecretkey volumes: # shared storage location for snapshots - ${WORK_DIR:-../..}/tmp:/tmp/snapshots links: - consul:consul + - minio:minio consul: image: consul:0.8.4 @@ -42,3 +45,14 @@ services: network_mode: bridge dns: - 127.0.0.1 + + minio: + image: minio/minio + command: server /export + restart: always + expose: + - 9000 + network_mode: bridge + environment: + - MINIO_ACCESS_KEY=supersecretaccesskey + - MINIO_SECRET_KEY=supersecretsecretkey From f7234fcbc79f202bc0d8381ec87e6ad6962c7841 Mon Sep 17 00:00:00 2001 From: Joe Francis Date: Tue, 17 Oct 2017 14:18:15 -0500 Subject: [PATCH 25/26] Fix broken link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 19ba88e..34f23c7 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,7 @@ A running cluster includes the following components: - [MySQL](https://dev.mysql.com/): we're using MySQL5.6 via [Percona Server](https://www.percona.com/software/mysql-database/percona-server), and [`xtrabackup`](https://www.percona.com/software/mysql-database/percona-xtrabackup) for running hot snapshots. - [ContainerPilot](https://www.joyent.com/containerpilot): included in our MySQL containers to orchestrate bootstrap behavior and coordinate replication using keys and checks stored in Consul in the `preStart`, `health`, and `onChange` handlers. - [Consul](https://www.consul.io/): is our service catalog that works with ContainerPilot and helps coordinate service discovery, replication, and failover -- [Manta](https://www.joyent.com/object-storage): the Joyent object store, for securely and durably storing our MySQL snapshots. +- [Manta](https://www.joyent.com/triton/object-storage): the Joyent object store, for securely and durably storing our MySQL snapshots. - `manage.py`: a small Python application that ContainerPilot's lifecycle hooks will call to bootstrap MySQL, perform health checks, manage replication setup, and perform coordinated failover. The lifecycle of a MySQL container is managed by 4 lifecycle hooks in the `manage.py` application: `pre_start`, `health`, `on_change`, and `snapshot_task`. From 35ada27c99b316c85294b3577d2bbd48aa0778b6 Mon Sep 17 00:00:00 2001 From: Furtchet Date: Wed, 13 Dec 2017 14:47:22 -0600 Subject: [PATCH 26/26] Add documentation for env SNAPSHOT_BACKEND, minio, and local. --- README.md | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 19ba88e..1b8ab9e 100644 --- a/README.md +++ b/README.md @@ -103,6 +103,14 @@ Pass these variables via an `_env` file. The included `setup.sh` can be used to - `MYSQL_USER`: this user will be set up as the default non-root user on the node - `MYSQL_PASSWORD`: this user will be set up as the default non-root user on the node + + +#### Snapshots +These variables control where the database snapshots are saved. + +- `SNAPSHOT_BACKEND`: Select from: `manta`, `minio`, or `local` (Defaults to `manta`.) + +##### Manta - `MANTA_URL`: the full Manta endpoint URL. 
(ex. `https://us-east.manta.joyent.com`) - `MANTA_USER`: the Manta account name. - `MANTA_SUBUSER`: the Manta subuser account name, if any. @@ -111,6 +119,19 @@ Pass these variables via an `_env` file. The included `setup.sh` can be used to - `MANTA_PRIVATE_KEY`: the private ssh key for the Manta account/subuser; the included `setup.sh` will encode this automatically - `MANTA_BUCKET`: the path on Manta where backups will be stored. (ex. `/myaccount/stor/triton-mysql`); the bucket must already exist and be writeable by the `MANTA_USER`/`MANTA_PRIVATE_KEY` +##### Minio +- `MINIO_ACCESS_KEY`: S3 Access key to login. +- `MINIO_SECRET_KEY`: S3 Secret key to login. +- `MINIO_BUCKET`: The S3 bucket to put snapshots in. (Defaults to `backups`.) +- `MINIO_LOCATION`: Define the region/ location where the bucket is. (Defaults to `us-east-1`.) +- `MINIO_URL`: The url of minio. (Defaults to `minio:9000`.) +- `MINIO_TLS_SECURE`: Use a secure https connection to minio. (Defaults to `false`.) + +##### Local +- `STORAGE_DIR`: The local directory to store snapshots. (Defaults to `/tmp/snapshots`.) + +#### Optional Configs + These variables are optional but you most likely want them: - `SERVICE_NAME`: the name by which this instance will register itself in consul. If you do not provide one, defaults to `"mysql"`. @@ -118,18 +139,22 @@ These variables are optional but you most likely want them: - `MYSQL_REPL_PASSWORD`: this password will be used on all instances to set up MySQL replication. If not set, then replication will not be set up on the replicas. - `MYSQL_DATABASE`: create this database on startup if it doesn't already exist. The `MYSQL_USER` user will be granted superuser access to that DB. - `LOG_LEVEL`: will set the logging level of the `manage.py` application. It defaults to `DEBUG` and uses the Python stdlib [log levels](https://docs.python.org/2/library/logging.html#levels). The `DEBUG` log level is extremely verbose -- in production you'll want this to be at `INFO` or above. -- `CONSUL` is the hostname for the Consul instance(s). Defaults to `consul`. +- `CONSUL` is the hostname for the Consul instance(s). (Defaults to `consul`.) + +#### Consul keys The following variables control the names of keys written to Consul. They are optional with sane defaults, but if you are using Consul for many other services you might have requirements to namespace keys: - `PRIMARY_KEY`: The key used to record a lock on what node is primary. (Defaults to `${SERVICE_NAME}-primary`.) -- `BACKUP_LOCK_KEY`: The key used to record a lock on a running snapshot. (Defaults to `mysql-backup-runninbg`.) +- `BACKUP_LOCK_KEY`: The key used to record a lock on a running snapshot. (Defaults to `mysql-backup-running`.) - `LAST_BACKUP_KEY`: The key used to store the path and timestamp of the most recent backup. (Defaults to `mysql-last-backup`.) - `LAST_BINLOG_KEY`: The key used to store the filename of the most recent binlog file on the primary. (Defaults to `mysql-last-binlog`.) - `BACKUP_NAME`: The name of the backup file that's stored on Manta, with optional [strftime](https://docs.python.org/2/library/time.html#time.strftime) directives. (Defaults to `mysql-backup-%Y-%m-%dT%H-%M-%SZ`.) - `BACKUP_TTL`: Time in seconds to wait between backups. (Defaults to `86400`, or 24 hours.) - `SESSION_NAME`: The name used for session locks. (Defaults to `mysql-primary-lock`.) +#### MySQL + These variables *may* be passed but it's not recommended to do this. 
Instead we'll set a one-time root password during DB initialization; the password will be dropped into the logs. Security can be improved by using a key management system in place of environment variables. The constructor for the `Node` class in `manage.py` would be a good place to hook in this behavior, which is out-of-scope for this demonstration. - `MYSQL_RANDOM_ROOT_PASSWORD`: defaults to "yes"
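Pulling the snapshot settings together: a sketch, not part of the repo, of how the SNAPSHOT_BACKEND switch in bin/manage.py maps onto the backends and the defaults documented above (credentials are placeholders):

    import os

    from manager.storage.local import Local
    from manager.storage.manta_stor import Manta
    from manager.storage.minio_stor import Minio

    # mirrors the selection in bin/manage.py main()
    backend = os.environ.get('SNAPSHOT_BACKEND', 'manta')  # 'manta' | 'minio' | 'local'
    if backend == 'local':
        snaps = Local()    # STORAGE_DIR, default /tmp/snapshots
    elif backend == 'minio':
        snaps = Minio()    # MINIO_ACCESS_KEY / MINIO_SECRET_KEY (no defaults),
                           # MINIO_BUCKET=backups, MINIO_LOCATION=us-east-1,
                           # MINIO_URL=minio:9000, MINIO_TLS_SECURE=false
    else:
        snaps = Manta()    # the MANTA_* variables listed above

Whichever backend is picked only has to honor the get_backup()/put_backup() interface from bin/manager/storage/__init__.py, so supporting another object store means one new module plus a branch here.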