Todd Rodzen

Agile Application Development

Tag: mysql

UUID vs Auto Increment

What is the best method to create a key in today's advanced JavaScript Node.js-style applications? Do you rely on the old tried-and-true method of an auto-increment primary key in the database, or is a UUID better? One question helps answer that: is a sequential key actually useful? Consider the situation where the unique key starts out only in the application, or only in the client session store (e.g. a Redis key-value memory store). There, a sequential key is not useful, and generating the auto-increment key takes an additional step, an INCR on Redis or an INSERT on MySQL, which can also mean an unnecessary round trip to your database.
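
As a rough sketch of the cost difference (assuming the node_redis client used later in these posts, and the uuid() helper shown below), the sequential key needs a network hop while the UUID does not:

var redis = require('redis');
var client = redis.createClient();

// Sequential key: an extra round trip to Redis before the object even exists
client.incr('user:next_id', function (err, id) {
  if (err) throw err;
  console.log('new sequential key:', id); // 1, 2, 3, ...
});

// UUID v4: generated locally in the application, no round trip required
var id = uuid(); // e.g. 'f47ac10b-58cc-4372-a567-0e02b2c3d479'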

On the other hand, the UUID v4 implementation, which creates a unique randomized UUID, may look like a CPU-time-slice-consuming operation, but one Stack Overflow user did some testing, posted here:

[Chart: query latency vs. number of connections for AUTO_INCREMENT and UUID keys (uuid.png)]

The green and yellow lines show increasing connections: as connections increase, the AUTO_INCREMENT method shows growing latency, while UUID generation holds a steady, same-or-lower process time slice.

I didn't come up with this one, but it may well be the smallest UUID v4 generator code around.

// Compact (code-golfed) RFC 4122 version 4 UUID generator
exports.uuid = function b(a) {
  return a
    ? (a ^ Math.random() * 16 >> a / 4).toString(16)
    : ([1e7] + -1e3 + -4e3 + -8e3 + -1e11).replace(/[018]/g, b);
};
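
Usage is a single call; a quick sanity check (output varies since it is random, and this assumes the export above is saved as uuid.js):

var uuid = require('./uuid').uuid;
console.log(uuid()); // e.g. '416ac246-e7ac-49ff-93b4-f7e94d997e6b'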

There is no one answer. It's always good to have multiple methods available, but be sure to consider the uses and weigh the options.

Redis Session and MySQL Login

The following sets up a Redis session store with MySQL user registration and login, as well as a simple message post. It runs on a Node.js server.

package.json

{
  "name": "users",
  "version": "1.0.0",
  "description": "Register User",
  "main": "app.js",
  "script": "./app.js",
  "watch": true,
  "ignore_watch": ["node_modules"],
  "keywords": [
    "login"
  ],
  "author": "Todd Rodzen",
  "license": "MIT",
  "dependencies": {
    "async": "^1.2.1",
    "body-parser": "^1.13.0",
    "connect-redis": "^2.3.0",
    "cookie-parser": "^1.3.5",
    "ejs": "^2.3.1",
    "express": "^4.14.0",
    "express-session": "^1.11.3",
    "mysql": "^2.7.0",
    "redis": "^0.12.1"
  }
}

app.js

/**
 Loading all dependencies.
**/
var express = require("express");
var redis = require("redis");
var mysql = require("mysql");
var session = require('express-session');
var redisStore = require('connect-redis')(session);
var bodyParser = require('body-parser');
var cookieParser = require('cookie-parser');
var path = require("path");
var async = require("async");
var client = redis.createClient();
var app = express();
var router = express.Router();

// Always use MySQL pooling.
// Helpful for multiple connections.

var pool = mysql.createPool({
 connectionLimit : 100,
 host : 'hmmmmm',
 user : 'you',
 password : 'ssshhhhh',
 database : 'hmmmm',
 debug : false
});

app.set('views', 'view');
app.engine('html', require('ejs').renderFile);

// IMPORTANT
// Here we tell Express to use Redis as the session store.
// We pass the Redis client plus host, port, and TTL information,
// and Express does the rest!

app.use(session({
  secret: 'topics-session',
  store: new redisStore({ host: 'localhost', port: 6379, client: client, ttl: 260 }),
  saveUninitialized: false,
  resave: false
}));
app.use(cookieParser("secretSign#143_!223"));
app.use(bodyParser.urlencoded({extended: false}));
app.use(bodyParser.json());

// This is an important function.
// This function does the database handling task.
// We also use async here for control flow.

function handle_database(req, type, callback) {
  async.waterfall([
    function(callback) {
      pool.getConnection(function(err, connection) {
        if (err) {
          // If there is an error, stop right away.
          // This stops the async chain and jumps to the final function.
          callback(true);
        } else {
          callback(null, connection);
        }
      });
    },
    function(connection, callback) {
      // Build the SQL for the requested operation. User input is run
      // through the mysql module's escape() to avoid SQL injection.
      var SQLquery;
      switch (type) {
        case "login":
          SQLquery = "SELECT * FROM user_login WHERE user_email=" + pool.escape(req.body.user_email) +
                     " AND user_password=" + pool.escape(req.body.user_password);
          break;
        case "checkEmail":
          SQLquery = "SELECT * FROM user_login WHERE user_email=" + pool.escape(req.body.user_email);
          break;
        case "register":
          SQLquery = "INSERT INTO user_login(user_email,user_password,user_name) VALUES (" +
                     pool.escape(req.body.user_email) + "," + pool.escape(req.body.user_password) + "," +
                     pool.escape(req.body.user_name) + ")";
          break;
        case "addStatus":
          SQLquery = "INSERT INTO msg_text(user_id,msg_text) VALUES (" +
                     pool.escape(req.session.key["user_id"]) + "," + pool.escape(req.body.status) + ")";
          break;
        case "getStatus":
          SQLquery = "SELECT * FROM msg_text WHERE user_id=" + pool.escape(req.session.key["user_id"]);
          break;
        default:
          break;
      }
      callback(null, connection, SQLquery);
    },
    function(connection, SQLquery, callback) {
      connection.query(SQLquery, function(err, rows) {
        connection.release();
        if (!err) {
          if (type === "login") {
            callback(rows.length === 0 ? false : rows[0]);
          } else if (type === "getStatus") {
            callback(rows.length === 0 ? false : rows);
          } else if (type === "checkEmail") {
            callback(rows.length === 0 ? false : true);
          } else {
            callback(false);
          }
        } else {
          // If there is an error, stop right away.
          // This stops the async chain and jumps to the final function.
          callback(true);
        }
      });
    }],
    function(result) {
      // This function gets called after the async tasks finish (or abort).
      // A result of true signals a database error; anything else is the payload.
      if (typeof(result) === "boolean" && result === true) {
        callback(null);
      } else {
        callback(result);
      }
    });
}
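
As an aside, the mysql module also supports placeholder values with ?, which escape user input for you. A minimal sketch of the login query in that style:

// Equivalent login query using placeholders; the mysql module
// escapes each array element before substituting it for a ?
connection.query(
  'SELECT * FROM user_login WHERE user_email = ? AND user_password = ?',
  [req.body.user_email, req.body.user_password],
  function(err, rows) {
    // same result handling as above
  }
);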

/**
 --- Router Code begins here.
**/

router.get('/', function(req, res) {
  res.render('index.html');
});

router.post('/login', function(req, res) {
  handle_database(req, "login", function(response) {
    if (response === null) {
      res.json({"error": true, "message": "Database error occurred"});
    } else {
      if (!response) {
        res.json({
          "error": true,
          "message": "Login failed! Please register"
        });
      } else {
        req.session.key = response;
        res.json({"error": false, "message": "Login success."});
      }
    }
  });
});

router.get('/home', function(req, res) {
  if (req.session.key) {
    res.render("home.html", { email: req.session.key["user_name"] });
  } else {
    res.redirect("/");
  }
});

router.get("/fetchStatus", function(req, res) {
  if (req.session.key) {
    handle_database(req, "getStatus", function(response) {
      if (!response) {
        res.json({"error": false, "message": "There is no status to show."});
      } else {
        res.json({"error": false, "message": response});
      }
    });
  } else {
    res.json({"error": true, "message": "Please login first."});
  }
});

router.post("/addStatus",function(req,res){
 if(req.session.key) {
 handle_database(req,"addStatus",function(response){
 if(!response) {
 res.json({"error" : false, "message" : "Status is added."});
 } else {
 res.json({"error" : false, "message" : "Error while adding Status"});
 }
 });
 } else {
 res.json({"error" : true, "message" : "Please login first."});
 }
});

router.post("/register",function(req,res){
 handle_database(req,"checkEmail",function(response){
 if(response === null) {
 res.json({"error" : true, "message" : "This email is already present"});
 } else {
 handle_database(req,"register",function(response){
 if(response === null) {
 res.json({"error" : true , "message" : "Error while adding user."});
 } else {
 req.session.key = response;
 res.json({"error" : false, "message" : "Registered successfully."});
 }
 });
 }
 });
});

router.get('/logout',function(req,res){
 if(req.session.key) {
 req.session.destroy(function(){
 res.redirect('/');
 });
 } else {
 res.redirect('/');
 }
});

app.use('/',router);

app.listen(4201,function(){
 console.log("I am running at 4201");
});

view/index.html (code)
https://github.com/trodzen/MySQL-Redis-Session-Register/blob/master/view/index.html

view/home.html (code)
https://github.com/trodzen/MySQL-Redis-Session-Register/blob/master/view/home.html

You will need a working Redis instance, a MySQL database, and the two tables used in the SELECT/INSERT SQL statements above.
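
The exact table definitions are not shown here; a plausible minimal schema, inferred from the queries above (the column types are assumptions), can be created with a one-off script like this:

// setup-tables.js: creates the two tables the app expects.
// Column names come from the queries above; the types are guesses.
var mysql = require('mysql');
var connection = mysql.createConnection({
  host: 'hmmmmm', user: 'you', password: 'ssshhhhh', database: 'hmmmm',
  multipleStatements: true
});

var sql =
  "CREATE TABLE IF NOT EXISTS user_login (" +
  "  user_id INT NOT NULL AUTO_INCREMENT PRIMARY KEY," +
  "  user_email VARCHAR(100) NOT NULL," +
  "  user_password VARCHAR(100) NOT NULL," +
  "  user_name VARCHAR(45) NOT NULL);" +
  "CREATE TABLE IF NOT EXISTS msg_text (" +
  "  user_id INT NOT NULL," +
  "  msg_text TEXT);";

connection.query(sql, function(err) {
  if (err) throw err;
  console.log('tables created');
  connection.end();
});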

Try it, it’s easy.
That’s All Folks!

AMI Build All-in-One

Full build process

  1. Create an EC2 Linux Instance base – Amazon Linux AMI 2016.09.1 (HVM), SSD Volume Type – ami-0b33d91d
  2. Install the LAMP stack with the default Apache port set to 8080, as it will be served through an Nginx reverse proxy server on the same instance (Apache 2.4, MySQL, PHP 7.0)
  3. Install the MEAN Stack
  4. Install Nginx Reverse Proxy Server
  5. Install ColdFusion 2016 update 3 Server

The server is set up and available for free with a service contract from GTK Solutions.

to Cluster or not to Cluster

This post reviews cluster environment setup to handle multi-threaded Node service instances. In the last post, we reviewed methods of using pooled connections for the MySQL connector in Node.js on a Linux server running on an Amazon AWS EC2 instance. The conclusion was that back-end JavaScript application code should use MySQL pooling methods from the beginning of the development process, because the pooled API differs significantly from the non-pooled connection API. It is important to understand the techniques you intend to use before sitting down to write production code for a back-end application server; write your application once and do it right the first time.

Now let's take a look at clusters, a built-in part of Node.js. (We are not talking about the clusters add-on module from npmjs.com that goes by the same name.) Using the built-in cluster module in Node.js is described in multiple blog posts here and here. The documentation and posts describe setting up a cluster.js in which a master process forks workers that each run your app.js code, utilizing multiple processor core threads. This increases throughput and creates a multi-threaded environment. Simplified Node.js cluster environment code is shown here.

var cluster = require('cluster');
var numCPUs = require('os').cpus().length;

if (cluster.isMaster) {

  // for (var i = 0; i < numCPUs; i++) {
  for (var i = 0; i < 10; i++) {
    cluster.fork();
  }

  cluster.on('exit', function(worker, code, signal) {
    console.log('worker ' + worker.process.pid + ' died');
  });
} else {

  // Change this line to your Node.js app entry point.
  require("./app.js");
}

In simplified terms, the code above uses a master node to fork multiple instances (multi-threaded) of your Node.js application. We did some testing on our Amazon AWS EC2 t2.micro (free tier), using our async-test.js test from the prior blog post against an external RDS MySQL database with pooled MySQL connections. We ran Siege with up to 500 concurrent connections, hammering the server constantly for one minute and producing thousands of hits. What we found is that a single-process environment produced database connection errors, while a cluster of 10 processes (multi-threaded) running the same async-test produced no connection errors. This shows how much a clustered, multi-threaded environment matters.

When you look at my code above, you can see I altered it to force 10 process forks (instead of using numCPUs). Remember, I am running this on a virtual shared server at Amazon AWS. os.cpus().length reports back 1 core process thread, which is the number of threads AWS provisions by design. But a little digging into the full array returned by os.cpus() reveals my AWS t2.micro is actually running on a Xeon E5 2670 2.5 GHz processor with 10 cores and 20 threads. If you ran this on a single dedicated machine with that processor, os.cpus().length would return 20. Amazon does a lot of behind-the-scenes work to throttle processes, but getting more than you pay for is not the issue here (even though you may be getting it all free through the free tier). The issue is a working production application design that doesn't fail at critical points like peak database traffic. What we found is that even on the AWS t2.micro (free tier), the processor handled traffic better across multiple clustered processes, and it could handle a huge amount of it: one Siege test produced over 8000 hits in 30 seconds with no connection errors. Since it is a shared server, it is doubtful 20 threads would be useful. It's interesting to note that Amazon actually defines the t2.micro as 1 vCPU with 6 CPU credits/hour, a throttling mechanism distinct from actual core processing threads; Amazon uses processor credits to ensure you get what you pay for, or to throttle your application as needed. As in life, you only get what you pay for! 🙂
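
You can check what your own instance reports with a couple of lines of Node (the exact model string will vary; the comment below reflects the Xeon E5 2670 hardware mentioned above):

var os = require('os');

console.log(os.cpus().length);   // 1 on a t2.micro, despite the underlying hardware
console.log(os.cpus()[0].model); // e.g. an Intel Xeon E5-2670 model string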

But do we write code and change our application design to handle the cluster methods ourselves? No! There is a better way, and it's all handled for us by the PM2 module from npmjs.com; there is no need to create code like the cluster.js sample above. A multi-threading cluster environment, base node system control, monitoring, and performance optimization are areas where you don't need to reinvent the wheel; significant, well-designed products already handle these functions. To start, just install the PM2 module with

npm install -g pm2

A useful additional tool in connection with PM2 is the https://app.keymetrics.io dashboard monitor. You can go there, create a bucket, and connect it to your PM2 server to generate metrics data and provide external monitoring and control. Use the PM2 documentation for all the PM2 commands; some of the most useful are

pm2 start app.js

pm2 stop all

pm2 start app.js -i 4  # start 4 cluster instances of your app.js

pm2 list  # list all running process threads

There is another module called forever, but we found PM2 to be more advanced, robust, and better supported.

One final item is to set up PM2 to run on startup. Run the following command; it prints a one-line command for you to copy and paste into your terminal window, and that generated line will automatically start PM2 on reboot.

pm2 startup
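
Once your processes run the way you want, it is also worth persisting the process list so PM2 can resurrect it after a reboot. This is a standard PM2 command, suggested here as an extra step:

pm2 save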

What we do need to do in our application design is write strong, well-designed code that handles multiple running instances, saving session and environment variables in a place all cluster process instances in a multi-threaded environment can reach. In a later post we will cover using Redis as a global external store for process variables; codeforgeek also provides a great tutorial. This is the really important part of application development. The two parts that most need to be designed for multi-threaded clusters and multiple instances are session-like variables and database connection transactions. For example, three related SQL inserts or Redis store SETs must complete before another process tries to select that same, related set of data, as the sketch below shows.
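
On the SQL side, the mysql module's transaction support covers exactly this case. A minimal sketch, assuming the pooled connection setup from the earlier post (the table and column names are made up for illustration):

// Related inserts that must land together before any other
// process can observe a partial data set.
pool.getConnection(function(err, connection) {
  if (err) throw err;
  connection.beginTransaction(function(err) {
    if (err) throw err;
    connection.query("INSERT INTO orders SET ?", { order_id: 1 }, function(err) {
      if (err) return connection.rollback(function() { connection.release(); });
      connection.query("INSERT INTO order_items SET ?", { order_id: 1, item: 'a' }, function(err) {
        if (err) return connection.rollback(function() { connection.release(); });
        connection.commit(function(err) {
          if (err) return connection.rollback(function() { connection.release(); });
          connection.release(); // only now can other processes see the full set
        });
      });
    });
  });
});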

In conclusion, I recommend installing PM2 and using it from the start of the agile application development process. An added benefit of using PM2 in development is its added logging and debugging capabilities.

to Pool or not to Pool

I have done some testing using Node.js with the MySQL module. Under real-world stress, multiple connections to the server can result in database connection failures. The answer is pooled connections. Here is my test code.

Non-pooled Connection

// test-sync.js
var express = require('express');
var mysql = require('mysql');
var app = express();

app.get('/test-sync', function (req, res) {
  // console.log('1. Received Get')
  res.send('Hello World!'); // respond immediately; the DB work below continues

  // A new, non-pooled connection is created on every request.
  var connection = mysql.createConnection({
    host     : 'localhost',
    user     : 'mazu',
    password : '',
    database : 'mazudb'
  });
  connection.connect(function(err) {
    if (!err) {
      // console.log("2. Database Connected");
    } else {
      console.log("Error connecting database ... \n\n");
    }
  });

  var sql = "SELECT * FROM `test-mysql-Customers` LIMIT 2";
  connection.query(sql, function(err, rows, fields) {
    connection.end();
    if (!err) {
      // console.log("3. SQL Completed");
    } else {
      console.log('Error while performing Query.');
    }
  });
});

app.listen(4200, function () {
  console.log('Example app listening on port 4200!');
});

Pooled Connection

var express = require("express");
var mysql = require('mysql');
var app = express();

var pool = mysql.createPool({
 connectionLimit : 100, //important
 host : 'localhost',
 user : 'mazu',
 password : '',
 database : 'mazudb',
 debug : false
});

function handle_database(req,res) {
 
 pool.getConnection(function(err,connection){
 if (err) {
 res.json({"code" : 100, "status" : "Error in connection database"});
 var tlog = json({"code" : 100, "status" : "Error in connection database"});
 console.log(tlog);
 return;
 }

connection.setMaxListeners(0)

// console.log('connected as id ' + connection.threadId + ' connection.getMaxListeners()' + connection.getMaxListeners());
 
 sql="SELECT * FROM `test-mysql-Customers` LIMIT 2"

connection.query(sql,function(err,rows){
 connection.release();
 if(!err) {
 res.json(rows);
 } 
 });

connection.on('error', function(err) { 
 var tlog = json({"code" : 100, "status" : "Error in connection database"});
 console.log(tlog);
 res.json({"code" : 200, "status" : "Error in connection database"});
 return; 
 });
 });
}

app.get("/test-async",function(req,res){-
 handle_database(req,res);
});

app.listen(4200);

The primary difference between the two methods is that the first does a synchronous createConnection, connect, query, and end, while the second, pooled method does a createPool that maintains a queue for the queries. When a GET request is received, getConnection takes one process from the pooled queue, performs its query, and finally releases that process back to the pooled queue.

A stress test of these methods shows similar throughput. Essentially the same number of transactions get through, but with non-pooled connections the likelihood of database connection errors is higher. I used a Siege stress test with the following:

siege -c200 -t60s -d3 http://localhost:4200/test-sync

It resulted in about 8000 hits. The synchronous, non-pooled method produced about 16 database connection errors, while the pooled method produced no connection errors. This test ran on an Amazon EC2 t2.micro with Linux and an external RDS MySQL database. Obviously, database connection errors are bad! This shows a pooled connection is the way to go.

node.js to MySQL Connector

Here we will set up the Node.js server to connect to a MySQL database.

Using a PuTTY/SSH terminal window on the Linux Node.js server, first go to the working directory for your Node.js application, then install the mysql module using npm as shown below. Details of the mysql package we are using are found at https://www.npmjs.com/package/mysql

It is important to note that npm install just installs the package to a directory called node_modules within your current working directory. Since we don't want a package that might be modified or removed to adversely affect our application, we install the package directly in the /node application working directory.

cd /node
npm install mysql

Then, using Sublime or another text editor, create a test-mysql.js file with the code below. Next, using Filezilla or an SSH FTP client, upload the file to the /node directory.

const readline = require('readline');
const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout
});

rl.question('mysql host name? ', (hostanswer) => {
  rl.question('mysql user name? ', (useranswer) => {
    rl.question('mysql password? ', (passwordanswer) => {
      rl.question('mysql database? ', (dbanswer) => {
        rl.close();

        var mysql = require('mysql');
        var connection = mysql.createConnection({
          host     : hostanswer,
          user     : useranswer,
          password : passwordanswer,
          database : dbanswer,
          multipleStatements: true
        });

        connection.connect();

        var sql = "DROP TABLE IF EXISTS `test-mysql-Customers`; " +
          "CREATE TABLE `test-mysql-Customers` (" +
          "`CompanyName` varchar(45) NOT NULL," +
          "`City` varchar(45) DEFAULT NULL, " +
          "`Country` varchar(45) DEFAULT NULL," +
          "PRIMARY KEY (`CompanyName`)); " +
          "INSERT INTO `test-mysql-Customers` VALUES " +
          "('Company1','City1','Country1'),('Company2','City2','Country2'); " +
          "SELECT * from `test-mysql-Customers`";

        connection.query(sql, function(err, rows, fields) {
          if (!err) {
            console.log('The mysql return data: ', rows);
            console.log('The mysql test completed successfully. test-mysql-Customers table was created.');
          } else {
            console.log('Error while performing Query.');
          }
        });

        connection.end();
      });
    });
  });
});

Next, go back to the terminal window, still in the /node working directory, and run the test-mysql.js server application. When prompted, enter valid MySQL connection details. In a previous post I created a MySQL database on an Amazon RDS server; RDS or any valid MySQL server will work. The test simply creates a database table on the server called test-mysql-Customers with 3 fields and 2 records.

node test-mysql.js

The result will be the returned SQL row data, and the final line will read:

The mysql test completed successfully. test-mysql-Customers table was created.

That’s all folks!

Next we will explore creating an asynchronous real-world server.

ColdFusion on EC2 Server

Here we will install ColdFusion 2016 on an Amazon EC2 Linux server with Apache 2.4.

Get the ColdFusion installation .bin file from Adobe at https://www.adobe.com/products/coldfusion/download-trial/try.html. Be sure to select Linux 64-bit on the download.

  1. Adjust the permissions of the /opt directory to give ec2-user (group www) access
    sudo mkdir /opt/cf
    sudo chown -R root:www /opt
    sudo chmod 2775 /opt
    find /opt -type d -exec sudo chmod 2775 {} \;
    find /opt -type f -exec sudo chmod 0664 {} \;
  2. Then send the ColdFusion installation .bin to the /opt/cf directory on the EC2 server with Filezilla.
  3. Install the JDBC mysql database driver
    sudo yum install mysql-connector-java.noarch

    Enter Y at the prompt to complete the driver installation

    Is this ok [y/d/N]: y
  4. Go to the /opt/cf directory, set execute authority on the .bin file, and start the ColdFusion .bin installation
    cd /opt/cf
    chmod 777 ColdFusion_2016_WWEJ_linux64.bin
    sudo ./ColdFusion_2016_WWEJ_linux64.bin
  5. Follow the prompts and take the defaults on the ColdFusion installation.
    1. hit [enter] 31 times
    2. Y [enter] to Accept the terms of the license
    3. 3 [enter] to select Developer edition
    4. default 1 [enter] to select server configuration
    5. 3 [enter] to select development profile
    6. default 5 [enter] to continue installation
    7. [enter] default remote component admin profile
    8. [type a remote component administrator password] [enter]
    9. re-enter the remote component administrator password [enter]
    10. default n [enter] to access add-on services remotely
    11. /opt/cf absolute path install folder
    12. Y [enter] the path is correct
    13. 1 [enter] add web server configuration
    14. 1 [enter] Apache
    15. /etc/httpd/conf [enter] Apache configuration directory
    16. /usr/sbin/httpd [enter] Apache program binary file
    17. /etc/init.d/httpd [enter] control file used to start and stop apache server
    18. N [enter] configure advanced settings
    19. 4 [enter] Continue with installation
    20. N [enter] configure websocket proxy
    21. ec2-user [enter] runtime user name
    22. 2 [enter] skip the local Open Office installation
    23. [type an administrator password] [enter]
    24. re-enter the administrator password [enter]
    25. Y [enter] enable RDS
    26. [type an RDS administrator password] [enter]
    27. re-enter the RDS administrator password [enter]
    28. Y [enter] automatically check for updates
      Pre-installation Summary
      ------------------------
      Installation Type:
       Server configuration
      
      Licensing:
       Developer Edition
      
      Installation Directories:
       Product: /opt/cf
      
      Server Information:
       Admin Port: 8500
       Web Server: Apache (/etc/httpd/conf)
       Connector Port: 80
       ColdFusion Server Profile: Development Profile
       ColdFusion Solr Search Services: installed
       ColdFusion Pdfg Services: installed
       RDS: enabled
      
      Disk Space Information (for Installation Target):
       Required: 1,325 MB
       Available: 5,045 MB
    29. [enter] to continue
  6. Start the ColdFusion server
    cd /opt/cf/cfusion/bin
    sudo ./coldfusion start
  7. Using a web browser navigate to the admin page using your host domain
    http://aws-ec2instance-public-dns:8500/CFIDE/administrator/index.cfm
  8. enter the admin password
  9. hit OK button to complete the installation
  10. Download the mysql JDBC connector .tar file from http://dev.mysql.com/downloads/connector/j/ and unpack it on your local machine. Then use Filezilla to copy the mysql-connector-java-5.1.40-bin.jar file to the /opt/cf/cfusion/lib directory
  11. Restart the ColdFusion server
    cd /opt/cf/cfusion/bin
    sudo ./coldfusion restart

    The MySQL 5 JDBC database driver will now work. Now you can configure a database under Data Sources in the ColdFusion Administrator.

This completes the ColdFusion server installation.

LAMP on Linux Amazon EC2

As you recall, in the recent post Create an EC2 Linux Server I used the standard Amazon Linux AMI image to create an EC2 server. Now let's get it to host some things.

  1. The first step is to update the initial set of packages. Type:
     sudo yum update
  2. At the prompt, enter y
    Is this ok [y/d/N]: y
  3. sudo is the command to "run as root user". It gets old typing sudo all the time, so let's switch to a root shell with the following command
    sudo -i
  4. Install the http server, php, and the mysql driver
    yum install -y httpd24 php70 mysql56-server php70-mysqlnd
  5. Use the chkconfig command to configure the Apache web server to start at each system boot.
    chkconfig httpd on

    Tip

    The chkconfig command does not provide any confirmation message when you successfully enable a service. You can verify that httpd is on by running the following command.

    chkconfig --list httpd
    httpd 0:off 1:off 2:on 3:on 4:on 5:on 6:off

    Here, httpd is on in runlevels 2, 3, 4, and 5 (which is what you want to see).

  6. To allow ec2-user to manipulate all files, add ec2-user to the root group. As write ability is needed in the future, you can simply add group write permission.
    sudo usermod -a -G root ec2-user
  7. Amazon uses a different method, creating a www group. My method is simpler, using only the root group, but maybe not as secure. The www group method, which comes from Amazon here, is defined below. To allow ec2-user to manipulate files in the /var/www directory, you need to modify the ownership and permissions of the directory and files. There are many ways to accomplish this task; in this tutorial, you add a www group to your instance, give that group ownership of the /var/www directory, and add write permissions for the group. Any members of that group will then be able to add, delete, and modify files for the web server.

    To set file permissions

    1. Add the www group to your instance.
      [ec2-user ~]$ sudo groupadd www
    2. Add your user (in this case, ec2-user) to the www group.
      [ec2-user ~]$ sudo usermod -a -G www ec2-user

      Important

      You need to log out and log back in to pick up the new group. You can use the exit command, or close the terminal window.

    3. Log out and then log back in again, and verify your membership in the www group.
      1. Log out.
        [ec2-user ~]$ exit
      2. Reconnect to your instance, and then run the following command to verify your membership in the www group.
        [ec2-user ~]$ groups
        ec2-user wheel www
    4. Change the group ownership of /var/www and its contents to the www group.
      [ec2-user ~]$ sudo chown -R root:www /var/www
    5. Change the directory permissions of /var/www and its subdirectories to add group write permissions and to set the group ID on future subdirectories.
      [ec2-user ~]$ sudo chmod 2775 /var/www
      [ec2-user ~]$ find /var/www -type d -exec sudo chmod 2775 {} \;
    6. Recursively change the file permissions of /var/www and its subdirectories to add group write permissions.
      [ec2-user ~]$ find /var/www -type f -exec sudo chmod 0664 {} \;

    Now ec2-user (and any future members of the www group) can add, delete, and edit files in the Apache document root. Now you are ready to add content, such as a static website or a PHP application.

  8. Create a PHP test file in the www server document root
    echo "" > /var/www/html/phpinfo.php
  9. Change the permissions of the Apache server config file to allow editing
    chmod 664 /etc/httpd/conf/httpd.conf

    or using Amazon’s method:

  10. sudo chown -R root:www /etc/httpd/conf
    sudo chmod 2775 /etc/httpd/conf
    find /etc/httpd/conf -type d -exec sudo chmod 2775 {} \;
    find /etc/httpd/conf -type f -exec sudo chmod 0664 {} \;
  11. Using Filezilla or WinSCP, make an SCP connection to the server, navigate to the Apache config directory /etc/httpd/conf, and edit the httpd.conf file. Add the virtual host directives to the bottom of the file.

     # This first-listed virtual host is also the default for *:80
     <VirtualHost *:80>
       #ServerName www.mydomain.com
       DocumentRoot "/var/www/html"
     </VirtualHost>

     #<VirtualHost *:80>
     #  ServerName www.mydomain2.com
     #  DocumentRoot "/var/www/html/mydomain2"
     #</VirtualHost>

     #<VirtualHost *:80>
     #  ServerName www.mydomain3.com
     #  ServerAlias www.mydomain4alias.com
     #  DocumentRoot "/var/www/html/mydomain3"
     #</VirtualHost>

     Uncomment the ServerName line and enter your own domain. The 2nd and 3rd virtual hosts are fully commented out with #'s (just remove the #'s to create the virtual host). Set the ServerName to your additional domain name and the DocumentRoot to its root directory. Add as many virtual hosts as needed. When done editing, upload the file back to /etc/httpd/conf.

  12. BONUS: Do you need GD image library support and OPcache, both used by Drupal? Do the following:
     sudo yum install php70-gd
     sudo yum install php70-opcache


     # Install additional commonly used php packages
     sudo yum install php70-imap
     sudo yum install php70-mbstring
     sudo yum install php70-pdo
     sudo yum install php70-pecl-apcu
  13. Start the Apache web server.
    [ec2-user ~]$ sudo service httpd start
    Starting httpd: [ OK ]
  14. Use a browser to navigate to the server root and to the phpinfo.php page; you should see the Amazon Linux test page and the PHP information page.

The Apache 2.4 web server is now running with PHP and virtual hosts. If you wish to install MySQL and phpMyAdmin on this server, follow the directions from Amazon at http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/install-LAMP.html. I will use the RDS server and MySQL Workbench instead.

Create a mysql RDS server

One quick and easily managed service from Amazon is RDS for a database server. The RDS server is straightforward and makes future development easy. Here I will create the first MySQL database.

  1. Go to https://console.aws.amazon.com/rds/home?region=us-east-1#
  2. select RDS Instances
  3. select Launch DB Instance button
  4. select the mysql tab (it is free tier eligible) and hit the select button
  5. select the MySQL production environment and select the next button
  6. select the latest DB engine version from the drop-down selection
  7. enter 20 for allocated storage
  8. enter a DB instance identifier. I tend to use nonmeaningful names here, for example the name of a Greek goddess (example: mazudb)
  9. enter a master username and master password (user: mazu) and select the next step button
  10. on the Configure Advanced Settings form you will see VPC security group with Create a new Security Group selected (more on this later)
  11. enter a database name (example: mazudb) and select the Launch DB Instance button

In a later post I will discuss allowing an Amazon EC2 virtual host to connect to the server by changing the security group.

Now I can connect to the RDS MySQL server from my laptop with MySQL Workbench.
