Web Development

Log slow requests with express.js

You can do this with the response-time plugin, but I wanted something a bit more flexible that can easily be used on select routes as middleware, so I made this:

import onHeaders from 'on-headers';
import net from "net";
import moment from "moment/moment";
import ApiLog from "../models/RequestLog";

// Slow is 10 seconds
const defaultSlowTime = 10000;

function _getTimeDiff(hrtime) {
    // Returns the elapsed time since hrtime, in milliseconds
    const diff = process.hrtime(hrtime);
    return diff[0] * 1e3 + diff[1] * 1e-6;
}

function _ipIsPrivate(ip) {
    if (ip.substring(0,7) === "::ffff:")
        ip = ip.substring(7);

    if (net.isIPv4(ip)) {
        //         10.0.0.0 - 10.255.255.255        ||   172.16.0.0 - 172.31.255.255                          ||    192.168.0.0 - 192.168.255.255
        return  /^(10)\.(.*)\.(.*)\.(.*)$/.test(ip) || /^(172)\.(1[6-9]|2[0-9]|3[0-1])\.(.*)\.(.*)$/.test(ip) || /^(192)\.(168)\.(.*)\.(.*)$/.test(ip)
    }

    // else: ip is IPv6
    const firstWord = ip.split(":").find(el => !!el); //get first not empty word

    // The original IPv6 Site Local addresses (fec0::/10) are deprecated. Range: fec0 - feff
    if (/^fe[c-f][0-f]$/.test(firstWord))
        return true;

    // These days Unique Local Addresses (ULA) are used in place of Site Local.
    // Range: fc00 - fcff
    else if (/^fc[0-f]{2}$/.test(firstWord))
        return true;

    // Range: fd00 - fdff
    else if (/^fd[0-f]{2}$/.test(firstWord))
        return true;

    // Link local addresses (prefixed with fe80) are not routable
    else if (firstWord === "fe80")
        return true;

    // Discard Prefix
    else if (firstWord === "100")
        return true;

    // Any other IP address is not Unique Local Address (ULA)
    return false;
}

export default class {
    static log(onlySlow) {
        // true = use the default slow threshold, false = log everything, or pass a custom threshold in ms
        const slowTime = onlySlow === true ? defaultSlowTime : (onlySlow === false ? 0 : onlySlow);

        return (req, res, next) => {
            onHeaders(res, function onHeaders() {
                const {
                    log: {
                        userId = null,
                    } = {},
                    ip,
                    originalUrl,
                    startedAt,
                    hrtime,
                } = req;

                const time = _getTimeDiff(hrtime);
                // TODO do we want to add configuration to omit private IPs such as dev computers?
                const isPrivateIp = _ipIsPrivate(ip);

                if (slowTime < time) {
                    // Not awaited so we don't delay the response
                    ApiLog.createLog({
                        userId,
                        startedAt: startedAt.toISOString(),
                        endedAt: moment().toISOString(),
                        ip: ip,
                        url: originalUrl,
                        duration: time,
                    });
                }
            });

            next();
        }
    }
}

And I added this to my base Express route file:

    app.use((req, res, next) => {
        // This is our own personal version of response-time that we can attach to specific routes
        req.hrtime = process.hrtime();
        req.startedAt = moment();
        next();
    });

Now I can easily log certain routes by doing:

router.get('/v2/events/all/:year/:month',
    RequestLogMiddleware.log(true),
    celebrate(calendarEventSchema.retrieveAllEventsByMonth),
    CalendarEventController.retrieveAllEventsByMonth
);
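
Passing true uses the 10-second default, but per the slowTime line above the same middleware also accepts false (log everything) or a number of milliseconds, so the threshold can be tuned per route. A quick sketch (the routes and controllers here are just placeholders):

    // Log anything on this route slower than 2 seconds
    router.get('/v2/reports/heavy',
        RequestLogMiddleware.log(2000),
        ReportController.heavy
    );

    // Log every request on this route, regardless of duration
    router.get('/v2/debug/ping',
        RequestLogMiddleware.log(false),
        DebugController.ping
    );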
Web Development

Create a new target group and add it to an ECS service

aws elbv2 create-target-group --name proxy-beta-https --cli-input-json '{"Protocol": "TCP", "Port": 443, "VpcId": "vpc-xxx", "HealthCheckProtocol": "TCP", "HealthCheckPort": "traffic-port", "HealthCheckEnabled": true, "HealthCheckIntervalSeconds": 30, "HealthCheckTimeoutSeconds": 10, "HealthyThresholdCount": 5, "UnhealthyThresholdCount": 2, "TargetType": "ip", "IpAddressType": "ipv4"}'

aws ecs update-service --service proxy-beta --cluster proxy-beta --load-balancers targetGroupArn=arn:aws:elasticloadbalancing:eu-west-1:xxx:targetgroup/proxy-beta-https/xxx,containerName=proxy-beta,containerPort=443 targetGroupArn=arn:aws:elasticloadbalancing:eu-west-1:xx:targetgroup/proxy-beta/xxx,containerName=proxy-beta,containerPort=80
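
If you would rather do this from Node than the CLI, the same two calls exist in the AWS SDK for JavaScript v3; a rough sketch (ARNs, names and the VPC ID are placeholders):

    import { ElasticLoadBalancingV2Client, CreateTargetGroupCommand } from "@aws-sdk/client-elastic-load-balancing-v2";
    import { ECSClient, UpdateServiceCommand } from "@aws-sdk/client-ecs";

    const elbv2 = new ElasticLoadBalancingV2Client({ region: "eu-west-1" });
    const ecs = new ECSClient({ region: "eu-west-1" });

    // Create the TCP target group for the HTTPS port
    const { TargetGroups } = await elbv2.send(new CreateTargetGroupCommand({
        Name: "proxy-beta-https",
        Protocol: "TCP",
        Port: 443,
        VpcId: "vpc-xxx",
        HealthCheckProtocol: "TCP",
        HealthCheckPort: "traffic-port",
        TargetType: "ip",
        IpAddressType: "ipv4",
    }));

    // Attach it to the service alongside the existing HTTP target group
    await ecs.send(new UpdateServiceCommand({
        cluster: "proxy-beta",
        service: "proxy-beta",
        loadBalancers: [
            { targetGroupArn: TargetGroups[0].TargetGroupArn, containerName: "proxy-beta", containerPort: 443 },
            { targetGroupArn: "arn:aws:elasticloadbalancing:eu-west-1:xx:targetgroup/proxy-beta/xxx", containerName: "proxy-beta", containerPort: 80 },
        ],
    }));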

Web Development

Stopping SYN and Slowloris attacks in NodeJS

https://www.nearform.com/blog/avoid-time-based-ddos-attacks-node-js/

Note: before Node 18 one of the settings was not implemented well; check the link for more info.

Some good defaults I used are:

    sslServer.headersTimeout = 5000;
    sslServer.requestTimeout = 10000;
    sslServer.setTimeout(30000);
    sslServer.connectionsCheckingInterval = 500;

This should stop people from being able to hang connections on your server. Note that connectionsCheckingInterval can reduce performance, since the server actively monitors sockets on that interval instead of waiting for data before closing them, but that’s life.
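
For context, a minimal sketch of where those lines sit, assuming an Express app and certificate paths of your own:

    import https from "https";
    import fs from "fs";
    import app from "./app"; // your Express app

    const sslServer = https.createServer({
        key: fs.readFileSync("/path/to/privkey.pem"),
        cert: fs.readFileSync("/path/to/fullchain.pem"),
    }, app);

    // The defaults from above
    sslServer.headersTimeout = 5000;
    sslServer.requestTimeout = 10000;
    sslServer.setTimeout(30000);
    sslServer.connectionsCheckingInterval = 500;

    sslServer.listen(443);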

Web Development

NodeJS: use Let’s Encrypt to make a proxy for custom domains

Add these functions to some controller

const acme = require('acme-client');
const dns2 = require('dns2');
const _ = require('lodash');
const moment = require('moment');
// CoachSetting, SslCertificate and config come from elsewhere in your app

export async function validateDomain1(req, res, next) {
    const coachSettings = await CoachSetting.getCoachSettings(req.auth._id);

    if(_.isNil(coachSettings) || _.isNil(coachSettings.custom_domain)) {
        return res.status(400).json({message: 'No custom domain set'});
    }

    const client = new acme.Client({
        directoryUrl: acme.directory.letsencrypt.staging,
        accountKey: config.ACME_CLIENT_CSR,
    });

    /* Register account */
    await client.createAccount({
        termsOfServiceAgreed: true,
        contact: [config.ACME_CLIENT_CONTACT]
    });

    /* Place new order */
    const order = await client.createOrder({
        identifiers: [
            { type: 'dns', value: coachSettings.custom_domain },
        ]
    });

    const authz = (await client.getAuthorizations(order))[0];

    const { challenges } = authz;

    const challengeType = 'dns-01';

    let challenge = null;

    for (const k in challenges) {
        if (challenges[k].status === 'valid') {
            // Only ever one if valid
            //return res.status(400).json({message: 'Domain already registered'});
            challenge = challenges[k];
        } else {
            if (challenges[k].type !== challengeType) {
                continue;
            }

            /* Select a challenge of the matching type */
            challenge = challenges[k];
        }
    }

    if (_.isNil(challenge)) {
        // TODO Log?
        return res.status(400).json({message: 'Challenge type not found'});
    }

    const keyAuthorization = await client.getChallengeKeyAuthorization(challenge);

    return res.json({
        message: 'Verification issued',
        type: challengeType,
        token: challenge.token,
        txt_name: `_acme-challenge.${coachSettings.custom_domain}`,
        txt_value: keyAuthorization,
    });
}

export async function validateDomain2(req, res, next) {
    const coachSettings = await CoachSetting.getCoachSettings(req.auth._id);

    const client = new acme.Client({
        directoryUrl: acme.directory.letsencrypt.staging,
        accountKey: config.ACME_CLIENT_CSR,
    });

    acme.setLogger((message) => {
        console.log(message);
    });

    /* Register account */
    await client.createAccount({
        termsOfServiceAgreed: true,
        contact: ['mailto:devops@delenta.com']
    });

    /* Place new order */
    const order = await client.createOrder({
        identifiers: [
            { type: 'dns', value: coachSettings.custom_domain },
        ]
    });

    let challengeCompleted = false;

    const authz = (await client.getAuthorizations(order))[0];

    const { challenges } = authz;

    let challenge = null;

    let certificate = null;

    const challengeType = 'http-01';

    for (const k in challenges) {
        if (challenges[k].status === 'valid') {
            // only ever one if valid
            challengeCompleted = true;
            challenge = challenges[k];
        } else {
            if (challenges[k].type !== challengeType) {
                continue;
            }

            challenge = challenges[k];
        }
    }

    if (_.isNil(challenge)) {
        // TODO Log?
        return res.status(400).json({message: 'Challenge type not found'});
    }

    if (!challengeCompleted) {
        try {
            const keyAuthorization = await client.getChallengeKeyAuthorization(challenge);

            if (challengeType === 'dns-01') {
                const dns = new dns2();
                const result = await dns.resolve(`_acme-challenge.${coachSettings.custom_domain}`, 'TXT');

                if (result.answers.length <= 0) {
                    return res.status(400).json({message: 'TXT record not found, might need more time'});
                }

                if (result.answers[0].data !== keyAuthorization) {
                    return res.status(400).json({message: 'TXT record found but using wrong key, please check it is the right key'});
                }
            } else if (challengeType === 'http-01') {
                certificate = await SslCertificate.findOne({
                    userId: req.auth._id,
                    domain: coachSettings.custom_domain,
                    isVerified: false,
                    token: challenge.token,
                }).exec();

                if (_.isNil(certificate)) {
                    certificate = await SslCertificate.create({
                        userId: req.auth._id,
                        domain: coachSettings.custom_domain,
                        token: challenge.token,
                        key: keyAuthorization,
                    });
                } else {
                    certificate.key = keyAuthorization;
                    await certificate.save();
                }

                const dns = new dns2();
                const result = await dns.resolveCNAME(`${coachSettings.custom_domain}`, 'A');

                if (result.answers.length <= 0) {
                    return res.status(400).json({message: 'CNAME record not found, might need more time'});
                }

                if (result.answers[0].domain !== 'domains.delenta.com') {
                    return res.status(400).json({message: 'CNAME record not pointing to domains.delenta.com, try again'});
                }
            }

            try {
                /* Notify ACME provider that challenge is satisfied */
                await client.completeChallenge(challenge);
                challengeCompleted = true;

                /* Wait for ACME provider to respond with valid status */
                await client.waitForValidStatus(challenge);
            } finally {
                /* Clean up challenge response */
            }
        } catch (e) {
            /* Deactivate pending authz when unable to complete challenge */
            if (!challengeCompleted) {
                try {
                    await client.deactivateAuthorization(authz);
                } catch (f) {
                    /* Catch and suppress deactivateAuthorization() errors */
                }
            }

            throw e;
        }
    }

    /* Finalize order */
    const [key, csr] = await acme.crypto.createCsr({
        commonName: coachSettings.custom_domain,
    });

    const finalized = await client.finalizeOrder(order, csr);
    const cert = await client.getCertificate(finalized);

    if (_.isNil(certificate)) {
        certificate = await SslCertificate.findOne({
            domain: coachSettings.custom_domain
        }).exec();
    }

    if (!_.isNil(certificate)) {
        certificate.userId = req.auth._id;
        certificate.csr = csr.toString();
        certificate.private_key = key.toString();
        certificate.certificate = cert.toString();
        certificate.expiresAt = moment().add(90, 'days').toDate();
        certificate.renewsAt = moment().add(60, 'days').toDate();
        certificate.isVerified = true;
        await certificate.save();
    } else {
        certificate = await SslCertificate.create({
            userId: req.auth._id,
            domain: coachSettings.custom_domain,
            csr: csr.toString(),
            private_key: key.toString(),
            certificate: cert.toString(),
            expiresAt: moment().add(90, 'days').toDate(),
            renewsAt: moment().add(60, 'days').toDate(),
            isVerified: true,
        });
    }

    res.json({message: 'Certificate issued'});
}

And use https://github.com/Sammaye/node-proxy to do validation via http-01.
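
The part the proxy has to answer for http-01 is roughly this route, serving the stored key authorization back to Let’s Encrypt (a sketch against the SslCertificate model used above; app is the proxy’s own Express app):

    app.get('/.well-known/acme-challenge/:token', async (req, res) => {
        const cert = await SslCertificate.findOne({ token: req.params.token }).exec();

        if (_.isNil(cert)) {
            return res.status(404).send('Not found');
        }

        // Let's Encrypt expects the raw key authorization as plain text
        return res.type('text/plain').send(cert.key);
    });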

This will let you issue certs into a database (they can then be cached in the proxy or in your cloud provider’s certificate manager), validate them and then use them.

Add this behind a network load balancer to support elastic IPs on a global accelerator.

Web Development

Make Angular load assets from CDN without deployUrl

I recently had to make Angular work on a custom-domain, whitelabel-type setup.

So I simply made a proxy which used Let’s Encrypt to issue certificates to an Express server, and within that Express server I mutated the output of index.html to make the base href load from my CDN assets, like so:

    // createProxyMiddleware and responseInterceptor come from the http-proxy-middleware package
    app.use('*', createProxyMiddleware({
        target: config.PROXY_URL,
        changeOrigin: true,
        selfHandleResponse: true,
        onProxyRes: responseInterceptor(async (responseBuffer, proxyRes, req, res) => {
            let response = responseBuffer.toString('utf8');
            if (response.match(/(<base[ ]*?href=").+(")/)) {
                response = response.replace(/(<base[ ]*?href=").+(")/, "$1" + `${config.PROXY_URL}/` + "$2");
                return response;
            }
            return responseBuffer;
        }),
    }));

And then in the Angular app itself I added:

{provide: APP_BASE_HREF, useValue: window.location.origin + '/'}

To the AppModule providers list.

Now it will load the assets and the app itself from the central CDN (index.html still loads from the central server, but it is 1 KB and can be put behind a global accelerator to mimic a CDN), while also allowing it all to load on the custom domains I have verified via the Node.js proxy.
Javascript, Web Development

Upload Angular sourcemaps to Sentry

In CI/CD I did:

    - curl -sL https://sentry.io/get-cli/ | SENTRY_CLI_VERSION="2.11.0" sh
    - sentry-cli releases set-commits "angular@$CI_COMMIT_SHA" --auto
    - sentry-cli releases files "angular@$CI_COMMIT_SHA" upload-sourcemaps dist/charity
    - sentry-cli releases deploys "angular@$CI_COMMIT_SHA" new -e alpha
    - sentry-cli releases finalize "angular@$CI_COMMIT_SHA"
    - find dist -type f -name '*.js' -exec sed -i -E 's/\/\/# sourceMappingURL=[^ ]*\.js\.map//g' {} \;
    - find dist -name "*.map" -exec rm {} \;

and in my angular.json I changed sourceMap to:

              "sourceMap": {
                "scripts": true,
                "styles": false,
                "hidden": false
              },

I heard from this guy https://stackoverflow.com/a/74962908/383478 that Sentry does not accept hidden sourcemaps (I have not yet tested this), so I fixed his answer and used it as my own. Unlike his, this shows the actual sentry-cli commands I use instead of just the names of the actions I took.

MongoDB, Web Development

Run a Node cronjob on a single server, or without overlap, with node-schedule

Write a cache class like so:

import BaseModel from "./BaseModel";
import mongoose, {Schema} from "mongoose";
import moment from "moment";
import {MongoError} from "mongodb";

const TYPES = {
    JSON: 'JSON',
};

const schema = new Schema({
    key: {
        type: String,
        required: true,
    },
    value: {
        type: String,
        required: true,
    },
    expireIn: {
        // Can be:
        // - 0: expire on the next read
        // - any other number: expire after that many seconds
        type: Number,
        default: null
    },
    expireAt: {
        type: Date,
        default: null,
    }
}, {collection: 'cache', timestamps: true});

schema.index({expireAt: 1}, {expireAfterSeconds: 0});
schema.index({key: 1, createdAt: 1});
schema.index({key: 1}, {unique: true});

const model = mongoose.model('Cache', schema);

export default class extends BaseModel {
    static model = model;

    static TYPES = TYPES;

    static async read(key, type = null) {
        const cacheRow = await this.findOne({key}).sort({createdAt:-1}).lean().exec();

        if (!cacheRow) {
            return false;
        }

        const {value = null} = cacheRow;

        if (!cacheRow.expireIn) {
            // expireIn is 0 (or unset): remove it now but still return this last value
            await this.remove(cacheRow.key);
        } else {
            const expireInDate = moment(cacheRow.createdAt).add(cacheRow.expireIn, 'seconds').toDate();
            if (new Date() > expireInDate) {
                await this.remove(cacheRow.key);
                // Do not use this value if out of date
                return false;
            }
        }

        if (type === TYPES.JSON) {
            try {
                return JSON.parse(value);
            } catch (e) {
                // TODO should we delete this value?
                return false;
            }
        }

        return value;
    }

    static async write(key, value, expireIn, expireAt, update = false) {

        const doc = {
            value,
            expireIn: expireIn || 0,
            expireAt: expireAt || moment().utc().startOf('day').add(1, 'days').toDate(),
        };

        try {
            return await this.create({
                key,
                ...doc
            });
        } catch (error) {
            if (
                error instanceof MongoError &&
                error.code === 11000 &&
                update
            ) {
                return this.updateOne({key}, doc, {upsert: true});
            }

            throw error;
        }
    }

    static async put(key, value, expireIn, expireAt) {
        return this.write(key, value, expireIn, expireAt, true);
    }

    static async remove(key) {
        return await this.deleteOne({key}).exec();
    }
}
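
Before the cronjob, a quick usage sketch of the cache itself from any async code (the key here is just an example):

    // Store JSON for 5 minutes; values are strings, so stringify first
    await Cache.put('homepage-stats', JSON.stringify({visits: 123}), 300);

    // Read it back parsed; returns false when missing or expired
    const stats = await Cache.read('homepage-stats', Cache.TYPES.JSON);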

And then make a cronjob like:

import schedule from "node-schedule";
import Cache from "../models/Cache";
import moment from "moment";

const cache_key = 'sitemap-cronjob';

const task = schedule.scheduleJob('* * * * *', async () => {
    try {
        await Cache.write(
            cache_key,
            true,
            3600,
            moment().utc().add(1, 'hours').toDate()
        );
    } catch (e) {
        // Most likely a duplicate key error: another server
        // already holds the lock, so skip this run
        return true;
    }

    // TODO get all active courses, packages and users

    // TODO form the xml file

    // TODO upload it to S3

    // TODO invalidate the cloudfront cache for that file

    // TODO tell Google and Bing that our sitemap has changed

    await Cache.remove(cache_key);
});

task.on('error', _ => {
    Cache.remove(cache_key);
});

export default task;

This confines the cronjob to a single server by using the central cache collection in MongoDB; inspiration was taken from the many others who do this, like Laravel https://laravel.com/docs/9.x/scheduling#running-tasks-on-one-server.

Of course this should be taken with a grain of salt. While Node.js-based schedulers are great, and you could even expand this to run in a dedicated cluster with its own Dockerfile for starting up a scheduler (like in Laravel), you sometimes cannot get better than something like AWS EventBridge, which allows you to set up cronjob clusters with dedicated workloads.

Web Development

AWS CloudWatch alarm is not sending to SNS/Slack

If this is happening to you then you are likely using the built-in SNS KMS key. To quote this article: https://aws.amazon.com/premiumsupport/knowledge-center/cloudwatch-receive-sns-for-alarm-trigger/

SNS allows encryption at rest for its topic. If the default AWS Key Management Service (KMS) key “alias/aws/sns” is used for this encryption, then CloudWatch alarms can’t publish messages to the SNS topic. The key policy of the default AWS KMS key for SNS doesn’t allow CloudWatch alarms to perform “kms:Decrypt” and “kms:GenerateDataKey” API calls. Because this key is AWS managed, you can’t manually edit the policy.
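
A quick way to confirm this is the problem is to check the topic’s KmsMasterKeyId; if it comes back as the default alias you will need to switch the topic to a customer managed key whose policy allows CloudWatch. A sketch with the AWS SDK for JavaScript v3 (the topic ARN is a placeholder):

    import { SNSClient, GetTopicAttributesCommand } from "@aws-sdk/client-sns";

    const sns = new SNSClient({ region: "eu-west-1" });

    const { Attributes } = await sns.send(new GetTopicAttributesCommand({
        TopicArn: "arn:aws:sns:eu-west-1:xxx:cloudwatch-alarms",
    }));

    // "alias/aws/sns" means the default AWS managed key, which CloudWatch alarms cannot publish through
    console.log(Attributes.KmsMasterKeyId);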

Linux, Web Development

Using Gitlab CI/CD to deploy to AWS ECS Fargate

# Dev(Alpha) Stages - START
docker alpha build:
  stage: Build Image
  script:
    - apk add --no-cache nodejs npm curl jq musl-dev gcc python3-dev python3 py3-pip git g++ make && pip3 install awscli
    - npm install && npm run build
    - $(aws ecr get-login --no-include-email --region $AWS_REGION)
    - docker build --target prod -t api-alpha .
    - docker tag api-alpha "$DEV_ECR_URL:$CI_COMMIT_SHA"
    - docker push "$DEV_ECR_URL:$CI_COMMIT_SHA"
    - sh ./scan.sh api-alpha $CI_COMMIT_SHA
  when: manual
  except:
    - main

deploy alpha:
  stage: Deploy
  script:
    - apk add --no-cache nodejs npm curl jq musl-dev gcc python3-dev python3 py3-pip git g++ make && pip3 install awscli
    - NEW_TASK_DEFINITION=$(aws ecs describe-task-definition --region $AWS_REGION --task=api-alpha |
        jq --arg IMAGE "$DEV_ECR_URL:$CI_COMMIT_SHA" '.taskDefinition | .containerDefinitions[0].image = $IMAGE
        | del(.taskDefinitionArn) | del(.revision) | del(.status) | del(.requiresAttributes) | del(.compatibilities) | del(.registeredAt) | del(.registeredBy)'
      )
    - echo $NEW_TASK_DEFINITION
    - NEW_TASK_INFO=$(aws ecs register-task-definition --region $AWS_REGION --cli-input-json "$NEW_TASK_DEFINITION")
    - echo $NEW_TASK_INFO
    - NEW_REVISION=$(echo $NEW_TASK_INFO | jq '.taskDefinition.revision')
    - DEPLOYMENT=$(aws ecs update-service --cluster api-alpha --service api-alpha --task-definition api-alpha:${NEW_REVISION} --region $AWS_REGION)
    - echo $DEPLOYMENT
  when: manual
  except:
    - main
# Dev Stages - END