
Refactor audio feat graph

Fixes #51. Instead of passing in an array of hard-coded values as the
interval end points, an object specifying the beginning, the end, and
the step size is now used. Categories can be defined and modified more
easily this way.
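
For illustration only (not part of the commit): a minimal sketch of how a {begin, end, step} spec expands into the same "lower ~ upper" categories that the old hard-coded arrays described. bucketsFor is a hypothetical helper name, not code from the repository.

// Sketch only: expand a {begin, end, step} spec into the "lower ~ upper"
// bucket labels that drawAudioFeatGraph builds for a feature.
function bucketsFor(spec) {
    const labels = [];
    for (let lower = spec.begin; lower < spec.end; lower += spec.step) {
        labels.push(`${lower} ~ ${lower + spec.step}`);
    }
    return labels;
}

console.log(bucketsFor({begin: 0, end: 1.0, step: 0.25}));
// -> [ '0 ~ 0.25', '0.25 ~ 0.5', '0.5 ~ 0.75', '0.75 ~ 1' ]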
master
Chris Shyi committed 6 years ago
commit fc6c30ec32
  1. api/utils.py (2 lines changed)
  2. api/views.py (3 lines changed)
  3. graphs/static/graphs/scripts/audio_feat_graph.js (29 lines changed)
  4. graphs/templates/graphs/features_graphs.html (16 lines changed)
  5. reset_db.sh (16 lines changed)

api/utils.py (2 lines changed)

@@ -170,7 +170,7 @@ def add_artist_genres(headers, artist_objs):
     params = {'ids': artist_ids}
     artists_response = requests.get('https://api.spotify.com/v1/artists/',
             headers=headers,
-            params={'ids': artist_ids},
+            params=params,
             ).json()['artists']
     for i in range(len(artist_objs)):
         if len(artists_response[i]['genres']) == 0:

api/views.py (3 lines changed)

@@ -166,8 +166,9 @@ def get_audio_feature_data(request, audio_feature, user_secret):
 # get_genre_data {{{ #
 def get_genre_data(request, user_secret):
-    """Return genre data needed to create the graph user.
+    """Return genre data needed to create the graph
+    TODO
     """
     user = User.objects.get(secret=user_secret)

graphs/static/graphs/scripts/audio_feat_graph.js (29 lines changed)

@@ -3,10 +3,10 @@
  * a designated parent element
  *
  * @param audioFeature: the name of the audio feature (string)
- * @param intervalEndPoints: a sorted array of 5 real numbers defining the intervals (categories) of values,
+ * @param intervalEndPoints: an object defining the intervals (categories) of values,
  * for example:
- * [0, 0.25, 0.5, 0.75, 1.0] for instrumentalness would define ranges
- * (0-0.25), (0.25-0.5), (0.5-0.75), (0.75-1.0)
+ * {begin: 0, end: 1.0, step: 0.25} for instrumentalness would define ranges
+ * [0-0.25), [0.25-0.5), [0.5-0.75), [0.75-1.0]
  * @param parentElem: the DOM element to append the graph to (a selector string)
  * @param userSecret: the user secret string for identification
  * @return None
@@ -18,11 +18,20 @@ function drawAudioFeatGraph(audioFeature, intervalEndPoints, parentElem, userSecret) {
         height = 270 - margin.top - margin.bottom;
     let featureData = {};
+    let currentEndPoint = intervalEndPoints.begin; // start at beginning
     // Create the keys first in order
-    for (let index = 0; index < intervalEndPoints.length - 1; index++) {
-        let key = `${intervalEndPoints[index]} ~ ${intervalEndPoints[index + 1]}`;
+    while (currentEndPoint !== intervalEndPoints.end) {
+        let startOfRange = currentEndPoint;
+        let endOfRange = startOfRange + intervalEndPoints.step;
+        let key = `${startOfRange} ~ ${endOfRange}`;
         featureData[key] = 0;
+        currentEndPoint = endOfRange;
     }
+    // for (let index = 0; index < intervalEndPoints.length - 1; index++) {
+    //     let key = `${intervalEndPoints[index]} ~ ${intervalEndPoints[index + 1]}`;
+    //     featureData[key] = 0;
+    // }
     // define the vertical scaling function
     let vScale = d3.scaleLinear().range([height, 0]);
@@ -31,12 +40,14 @@
     // categorize the data points
     for (let dataPoint of response.data_points) {
         dataPoint = parseFloat(dataPoint);
-        let index = intervalEndPoints.length - 2;
+        let currLowerBound = intervalEndPoints.end - intervalEndPoints.step;
+        let stepSize = intervalEndPoints.step;
         // find the index of the first element greater than dataPoint
-        while (dataPoint < intervalEndPoints[index]) {
-            index -= 1;
+        while (dataPoint < currLowerBound) {
+            currLowerBound -= stepSize;
         }
-        let key = `${intervalEndPoints[index]} ~ ${intervalEndPoints[index + 1]}`;
+        let upperBound = currLowerBound + stepSize;
+        let key = `${currLowerBound} ~ ${upperBound}`;
         featureData[key] += 1;
     }
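
A side note for readers, not code from the repository: the new key-building and categorization steps above can be exercised in isolation. The sketch below uses a hypothetical categorize helper and assumes the same {begin, end, step} shape and "lower ~ upper" keys as the commit.

// Sketch only: count data points per "lower ~ upper" bucket, mirroring
// the key-building loop and the walk-down categorization shown above.
function categorize(spec, dataPoints) {
    const counts = {};
    for (let lower = spec.begin; lower < spec.end; lower += spec.step) {
        counts[`${lower} ~ ${lower + spec.step}`] = 0;   // keys created in order
    }
    for (let point of dataPoints) {
        let lower = spec.end - spec.step;                // start at the top interval
        while (point < lower) {                          // walk down to the point's bucket
            lower -= spec.step;
        }
        counts[`${lower} ~ ${lower + spec.step}`] += 1;
    }
    return counts;
}

console.log(categorize({begin: 0, end: 1.0, step: 0.25}, [0.1, 0.3, 0.95]));
// -> { '0 ~ 0.25': 1, '0.25 ~ 0.5': 1, '0.5 ~ 0.75': 0, '0.75 ~ 1': 1 }

The sketch bounds its loops with `lower < spec.end` instead of the commit's strict `!==` comparison; for the steps used here (0.25, 15, 40) both land exactly on the end value, so either condition terminates.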

graphs/templates/graphs/features_graphs.html (16 lines changed)

@@ -24,14 +24,14 @@
     <script src="{% static "graphs/scripts/audio_feat_graph.js" %}"></script>
     <script type="text/javascript">
         let userSecret = "{{ user_secret }}";
-        drawAudioFeatGraph("instrumentalness", [0, 0.25, 0.5, 0.75, 1.0], 'body', userSecret);
-        drawAudioFeatGraph("valence", [0, 0.25, 0.5, 0.75, 1.0], 'body', userSecret);
-        drawAudioFeatGraph("energy", [0, 0.25, 0.5, 0.75, 1.0], 'body', userSecret);
-        drawAudioFeatGraph("tempo", [0, 40, 80, 120, 160, 200], 'body', userSecret);
-        drawAudioFeatGraph("danceability", [0, 0.25, 0.5, 0.75, 1.0], 'body', userSecret);
-        drawAudioFeatGraph("acousticness", [0, 0.25, 0.5, 0.75, 1.0], 'body', userSecret);
-        drawAudioFeatGraph("loudness", [-60, -45, -30, -15, 0], 'body', userSecret);
-        drawAudioFeatGraph("speechiness", [0, 0.25, 0.5, 0.75, 1.0], 'body', userSecret);
+        drawAudioFeatGraph("instrumentalness", {begin: 0, end: 1.0, step: 0.25}, 'body', userSecret);
+        drawAudioFeatGraph("valence", {begin: 0, end: 1.0, step: 0.25}, 'body', userSecret);
+        drawAudioFeatGraph("energy", {begin: 0, end: 1.0, step: 0.25}, 'body', userSecret);
+        drawAudioFeatGraph("tempo", {begin: 0, end: 200, step: 40}, 'body', userSecret);
+        drawAudioFeatGraph("danceability", {begin: 0, end: 1.0, step: 0.25}, 'body', userSecret);
+        drawAudioFeatGraph("acousticness", {begin: 0, end: 1.0, step: 0.25}, 'body', userSecret);
+        drawAudioFeatGraph("loudness", {begin: -60, end: 0, step: 15}, 'body', userSecret);
+        drawAudioFeatGraph("speechiness", {begin: 0, end: 1.0, step: 0.25}, 'body', userSecret);
     </script>
 </body>
 </html>

reset_db.sh (16 lines changed)

@@ -1,15 +1,15 @@
 # check if in virtual environment
 # https://stackoverflow.com/questions/15454174/how-can-a-shell-function-know-if-it-is-running-within-a-virtualenv/15454916
-# python -c 'import sys; print(sys.real_prefix)' 2>/dev/null && INVENV=1 || INVENV=0
+python -c 'import sys; print(sys.real_prefix)' 2>/dev/null && INVENV=1 || INVENV=0
 # INVENV=$(python -c 'import sys; print ("1" if hasattr(sys, "real_prefix") else "0")')
 # if $INVENV is 1, then in virtualenv
 # echo $INVENV
-# if [ $INVENV -eq 1 ]; then
-rm login/migrations/0* api/migrations/0*
-sudo -u postgres psql -f reset_db.sql
-python manage.py makemigrations
-python manage.py migrate
-python manage.py runserver
-# fi
+if [ $INVENV -eq 1 ]; then
+    rm login/migrations/0* api/migrations/0*
+    sudo -u postgres psql -f reset_db.sql
+    python manage.py makemigrations login api
+    python manage.py migrate
+    python manage.py runserver
+fi