diff --git a/etc/quiz-app/src/assets/translations/en/lesson-11.json b/etc/quiz-app/src/assets/translations/en/lesson-11.json
index 0fa0f8254848aa1d9831f3797007c25b7526ac2b..8f791e0bf7968eb0c5e568f138bb715a15b84a78 100644
--- a/etc/quiz-app/src/assets/translations/en/lesson-11.json
+++ b/etc/quiz-app/src/assets/translations/en/lesson-11.json
@@ -39,7 +39,7 @@
       ]
     },
     {
-      "questionText": "How many objects an object detection model can detect?",
+      "questionText": "How many objects can an object detection model detect?",
       "answerOptions": [
         {
           "answerText": "one",
diff --git a/etc/quiz-app/src/assets/translations/en/lesson-18.json b/etc/quiz-app/src/assets/translations/en/lesson-18.json
index abda66204f951604b86e8f8f5964f591897b4a06..de63a0ec6fb87c77bb27fdbe122905d8ac0c6d2d 100644
--- a/etc/quiz-app/src/assets/translations/en/lesson-18.json
+++ b/etc/quiz-app/src/assets/translations/en/lesson-18.json
@@ -9,7 +9,7 @@
     "title": "Transformers: Pre Quiz",
     "quiz": [
       {
-        "questionText": "Attention mechanism provides a means of _____ the imoact of an inout vector on an output prediction of RNN",
+        "questionText": "Attention mechanism provides a means of _____ the impact of an input vector on the output prediction of an RNN",
         "answerOptions": [
           {
             "answerText": "weighting",
diff --git a/etc/quiz-app/src/assets/translations/en/lesson-19.json b/etc/quiz-app/src/assets/translations/en/lesson-19.json
index e4650f047f28c007db8e6101aaf9a413c0337628..1b1d9c96f810cce57e1299c13cd15741dac57cea 100644
--- a/etc/quiz-app/src/assets/translations/en/lesson-19.json
+++ b/etc/quiz-app/src/assets/translations/en/lesson-19.json
@@ -9,7 +9,7 @@
     "title": "Named Entity Recognition: Pre Quiz",
     "quiz": [
       {
-        "questionText": "What is NER stands for?",
+        "questionText": "What does NER stand for?",
         "answerOptions": [
           {
             "answerText": "Nearest Estimated Region",
@@ -70,7 +70,7 @@
       },
       {
         "answerText": "token classification",
-        "isCorrect": false
+        "isCorrect": true
       },
       {
         "answerText": "text regression",
diff --git a/etc/quiz-app/src/assets/translations/en/lesson-20.json b/etc/quiz-app/src/assets/translations/en/lesson-20.json
index 79082c9a8cf711d1dff6c3f47a04d55148abbc3b..14d1da28dbd9b4355010c3594108c12ff9d72abe 100644
--- a/etc/quiz-app/src/assets/translations/en/lesson-20.json
+++ b/etc/quiz-app/src/assets/translations/en/lesson-20.json
@@ -91,7 +91,7 @@
       },
       {
         "answerText": "Both",
-        "isCorrect": false
+        "isCorrect": true
       }
     ]
   },
diff --git a/etc/quiz-app/src/assets/translations/en/lesson-22.json b/etc/quiz-app/src/assets/translations/en/lesson-22.json
index 378bd7f83da24808aff8023ee760c5aee1bba798..f6492e87e6db2178f65ef84f2aaec17bf8fb0534 100644
--- a/etc/quiz-app/src/assets/translations/en/lesson-22.json
+++ b/etc/quiz-app/src/assets/translations/en/lesson-22.json
@@ -83,7 +83,7 @@
       ]
     },
     {
-      "questionText": "Which problems RL is applicable to?",
+      "questionText": "Which problem(s) is RL applicable to?",
       "answerOptions": [
         {
           "answerText": "With discrete environment",
diff --git a/etc/quiz-src/questions-en.txt b/etc/quiz-src/questions-en.txt
index 41ff693ceb35c2ebd60e32373605d4b581b1a67d..27cd3d77ae6079c998a88cae39af0c2d137e1450 100644
--- a/etc/quiz-src/questions-en.txt
+++ b/etc/quiz-src/questions-en.txt
@@ -266,7 +266,7 @@ Lesson 11B Object Detection: Pre Quiz
 - shape
 + location
 - type
-* How many objects an object detection model can detect?
+* How many objects can an object detection model detect?
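The `isCorrect` flips above (lesson-19 and lesson-20) are exactly the kind of hand edit that is easy to get wrong. A minimal lint sketch for the translation files, assuming the `title`/`quiz` objects visible in the hunks sit under a top-level `quizzes` array - that wrapper is an assumption, since only the inner `quiz`/`answerOptions` nesting is shown in this diff:

```python
import glob
import json

# Flag any question that ends up with no correct answer after the edits.
# The quizzes -> quiz -> answerOptions nesting follows the hunks above;
# the top-level layout of the full files is an assumption.
for path in sorted(glob.glob("etc/quiz-app/src/assets/translations/en/lesson-*.json")):
    with open(path, encoding="utf-8") as f:
        data = json.load(f)
    blocks = data if isinstance(data, list) else [data]
    for block in blocks:
        for quiz in block.get("quizzes", []):
            for question in quiz.get("quiz", []):
                options = question.get("answerOptions", [])
                n_correct = sum(1 for o in options if o.get("isCorrect") is True)
                if n_correct == 0:
                    print(f"{path}: no correct answer for {question.get('questionText', '?')!r}")
```

Some questions legitimately have several correct options, so the check only flags questions that have none.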
 - one
 - two
 + any number
@@ -446,7 +446,7 @@ Lesson 17E Generative networks: Post Quiz
 - one-to-many
 
 Lesson 18B Transformers: Pre Quiz
-* Attention mechanism provides a means of _____ the imoact of an inout vector on an output prediction of RNN
+* Attention mechanism provides a means of _____ the impact of an input vector on the output prediction of an RNN
 + weighting
 - training
 - testing
@@ -473,7 +473,7 @@ Lesson 18E Transformers: Post Quiz
 - 3
 
 Lesson 19B Named Entity Recognition: Pre Quiz
-* What is NER stands for?
+* What does NER stand for?
 - Nearest Estimated Region
 - Nearest Entity Region
 + Named Entity Recognition
@@ -488,7 +488,7 @@ Lesson 19B Named Entity Recognition: Pre Quiz
 Lesson 19E Named Entity Recognition: Post Quiz
 * NER model is essentially a ____ model
 - text classification
-- token classification
++ token classification
 - text regression
 * Which neural network types can be used for NER?
 - RNNs
@@ -520,7 +520,7 @@ Lesson 20E Language Models: Post Quiz
 * Prompt engineering can be used with
 - Zero-shot learning
 - Few-shot learning
-- Both
++ Both
 * Which metric can be used to estimate quality of a language model?
 - accuracy
 - recall
@@ -571,7 +571,7 @@ Lesson 22E Reinforcement Learning: Post Quiz
 - It achieves high accuracy
 - Using perplexity metric
 + Using reward function
-* Which problems RL is applicable to?
+* Which problem(s) is RL applicable to?
 - With discrete environment
 - With continuous environment
 + Both
diff --git a/lessons/4-ComputerVision/11-ObjectDetection/README.md b/lessons/4-ComputerVision/11-ObjectDetection/README.md
index d87cebbca08f11059f646dc22d7cbcdd25185828..2084ca160eca5ef88b7f49913b8e3c2ce119c4f4 100644
--- a/lessons/4-ComputerVision/11-ObjectDetection/README.md
+++ b/lessons/4-ComputerVision/11-ObjectDetection/README.md
@@ -11,8 +11,9 @@ The image classification models we have dealt with so far took an image and prod
 ## A Naive Approach to Object Detection
 
 Assuming we wanted to find a cat on a picture, a very naive approach to object detection would be the following:
-1. Break the picture down to a number of tiles
-2. Run image classification on each tile. 
+
+1. Break the picture down into a number of tiles
+2. Run image classification on each tile.
 3. Those tiles that result in sufficiently high activation can be considered to contain the object in question.
 
@@ -50,8 +51,8 @@ The idea is simple - we divide the area of intersection between two figures by t
 
 Suppose we want to measure how well a given class of objects $C$ is recognized. To measure it, we use **Average Precision** metrics, which is calculated as follows:
 
-1. Consider Precision-Recall curve shows the accuracy depending on a detection threshold value (from 0 to 1).
-2. Depending on the threshold, we will get more or less objects detected in the image, and different values of precision and recall. 
+1. Consider the Precision-Recall curve, which shows the accuracy depending on a detection threshold value (from 0 to 1).
+2. Depending on the threshold, we will get more or fewer objects detected in the image, and different values of precision and recall.
 3. The curve will look like this:
 
 <img src="https://github.com/shwars/NeuroWorkshop/raw/master/images/ObjDetectionPrecisionRecall.png"/>
@@ -118,7 +119,7 @@ This algorithm is even faster than Faster R-CNN. The main idea is the following:
 
 1. We extract features using ResNet-101
 1. Features are processed by **Position-Sensitive Score Map**. Each object from $C$ classes is divided by $k\times k$ regions, and we are training to predict parts of objects.
-1. For each part from $k\times k$ regions all networks vote for object classes, and the object class with maximum vote is selected.
+1. For each part from the $k\times k$ regions, all networks vote for object classes, and the object class with the maximum vote is selected.
 
@@ -127,14 +128,14 @@ This algorithm is even faster than Faster R-CNN. The main idea is the following:
 ### YOLO - You Only Look Once
 
 YOLO is a realtime one-pass algorithm. The main idea is the following:
- 
+
 * Image is divided into $S\times S$ regions
 * For each region, **CNN** predicts $n$ possible objects, *bounding box* coordinates and *confidence*=*probability* * IoU.
- 
+
 
- 
+
 > Image from [official paper](https://arxiv.org/abs/1506.02640)
- 
+
 ### Other Algorithms
 
 * RetinaNet: [official paper](https://arxiv.org/abs/1708.02002)
@@ -161,7 +162,7 @@ Read through these articles and notebooks about YOLO and try them for yourself
 * [Official site](https://pjreddie.com/darknet/yolo/)
 * Yolo: [Keras implementation](https://github.com/experiencor/keras-yolo2), [step-by-step notebook](https://github.com/experiencor/basic-yolo-keras/blob/master/Yolo%20Step-by-Step.ipynb)
 * Yolo v2: [Keras implementation](https://github.com/experiencor/keras-yolo2), [step-by-step notebook](https://github.com/experiencor/keras-yolo2/blob/master/Yolo%20Step-by-Step.ipynb)
- 
+
 ## [Post-lecture quiz](https://black-ground-0cc93280f.1.azurestaticapps.net/quiz/211)
 
 ## Review & Self Study
@@ -173,4 +174,3 @@ Read through these articles and notebooks about YOLO and try them for yourself
 * [Implementation of Faster R-CNN in Python for Object Detection](https://www.analyticsvidhya.com/blog/2018/11/implementation-faster-r-cnn-python-object-detection/)
 
 ## [Assignment: Object Detection](lab/README.md)
-
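The Intersection over Union metric described in the README hunks above (area of intersection divided by area of union) is compact enough to spell out. A minimal sketch, using an `(x1, y1, x2, y2)` box convention that is my own choice, not anything from the lesson code:

```python
def iou(box_a, box_b):
    """Intersection over Union of two axis-aligned boxes given as (x1, y1, x2, y2)."""
    # Corners of the intersection rectangle
    x1 = max(box_a[0], box_b[0])
    y1 = max(box_a[1], box_b[1])
    x2 = min(box_a[2], box_b[2])
    y2 = min(box_a[3], box_b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0

print(iou((0, 0, 10, 10), (5, 5, 15, 15)))  # 25 / 175 ≈ 0.143
```

Sweeping a threshold over such IoU scores is what produces the Precision-Recall curve that Average Precision summarizes.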
diff --git a/lessons/5-NLP/15-LanguageModeling/README.md b/lessons/5-NLP/15-LanguageModeling/README.md
index e7d1050a69c998a46be06fd3a52427edb02cbe8d..3da6b6c94d091c7361e380b1b00ca194c9941c10 100644
--- a/lessons/5-NLP/15-LanguageModeling/README.md
+++ b/lessons/5-NLP/15-LanguageModeling/README.md
@@ -38,4 +38,4 @@ In the previous lesson we have seen that words embeddings work like magic! Now w
 
 ## 🚀 [Assignment: Train Skip-Gram Model](lab/README.md)
 
-In the lab, we challenge you to modify the code from this lesson to train skip-gram model instead of CBoW. [Read the details](lab/README.md)
+In the lab, we challenge you to modify the code from this lesson to train a skip-gram model instead of CBoW. [Read the details](lab/README.md)
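The lab referenced above asks for a skip-gram model instead of CBoW; the two differ only in how training pairs are built from a window of text. A hedged sketch of the pair generation (window size and whitespace tokenization are illustrative choices, not the lesson's):

```python
def skipgram_pairs(tokens, window=2):
    """For each center word, emit (center, context) training pairs within the window.
    CBoW would instead map the surrounding context words to the center word."""
    pairs = []
    for i, center in enumerate(tokens):
        for j in range(max(0, i - window), min(len(tokens), i + window + 1)):
            if j != i:
                pairs.append((center, tokens[j]))
    return pairs

print(skipgram_pairs("the cat sat on the mat".split()))
```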
diff --git a/lessons/5-NLP/19-NER/README.md b/lessons/5-NLP/19-NER/README.md
index 294af8c12d463676b16dc0cb7f9d06a206272692..7ba21daf0b651e82c08ee548ac51052812f08f1a 100644
--- a/lessons/5-NLP/19-NER/README.md
+++ b/lessons/5-NLP/19-NER/README.md
@@ -2,7 +2,7 @@
 
 Up to now, we have mostly been concentrating on one NLP task - classification. However, there are also other NLP tasks that can be accomplished with neural networks. One of those tasks is **[Named Entity Recognition](https://wikipedia.org/wiki/Named-entity_recognition)** (NER), which deals with recognizing specific entities within text, such as places, person names, date-time intervals, chemical formulae and so on.
 
-## [Pre-lecture quiz](https://black-ground-0cc93280f.1.azurestaticapps.net/quiz/119) 
+## [Pre-lecture quiz](https://black-ground-0cc93280f.1.azurestaticapps.net/quiz/119)
 
 ## Example of Using NER
 
@@ -12,7 +12,7 @@ Suppose you want to develop a natural language chat bot, similar to Amazon Alexa
 
 > Image by the author
 
-However, a user may provide some parameters as part of the phrase. For example, when asking for the weather, she may specify a location or date. A bot should be able to understand those entities, and fill in the parameter slots accordingly before performing the action. This is exactly where NER comes in. 
+However, a user may provide some parameters as part of the phrase. For example, when asking for the weather, she may specify a location or date. A bot should be able to understand those entities, and fill in the parameter slots accordingly before performing the action. This is exactly where NER comes in.
 
 > ✅ Another example would be [analyzing scientific medical papers](https://soshnikov.com/science/analyzing-medical-papers-with-azure-and-text-analytics-for-health/). One of the main things we need to look for are specific medical terms, such as diseases and medical substances. While a small number of diseases can probably be extracted using substring search, more complex entities, such as chemical compounds and medication names, need a more complex approach.
 
@@ -25,11 +25,12 @@ Consider the following paper title:
 
 **Tricuspid valve regurgitation** and **lithium carbonate** **toxicity** in a newborn infant.
 
 Entities here are:
+
 * Tricuspid valve regurgitation is a disease (`DIS`)
 * Lithium carbonate is a chemical substance (`CHEM`)
 * Toxicity is also a disease (`DIS`)
 
-Notice that one entity can span several tokens. And, as in this case, we need to distinguish between two consecutive entities. Thus it is common to use two classes for each entity - one specifying the first token of the entity (often the `B-` prefix is used, for **b**eginning), and another - the continuation of an entity (`I-`, for **i**nner token). We also use `O` as a class to represent all **o**ther tokens. Such token tagging is called [BIO tagging](https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)) (or IOB). When tagged, our title will look like this:
+Notice that one entity can span several tokens. And, as in this case, we need to distinguish between two consecutive entities. Thus, it is common to use two classes for each entity - one specifying the first token of the entity (often the `B-` prefix is used, for **b**eginning), and another for the continuation of an entity (`I-`, for **i**nner token). We also use `O` as a class to represent all **o**ther tokens. Such token tagging is called [BIO tagging](https://en.wikipedia.org/wiki/Inside%E2%80%93outside%E2%80%93beginning_(tagging)) (or IOB). When tagged, our title will look like this:
 
 Token | Tag
 ------|-----
@@ -64,7 +65,7 @@ Continue your learning in the following notebook:
 
 ## Conclusion
 
-A NER model is a **token classification model**, which means that it can be used to perform token classification. This is a very common task in NLP, helping to recognize specific entities within text including places, names, dates, and more.
+A NER model is essentially a **token classification model**: it assigns a class to every token in the input text. This is a very common task in NLP, helping to recognize specific entities within text including places, names, dates, and more.
 
 ## 🚀 Challenge
@@ -79,4 +80,3 @@ Read through the blog [The Unreasonable Effectiveness of Recurrent Neural Networ
 ## [Assignment](lab/README.md)
 
 In the assignment for this lesson, you will have to train a medical entity recognition model. You can start with training an LSTM model as described in this lesson, and proceed with using the BERT transformer model. Read [the instructions](lab/README.md) to get all the details.
-
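The BIO scheme described in the README hunks above is mechanical enough to show directly. A small sketch that converts token-level entity spans into B-/I-/O tags, reproducing the lesson's worked example (the span indices are read off that example; the helper itself is mine, not the lesson's code):

```python
def bio_tags(tokens, entities):
    """entities: list of (start_token, end_token_exclusive, label) spans.
    Produces one B-/I-/O tag per token, as in the lesson's table."""
    tags = ["O"] * len(tokens)
    for start, end, label in entities:
        tags[start] = f"B-{label}"          # first token of the entity
        for i in range(start + 1, end):
            tags[i] = f"I-{label}"          # inner tokens of the same entity
    return tags

title = "Tricuspid valve regurgitation and lithium carbonate toxicity in a newborn infant".split()
spans = [(0, 3, "DIS"), (4, 6, "CHEM"), (6, 7, "DIS")]
for token, tag in zip(title, bio_tags(title, spans)):
    print(f"{token:15} {tag}")
```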
diff --git a/lessons/5-NLP/20-LangModels/README.md b/lessons/5-NLP/20-LangModels/README.md
index 2003753e46fba46d693b744c77a53b058383365b..0926db79227926a28c0c36e359dd3157749cf1d4 100644
--- a/lessons/5-NLP/20-LangModels/README.md
+++ b/lessons/5-NLP/20-LangModels/README.md
@@ -6,9 +6,9 @@ In all of our previous tasks, we were training a neural network to perform a cer
 
 ## Text Generation and Perplexity
 
-The idea of a neural network being able to do general tasks without downstream training is presented in [Language Models are Unsupervised Multitask Learners](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) paper. The main idea is the many other tasks can be modeled using **text generation**, because understanding text essentially means being able to produce it. Because the model is trained on a huge amount of text that encompasses human knowledge, it also becomes knowledgeable about wide variety of subjects. 
+The idea of a neural network being able to do general tasks without downstream training is presented in the [Language Models are Unsupervised Multitask Learners](https://cdn.openai.com/better-language-models/language_models_are_unsupervised_multitask_learners.pdf) paper. The main idea is that many other tasks can be modeled using **text generation**, because understanding text essentially means being able to produce it. Because the model is trained on a huge amount of text that encompasses human knowledge, it also becomes knowledgeable about a wide variety of subjects.
 
-> Understanding and being able to produce text also entails knowing something about the world around us. People also learn by reading to the large extent, and GPT network is similar in this respect. 
+> Understanding and being able to produce text also entails knowing something about the world around us. People also learn to a large extent by reading, and the GPT network is similar in this respect.
 
 Text generation networks work by predicting probability of the next word $P(w_N)$. However, unconditional probability of the next word equals to the frequency of the this word in the text corpus. GPT is able to give us **conditional probability** of the next word, given the previous ones $P(w_N | w_{n-1}, ..., w_0)$.
 
@@ -54,4 +54,3 @@ Continue your learning in the following notebooks:
 
 New general pre-trained language models do not only model language structure, but also contain vast amount of commonsense knowledge. Thus, they can be effectively used to solve some NLP tasks in zero-shop or few-shot settings.
 
 ## [Post-lecture quiz](https://black-ground-0cc93280f.1.azurestaticapps.net/quiz/220)
-
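Perplexity, the language-model quality metric named in the quiz fixes earlier in this diff, is just the exponential of the average negative log-probability the model assigns to the actual next tokens. A minimal sketch with made-up per-token probabilities (the numbers are illustrative, not from any model):

```python
import math

def perplexity(token_probs):
    """Perplexity = exp of the average negative log-probability that the
    model assigned to each observed next token. Lower is better."""
    nll = -sum(math.log(p) for p in token_probs) / len(token_probs)
    return math.exp(nll)

# Hypothetical probabilities a model gave to the four tokens it had to predict
print(perplexity([0.2, 0.5, 0.1, 0.4]))  # ≈ 4.0
```

Intuitively, a perplexity of 4 means the model was, on average, as uncertain as if it were choosing uniformly among 4 tokens at each step.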
diff --git a/lessons/6-Other/22-DeepRL/README.md b/lessons/6-Other/22-DeepRL/README.md
index 7d951a8aa928418f5d2eb75f48a6ed082ef5553f..da418cc9b7fba9aea6d0131ce067e097012e26c2 100644
--- a/lessons/6-Other/22-DeepRL/README.md
+++ b/lessons/6-Other/22-DeepRL/README.md
@@ -7,7 +7,7 @@ Reinforcement learning (RL) is seen as one of the basic machine learning paradig
 To perform RL, we need:
 
 * An **environment** or **simulator** that sets the rules of the game. We should be able to run the experiments in the simulator and observe the results.
-* Some **Reward function**, which indicates how successful our experiment was. In case of learning to play a computer game, the reward would be our final score. 
+* Some **reward function**, which indicates how successful our experiment was. In the case of learning to play a computer game, the reward would be our final score.
 
 Based on the reward function, we should be able to adjust our behavior and improve our skills, so that the next time we play better. The main difference between other types of machine learning and RL is that in RL we typically do not know whether we win or lose until we finish the game. Thus, we cannot say whether a certain move alone is good or not - we only receive a reward at the end of the game.
 
@@ -17,7 +17,7 @@ During RL, we typically perform many experiments. During each experiment, we nee
 
 A great tool for RL is the [OpenAI Gym](https://gym.openai.com/) - a **simulation environment**, which can simulate many different environments starting from Atari games, to the physics behind pole balancing. It is one of the most popular simulation environments for training reinforcement learning algorithms, and is maintained by [OpenAI](https://openai.com/).
 
-> **Note**: You can see all the environments available from OpenAI Gym [here](https://gym.openai.com/envs/#classic_control). 
+> **Note**: You can see all the environments available from OpenAI Gym [here](https://gym.openai.com/envs/#classic_control).
 
 ## CartPole Balancing
 
@@ -110,4 +110,4 @@ Watch [this great video](https://www.youtube.com/watch?v=qv6UVOQ0F44) talking ab
 
 ## Assignment: [Train a Mountain Car](lab/README.md)
 
-Your goal during this assignment would be to train a different Gym environment - [Mountain Car](https://www.gymlibrary.ml/environments/classic_control/mountain_car/).
\ No newline at end of file
+Your goal during this assignment would be to train a different Gym environment - [Mountain Car](https://www.gymlibrary.ml/environments/classic_control/mountain_car/).
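For the CartPole environment discussed in the README above, a random-action episode is the usual baseline before any learning. A sketch against the classic Gym API used by the lesson - note that the 4-tuple `step` return shown here is the pre-0.26 `gym` signature; newer `gym`/`gymnasium` releases return five values from `step` and a tuple from `reset`:

```python
import gym  # classic OpenAI Gym, as used in the lesson

env = gym.make("CartPole-v1")
obs = env.reset()
total_reward, done = 0.0, False
while not done:
    action = env.action_space.sample()          # random policy: the baseline to beat
    obs, reward, done, info = env.step(action)  # 4-tuple in gym < 0.26
    total_reward += reward
print(f"Random-action episode reward: {total_reward}")
env.close()
```

A random policy typically keeps the pole up for only a couple of dozen steps, which is what the trained policy in the lesson (and the Mountain Car assignment) is meant to improve on.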