summaryrefslogtreecommitdiff
path: root/addons/survey/tests
diff options
context:
space:
mode:
authorstephanchrst <stephanchrst@gmail.com>2022-05-10 21:51:50 +0700
committerstephanchrst <stephanchrst@gmail.com>2022-05-10 21:51:50 +0700
commit3751379f1e9a4c215fb6eb898b4ccc67659b9ace (patch)
treea44932296ef4a9b71d5f010906253d8c53727726 /addons/survey/tests
parent0a15094050bfde69a06d6eff798e9a8ddf2b8c21 (diff)
initial commit 2
Diffstat (limited to 'addons/survey/tests')
-rw-r--r--addons/survey/tests/__init__.py16
-rw-r--r--addons/survey/tests/common.py281
-rw-r--r--addons/survey/tests/test_certification_badge.py230
-rw-r--r--addons/survey/tests/test_certification_flow.py205
-rw-r--r--addons/survey/tests/test_survey.py115
-rw-r--r--addons/survey/tests/test_survey_compute_pages_questions.py69
-rw-r--r--addons/survey/tests/test_survey_flow.py121
-rw-r--r--addons/survey/tests/test_survey_flow_with_conditions.py126
-rw-r--r--addons/survey/tests/test_survey_invite.py223
-rw-r--r--addons/survey/tests/test_survey_randomize.py59
-rw-r--r--addons/survey/tests/test_survey_security.py364
-rw-r--r--addons/survey/tests/test_survey_ui_certification.py282
-rw-r--r--addons/survey/tests/test_survey_ui_feedback.py169
-rw-r--r--addons/survey/tests/test_survey_ui_session.py210
14 files changed, 2470 insertions, 0 deletions
diff --git a/addons/survey/tests/__init__.py b/addons/survey/tests/__init__.py
new file mode 100644
index 00000000..fa4b64af
--- /dev/null
+++ b/addons/survey/tests/__init__.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+from . import common
+from . import test_survey
+from . import test_survey_flow
+from . import test_survey_flow_with_conditions
+from . import test_certification_flow
+from . import test_survey_invite
+from . import test_survey_security
+from . import test_survey_randomize
+from . import test_survey_ui_certification
+from . import test_survey_ui_feedback
+from . import test_survey_ui_session
+from . import test_survey_compute_pages_questions
+from . import test_certification_badge
diff --git a/addons/survey/tests/common.py b/addons/survey/tests/common.py
new file mode 100644
index 00000000..421b7fe1
--- /dev/null
+++ b/addons/survey/tests/common.py
@@ -0,0 +1,281 @@
+# -*- coding: utf-8 -*-
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+import re
+
+from collections import Counter
+from contextlib import contextmanager
+
+from odoo.addons.mail.tests.common import mail_new_test_user
+from odoo.tests import common
+
+
class SurveyCase(common.SavepointCase):
    """ Base test case for the survey test suite.

    Provides assertion helpers (answer state / line checks, HTTP response
    checks), data-creation helpers (questions, user inputs, input lines) and
    utilities to drive the public survey HTTP controllers.
    """

    def setUp(self):
        super(SurveyCase, self).setUp()

        # Custom matching between question types and answers, used when
        # reading / writing survey.user_input.line records:
        #   key: question type
        #   value: (answer type, answer field name) — for 'matrix' the field
        #   name is a (column field, row field) pair.
        self._type_match = {
            'text_box': ('text_box', 'value_text_box'),
            'char_box': ('char_box', 'value_char_box'),
            'numerical_box': ('numerical_box', 'value_numerical_box'),
            'date': ('date', 'value_date'),
            'simple_choice': ('suggestion', 'suggested_answer_id'),  # TDE: still unclear
            'multiple_choice': ('suggestion', 'suggested_answer_id'),  # TDE: still unclear
            'matrix': ('suggestion', ('suggested_answer_id', 'matrix_row_id')),  # TDE: still unclear
        }

    # ------------------------------------------------------------
    # ASSERTS
    # ------------------------------------------------------------

    def assertAnswer(self, answer, state, page):
        """ Check a user input's state and its last displayed page. """
        self.assertEqual(answer.state, state)
        self.assertEqual(answer.last_displayed_page_id, page)

    def assertAnswerLines(self, page, answer, answer_data):
        """ Check answer lines.

        :param dict answer_data:
            key = question ID
            value = {'value': [user input]}
        """
        lines = answer.user_input_line_ids.filtered(lambda l: l.page_id == page)
        answer_count = sum(len(user_input['value']) for user_input in answer_data.values())
        self.assertEqual(len(lines), answer_count)
        for qid, user_input in answer_data.items():
            answer_lines = lines.filtered(lambda l: l.question_id.id == qid)
            question = answer_lines[0].question_id  # TDE note: might have several answers for a given question
            if question.question_type == 'multiple_choice':
                values = user_input['value']
                answer_fname = self._type_match[question.question_type][1]
                # Compare as multisets: one line per selected answer, order not guaranteed.
                self.assertEqual(
                    Counter(getattr(line, answer_fname).id for line in answer_lines),
                    Counter(values))
            elif question.question_type == 'simple_choice':
                [value] = user_input['value']
                answer_fname = self._type_match[question.question_type][1]
                self.assertEqual(getattr(answer_lines, answer_fname).id, value)
            elif question.question_type == 'matrix':
                # Matrix answers carry both a column (suggested answer) and a row.
                [value_col, value_row] = user_input['value']
                answer_fname_col = self._type_match[question.question_type][1][0]
                answer_fname_row = self._type_match[question.question_type][1][1]
                self.assertEqual(getattr(answer_lines, answer_fname_col).id, value_col)
                self.assertEqual(getattr(answer_lines, answer_fname_row).id, value_row)
            else:
                [value] = user_input['value']
                answer_fname = self._type_match[question.question_type][1]
                if question.question_type == 'numerical_box':
                    self.assertEqual(getattr(answer_lines, answer_fname), float(value))
                else:
                    self.assertEqual(getattr(answer_lines, answer_fname), value)

    def assertResponse(self, response, status_code, text_bits=None):
        """ Check an HTTP response status code and, optionally, that every
        given text bit appears in the response body. """
        self.assertEqual(response.status_code, status_code)
        for text in text_bits or []:
            self.assertIn(text, response.text)

    # ------------------------------------------------------------
    # DATA CREATION
    # ------------------------------------------------------------

    def _add_question(self, page, name, qtype, **kwargs):
        """ Create a survey.question of type ``qtype``.

        The sequence defaults to right after the page's last question (or the
        page itself when it has none). Choice questions take their suggested
        answers from the ``labels`` kwarg; matrix questions additionally take
        their rows from ``labels_2``. Remaining kwargs are passed through to
        ``create()`` (e.g. ``survey_id``, ``is_page``).
        """
        constr_mandatory = kwargs.pop('constr_mandatory', True)
        constr_error_msg = kwargs.pop('constr_error_msg', 'TestError')

        sequence = kwargs.pop('sequence', False)
        if not sequence:
            sequence = page.question_ids[-1].sequence + 1 if page.question_ids else page.sequence + 1

        base_qvalues = {
            'sequence': sequence,
            'title': name,
            'question_type': qtype,
            'constr_mandatory': constr_mandatory,
            'constr_error_msg': constr_error_msg,
        }
        if qtype in ('simple_choice', 'multiple_choice'):
            base_qvalues['suggested_answer_ids'] = [
                (0, 0, {
                    'value': label['value'],
                    'answer_score': label.get('answer_score', 0),
                    'is_correct': label.get('is_correct', False)
                }) for label in kwargs.pop('labels')
            ]
        elif qtype == 'matrix':
            base_qvalues['matrix_subtype'] = kwargs.pop('matrix_subtype', 'simple')
            base_qvalues['suggested_answer_ids'] = [
                (0, 0, {'value': label['value'], 'answer_score': label.get('answer_score', 0)})
                for label in kwargs.pop('labels')
            ]
            base_qvalues['matrix_row_ids'] = [
                (0, 0, {'value': label['value'], 'answer_score': label.get('answer_score', 0)})
                for label in kwargs.pop('labels_2')
            ]
        base_qvalues.update(kwargs)
        question = self.env['survey.question'].create(base_qvalues)
        return question

    def _add_answer(self, survey, partner, **kwargs):
        """ Create a survey.user_input for ``partner`` (or an anonymous one
        identified by the optional ``email`` kwarg). """
        base_avals = {
            'survey_id': survey.id,
            'partner_id': partner.id if partner else False,
            'email': kwargs.pop('email', False),
        }
        base_avals.update(kwargs)
        return self.env['survey.user_input'].create(base_avals)

    def _add_answer_line(self, question, answer, answer_value, **kwargs):
        """ Create a survey.user_input.line holding ``answer_value`` for
        ``question`` on the user input ``answer``. The answer type and target
        field default from ``self._type_match`` and can be overridden through
        the ``answer_type`` / ``answer_fname`` kwargs. """
        qtype = self._type_match.get(question.question_type, (False, False))
        answer_type = kwargs.pop('answer_type', qtype[0])
        answer_fname = kwargs.pop('answer_fname', qtype[1])

        base_alvals = {
            'user_input_id': answer.id,
            'question_id': question.id,
            'skipped': False,
            'answer_type': answer_type,
        }
        base_alvals[answer_fname] = answer_value
        base_alvals.update(kwargs)
        return self.env['survey.user_input.line'].create(base_alvals)

    # ------------------------------------------------------------
    # UTILS
    # ------------------------------------------------------------

    def _access_start(self, survey):
        """ GET the public survey start page. """
        return self.url_open('/survey/start/%s' % survey.access_token)

    def _access_page(self, survey, token):
        """ GET the survey page for a given answer token. """
        return self.url_open('/survey/%s/%s' % (survey.access_token, token))

    def _access_begin(self, survey, token):
        """ POST to the /survey/begin route (marks the attempt as started). """
        base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
        url = base_url + '/survey/begin/%s/%s' % (survey.access_token, token)
        return self.opener.post(url=url, json={})

    def _access_submit(self, survey, token, post_data):
        """ POST ``post_data`` to the /survey/submit route. """
        base_url = self.env['ir.config_parameter'].sudo().get_param('web.base.url')
        url = base_url + '/survey/submit/%s/%s' % (survey.access_token, token)
        return self.opener.post(url=url, json={'params': post_data})

    def _find_csrf_token(self, text):
        """ Extract the CSRF token from a rendered survey page. """
        csrf_token_re = re.compile("(input.+csrf_token.+value=\")([a-f0-9]{40}o[0-9]*)", re.MULTILINE)
        return csrf_token_re.search(text).groups()[1]

    def _prepare_post_data(self, question, answers, post_data):
        """ Fill ``post_data`` with the submission value(s) for ``question``.

        Multiple-choice answers accumulate into a list under the question id;
        every other question type expects exactly one value (stringified).
        """
        values = answers if isinstance(answers, list) else [answers]
        if question.question_type == 'multiple_choice':
            for value in values:
                value = str(value)
                if question.id in post_data:
                    if isinstance(post_data[question.id], list):
                        post_data[question.id].append(value)
                    else:
                        post_data[question.id] = [post_data[question.id], value]
                else:
                    post_data[question.id] = value
        else:
            [value] = values  # single-valued question types: exactly one answer expected
            post_data[question.id] = str(value)
        return post_data

    def _answer_question(self, question, answer, answer_token, csrf_token, button_submit='next'):
        """ Submit ``answer`` for ``question`` then reload the survey page,
        mimicking the browser flow. """
        # Employee submits the question answer
        post_data = self._format_submission_data(question, answer, {'csrf_token': csrf_token, 'token': answer_token, 'button_submit': button_submit})
        response = self._access_submit(question.survey_id, answer_token, post_data)
        self.assertResponse(response, 200)

        # Employee is redirected on next question
        response = self._access_page(question.survey_id, answer_token)
        self.assertResponse(response, 200)

    def _answer_page(self, page, answers, answer_token, csrf_token):
        """ Submit all ``answers`` ({question: answer record}) of ``page`` in
        a single request, then reload the survey page. """
        post_data = {}
        for question, answer in answers.items():
            post_data[question.id] = answer.id
        post_data['page_id'] = page.id
        post_data['csrf_token'] = csrf_token
        post_data['token'] = answer_token
        response = self._access_submit(page.survey_id, answer_token, post_data)
        self.assertResponse(response, 200)
        response = self._access_page(page.survey_id, answer_token)
        self.assertResponse(response, 200)

    def _format_submission_data(self, question, answer, additional_post_data):
        """ Build the full /survey/submit payload for a single question. """
        post_data = {}
        post_data['question_id'] = question.id
        post_data.update(self._prepare_post_data(question, answer, post_data))
        if question.page_id:
            post_data['page_id'] = question.page_id.id
        post_data.update(**additional_post_data)
        return post_data
+
+
class TestSurveyCommon(SurveyCase):
    """ Common survey test data.

    Creates a public test survey (one page, a text question and a numerical
    question) plus test users covering the access groups used by ACL tests.
    """

    def setUp(self):
        """ Create test data: a survey with some pre-defined questions and various test users for ACL """
        super(TestSurveyCommon, self).setUp()

        # Survey manager: full access on the survey application
        self.survey_manager = mail_new_test_user(
            self.env, name='Gustave Doré', login='survey_manager', email='survey.manager@example.com',
            groups='survey.group_survey_manager,base.group_user'
        )

        # Survey user: restricted survey rights
        self.survey_user = mail_new_test_user(
            self.env, name='Lukas Peeters', login='survey_user', email='survey.user@example.com',
            groups='survey.group_survey_user,base.group_user'
        )

        # Internal user without any survey-specific group
        self.user_emp = mail_new_test_user(
            self.env, name='Eglantine Employee', login='user_emp', email='employee@example.com',
            groups='base.group_user', password='user_emp'
        )

        # Portal and public users, for external access checks
        self.user_portal = mail_new_test_user(
            self.env, name='Patrick Portal', login='user_portal', email='portal@example.com',
            groups='base.group_portal'
        )

        self.user_public = mail_new_test_user(
            self.env, name='Pauline Public', login='user_public', email='public@example.com',
            groups='base.group_public'
        )

        # A partner without any user, used as an external survey recipient
        self.customer = self.env['res.partner'].create({
            'name': 'Caroline Customer',
            'email': 'customer@example.com',
        })

        # The test survey: public access but login required, no going back
        self.survey = self.env['survey.survey'].with_user(self.survey_manager).create({
            'title': 'Test Survey',
            'access_mode': 'public',
            'users_login_required': True,
            'users_can_go_back': False,
            'state': 'open',
        })
        self.page_0 = self.env['survey.question'].with_user(self.survey_manager).create({
            'title': 'First page',
            'survey_id': self.survey.id,
            'sequence': 1,
            'is_page': True,
        })
        self.question_ft = self.env['survey.question'].with_user(self.survey_manager).create({
            'title': 'Test Free Text',
            'survey_id': self.survey.id,
            'sequence': 2,
            'question_type': 'text_box',
        })
        # NOTE(review): 'NUmerical' typo kept on purpose — other tests may
        # match on this exact title; confirm before fixing.
        self.question_num = self.env['survey.question'].with_user(self.survey_manager).create({
            'title': 'Test NUmerical Box',
            'survey_id': self.survey.id,
            'sequence': 3,
            'question_type': 'numerical_box',
        })
diff --git a/addons/survey/tests/test_certification_badge.py b/addons/survey/tests/test_certification_badge.py
new file mode 100644
index 00000000..037b1e21
--- /dev/null
+++ b/addons/survey/tests/test_certification_badge.py
@@ -0,0 +1,230 @@
+# -*- coding: utf-8 -*-
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+from odoo.addons.survey.tests import common
+from psycopg2 import IntegrityError
+from odoo.exceptions import AccessError
+from odoo.tools import mute_logger
+
+
class TestCertificationBadge(common.TestSurveyCommon):
    """ Tests of the link between certification surveys and their
    gamification badge: archive propagation, SQL-level constraints on
    ``certification_give_badge`` / ``certification_badge_id``, and the
    challenge / challenge line / goal records that are created and deleted
    when badge granting is toggled. """

    def setUp(self):
        super(TestCertificationBadge, self).setUp()
        # Two open certification surveys and three badges shared by the tests below.
        self.certification_survey = self.env['survey.survey'].with_user(self.survey_manager).create({
            'title': 'Certification Survey',
            'access_mode': 'public',
            'users_login_required': True,
            'scoring_type': 'scoring_with_answers',
            'certification': True,
            'state': 'open',
        })

        self.certification_survey_2 = self.env['survey.survey'].with_user(self.survey_manager).create({
            'title': 'Another Certification Survey',
            'access_mode': 'public',
            'users_login_required': True,
            'scoring_type': 'scoring_with_answers',
            'certification': True,
            'state': 'open',
        })

        self.certification_badge = self.env['gamification.badge'].with_user(self.survey_manager).create({
            'name': self.certification_survey.title,
            'description': 'Congratulations, you have succeeded this certification',
            'rule_auth': 'nobody',
            'level': None,
        })

        self.certification_badge_2 = self.env['gamification.badge'].with_user(self.survey_manager).create({
            'name': self.certification_survey.title + ' 2',
            'description': 'Congratulations, you have succeeded this certification',
            'rule_auth': 'nobody',
            'level': None,
        })

        self.certification_badge_3 = self.env['gamification.badge'].with_user(self.survey_manager).create({
            'name': self.certification_survey.title + ' 3',
            'description': 'Congratulations, you have succeeded this certification',
            'rule_auth': 'nobody',
            'level': None,
        })

    def test_archive(self):
        """ Archive status of survey is propagated to its badges. """
        self.certification_survey.write({
            'certification_give_badge': True,
            'certification_badge_id': self.certification_badge.id
        })

        self.certification_survey.action_archive()
        self.assertFalse(self.certification_survey.active)
        self.assertFalse(self.certification_badge.active)

        self.certification_survey.action_unarchive()
        self.assertTrue(self.certification_survey.active)
        self.assertTrue(self.certification_badge.active)

    def test_give_badge_without_badge(self):
        """ Enabling 'certification_give_badge' without a badge must violate
        a database constraint (raised at flush time). """
        with mute_logger('odoo.sql_db'):
            with self.assertRaises(IntegrityError):
                self.certification_survey.write({'certification_give_badge': True})
                # flush forces the SQL constraint check inside the assertRaises block
                self.certification_survey.flush(['certification_give_badge'])

    def test_remove_badge_with_give_badge(self):
        """ Removing the badge while 'certification_give_badge' is enabled
        must violate the same database constraint. """
        self.certification_survey.write({
            'certification_give_badge': True,
            'certification_badge_id': self.certification_badge.id
        })
        with mute_logger('odoo.sql_db'):
            with self.assertRaises(IntegrityError):
                self.certification_survey.write({'certification_badge_id': None})
                self.certification_survey.flush(['certification_badge_id'])

    def test_remove_badge_with_give_badge_multi(self):
        """ Same constraint check as above, but through a multi-record write. """
        self.certification_survey.write({
            'certification_give_badge': True,
            'certification_badge_id': self.certification_badge.id
        })
        self.certification_survey_2.write({
            'certification_give_badge': True,
            'certification_badge_id': self.certification_badge_2.id
        })
        surveys = self.env['survey.survey'].browse([
            self.certification_survey.id,
            self.certification_survey_2.id
        ])
        with mute_logger('odoo.sql_db'):
            with self.assertRaises(IntegrityError):
                surveys.write({'certification_badge_id': None})
                surveys.flush(['certification_badge_id'])

    def test_set_same_badge_on_multiple_survey(self):
        """ A given badge can only be linked to one certification survey. """
        self.certification_survey.write({
            'certification_give_badge': True,
            'certification_badge_id': self.certification_badge.id
        })
        # set the same badge on another survey should fail:
        with mute_logger('odoo.sql_db'):
            with self.assertRaises(IntegrityError):
                self.certification_survey_2.write({
                    'certification_give_badge': True,
                    'certification_badge_id': self.certification_badge.id
                })
                self.certification_survey.flush()

    def test_badge_configuration(self):
        """ Toggling 'certification_give_badge' creates / deletes the
        gamification challenge, challenge line and goal bound to the badge,
        and (de)activates the badge itself. """
        # add a certification badge on a new survey
        challenge = self.env['gamification.challenge'].search([('reward_id', '=', self.certification_badge.id)])
        self.assertEqual(len(challenge), 0, """A challenge should not exist or be linked to the certification badge
            if the certification badge have not been activated on a certification survey""")

        self.certification_survey.write({
            'certification_give_badge': True,
            'certification_badge_id': self.certification_badge.id
        })

        challenge = self.env['gamification.challenge'].search([('reward_id', '=', self.certification_badge.id)])
        self.assertEqual(len(challenge), 1,
            "A challenge should be created if the certification badge is activated on a certification survey")
        challenge_line = self.env['gamification.challenge.line'].search([('challenge_id', '=', challenge.id)])
        self.assertEqual(len(challenge_line), 1,
            "A challenge_line should be created if the certification badge is activated on a certification survey")
        goal = challenge_line.definition_id
        self.assertEqual(len(goal), 1,
            "A goal should be created if the certification badge is activated on a certification survey")

        # don't give badge anymore
        self.certification_survey.write({'certification_give_badge': False})
        self.assertEqual(self.certification_badge.id, self.certification_survey.certification_badge_id.id,
            'The certification badge should still be set on certification survey even if give_badge is false.')
        self.assertEqual(self.certification_badge.active, False,
            'The certification badge should be inactive if give_badge is false.')

        challenge = self.env['gamification.challenge'].search([('id', '=', challenge.id)])
        self.assertEqual(len(challenge), 0,
            "The challenge should be deleted if the certification badge is unset from the certification survey")
        challenge_line = self.env['gamification.challenge.line'].search([('id', '=', challenge_line.id)])
        self.assertEqual(len(challenge_line), 0,
            "The challenge_line should be deleted if the certification badge is unset from the certification survey")
        goal = self.env['gamification.goal'].search([('id', '=', goal.id)])
        self.assertEqual(len(goal), 0,
            "The goal should be deleted if the certification badge is unset from the certification survey")

        # re-activate the badge on the survey: the gamification records come back
        self.certification_survey.write({'certification_give_badge': True})
        self.assertEqual(self.certification_badge.active, True,
            'The certification badge should be active if give_badge is true.')

        challenge = self.env['gamification.challenge'].search([('reward_id', '=', self.certification_badge.id)])
        self.assertEqual(len(challenge), 1,
            "A challenge should be created if the certification badge is activated on a certification survey")
        challenge_line = self.env['gamification.challenge.line'].search([('challenge_id', '=', challenge.id)])
        self.assertEqual(len(challenge_line), 1,
            "A challenge_line should be created if the certification badge is activated on a certification survey")
        goal = challenge_line.definition_id
        self.assertEqual(len(goal), 1,
            "A goal should be created if the certification badge is activated on a certification survey")

    def test_certification_badge_access(self):
        """ Survey manager and survey user may edit a badge; employee, portal
        and public users may not. """
        self.certification_badge.with_user(self.survey_manager).write(
            {'description': "Spoiler alert: I'm Aegon Targaryen and I sleep with the Dragon Queen, who is my aunt by the way! So I can do whatever I want! Even if I know nothing!"})
        self.certification_badge.with_user(self.survey_user).write({'description': "Youpie Yeay!"})
        with self.assertRaises(AccessError):
            self.certification_badge.with_user(self.user_emp).write({'description': "I'm a dude who think that has every right on the Iron Throne"})
        with self.assertRaises(AccessError):
            self.certification_badge.with_user(self.user_portal).write({'description': "Guy, you just can't do that !"})
        with self.assertRaises(AccessError):
            self.certification_badge.with_user(self.user_public).write({'description': "What did you expect ? Schwepps !"})

    def test_badge_configuration_multi(self):
        """ Multi-record variant of test_badge_configuration: three surveys,
        three badges, toggled together through a single multi-write. """
        vals = {
            'title': 'Certification Survey',
            'access_mode': 'public',
            'users_login_required': True,
            'scoring_type': 'scoring_with_answers',
            'certification': True,
            'certification_give_badge': True,
            'certification_badge_id': self.certification_badge.id,
            'state': 'open'
        }
        survey_1 = self.env['survey.survey'].create(vals.copy())
        vals.update({'certification_badge_id': self.certification_badge_2.id})
        survey_2 = self.env['survey.survey'].create(vals.copy())
        vals.update({'certification_badge_id': self.certification_badge_3.id})
        survey_3 = self.env['survey.survey'].create(vals)

        certification_surveys = self.env['survey.survey'].browse([survey_1.id, survey_2.id, survey_3.id])
        self.assertEqual(len(certification_surveys), 3, 'There should be 3 certification survey created')

        challenges = self.env['gamification.challenge'].search([('reward_id', 'in', certification_surveys.mapped('certification_badge_id').ids)])
        self.assertEqual(len(challenges), 3, "3 challenges should be created")
        challenge_lines = self.env['gamification.challenge.line'].search([('challenge_id', 'in', challenges.ids)])
        self.assertEqual(len(challenge_lines), 3, "3 challenge_lines should be created")
        goals = challenge_lines.mapped('definition_id')
        self.assertEqual(len(goals), 3, "3 goals should be created")

        # Test write multi
        certification_surveys.write({'certification_give_badge': False})
        for survey in certification_surveys:
            self.assertEqual(survey.certification_badge_id.active, False,
                'Every badge should be inactive if the 3 survey does not give badge anymore')

        challenges = self.env['gamification.challenge'].search([('id', 'in', challenges.ids)])
        self.assertEqual(len(challenges), 0, "The 3 challenges should be deleted")
        challenge_lines = self.env['gamification.challenge.line'].search([('id', 'in', challenge_lines.ids)])
        self.assertEqual(len(challenge_lines), 0, "The 3 challenge_lines should be deleted")
        goals = self.env['gamification.goal'].search([('id', 'in', goals.ids)])
        self.assertEqual(len(goals), 0, "The 3 goals should be deleted")

        certification_surveys.write({'certification_give_badge': True})
        for survey in certification_surveys:
            self.assertEqual(survey.certification_badge_id.active, True,
                'Every badge should be reactivated if the 3 survey give badges again')

        challenges = self.env['gamification.challenge'].search([('reward_id', 'in', certification_surveys.mapped('certification_badge_id').ids)])
        self.assertEqual(len(challenges), 3, "3 challenges should be created")
        challenge_lines = self.env['gamification.challenge.line'].search([('challenge_id', 'in', challenges.ids)])
        self.assertEqual(len(challenge_lines), 3, "3 challenge_lines should be created")
        goals = challenge_lines.mapped('definition_id')
        self.assertEqual(len(goals), 3, "3 goals should be created")
diff --git a/addons/survey/tests/test_certification_flow.py b/addons/survey/tests/test_certification_flow.py
new file mode 100644
index 00000000..9c1fbe56
--- /dev/null
+++ b/addons/survey/tests/test_certification_flow.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+from unittest.mock import patch
+
+from odoo.addons.base.models.ir_mail_server import IrMailServer
+from odoo.addons.survey.tests import common
+from odoo.tests import tagged
+from odoo.tests.common import HttpCase
+
+
@tagged('-at_install', 'post_install', 'functional')
class TestCertificationFlow(common.TestSurveyCommon, HttpCase):
    """ End-to-end certification flows driven through the public survey
    HTTP controllers (/survey/start, /survey/begin, /survey/submit). """

    def test_flow_certification(self):
        """ Full certification flow: a survey user creates the certification,
        an employee takes it through HTTP (including going back to correct an
        answer), then the score, success flag and certification e-mail (with
        its attached document) are checked. """
        # Step: survey user creates the certification
        # --------------------------------------------------
        with self.with_user('survey_user'):
            certification = self.env['survey.survey'].create({
                'title': 'User Certification for SO lines',
                'access_mode': 'public',
                'users_login_required': True,
                'questions_layout': 'page_per_question',
                'users_can_go_back': True,
                'scoring_type': 'scoring_with_answers',
                'scoring_success_min': 85.0,
                'certification': True,
                'certification_mail_template_id': self.env.ref('survey.mail_template_certification').id,
                'is_time_limited': True,
                'time_limit': 10,
                'state': 'open',
            })

            # Scored question: only the last choice is correct (2 points).
            q01 = self._add_question(
                None, 'When do you know it\'s the right time to use the SO line model?', 'simple_choice',
                sequence=1,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=certification.id,
                labels=[
                    {'value': 'Please stop'},
                    {'value': 'Only on the SO form'},
                    {'value': 'Only on the Survey form'},
                    {'value': 'Easy, all the time!!!', 'is_correct': True, 'answer_score': 2.0}
                ])

            # Scored question: '5' is correct (2 points).
            q02 = self._add_question(
                None, 'On average, how many lines of code do you need when you use SO line widgets?', 'simple_choice',
                sequence=2,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=certification.id,
                labels=[
                    {'value': '1'},
                    {'value': '5', 'is_correct': True, 'answer_score': 2.0},
                    {'value': '100'},
                    {'value': '1000'}
                ])

            # Not-rated free text question (used to test answer correction below).
            q03 = self._add_question(
                None, 'What do you think about SO line widgets (not rated)?', 'text_box',
                sequence=3,
                constr_mandatory=True, constr_error_msg='Please tell us what you think', survey_id=certification.id)

            # Not-rated choice question.
            q04 = self._add_question(
                None, 'On a scale of 1 to 10, how much do you like SO line widgets (not rated)?', 'simple_choice',
                sequence=4,
                constr_mandatory=True, constr_error_msg='Please tell us what you think', survey_id=certification.id,
                labels=[
                    {'value': '-1'},
                    {'value': '0'},
                    {'value': '100'}
                ])

            # Multiple choice with a penalty answer ('none': -1 point).
            q05 = self._add_question(
                None, 'Select all the correct "types" of SO lines', 'multiple_choice',
                sequence=5,
                constr_mandatory=False, survey_id=certification.id,
                labels=[
                    {'value': 'sale_order', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'survey_page', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'survey_question', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'a_future_and_yet_unknown_model', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'none', 'answer_score': -1.0}
                ])

        # Step: employee takes the certification
        # --------------------------------------------------
        self.authenticate('user_emp', 'user_emp')

        # Employee opens start page
        response = self._access_start(certification)
        self.assertResponse(response, 200, [certification.title, 'Time limit for this survey', '10 minutes'])

        # -> this should have generated a new user_input with a token
        user_inputs = self.env['survey.user_input'].search([('survey_id', '=', certification.id)])
        self.assertEqual(len(user_inputs), 1)
        self.assertEqual(user_inputs.partner_id, self.user_emp.partner_id)
        answer_token = user_inputs.access_token

        # Employee begins survey with first page
        response = self._access_page(certification, answer_token)
        self.assertResponse(response, 200)
        csrf_token = self._find_csrf_token(response.text)

        r = self._access_begin(certification, answer_token)
        self.assertResponse(r, 200)

        # Patch the mail server so the certification e-mail is not really sent.
        with patch.object(IrMailServer, 'connect'):
            self._answer_question(q01, q01.suggested_answer_ids.ids[3], answer_token, csrf_token)
            self._answer_question(q02, q02.suggested_answer_ids.ids[1], answer_token, csrf_token)
            self._answer_question(q03, "I think they're great!", answer_token, csrf_token)
            # Go back one question to correct the q03 answer, then move forward again.
            self._answer_question(q04, q04.suggested_answer_ids.ids[0], answer_token, csrf_token, button_submit='previous')
            self._answer_question(q03, "Just kidding, I don't like it...", answer_token, csrf_token)
            self._answer_question(q04, q04.suggested_answer_ids.ids[0], answer_token, csrf_token)
            self._answer_question(q05, [q05.suggested_answer_ids.ids[0], q05.suggested_answer_ids.ids[1], q05.suggested_answer_ids.ids[3]], answer_token, csrf_token)

        user_inputs.invalidate_cache()
        # Check that certification is successfully passed
        self.assertEqual(user_inputs.scoring_percentage, 87.5)
        self.assertTrue(user_inputs.scoring_success)

        # Check that the certification is still successful even if scoring_success_min of certification is modified
        certification.write({'scoring_success_min': 90})
        self.assertTrue(user_inputs.scoring_success)

        # Check answer correction is taken into account
        self.assertNotIn("I think they're great!", user_inputs.mapped('user_input_line_ids.value_text_box'))
        self.assertIn("Just kidding, I don't like it...", user_inputs.mapped('user_input_line_ids.value_text_box'))

        certification_email = self.env['mail.mail'].sudo().search([], limit=1, order="create_date desc")
        # Check certification email correctly sent and contains document
        self.assertIn("User Certification for SO lines", certification_email.subject)
        self.assertIn("employee@example.com", certification_email.email_to)
        self.assertEqual(len(certification_email.attachment_ids), 1)
        self.assertEqual(certification_email.attachment_ids[0].name, 'Certification Document.html')

    def test_randomized_certification(self):
        """ Randomized certification: one section configured to pick a single
        question at random; whichever question is picked, answering its first
        (correct) suggested answer yields exactly one correct statistic. """
        # Step: survey user creates the randomized certification
        # --------------------------------------------------
        with self.with_user('survey_user'):
            certification = self.env['survey.survey'].create({
                'title': 'User randomized Certification',
                'questions_layout': 'page_per_section',
                'questions_selection': 'random',
                'state': 'open',
                'scoring_type': 'scoring_without_answers',
            })

            # A single section which randomly picks 1 of its 2 questions.
            page1 = self._add_question(
                None, 'Page 1', None,
                sequence=1,
                survey_id=certification.id,
                is_page=True,
                random_questions_count=1,
            )

            # Both candidate questions have their correct answer in first position.
            q101 = self._add_question(
                None, 'What is the answer to the first question?', 'simple_choice',
                sequence=2,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=certification.id,
                labels=[
                    {'value': 'The correct answer', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'The wrong answer'},
                ])

            q102 = self._add_question(
                None, 'What is the answer to the second question?', 'simple_choice',
                sequence=3,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=certification.id,
                labels=[
                    {'value': 'The correct answer', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'The wrong answer'},
                ])

        # Step: employee takes the randomized certification
        # --------------------------------------------------
        self.authenticate('user_emp', 'user_emp')

        # Employee opens start page
        response = self._access_start(certification)

        # -> this should have generated a new user_input with a token
        user_inputs = self.env['survey.user_input'].search([('survey_id', '=', certification.id)])
        self.assertEqual(len(user_inputs), 1)
        self.assertEqual(user_inputs.partner_id, self.user_emp.partner_id)
        answer_token = user_inputs.access_token

        # Employee begins survey with first page
        response = self._access_page(certification, answer_token)
        self.assertResponse(response, 200)
        csrf_token = self._find_csrf_token(response.text)

        r = self._access_begin(certification, answer_token)
        self.assertResponse(r, 200)

        with patch.object(IrMailServer, 'connect'):
            question_ids = user_inputs.predefined_question_ids
            self.assertEqual(len(question_ids), 1, 'Only one question should have been selected by the randomization')
            # Whatever which question was selected, the correct answer is the first one
            self._answer_question(question_ids, question_ids.suggested_answer_ids.ids[0], answer_token, csrf_token)

        statistics = user_inputs._prepare_statistics()
        self.assertEqual(statistics, [[
            {'text': 'Correct', 'count': 1},
            {'text': 'Partially', 'count': 0},
            {'text': 'Incorrect', 'count': 0},
            {'text': 'Unanswered', 'count': 0},
        ]], "With the configured randomization, there should be exactly 1 correctly answered question and none skipped.")
diff --git a/addons/survey/tests/test_survey.py b/addons/survey/tests/test_survey.py
new file mode 100644
index 00000000..f1c2dfc6
--- /dev/null
+++ b/addons/survey/tests/test_survey.py
@@ -0,0 +1,115 @@
+# -*- coding: utf-8 -*-
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+from odoo import _
+from odoo.addons.survey.tests import common
+from odoo.tests.common import users
+
+
class TestSurveyInternals(common.TestSurveyCommon):
    """ Unit tests for survey.question answer validation rules. """

    @users('survey_manager')
    def test_answer_validation_mandatory(self):
        """ Whatever its type, a mandatory question must refuse an empty answer. """
        for (question_type, dummy) in self.env['survey.question']._fields['question_type'].selection:
            extra_vals = {}
            if question_type == 'multiple_choice':
                extra_vals['labels'] = [{'value': 'MChoice0'}, {'value': 'MChoice1'}]
            elif question_type == 'simple_choice':
                extra_vals['labels'] = []
            elif question_type == 'matrix':
                extra_vals['labels'] = [{'value': 'Column0'}, {'value': 'Column1'}]
                extra_vals['labels_2'] = [{'value': 'Row0'}, {'value': 'Row1'}]
            question = self._add_question(self.page_0, 'Q0', question_type, **extra_vals)

            # an empty answer on a mandatory question yields the question's error message
            self.assertDictEqual(
                question.validate_question(''),
                {question.id: 'TestError'}
            )

    @users('survey_manager')
    def test_answer_validation_date(self):
        """ Date answers: unparseable input and out-of-range dates are refused. """
        question = self._add_question(
            self.page_0, 'Q0', 'date', validation_required=True,
            validation_min_date='2015-03-20', validation_max_date='2015-03-25', validation_error_msg='ValidationError')

        cases = [
            ('Is Alfred an answer ?', {question.id: _('This is not a date')}),  # unparseable
            ('2015-03-19', {question.id: 'ValidationError'}),  # below minimum
            ('2015-03-26', {question.id: 'ValidationError'}),  # above maximum
            ('2015-03-25', {}),  # bounds are inclusive
        ]
        for answer, expected in cases:
            self.assertEqual(question.validate_question(answer), expected)

    @users('survey_manager')
    def test_answer_validation_numerical(self):
        """ Numerical answers: non-numbers and out-of-range values are refused. """
        question = self._add_question(
            self.page_0, 'Q0', 'numerical_box', validation_required=True,
            validation_min_float_value=2.2, validation_max_float_value=3.3, validation_error_msg='ValidationError')

        cases = [
            ('Is Alfred an answer ?', {question.id: _('This is not a number')}),  # unparseable
            ('2.0', {question.id: 'ValidationError'}),  # below minimum
            ('4.0', {question.id: 'ValidationError'}),  # above maximum
            ('2.9', {}),  # inside [2.2, 3.3]
        ]
        for answer, expected in cases:
            self.assertEqual(question.validate_question(answer), expected)

    @users('survey_manager')
    def test_answer_validation_char_box_email(self):
        """ A char box flagged as email only accepts email addresses. """
        question = self._add_question(self.page_0, 'Q0', 'char_box', validation_email=True)

        cases = [
            ('not an email', {question.id: _('This answer must be an email address')}),
            ('email@example.com', {}),
        ]
        for answer, expected in cases:
            self.assertEqual(question.validate_question(answer), expected)

    @users('survey_manager')
    def test_answer_validation_char_box_length(self):
        """ A char box with length validation refuses answers outside [min, max]. """
        question = self._add_question(
            self.page_0, 'Q0', 'char_box', validation_required=True,
            validation_length_min=2, validation_length_max=8, validation_error_msg='ValidationError')

        cases = [
            ('l', {question.id: 'ValidationError'}),  # too short
            ('waytoomuchlonganswer', {question.id: 'ValidationError'}),  # too long
            ('valid', {}),
        ]
        for answer, expected in cases:
            self.assertEqual(question.validate_question(answer), expected)
diff --git a/addons/survey/tests/test_survey_compute_pages_questions.py b/addons/survey/tests/test_survey_compute_pages_questions.py
new file mode 100644
index 00000000..d59dbf90
--- /dev/null
+++ b/addons/survey/tests/test_survey_compute_pages_questions.py
@@ -0,0 +1,69 @@
+# -*- coding: utf-8 -*-
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+from odoo.addons.survey.tests import common
+
+
class TestSurveyComputePagesQuestions(common.TestSurveyCommon):
    def test_compute_pages_questions(self):
        """ Pages and questions are linked together through sequence ordering:
        a question belongs to the latest page (by sequence) preceding it.
        Also checks that bumping a question's sequence past the next page
        re-computes its page. """
        with self.with_user('survey_manager'):
            survey = self.env['survey.survey'].create({
                'title': 'Test compute survey',
                'state': 'open',
            })

            page_0 = self.env['survey.question'].create({
                'is_page': True,
                'sequence': 1,
                'title': 'P1',
                'survey_id': survey.id
            })
            # questions Q1..Q5 follow page 1 sequence-wise
            page_0_questions = [
                self._add_question(page_0, 'Q%s' % index, 'text_box', survey_id=survey.id)
                for index in range(1, 6)
            ]

            page_1 = self.env['survey.question'].create({
                'is_page': True,
                'sequence': 7,
                'title': 'P2',
                'survey_id': survey.id,
            })
            # questions Q6..Q9 follow page 2 sequence-wise
            page_1_questions = [
                self._add_question(page_1, 'Q%s' % index, 'text_box', survey_id=survey.id)
                for index in range(6, 10)
            ]

            self.assertEqual(len(survey.page_ids), 2, "Survey should have 2 pages")
            self.assertIn(page_0, survey.page_ids, "Page 1 should be contained in survey's page_ids")
            self.assertIn(page_1, survey.page_ids, "Page 2 should be contained in survey's page_ids")

            self.assertEqual(len(page_0.question_ids), 5, "Page 1 should have 5 questions")
            for index, question in enumerate(page_0_questions, start=1):
                self.assertIn(question, page_0.question_ids, "Question %s should be in page 1" % index)

            self.assertEqual(len(page_1.question_ids), 4, "Page 2 should have 4 questions")
            for index, question in enumerate(page_1_questions, start=6):
                self.assertIn(question, page_1.question_ids, "Question %s should be in page 2" % index)

            for index, question in enumerate(page_0_questions, start=1):
                self.assertEqual(question.page_id, page_0, "Question %s should belong to page 1" % index)

            for index, question in enumerate(page_1_questions, start=6):
                self.assertEqual(question.page_id, page_1, "Question %s should belong to page 2" % index)

            # move 1 question from page 1 to page 2
            moved_question = page_0_questions[2]  # Q3
            moved_question.write({'sequence': 12})
            moved_question._compute_page_id()
            self.assertEqual(moved_question.page_id, page_1, "Question 3 should now belong to page 2")
diff --git a/addons/survey/tests/test_survey_flow.py b/addons/survey/tests/test_survey_flow.py
new file mode 100644
index 00000000..d0c2921a
--- /dev/null
+++ b/addons/survey/tests/test_survey_flow.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+from odoo.addons.survey.tests import common
+from odoo.tests import tagged
+from odoo.tests.common import HttpCase
+
+
@tagged('-at_install', 'post_install', 'functional')
class TestSurveyFlow(common.TestSurveyCommon, HttpCase):
    """ End-to-end (HTTP) flow of a public survey: a survey manager creates a
    two-page survey, then an anonymous visitor opens it, submits both pages
    and the user input ends up completed. """

    def _format_submission_data(self, page, answer_data, additional_post_data):
        """ Build the POST payload submitted for one survey page.

        :param page: survey.question record with ``is_page`` set;
        :param answer_data: dict {question_id: {'value': [...]}} holding the
          values answered for each question of the page;
        :param additional_post_data: extra payload entries (csrf token, answer
          token, submit button, ...);
        :return dict: complete payload for the survey submit controller
        """
        post_data = {}
        post_data['page_id'] = page.id
        for question_id, answer_vals in answer_data.items():
            question = page.question_ids.filtered(lambda q: q.id == question_id)
            post_data.update(self._prepare_post_data(question, answer_vals['value'], post_data))
        post_data.update(**additional_post_data)
        return post_data

    def test_flow_public(self):
        # Step: survey manager creates the survey
        # --------------------------------------------------
        with self.with_user('survey_manager'):
            survey = self.env['survey.survey'].create({
                'title': 'Public Survey for Tarte Al Djotte',
                'access_mode': 'public',
                'users_login_required': False,
                'questions_layout': 'page_per_section',
                'state': 'open'
            })

            # First page is about customer data
            page_0 = self.env['survey.question'].create({
                'is_page': True,
                'sequence': 1,
                'title': 'Page1: Your Data',
                'survey_id': survey.id,
            })
            page0_q0 = self._add_question(
                page_0, 'What is your name', 'text_box',
                comments_allowed=False,
                constr_mandatory=True, constr_error_msg='Please enter your name', survey_id=survey.id)
            # fixed copy-paste: the error message of the age question used to
            # ask for the name
            page0_q1 = self._add_question(
                page_0, 'What is your age', 'numerical_box',
                comments_allowed=False,
                constr_mandatory=True, constr_error_msg='Please enter your age', survey_id=survey.id)

            # Second page is about tarte al djotte
            page_1 = self.env['survey.question'].create({
                'is_page': True,
                'sequence': 4,
                'title': 'Page2: Tarte Al Djotte',
                'survey_id': survey.id,
            })
            page1_q0 = self._add_question(
                page_1, 'What do you like most in our tarte al djotte', 'multiple_choice',
                labels=[{'value': 'The gras'},
                        {'value': 'The bette'},
                        {'value': 'The tout'},
                        {'value': 'The regime is fucked up'}], survey_id=survey.id)

        # fetch starting data to check only newly created data during this flow
        answers = self.env['survey.user_input'].search([('survey_id', '=', survey.id)])
        answer_lines = self.env['survey.user_input.line'].search([('survey_id', '=', survey.id)])
        self.assertEqual(answers, self.env['survey.user_input'])
        self.assertEqual(answer_lines, self.env['survey.user_input.line'])

        # Step: customer takes the survey
        # --------------------------------------------------

        # Customer opens start page
        r = self._access_start(survey)
        self.assertResponse(r, 200, [survey.title])

        # -> this should have generated a new answer with a token
        answers = self.env['survey.user_input'].search([('survey_id', '=', survey.id)])
        self.assertEqual(len(answers), 1)
        answer_token = answers.access_token
        self.assertTrue(answer_token)
        self.assertAnswer(answers, 'new', self.env['survey.question'])

        # Customer begins survey with first page
        r = self._access_page(survey, answer_token)
        self.assertResponse(r, 200)
        self.assertAnswer(answers, 'new', self.env['survey.question'])
        csrf_token = self._find_csrf_token(r.text)

        r = self._access_begin(survey, answer_token)
        self.assertResponse(r, 200)

        # Customer submit first page answers
        answer_data = {
            page0_q0.id: {'value': ['Alfred Poilvache']},
            page0_q1.id: {'value': ['44.0']},
        }
        post_data = self._format_submission_data(page_0, answer_data, {'csrf_token': csrf_token, 'token': answer_token, 'button_submit': 'next'})
        r = self._access_submit(survey, answer_token, post_data)
        self.assertResponse(r, 200)
        answers.invalidate_cache()  # TDE note: necessary as lots of sudo in controllers messing with cache

        # -> this should have generated answer lines
        self.assertAnswer(answers, 'in_progress', page_0)
        self.assertAnswerLines(page_0, answers, answer_data)

        # Customer is redirected on second page and begins filling it
        r = self._access_page(survey, answer_token)
        self.assertResponse(r, 200)
        csrf_token = self._find_csrf_token(r.text)

        # Customer submit second page answers
        answer_data = {
            page1_q0.id: {'value': [page1_q0.suggested_answer_ids.ids[0], page1_q0.suggested_answer_ids.ids[1]]},
        }
        post_data = self._format_submission_data(page_1, answer_data, {'csrf_token': csrf_token, 'token': answer_token, 'button_submit': 'next'})
        r = self._access_submit(survey, answer_token, post_data)
        self.assertResponse(r, 200)
        answers.invalidate_cache()  # TDE note: necessary as lots of sudo in controllers messing with cache

        # -> this should have generated answer lines and closed the answer
        self.assertAnswer(answers, 'done', page_1)
        self.assertAnswerLines(page_1, answers, answer_data)
diff --git a/addons/survey/tests/test_survey_flow_with_conditions.py b/addons/survey/tests/test_survey_flow_with_conditions.py
new file mode 100644
index 00000000..5ad4709a
--- /dev/null
+++ b/addons/survey/tests/test_survey_flow_with_conditions.py
@@ -0,0 +1,126 @@
+# -*- coding: utf-8 -*-
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+from odoo.addons.survey.tests import common
+from odoo.tests import tagged
+from odoo.tests.common import HttpCase
+
+
@tagged('-at_install', 'post_install', 'functional')
class TestSurveyFlowWithConditions(common.TestSurveyCommon, HttpCase):
    def test_conditional_flow_with_scoring(self):
        """ Fill in a scored survey with conditional questions over HTTP.

        Q2 / Q4 / Q6 are conditional: each is only displayed when the correct
        answer of its triggering question (resp. Q1 / Q3 / Q5) is selected.
        Here Q1 and Q5 are answered correctly (revealing Q2 and Q6) while Q3
        is answered incorrectly, so Q4 stays hidden. Q2 is answered correctly
        and Q6 incorrectly: 3 correct answers out of 5 displayed questions.
        """
        with self.with_user('survey_user'):
            survey = self.env['survey.survey'].create({
                'title': 'Survey',
                'access_mode': 'public',
                'questions_layout': 'page_per_section',
                'scoring_type': 'scoring_with_answers',
                'scoring_success_min': 85.0,
                'state': 'open',
            })

            page_0 = self.env['survey.question'].with_user(self.survey_manager).create({
                'title': 'First page',
                'survey_id': survey.id,
                'sequence': 1,
                'is_page': True,
            })

            q01 = self._add_question(
                page_0, 'Question 1', 'simple_choice',
                sequence=1,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
                labels=[
                    {'value': 'Answer 1'},
                    {'value': 'Answer 2'},
                    {'value': 'Answer 3'},
                    {'value': 'Answer 4', 'is_correct': True, 'answer_score': 1.0}
                ])

            # conditional: only displayed when q01's correct answer is selected
            q02 = self._add_question(
                page_0, 'Question 2', 'simple_choice',
                sequence=2,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
                is_conditional=True, triggering_question_id=q01.id, triggering_answer_id=q01.suggested_answer_ids.filtered(lambda q: q.is_correct).id,
                labels=[
                    {'value': 'Answer 1'},
                    {'value': 'Answer 2', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'Answer 3'},
                    {'value': 'Answer 4'}
                ])

            q03 = self._add_question(
                page_0, 'Question 3', 'simple_choice',
                sequence=1,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
                labels=[
                    {'value': 'Answer 1'},
                    {'value': 'Answer 2'},
                    {'value': 'Answer 3'},
                    {'value': 'Answer 4', 'is_correct': True, 'answer_score': 1.0}
                ])

            # conditional: only displayed when q03's correct answer is selected
            q04 = self._add_question(
                page_0, 'Question 4', 'simple_choice',
                sequence=2,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
                is_conditional=True, triggering_question_id=q03.id, triggering_answer_id=q03.suggested_answer_ids.filtered(lambda q: q.is_correct).id,
                labels=[
                    {'value': 'Answer 1'},
                    {'value': 'Answer 2', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'Answer 3'},
                    {'value': 'Answer 4'}
                ])

            q05 = self._add_question(
                page_0, 'Question 5', 'simple_choice',
                sequence=1,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
                labels=[
                    {'value': 'Answer 1'},
                    {'value': 'Answer 2'},
                    {'value': 'Answer 3'},
                    {'value': 'Answer 4', 'is_correct': True, 'answer_score': 1.0}
                ])

            # conditional: only displayed when q05's correct answer is selected
            q06 = self._add_question(
                page_0, 'Question 6', 'simple_choice',
                sequence=2,
                constr_mandatory=True, constr_error_msg='Please select an answer', survey_id=survey.id,
                is_conditional=True, triggering_question_id=q05.id, triggering_answer_id=q05.suggested_answer_ids.filtered(lambda q: q.is_correct).id,
                labels=[
                    {'value': 'Answer 1'},
                    {'value': 'Answer 2', 'is_correct': True, 'answer_score': 1.0},
                    {'value': 'Answer 3'},
                    {'value': 'Answer 4'}
                ])

            # User opens start page
            self._access_start(survey)

            # -> this should have generated a new user_input with a token
            user_inputs = self.env['survey.user_input'].search([('survey_id', '=', survey.id)])
            self.assertEqual(len(user_inputs), 1)
            answer_token = user_inputs.access_token

            # User begins survey with first page
            response = self._access_page(survey, answer_token)
            self.assertResponse(response, 200)
            csrf_token = self._find_csrf_token(response.text)

            r = self._access_begin(survey, answer_token)
            self.assertResponse(r, 200)

            answers = {
                q01: q01.suggested_answer_ids[3], # Right
                q02: q02.suggested_answer_ids[1], # Right
                q03: q03.suggested_answer_ids[0], # Wrong
                q05: q05.suggested_answer_ids[3], # Right
                q06: q06.suggested_answer_ids[2], # Wrong
            }

            self._answer_page(page_0, answers, answer_token, csrf_token)

            # reload computed scoring after the controller-side writes
            user_inputs.invalidate_cache()
            # 3 correct out of 5 displayed -> 60%, below the 85% success threshold
            self.assertEqual(round(user_inputs.scoring_percentage), 60, "Three right answers out of five (the fourth one is still hidden)")
            self.assertFalse(user_inputs.scoring_success)
diff --git a/addons/survey/tests/test_survey_invite.py b/addons/survey/tests/test_survey_invite.py
new file mode 100644
index 00000000..6a93ee45
--- /dev/null
+++ b/addons/survey/tests/test_survey_invite.py
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+from datetime import datetime
+from dateutil.relativedelta import relativedelta
+
+from odoo import fields
+from odoo.addons.survey.tests import common
+from odoo.exceptions import UserError
+from odoo.tests import Form
+from odoo.tests.common import users
+
+
class TestSurveyInvite(common.TestSurveyCommon):
    """ Tests of the survey.invite wizard: which recipients are accepted
    depending on access_mode / users_login_required / signup availability,
    and which survey.user_input records get pre-created. """

    def setUp(self):
        res = super(TestSurveyInvite, self).setUp()
        # by default signup not allowed
        self.env["ir.config_parameter"].set_param('auth_signup.invitation_scope', 'b2b')
        return res

    @users('survey_manager')
    def test_survey_invite_action(self):
        """ action_send_survey returns the invite wizard action for a valid
        survey, and raises for surveys that cannot be shared. """
        # Check correctly configured survey returns an invite wizard action
        action = self.survey.action_send_survey()
        self.assertEqual(action['res_model'], 'survey.invite')

        # Bad cases
        surveys = [
            # no page
            self.env['survey.survey'].create({'title': 'Test survey'}),
            # no questions
            self.env['survey.survey'].create({'title': 'Test survey', 'question_and_page_ids': [(0, 0, {'is_page': True, 'title': 'P0', 'sequence': 1})]}),
            # closed
            self.env['survey.survey'].with_user(self.survey_manager).create({
                'title': 'S0',
                'state': 'closed',
                'question_and_page_ids': [
                    (0, 0, {'is_page': True, 'title': 'P0', 'sequence': 1}),
                    (0, 0, {'title': 'Q0', 'sequence': 2, 'question_type': 'text_box'})
                ]
            })
        ]
        for survey in surveys:
            with self.assertRaises(UserError):
                survey.action_send_survey()

    @users('survey_manager')
    def test_survey_invite(self):
        """ Inviting a partner pre-creates one user_input carrying the
        partner, its email and the wizard deadline. """
        Answer = self.env['survey.user_input']
        deadline = fields.Datetime.now() + relativedelta(months=1)

        self.survey.write({'access_mode': 'public', 'users_login_required': False})
        action = self.survey.action_send_survey()
        invite_form = Form(self.env[action['res_model']].with_context(action['context']))

        # some lowlevel checks that action is correctly configured
        self.assertEqual(Answer.search([('survey_id', '=', self.survey.id)]), self.env['survey.user_input'])
        self.assertEqual(invite_form.survey_id, self.survey)

        invite_form.partner_ids.add(self.customer)
        invite_form.deadline = fields.Datetime.to_string(deadline)

        invite = invite_form.save()
        invite.action_invite()

        answers = Answer.search([('survey_id', '=', self.survey.id)])
        self.assertEqual(len(answers), 1)
        self.assertEqual(
            set(answers.mapped('email')),
            set([self.customer.email]))
        self.assertEqual(answers.mapped('partner_id'), self.customer)
        self.assertEqual(set(answers.mapped('deadline')), set([deadline]))

    @users('survey_manager')
    def test_survey_invite_authentication_nosignup(self):
        """ With login required and no signup, only partners linked to a user
        may be invited; raw email addresses are refused. """
        Answer = self.env['survey.user_input']

        self.survey.write({'access_mode': 'public', 'users_login_required': True})
        action = self.survey.action_send_survey()
        invite_form = Form(self.env[action['res_model']].with_context(action['context']))

        with self.assertRaises(UserError):  # do not allow to add customer (partner without user)
            invite_form.partner_ids.add(self.customer)
        invite_form.partner_ids.clear()
        invite_form.partner_ids.add(self.user_portal.partner_id)
        invite_form.partner_ids.add(self.user_emp.partner_id)
        with self.assertRaises(UserError):
            invite_form.emails = 'test1@example.com, Raoulette Vignolette <test2@example.com>'
        invite_form.emails = False

        invite = invite_form.save()
        invite.action_invite()

        answers = Answer.search([('survey_id', '=', self.survey.id)])
        self.assertEqual(len(answers), 2)
        self.assertEqual(
            set(answers.mapped('email')),
            set([self.user_emp.email, self.user_portal.email]))
        self.assertEqual(answers.mapped('partner_id'), self.user_emp.partner_id | self.user_portal.partner_id)

    @users('survey_manager')
    def test_survey_invite_authentication_signup(self):
        """ With login required and b2c signup enabled, partners without a
        user become valid recipients too. """
        self.env["ir.config_parameter"].sudo().set_param('auth_signup.invitation_scope', 'b2c')
        self.survey.invalidate_cache()
        Answer = self.env['survey.user_input']

        self.survey.write({'access_mode': 'public', 'users_login_required': True})
        action = self.survey.action_send_survey()
        invite_form = Form(self.env[action['res_model']].with_context(action['context']))

        invite_form.partner_ids.add(self.customer)
        invite_form.partner_ids.add(self.user_portal.partner_id)
        invite_form.partner_ids.add(self.user_emp.partner_id)
        # TDE FIXME: not sure for emails in authentication + signup
        # invite_form.emails = 'test1@example.com, Raoulette Vignolette <test2@example.com>'

        invite = invite_form.save()
        invite.action_invite()

        answers = Answer.search([('survey_id', '=', self.survey.id)])
        self.assertEqual(len(answers), 3)
        self.assertEqual(
            set(answers.mapped('email')),
            set([self.customer.email, self.user_emp.email, self.user_portal.email]))
        self.assertEqual(answers.mapped('partner_id'), self.customer | self.user_emp.partner_id | self.user_portal.partner_id)

    @users('survey_manager')
    def test_survey_invite_public(self):
        """ Public survey without login: both partners and free email
        addresses are accepted, one user_input each. """
        Answer = self.env['survey.user_input']

        self.survey.write({'access_mode': 'public', 'users_login_required': False})
        action = self.survey.action_send_survey()
        invite_form = Form(self.env[action['res_model']].with_context(action['context']))

        invite_form.partner_ids.add(self.customer)
        invite_form.emails = 'test1@example.com, Raoulette Vignolette <test2@example.com>'

        invite = invite_form.save()
        invite.action_invite()

        answers = Answer.search([('survey_id', '=', self.survey.id)])
        self.assertEqual(len(answers), 3)
        self.assertEqual(
            set(answers.mapped('email')),
            set(['test1@example.com', '"Raoulette Vignolette" <test2@example.com>', self.customer.email]))
        self.assertEqual(answers.mapped('partner_id'), self.customer)

    @users('survey_manager')
    def test_survey_invite_token(self):
        """ Token access mode without login behaves like public mode for
        recipient acceptance: partners and free emails both work. """
        Answer = self.env['survey.user_input']

        self.survey.write({'access_mode': 'token', 'users_login_required': False})
        action = self.survey.action_send_survey()
        invite_form = Form(self.env[action['res_model']].with_context(action['context']))

        invite_form.partner_ids.add(self.customer)
        invite_form.emails = 'test1@example.com, Raoulette Vignolette <test2@example.com>'

        invite = invite_form.save()
        invite.action_invite()

        answers = Answer.search([('survey_id', '=', self.survey.id)])
        self.assertEqual(len(answers), 3)
        self.assertEqual(
            set(answers.mapped('email')),
            set(['test1@example.com', '"Raoulette Vignolette" <test2@example.com>', self.customer.email]))
        self.assertEqual(answers.mapped('partner_id'), self.customer)

    @users('survey_manager')
    def test_survey_invite_token_internal(self):
        """ Token access mode with login required: only internal users are
        valid recipients (portal users and bare partners are refused). """
        Answer = self.env['survey.user_input']

        self.survey.write({'access_mode': 'token', 'users_login_required': True})
        action = self.survey.action_send_survey()
        invite_form = Form(self.env[action['res_model']].with_context(action['context']))

        with self.assertRaises(UserError):  # do not allow to add customer (partner without user)
            invite_form.partner_ids.add(self.customer)
        with self.assertRaises(UserError):  # do not allow to add portal user
            invite_form.partner_ids.add(self.user_portal.partner_id)
        invite_form.partner_ids.clear()
        invite_form.partner_ids.add(self.user_emp.partner_id)
        with self.assertRaises(UserError):
            invite_form.emails = 'test1@example.com, Raoulette Vignolette <test2@example.com>'
        invite_form.emails = False

        invite = invite_form.save()
        invite.action_invite()

        answers = Answer.search([('survey_id', '=', self.survey.id)])
        self.assertEqual(len(answers), 1)
        self.assertEqual(
            set(answers.mapped('email')),
            set([self.user_emp.email]))
        self.assertEqual(answers.mapped('partner_id'), self.user_emp.partner_id)

    def test_survey_invite_token_by_email_nosignup(self):
        """
        Case: have multiples partners with the same email address
        If I set one email address, I expect one email to be sent
        """

        first_partner = self.env['res.partner'].create({
            'name': 'Test 1',
            'email': 'test@example.com',
        })

        # second partner shares the email (case-insensitive) with the first one
        self.env['res.partner'].create({
            'name': 'Test 2',
            'email': '"Raoul Poilvache" <TEST@example.COM>',
        })

        self.survey.write({'access_mode': 'token', 'users_login_required': False})
        action = self.survey.action_send_survey()
        invite_form = Form(self.env[action['res_model']].with_context(action['context']))
        invite_form.emails = 'test@example.com'
        invite = invite_form.save()
        invite.action_invite()

        answers = self.env['survey.user_input'].search([('survey_id', '=', self.survey.id)])
        self.assertEqual(len(answers), 1)
        self.assertEqual(answers.partner_id.display_name, first_partner.display_name)
diff --git a/addons/survey/tests/test_survey_randomize.py b/addons/survey/tests/test_survey_randomize.py
new file mode 100644
index 00000000..fa926b18
--- /dev/null
+++ b/addons/survey/tests/test_survey_randomize.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+from odoo.tests.common import TransactionCase
+
+
class TestSurveyRandomize(TransactionCase):
    def test_01_generate_randomized_questions(self):
        """ Use random generate for a survey and verify that questions within
        the page are selected accordingly: ``random_questions_count`` per
        page, capped by the number of questions actually available. """
        Question = self.env['survey.question'].sudo()
        question_and_pages = self.env['survey.question']
        page_1 = Question.create({
            'title': 'Page 1',
            'is_page': True,
            'sequence': 1,
            'random_questions_count': 3
        })
        question_and_pages |= page_1
        question_and_pages = self._add_questions(question_and_pages, page_1, 5)

        page_2 = Question.create({
            'title': 'Page 2',
            'is_page': True,
            'sequence': 100,
            'random_questions_count': 5
        })
        question_and_pages |= page_2
        question_and_pages = self._add_questions(question_and_pages, page_2, 10)

        # asks for 4 random questions but only 2 are created below: the
        # selection must be capped at the available count
        page_3 = Question.create({
            'title': 'Page 3',  # fixed copy-paste: was duplicated 'Page 2'
            'is_page': True,
            'sequence': 1000,
            'random_questions_count': 4
        })
        question_and_pages |= page_3
        question_and_pages = self._add_questions(question_and_pages, page_3, 2)

        self.survey1 = self.env['survey.survey'].sudo().create({
            'title': "S0",
            'question_and_page_ids': [(6, 0, question_and_pages.ids)],
            'questions_selection': 'random'
        })

        generated_questions = self.survey1._prepare_user_input_predefined_questions()

        self.assertEqual(len(generated_questions.ids), 10, msg="Expected 10 unique questions")
        self.assertEqual(len(generated_questions.filtered(lambda question: question.page_id == page_1)), 3, msg="Expected 3 questions in page 1")
        self.assertEqual(len(generated_questions.filtered(lambda question: question.page_id == page_2)), 5, msg="Expected 5 questions in page 2")
        self.assertEqual(len(generated_questions.filtered(lambda question: question.page_id == page_3)), 2, msg="Expected 2 questions in page 3")

    def _add_questions(self, question_and_pages, page, count):
        """ Create ``count`` questions sequenced right after ``page`` and
        return the accumulated recordset. """
        for i in range(count):
            question_and_pages |= self.env['survey.question'].sudo().create({
                'title': page.title + ' Q' + str(i + 1),
                'sequence': page.sequence + (i + 1)
            })

        return question_and_pages
diff --git a/addons/survey/tests/test_survey_security.py b/addons/survey/tests/test_survey_security.py
new file mode 100644
index 00000000..191e8edd
--- /dev/null
+++ b/addons/survey/tests/test_survey_security.py
@@ -0,0 +1,364 @@
+# -*- coding: utf-8 -*-
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+import datetime
+
+from odoo.addons.survey.tests import common
+from odoo.exceptions import AccessError, UserError
+from odoo.tests import tagged
+from odoo.tests.common import users, HttpCase
+from odoo.tools import mute_logger
+
+
+@tagged('security')
+class TestAccess(common.TestSurveyCommon):
+
+ def setUp(self):
+ super(TestAccess, self).setUp()
+
+ self.answer_0 = self._add_answer(self.survey, self.customer)
+ self.answer_0_0 = self._add_answer_line(self.question_ft, self.answer_0, 'Test Answer')
+ self.answer_0_1 = self._add_answer_line(self.question_num, self.answer_0, 5)
+
+ @mute_logger('odoo.addons.base.models.ir_model')
+ @users('user_emp')
+ def test_access_survey_employee(self):
+ # Create: nope
+ with self.assertRaises(AccessError):
+ self.env['survey.survey'].create({'title': 'Test Survey 2'})
+ with self.assertRaises(AccessError):
+ self.env['survey.question'].create({'title': 'My Page', 'sequence': 0, 'is_page': True, 'survey_id': self.survey.id})
+ with self.assertRaises(AccessError):
+ self.env['survey.question'].create({'title': 'My Question', 'sequence': 1, 'page_id': self.page_0.id})
+
+ # Read: nope
+ with self.assertRaises(AccessError):
+ self.env['survey.survey'].search([('title', 'ilike', 'Test')])
+ with self.assertRaises(AccessError):
+ self.survey.with_user(self.env.user).read(['title'])
+
+ # Write: nope
+ with self.assertRaises(AccessError):
+ self.survey.with_user(self.env.user).write({'title': 'New Title'})
+ with self.assertRaises(AccessError):
+ self.page_0.with_user(self.env.user).write({'title': 'New Title'})
+ with self.assertRaises(AccessError):
+ self.question_ft.with_user(self.env.user).write({'question': 'New Title'})
+
+ # Unlink: nope
+ with self.assertRaises(AccessError):
+ self.survey.with_user(self.env.user).unlink()
+ with self.assertRaises(AccessError):
+ self.page_0.with_user(self.env.user).unlink()
+ with self.assertRaises(AccessError):
+ self.question_ft.with_user(self.env.user).unlink()
+
+ @mute_logger('odoo.addons.base.models.ir_model')
+ @users('user_portal')
+ def test_access_survey_portal(self):
+ # Create: nope
+ with self.assertRaises(AccessError):
+ self.env['survey.survey'].create({'title': 'Test Survey 2'})
+ with self.assertRaises(AccessError):
+ self.env['survey.question'].create({'title': 'My Page', 'sequence': 0, 'is_page': True, 'survey_id': self.survey.id})
+ with self.assertRaises(AccessError):
+ self.env['survey.question'].create({'title': 'My Question', 'sequence': 1, 'page_id': self.page_0.id})
+
+ # Read: nope
+ with self.assertRaises(AccessError):
+ self.env['survey.survey'].search([('title', 'ilike', 'Test')])
+ with self.assertRaises(AccessError):
+ self.survey.with_user(self.env.user).read(['title'])
+
+ # Write: nope
+ with self.assertRaises(AccessError):
+ self.survey.with_user(self.env.user).write({'title': 'New Title'})
+ with self.assertRaises(AccessError):
+ self.page_0.with_user(self.env.user).write({'title': 'New Title'})
+ with self.assertRaises(AccessError):
+ self.question_ft.with_user(self.env.user).write({'question': 'New Title'})
+
+ # Unlink: nope
+ with self.assertRaises(AccessError):
+ self.survey.with_user(self.env.user).unlink()
+ with self.assertRaises(AccessError):
+ self.page_0.with_user(self.env.user).unlink()
+ with self.assertRaises(AccessError):
+ self.question_ft.with_user(self.env.user).unlink()
+
+ @mute_logger('odoo.addons.base.models.ir_model')
+ @users('user_public')
+ def test_access_survey_public(self):
+ # Create: nope
+ with self.assertRaises(AccessError):
+ self.env['survey.survey'].create({'title': 'Test Survey 2'})
+ with self.assertRaises(AccessError):
+ self.env['survey.question'].create({'title': 'My Page', 'sequence': 0, 'is_page': True, 'survey_id': self.survey.id})
+ with self.assertRaises(AccessError):
+ self.env['survey.question'].create({'title': 'My Question', 'sequence': 1, 'page_id': self.page_0.id})
+
+ # Read: nope
+ with self.assertRaises(AccessError):
+ self.env['survey.survey'].search([('title', 'ilike', 'Test')])
+ with self.assertRaises(AccessError):
+ self.survey.with_user(self.env.user).read(['title'])
+
+ # Write: nope
+ with self.assertRaises(AccessError):
+ self.survey.with_user(self.env.user).write({'title': 'New Title'})
+ with self.assertRaises(AccessError):
+ self.page_0.with_user(self.env.user).write({'title': 'New Title'})
+ with self.assertRaises(AccessError):
+ self.question_ft.with_user(self.env.user).write({'question': 'New Title'})
+
+ # Unlink: nope
+ with self.assertRaises(AccessError):
+ self.survey.with_user(self.env.user).unlink()
+ with self.assertRaises(AccessError):
+ self.page_0.with_user(self.env.user).unlink()
+ with self.assertRaises(AccessError):
+ self.question_ft.with_user(self.env.user).unlink()
+
+ @users('survey_manager')
+ def test_access_survey_survey_manager(self):
+ # Create: all
+ survey = self.env['survey.survey'].create({'title': 'Test Survey 2'})
+ self.env['survey.question'].create({'title': 'My Page', 'sequence': 0, 'is_page': True, 'survey_id': survey.id})
+ self.env['survey.question'].create({'title': 'My Question', 'sequence': 1, 'survey_id': survey.id})
+
+ # Read: all
+ surveys = self.env['survey.survey'].search([('title', 'ilike', 'Test')])
+ self.assertEqual(surveys, self.survey | survey)
+ surveys.read(['title'])
+
+ # Write: all
+ (self.survey | survey).write({'title': 'New Title'})
+
+ # Unlink: all
+ (self.survey | survey).unlink()
+
+ @mute_logger('odoo.addons.base.models.ir_model')
+ @users('survey_user')
+ def test_access_survey_survey_user(self):
+ # Create: own only
+ survey = self.env['survey.survey'].create({'title': 'Test Survey 2'})
+ self.env['survey.question'].create({'title': 'My Page', 'sequence': 0, 'is_page': True, 'survey_id': survey.id})
+ self.env['survey.question'].create({'title': 'My Question', 'sequence': 1, 'survey_id': survey.id})
+
+ # Read: all
+ surveys = self.env['survey.survey'].search([('title', 'ilike', 'Test')])
+ self.assertEqual(surveys, self.survey | survey)
+ surveys.read(['title'])
+
+ # Write: own only
+ survey.write({'title': 'New Title'})
+ with self.assertRaises(AccessError):
+ self.survey.with_user(self.env.user).write({'title': 'New Title'})
+
+ # Unlink: own only
+ survey.unlink()
+ with self.assertRaises(AccessError):
+ self.survey.with_user(self.env.user).unlink()
+
+ @mute_logger('odoo.addons.base.models.ir_model')
+ @users('user_emp')
+ def test_access_answers_employee(self):
+ # Create: nope
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input'].create({'survey_id': self.survey.id})
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input.line'].create({'question_id': self.question_num.id, 'answer_type': 'numerical_box', 'value_numerical_box': 3, 'user_input_id': self.answer_0.id})
+
+ # Read: nope
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input'].search([('survey_id', 'in', [self.survey.id])])
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input.line'].search([('survey_id', 'in', [self.survey.id])])
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input'].browse(self.answer_0.ids).read(['state'])
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input.line'].browse(self.answer_0_0.ids).read(['value_numerical_box'])
+
+ # Write: nope
+ with self.assertRaises(AccessError):
+ self.answer_0.with_user(self.env.user).write({'state': 'done'})
+
+ # Unlink: nope
+ with self.assertRaises(AccessError):
+ self.answer_0.with_user(self.env.user).unlink()
+ with self.assertRaises(AccessError):
+ self.answer_0_0.with_user(self.env.user).unlink()
+
+ @mute_logger('odoo.addons.base.models.ir_model')
+ @users('user_portal')
+ def test_access_answers_portal(self):
+ # Create: nope
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input'].create({'survey_id': self.survey.id})
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input.line'].create({'question_id': self.question_num.id, 'answer_type': 'numerical_box', 'value_numerical_box': 3, 'user_input_id': self.answer_0.id})
+
+ # Read: nope
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input'].search([('survey_id', 'in', [self.survey.id])])
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input.line'].search([('survey_id', 'in', [self.survey.id])])
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input'].browse(self.answer_0.ids).read(['state'])
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input.line'].browse(self.answer_0_0.ids).read(['value_numerical_box'])
+
+ # Write: nope
+ with self.assertRaises(AccessError):
+ self.answer_0.with_user(self.env.user).write({'state': 'done'})
+
+ # Unlink: nope
+ with self.assertRaises(AccessError):
+ self.answer_0.with_user(self.env.user).unlink()
+ with self.assertRaises(AccessError):
+ self.answer_0_0.with_user(self.env.user).unlink()
+
+ @mute_logger('odoo.addons.base.models.ir_model')
+ @users('user_public')
+ def test_access_answers_public(self):
+ # Create: nope
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input'].create({'survey_id': self.survey.id})
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input.line'].create({'question_id': self.question_num.id, 'answer_type': 'numerical_box', 'value_numerical_box': 3, 'user_input_id': self.answer_0.id})
+
+ # Read: nope
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input'].search([('survey_id', 'in', [self.survey.id])])
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input.line'].search([('survey_id', 'in', [self.survey.id])])
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input'].browse(self.answer_0.ids).read(['state'])
+ with self.assertRaises(AccessError):
+ self.env['survey.user_input.line'].browse(self.answer_0_0.ids).read(['value_numerical_box'])
+
+ # Write: nope
+ with self.assertRaises(AccessError):
+ self.answer_0.with_user(self.env.user).write({'state': 'done'})
+
+ # Unlink: nope
+ with self.assertRaises(AccessError):
+ self.answer_0.with_user(self.env.user).unlink()
+ with self.assertRaises(AccessError):
+ self.answer_0_0.with_user(self.env.user).unlink()
+
+ @mute_logger('odoo.addons.base.models.ir_model')
+ @users('survey_user')
+ def test_access_answers_survey_user(self):
+ survey_own = self.env['survey.survey'].create({'title': 'Other'})
+ self.env['survey.question'].create({'title': 'Other', 'sequence': 0, 'is_page': True, 'survey_id': survey_own.id})
+ question_own = self.env['survey.question'].create({'title': 'Other Question', 'sequence': 1, 'survey_id': survey_own.id})
+
+ # Create: own survey only
+ answer_own = self.env['survey.user_input'].create({'survey_id': survey_own.id})
+ answer_line_own = self.env['survey.user_input.line'].create({'question_id': question_own.id, 'answer_type': 'numerical_box', 'value_numerical_box': 3, 'user_input_id': answer_own.id})
+
+ # Read: always
+ answers = self.env['survey.user_input'].search([('survey_id', 'in', [survey_own.id, self.survey.id])])
+ self.assertEqual(answers, answer_own | self.answer_0)
+
+ answer_lines = self.env['survey.user_input.line'].search([('survey_id', 'in', [survey_own.id, self.survey.id])])
+ self.assertEqual(answer_lines, answer_line_own | self.answer_0_0 | self.answer_0_1)
+
+ self.env['survey.user_input'].browse(answer_own.ids).read(['state'])
+ self.env['survey.user_input'].browse(self.answer_0.ids).read(['state'])
+
+ self.env['survey.user_input.line'].browse(answer_line_own.ids).read(['value_numerical_box'])
+ self.env['survey.user_input.line'].browse(self.answer_0_0.ids).read(['value_numerical_box'])
+
+ # Create: own survey only (moved after read because DB not correctly rollbacked with assertRaises)
+ with self.assertRaises(AccessError):
+ answer_other = self.env['survey.user_input'].create({'survey_id': self.survey.id})
+ with self.assertRaises(AccessError):
+ answer_line_other = self.env['survey.user_input.line'].create({'question_id': self.question_num.id, 'answer_type': 'numerical_box', 'value_numerical_box': 3, 'user_input_id': self.answer_0.id})
+
+ # Write: own survey only
+ answer_own.write({'state': 'done'})
+ with self.assertRaises(AccessError):
+ self.answer_0.with_user(self.env.user).write({'state': 'done'})
+
+ # Unlink: own survey only
+ answer_own.unlink()
+ with self.assertRaises(AccessError):
+ self.answer_0.with_user(self.env.user).unlink()
+ with self.assertRaises(AccessError):
+ self.answer_0_0.with_user(self.env.user).unlink()
+
+ @users('survey_manager')
+ def test_access_answers_survey_manager(self):
+ admin = self.env.ref('base.user_admin')
+ with self.with_user(admin.login):
+ survey_other = self.env['survey.survey'].create({'title': 'Other'})
+ self.env['survey.question'].create({'title': 'Other', 'sequence': 0, 'is_page': True, 'survey_id': survey_other.id})
+ question_other = self.env['survey.question'].create({'title': 'Other Question', 'sequence': 1, 'survey_id': survey_other.id})
+ self.assertEqual(survey_other.create_uid, admin)
+ self.assertEqual(question_other.create_uid, admin)
+
+ # Create: always
+ answer_own = self.env['survey.user_input'].create({'survey_id': self.survey.id})
+ answer_other = self.env['survey.user_input'].create({'survey_id': survey_other.id})
+ answer_line_own = self.env['survey.user_input.line'].create({'question_id': self.question_num.id, 'answer_type': 'numerical_box', 'value_numerical_box': 3, 'user_input_id': answer_own.id})
+ answer_line_other = self.env['survey.user_input.line'].create({'question_id': question_other.id, 'answer_type': 'numerical_box', 'value_numerical_box': 3, 'user_input_id': answer_other.id})
+
+ # Read: always
+ answers = self.env['survey.user_input'].search([('survey_id', 'in', [survey_other.id, self.survey.id])])
+ self.assertEqual(answers, answer_own | answer_other | self.answer_0)
+
+ answer_lines = self.env['survey.user_input.line'].search([('survey_id', 'in', [survey_other.id, self.survey.id])])
+ self.assertEqual(answer_lines, answer_line_own | answer_line_other | self.answer_0_0 | self.answer_0_1)
+
+ self.env['survey.user_input'].browse(answer_own.ids).read(['state'])
+ self.env['survey.user_input'].browse(self.answer_0.ids).read(['state'])
+
+ self.env['survey.user_input.line'].browse(answer_line_own.ids).read(['value_numerical_box'])
+ self.env['survey.user_input.line'].browse(self.answer_0_0.ids).read(['value_numerical_box'])
+
+ # Write: always
+ answer_own.write({'state': 'done'})
+ answer_other.write({'partner_id': self.env.user.partner_id.id})
+
+ # Unlink: always
+ (answer_own | answer_other | self.answer_0).unlink()
+
+
+@tagged('post_install')
+class TestSurveySecurityControllers(common.TestSurveyCommon, HttpCase):
+ def test_survey_start_short(self):
+ # avoid name clash with existing data
+ surveys = self.env['survey.survey'].search([
+ ('state', '=', 'open'),
+ ('session_state', 'in', ['ready', 'in_progress'])
+ ])
+ surveys.write({'state': 'done'})
+ self.survey.write({
+ 'state': 'open',
+ 'session_state': 'ready',
+ 'session_code': '123456',
+ 'session_start_time': datetime.datetime.now(),
+ 'access_mode': 'public',
+ 'users_login_required': False,
+ })
+
+ # right short access token
+ response = self.url_open('/s/123456')
+ self.assertEqual(response.status_code, 200)
+ self.assertIn('The session will begin automatically when the host starts', response.text)
+
+ # `like` operator injection
+ response = self.url_open('/s/______')
+ self.assertFalse(self.survey.title in response.text)
+
+ # right short token, but wrong state
+ self.survey.state = 'draft'
+ response = self.url_open('/s/123456')
+ self.assertFalse(self.survey.title in response.text)
+
+ # right short token, but wrong `session_state`
+ self.survey.write({'state': 'open', 'session_state': False})
+ response = self.url_open('/s/123456')
+ self.assertFalse(self.survey.title in response.text)
diff --git a/addons/survey/tests/test_survey_ui_certification.py b/addons/survey/tests/test_survey_ui_certification.py
new file mode 100644
index 00000000..9ba15c55
--- /dev/null
+++ b/addons/survey/tests/test_survey_ui_certification.py
@@ -0,0 +1,282 @@
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+import odoo.tests
+
+from odoo.addons.base.tests.common import HttpCaseWithUserDemo
+
+
+@odoo.tests.common.tagged('post_install', '-at_install')
+class TestUiCertification(HttpCaseWithUserDemo):
+
+ def setUp(self):
+ super(TestUiCertification, self).setUp()
+
+ self.survey_certification = self.env['survey.survey'].create({
+ 'title': 'MyCompany Vendor Certification',
+ 'access_token': '4ead4bc8-b8f2-4760-a682-1fde8daaaaac',
+ 'state': 'open',
+ 'access_mode': 'public',
+ 'users_can_go_back': True,
+ 'users_login_required': True,
+ 'scoring_type': 'scoring_with_answers',
+ 'certification': True,
+ 'certification_mail_template_id': self.env.ref('survey.mail_template_certification').id,
+ 'is_time_limited': 'limited',
+ 'time_limit': 10.0,
+ 'is_attempts_limited': True,
+ 'attempts_limit': 2,
+ 'description': """&lt;p&gt;Test your vendor skills!.&lt;/p&gt;""",
+ 'question_and_page_ids': [
+ (0, 0, {
+ 'title': 'Products',
+ 'sequence': 1,
+ 'is_page': True,
+ 'question_type': False,
+ 'description': '&lt;p&gt;Test your knowledge of your products!&lt;/p&gt;',
+ }), (0, 0, {
+ 'title': 'Do we sell Acoustic Bloc Screens?',
+ 'sequence': 2,
+ 'question_type': 'simple_choice',
+ 'constr_mandatory': True,
+ 'suggested_answer_ids': [
+ (0, 0, {
+ 'value': 'No',
+ 'sequence': 1,
+ }), (0, 0, {
+ 'value': 'Yes',
+ 'sequence': 2,
+ 'is_correct': True,
+ 'answer_score': 2,
+ })
+ ],
+ }), (0, 0, {
+ 'title': 'Select all the existing products',
+ 'sequence': 3,
+ 'question_type': 'multiple_choice',
+ 'column_nb': '4',
+ 'suggested_answer_ids': [
+ (0, 0, {
+ 'value': 'Chair floor protection',
+ 'sequence': 1,
+ 'is_correct': True,
+ 'answer_score': 1,
+ }), (0, 0, {
+ 'value': 'Fanta',
+ 'sequence': 2,
+ 'answer_score': -1,
+ }), (0, 0, {
+ 'value': 'Conference chair',
+ 'sequence': 3,
+ 'is_correct': True,
+ 'answer_score': 1,
+ }), (0, 0, {
+ 'value': 'Drawer',
+ 'sequence': 4,
+ 'is_correct': True,
+ 'answer_score': 1,
+ }), (0, 0, {
+ 'value': 'Customizable Lamp',
+ 'sequence': 5,
+ 'answer_score': -1,
+ })
+ ]
+ }), (0, 0, {
+ 'title': 'Select all the available customizations for our Customizable Desk',
+ 'sequence': 4,
+ 'question_type': 'multiple_choice',
+ 'column_nb': '4',
+ 'suggested_answer_ids': [
+ (0, 0, {
+ 'value': 'Color',
+ 'sequence': 1,
+ 'is_correct': True,
+ 'answer_score': 1,
+ }), (0, 0, {
+ 'value': 'Height',
+ 'sequence': 2,
+ 'answer_score': -1,
+ }), (0, 0, {
+ 'value': 'Width',
+ 'sequence': 3,
+ 'is_correct': True,
+ 'answer_score': 1,
+ }), (0, 0, {
+ 'value': 'Legs',
+ 'sequence': 4,
+ 'is_correct': True,
+ 'answer_score': 1,
+ }), (0, 0, {
+ 'value': 'Number of drawers',
+ 'sequence': 5,
+ 'answer_score': -1,
+ })
+ ]
+ }), (0, 0, {
+ 'title': 'How many versions of the Corner Desk do we have?',
+ 'sequence': 5,
+ 'question_type': 'simple_choice',
+ 'constr_mandatory': True,
+ 'suggested_answer_ids': [
+ (0, 0, {
+ 'value': 1,
+ 'sequence': 1,
+ }), (0, 0, {
+ 'value': 2,
+ 'sequence': 2,
+ 'is_correct': True,
+ 'answer_score': 2,
+ }), (0, 0, {
+ 'value': 3,
+ 'sequence': 3,
+ }), (0, 0, {
+ 'value': 4,
+ 'sequence': 4,
+ })
+ ]
+ }), (0, 0, {
+ 'title': 'Do you think we have missing products in our catalog? (not rated)',
+ 'sequence': 6,
+ 'question_type': 'text_box',
+ }), (0, 0, {
+ 'title': 'Prices',
+ 'sequence': 7,
+ 'is_page': True,
+ 'question_type': False,
+ 'description': """&lt;p&gt;Test your knowledge of our prices.&lt;/p&gt;""",
+ }), (0, 0, {
+ 'title': 'How much do we sell our Cable Management Box?',
+ 'sequence': 8,
+ 'question_type': 'simple_choice',
+ 'constr_mandatory': True,
+ 'suggested_answer_ids': [
+ (0, 0, {
+ 'value': '$20',
+ 'sequence': 1,
+ }), (0, 0, {
+ 'value': '$50',
+ 'sequence': 2,
+ }), (0, 0, {
+ 'value': '$80',
+ 'sequence': 3,
+ }), (0, 0, {
+ 'value': '$100',
+ 'sequence': 4,
+ 'is_correct': True,
+ 'answer_score': 2,
+ }), (0, 0, {
+ 'value': '$200',
+ 'sequence': 5,
+ }), (0, 0, {
+ 'value': '$300',
+ 'sequence': 6,
+ })
+ ]
+ }), (0, 0, {
+ 'title': 'Select all the products that sell for $100 or more',
+ 'sequence': 9,
+ 'question_type': 'multiple_choice',
+ 'column_nb': '2',
+ 'suggested_answer_ids': [
+ (0, 0, {
+ 'value': 'Corner Desk Right Sit',
+ 'sequence': 1,
+ 'answer_score': 1,
+ 'is_correct': True,
+ }), (0, 0, {
+ 'value': 'Desk Combination',
+ 'sequence': 2,
+ 'answer_score': 1,
+ 'is_correct': True,
+ }), (0, 0, {
+ 'value': 'Cabinet with Doors',
+ 'sequence': 3,
+ 'answer_score': -1,
+ }), (0, 0, {
+ 'value': 'Large Desk',
+ 'sequence': 4,
+ 'answer_score': 1,
+ 'is_correct': True,
+ }), (0, 0, {
+ 'value': 'Letter Tray',
+ 'sequence': 5,
+ 'answer_score': -1,
+ }), (0, 0, {
+ 'value': 'Office Chair Black',
+ 'sequence': 6,
+ 'answer_score': -1,
+ }),
+ ]
+ }), (0, 0, {
+ 'title': 'What do you think about our prices (not rated)?',
+ 'sequence': 10,
+ 'question_type': 'simple_choice',
+ 'constr_mandatory': True,
+ 'suggested_answer_ids': [
+ (0, 0, {
+ 'value': 'Very underpriced',
+ 'sequence': 1,
+ }), (0, 0, {
+ 'value': 'Underpriced',
+ 'sequence': 2,
+ }), (0, 0, {
+ 'value': 'Correctly priced',
+ 'sequence': 3,
+ }), (0, 0, {
+ 'value': 'A little bit overpriced',
+ 'sequence': 4,
+ }), (0, 0, {
+ 'value': 'A lot overpriced',
+ 'sequence': 5,
+ })
+ ]
+ }), (0, 0, {
+ 'title': 'Policies',
+ 'sequence': 11,
+ 'is_page': True,
+ 'question_type': False,
+ 'description': """&lt;p&gt;Test your knowledge of our policies.&lt;/p&gt;""",
+ }), (0, 0, {
+ 'title': 'How many days is our money-back guarantee?',
+ 'sequence': 12,
+ 'question_type': 'numerical_box',
+ 'constr_mandatory': True,
+ 'is_scored_question': True,
+ 'answer_numerical_box': 30,
+ 'answer_score': 1,
+ }), (0, 0, {
+ 'title': 'If a customer purchases a product on 6 January 2020, what is the latest day we expect to ship it?',
+ 'sequence': 13,
+ 'question_type': 'date',
+ 'is_scored_question': True,
+ 'answer_date': '2020-01-08',
+ 'answer_score': 1,
+ }), (0, 0, {
+ 'title': 'If a customer purchases a 1 year warranty on 6 January 2020, when do we expect the warranty to expire?',
+ 'sequence': 14,
+ 'question_type': 'datetime',
+ 'is_scored_question': True,
+ 'answer_datetime': '2021-01-07 00:00:01',
+ 'answer_score': 1,
+ }), (0, 0, {
+ 'title': 'What day to you think is best for us to start having an annual sale (not rated)?',
+ 'sequence': 15,
+ 'question_type': 'date',
+ }), (0, 0, {
+ 'title': 'What day and time do you think most customers are most likely to call customer service (not rated)?',
+ 'sequence': 16,
+ 'question_type': 'datetime',
+ }), (0, 0, {
+ 'title': 'How many chairs do you think we should aim to sell in a year (not rated)?',
+ 'sequence': 17,
+ 'question_type': 'numerical_box',
+ })
+ ]
+ })
+
+ def test_04_certification_success_tour(self):
+ access_token = self.survey_certification.access_token
+ self.start_tour("/survey/start/%s" % access_token, 'test_certification_success', login="demo")
+
+ def test_05_certification_failure_tour(self):
+ access_token = self.survey_certification.access_token
+ self.start_tour("/survey/start/%s" % access_token, 'test_certification_failure', login="demo")
diff --git a/addons/survey/tests/test_survey_ui_feedback.py b/addons/survey/tests/test_survey_ui_feedback.py
new file mode 100644
index 00000000..90f86a48
--- /dev/null
+++ b/addons/survey/tests/test_survey_ui_feedback.py
@@ -0,0 +1,169 @@
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+import odoo.tests
+
+from odoo.addons.base.tests.common import HttpCaseWithUserDemo
+
+
+@odoo.tests.common.tagged('post_install', '-at_install')
+class TestUiFeedback(HttpCaseWithUserDemo):
+
+ def setUp(self):
+ super(TestUiFeedback, self).setUp()
+ self.survey_feedback = self.env['survey.survey'].create({
+ 'title': 'User Feedback Form',
+ 'access_token': 'b137640d-14d4-4748-9ef6-344caaaaaae',
+ 'state': 'open',
+ 'access_mode': 'public',
+ 'users_can_go_back': True,
+ 'questions_layout': 'page_per_section',
+ 'description': """<p>This survey allows you to give a feedback about your experience with our eCommerce solution.
+            Filling it helps us improving your experience.</p>""",
+ 'question_and_page_ids': [
+ (0, 0, {
+ 'title': 'General information',
+ 'sequence': 1,
+ 'question_type': False,
+ 'is_page': True,
+ 'description': """<p>This section is about general information about you. Answering them helps qualifying your answers.</p>""",
+ }), (0, 0, {
+ 'title': 'Where do you live ?',
+ 'sequence': 2,
+ 'question_type': 'char_box',
+ 'constr_mandatory': False,
+ }), (0, 0, {
+ 'title': 'When is your date of birth ?',
+ 'sequence': 3,
+ 'question_type': 'date',
+ 'description': False,
+ }), (0, 0, {
+ 'title': 'How frequently do you buy products online ?',
+ 'sequence': 4,
+ 'question_type': 'simple_choice',
+ 'comments_allowed': True,
+ 'comment_count_as_answer': True,
+ 'constr_mandatory': True,
+ 'suggested_answer_ids': [
+ (0, 0, {
+ 'value': 'Once a day',
+ 'sequence': 1,
+ }), (0, 0, {
+ 'value': 'Once a week',
+ 'sequence': 2,
+ }), (0, 0, {
+ 'value': 'Once a month',
+ 'sequence': 3,
+ }), (0, 0, {
+ 'value': 'Once a year',
+ 'sequence': 4,
+ }), (0, 0, {
+ 'value': 'Other (answer in comment)',
+ 'sequence': 5,
+ })],
+ }), (0, 0, {
+ 'title': 'How many times did you order products on our website ?',
+ 'sequence': 5,
+ 'question_type': 'numerical_box',
+ 'constr_mandatory': True,
+ }), (0, 0, {
+ 'title': 'About our ecommerce',
+ 'sequence': 6,
+ 'is_page': True,
+ 'question_type': False,
+ 'description': """<p>This section is about our eCommerce experience itself.</p>""",
+ }), (0, 0, {
+ 'title': 'Which of the following words would you use to describe our products ?',
+ 'sequence': 7,
+ 'question_type': 'multiple_choice',
+ 'constr_mandatory': True,
+ 'comments_allowed': True,
+ 'comment_count_as_answer': False,
+ 'suggested_answer_ids': [
+ (0, 0, {
+ 'value': 'High quality',
+ 'sequence': 1,
+ }), (0, 0, {
+ 'value': 'Useful',
+ 'sequence': 2,
+ }), (0, 0, {
+ 'value': 'Unique',
+ 'sequence': 3,
+ }), (0, 0, {
+ 'value': 'Good value for money',
+ 'sequence': 4,
+ }), (0, 0, {
+ 'value': 'Overpriced',
+ 'sequence': 5,
+ }), (0, 0, {
+ 'value': 'Impractical',
+ 'sequence': 6,
+ }), (0, 0, {
+ 'value': 'Ineffective',
+ 'sequence': 7,
+ }), (0, 0, {
+ 'value': 'Poor quality',
+ 'sequence': 8,
+ }), (0, 0, {
+ 'value': 'Other',
+ 'sequence': 9,
+ })],
+ }), (0, 0, {
+ 'title': 'What do your think about our new eCommerce ?',
+ 'sequence': 8,
+ 'question_type': 'matrix',
+ 'matrix_subtype': 'multiple',
+ 'constr_mandatory': True,
+ 'suggested_answer_ids': [(0, 0, {
+ 'value': 'Totally disagree',
+ 'sequence': 1
+ }), (0, 0, {
+ 'value': 'Disagree',
+ 'sequence': 2,
+ }), (0, 0, {
+ 'value': 'Agree',
+ 'sequence': 3,
+ }), (0, 0, {
+ 'value': 'Totally agree',
+ 'sequence': 4,
+ })],
+ 'matrix_row_ids': [(0, 0, {
+ 'value': 'The new layout and design is fresh and up-to-date',
+ 'sequence': 1,
+ }), (0, 0, {
+ 'value': 'It is easy to find the product that I want',
+ 'sequence': 2,
+ }), (0, 0, {
+ 'value': 'The tool to compare the products is useful to make a choice',
+ 'sequence': 3,
+ }), (0, 0, {
+ 'value': 'The checkout process is clear and secure',
+ 'sequence': 4,
+ }), (0, 0, {
+ 'value': 'I have added products to my wishlist',
+ 'sequence': 5,
+ })],
+ }), (0, 0, {
+ 'title': 'Do you have any other comments, questions, or concerns ?',
+ 'sequence': 9,
+ 'question_type': 'text_box',
+ 'constr_mandatory': False,
+ })
+ ],
+ })
+
+
+ def test_01_admin_survey_tour(self):
+ access_token = self.survey_feedback.access_token
+ self.start_tour("/survey/start/%s" % access_token, 'test_survey', login="admin")
+
+ def test_02_demo_survey_tour(self):
+ access_token = self.survey_feedback.access_token
+ self.start_tour("/survey/start/%s" % access_token, 'test_survey', login="demo")
+
+ def test_03_public_survey_tour(self):
+ access_token = self.survey_feedback.access_token
+ self.start_tour("/survey/start/%s" % access_token, 'test_survey')
+
+ def test_06_survey_prefill(self):
+ access_token = self.survey_feedback.access_token
+ self.start_tour("/survey/start/%s" % access_token, 'test_survey_prefill')
diff --git a/addons/survey/tests/test_survey_ui_session.py b/addons/survey/tests/test_survey_ui_session.py
new file mode 100644
index 00000000..53dd81ea
--- /dev/null
+++ b/addons/survey/tests/test_survey_ui_session.py
@@ -0,0 +1,210 @@
+# -*- coding: utf-8 -*-
+# Part of Odoo. See LICENSE file for full copyright and licensing details.
+
+from dateutil.relativedelta import relativedelta
+
+from odoo import fields
+from odoo.tests.common import tagged, HttpCase
+
+
@tagged('post_install', '-at_install')
class TestUiSession(HttpCase):
    def test_admin_survey_session(self):
        """ This method tests a full 'survey session' flow.

        Break down of different steps:
        - Create the test data:
          a scored survey, a nickname question, "simple" type questions
          (text, date, datetime), a regular simple choice, a scored simple
          choice, and a scored AND timed multiple choice
        - Create a new survey session
        - Register 3 attendees to it
        - Open the session manager to check that our attendees are accounted for
        - Create some answers to our survey questions.
        - Then run the 'big' manage session tour (see JS doc for details)
        - And finally check that our session and attendees inputs are correctly closed. """

        # =======================
        # CREATE SURVEY TEST DATA
        # =======================

        # Captured before the tours run; used below to back-date the session
        # start time so that the answers created in this test are counted.
        test_start_time = fields.Datetime.now()

        survey_session = self.env['survey.survey'].create({
            'title': 'User Session Survey',
            # NOTE(review): this token is one hex char short of a canonical
            # UUID (last group has 11 chars instead of 12) — presumably
            # harmless since it is only used as an opaque URL token; confirm.
            'access_token': 'b137640d-14d4-4748-9ef6-344caaaaafe',
            'state': 'open',
            'access_mode': 'public',
            'users_can_go_back': False,
            'questions_layout': 'page_per_question',
            'scoring_type': 'scoring_without_answers'
        })

        # First question doubles as the attendee nickname (save_as_nickname),
        # checked further down via answer.nickname.
        nickname_question = self.env['survey.question'].create({
            'survey_id': survey_session.id,
            'title': 'Nickname',
            'save_as_nickname': True,
            'sequence': 1,
            'question_type': 'char_box',
        })
        text_question = self.env['survey.question'].create({
            'survey_id': survey_session.id,
            'title': 'Text Question',
            'sequence': 2,
            'question_type': 'char_box',
        })
        date_question = self.env['survey.question'].create({
            'survey_id': survey_session.id,
            'title': 'Date Question',
            'sequence': 3,
            'question_type': 'date',
        })
        datetime_question = self.env['survey.question'].create({
            'survey_id': survey_session.id,
            'title': 'Datetime Question',
            'sequence': 4,
            'question_type': 'datetime',
        })
        # Unscored simple choice: answers created standalone then linked with
        # (4, id) "link" commands on the question below.
        simple_choice_answer_1 = self.env['survey.question.answer'].create({
            'value': 'First'
        })
        simple_choice_answer_2 = self.env['survey.question.answer'].create({
            'value': 'Second'
        })
        simple_choice_answer_3 = self.env['survey.question.answer'].create({
            'value': 'Third'
        })
        simple_choice_question = self.env['survey.question'].create({
            'survey_id': survey_session.id,
            'title': 'Regular Simple Choice',
            'sequence': 5,
            'question_type': 'simple_choice',
            'suggested_answer_ids': [
                (4, simple_choice_answer_1.id),
                (4, simple_choice_answer_2.id),
                (4, simple_choice_answer_3.id)],
        })
        # Scored simple choice: one correct answer worth 30 points.
        scored_choice_answer_1 = self.env['survey.question.answer'].create({
            'value': 'Correct',
            'is_correct': True,
            'answer_score': 30
        })
        scored_choice_answer_2 = self.env['survey.question.answer'].create({
            'value': 'Incorrect 1'
        })
        scored_choice_answer_3 = self.env['survey.question.answer'].create({
            'value': 'Incorrect 2'
        })
        scored_choice_answer_4 = self.env['survey.question.answer'].create({
            'value': 'Incorrect 3'
        })
        scored_choice_question = self.env['survey.question'].create({
            'survey_id': survey_session.id,
            'title': 'Scored Simple Choice',
            'sequence': 6,
            'question_type': 'simple_choice',
            'suggested_answer_ids': [
                (4, scored_choice_answer_1.id),
                (4, scored_choice_answer_2.id),
                (4, scored_choice_answer_3.id),
                (4, scored_choice_answer_4.id)],
        })
        # Timed, scored multiple choice: two correct answers with different
        # scores and one penalizing answer.
        timed_scored_choice_answer_1 = self.env['survey.question.answer'].create({
            'value': 'Correct',
            'is_correct': True,
            'answer_score': 30
        })
        timed_scored_choice_answer_2 = self.env['survey.question.answer'].create({
            'value': 'Also correct but less points',
            'is_correct': True,
            'answer_score': 10
        })
        timed_scored_choice_answer_3 = self.env['survey.question.answer'].create({
            'value': 'Incorrect',
            'answer_score': -40
        })
        timed_scored_choice_question = self.env['survey.question'].create({
            'survey_id': survey_session.id,
            'title': 'Timed Scored Multiple Choice',
            # NOTE(review): sequence 6 duplicates the scored simple choice
            # above; ordering between the two then falls back on a secondary
            # criterion (presumably id / creation order). Confirm the JS tours
            # rely on creation order, or bump this to 7.
            'sequence': 6,
            'question_type': 'multiple_choice',
            'is_time_limited': True,
            'time_limit': 1,
            'suggested_answer_ids': [
                (4, timed_scored_choice_answer_1.id),
                (4, timed_scored_choice_answer_2.id),
                (4, timed_scored_choice_answer_3.id)],
        })

        # =======================
        # PART 1 : CREATE SESSION
        # =======================

        self.start_tour('/web', 'test_survey_session_create_tour', login='admin')

        # tricky part: we only take into account answers created after the session_start_time
        # the create_date of the answers we just saved is set to the beginning of the test.
        # but the session_start_time is set after that.
        # So we cheat on the session start date to be able to count answers properly.
        survey_session.write({'session_start_time': test_start_time - relativedelta(minutes=10)})

        attendee_1 = survey_session._create_answer()
        attendee_2 = survey_session._create_answer()
        attendee_3 = survey_session._create_answer()
        all_attendees = [attendee_1, attendee_2, attendee_3]

        self.assertEqual('ready', survey_session.session_state)
        self.assertTrue(all(attendee.is_session_answer for attendee in all_attendees),
            "Created answers should be within the session.")
        self.assertTrue(all(attendee.state == 'new' for attendee in all_attendees),
            "Created answers should be in the 'new' state.")

        # =========================================
        # PART 2 : OPEN SESSION AND CHECK ATTENDEES
        # =========================================

        self.start_tour('/web', 'test_survey_session_start_tour', login='admin')

        self.assertEqual('in_progress', survey_session.session_state)
        self.assertTrue(bool(survey_session.session_start_time))

        # ========================================
        # PART 3 : CREATE ANSWERS & MANAGE SESSION
        # ========================================

        # create a few answers beforehand to avoid having to back and forth too
        # many times between the tours and the python test

        # Nickname answers are mirrored onto the user_input's 'nickname' field.
        attendee_1.save_lines(nickname_question, 'xxxTheBestxxx')
        attendee_2.save_lines(nickname_question, 'azerty')
        attendee_3.save_lines(nickname_question, 'nicktalope')
        self.assertEqual('xxxTheBestxxx', attendee_1.nickname)
        self.assertEqual('azerty', attendee_2.nickname)
        self.assertEqual('nicktalope', attendee_3.nickname)

        # Remaining answers: text/date/datetime take raw values, choice
        # questions take suggested-answer ids (a list for multiple_choice).
        # Note that not every attendee answers every question (e.g. attendee_1
        # skips the datetime question, attendee_3 skips the date question).
        attendee_1.save_lines(text_question, 'Attendee 1 is the best')
        attendee_2.save_lines(text_question, 'Attendee 2 rulez')
        attendee_3.save_lines(text_question, 'Attendee 3 will crush you')
        attendee_1.save_lines(date_question, '2010-10-10')
        attendee_2.save_lines(date_question, '2011-11-11')
        attendee_2.save_lines(datetime_question, '2010-10-10 10:00:00')
        attendee_3.save_lines(datetime_question, '2011-11-11 15:55:55')
        attendee_1.save_lines(simple_choice_question, simple_choice_answer_1.id)
        attendee_2.save_lines(simple_choice_question, simple_choice_answer_1.id)
        attendee_3.save_lines(simple_choice_question, simple_choice_answer_2.id)
        attendee_1.save_lines(scored_choice_question, scored_choice_answer_1.id)
        attendee_2.save_lines(scored_choice_question, scored_choice_answer_2.id)
        attendee_3.save_lines(scored_choice_question, scored_choice_answer_3.id)
        attendee_1.save_lines(timed_scored_choice_question,
            [timed_scored_choice_answer_1.id, timed_scored_choice_answer_3.id])
        attendee_2.save_lines(timed_scored_choice_question,
            [timed_scored_choice_answer_1.id, timed_scored_choice_answer_2.id])
        attendee_3.save_lines(timed_scored_choice_question,
            [timed_scored_choice_answer_2.id])

        self.start_tour('/web', 'test_survey_session_manage_tour', login='admin')

        # Closing the session clears session_state and marks every attendee
        # input as done.
        self.assertFalse(bool(survey_session.session_state))
        self.assertTrue(all(answer.state == 'done' for answer in all_attendees))